diff --git a/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e6f6227fa07ff4e0ba95d4f559ad9c50261c1e22 --- /dev/null +++ b/ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7a8eb4ef8b3a4658ebc534a4b6dc5432f9eebd027d6bb12ceda7668ffdab87d +size 16778396 diff --git a/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..01620085ca0ea77b43f398d4010c9a0afe6c5c6e --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5ebc1128a44ae3c445a0a95f50ffb143ebf08760b3a98925f758af7b9ebea88 +size 33555612 diff --git a/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..eebff04db07aee95b9587d5a1e4b3da3684369c4 --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5345922f7704d6731c97aa71da69444eeff5bfce98eb2a060ff29452a7c49fab +size 33555627 diff --git a/ckpts/universal/global_step120/zero/9.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/9.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5be511eddaa953199a7f5a69b0f889c02e7b65cb --- /dev/null +++ b/ckpts/universal/global_step120/zero/9.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ef841dd22ee9d4328da498d9ab5d68f09462ad5ea07251cb984e42c4e2e5c90 +size 16778317 diff --git a/venv/lib/python3.10/site-packages/torch/testing/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58b8f828e3546b0e311c0f9f4738314f38e9a195 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/__init__.py @@ -0,0 +1,3 @@ +from torch._C import FileCheck as FileCheck +from ._comparison import assert_allclose, assert_close as assert_close +from ._creation import make_tensor as make_tensor diff --git a/venv/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7021fc7f6a51e9b66330e65719733b82e99b814 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14ed90be15d829b9c349c9021f52e52f259e69c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7c3f0c04ce528119107e2a209c0ccf65150346b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_comparison.py b/venv/lib/python3.10/site-packages/torch/testing/_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..42ef9be1b129b718ec6a85320ef77c387a700710 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_comparison.py @@ -0,0 +1,1575 @@ +import abc +import cmath +import collections.abc +import contextlib +import warnings +from typing import ( + Any, + Callable, + Collection, + Dict, + List, + NoReturn, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +import torch + +try: + import numpy as np + + NUMPY_AVAILABLE = True +except ModuleNotFoundError: + NUMPY_AVAILABLE = False + + +class ErrorMeta(Exception): + """Internal testing exception that makes that carries error metadata.""" + + def __init__( + self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = () + ) -> None: + super().__init__( + "If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. " + "If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` " + "for user facing errors." + ) + self.type = type + self.msg = msg + self.id = id + + def to_error( + self, msg: Optional[Union[str, Callable[[str], str]]] = None + ) -> Exception: + if not isinstance(msg, str): + generated_msg = self.msg + if self.id: + generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}" + + msg = msg(generated_msg) if callable(msg) else generated_msg + + return self.type(msg) + + +# Some analysis of tolerance by logging tests from test_torch.py can be found in +# https://github.com/pytorch/pytorch/pull/32538. +# {dtype: (rtol, atol)} +_DTYPE_PRECISIONS = { + torch.float16: (0.001, 1e-5), + torch.bfloat16: (0.016, 1e-5), + torch.float32: (1.3e-6, 1e-5), + torch.float64: (1e-7, 1e-7), + torch.complex32: (0.001, 1e-5), + torch.complex64: (1.3e-6, 1e-5), + torch.complex128: (1e-7, 1e-7), +} +# The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in +# their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values` +_DTYPE_PRECISIONS.update( + dict.fromkeys( + (torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32), + _DTYPE_PRECISIONS[torch.float32], + ) +) + + +def default_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None, +) -> Tuple[float, float]: + """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype. + + See :func:`assert_close` for a table of the default tolerance for each dtype. + + Returns: + (Tuple[float, float]): Loosest tolerances of all input dtypes. 
+ """ + dtypes = [] + for input in inputs: + if isinstance(input, torch.Tensor): + dtypes.append(input.dtype) + elif isinstance(input, torch.dtype): + dtypes.append(input) + else: + raise TypeError( + f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead." + ) + dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS + rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes]) + return max(rtols), max(atols) + + +def get_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + rtol: Optional[float], + atol: Optional[float], + id: Tuple[Any, ...] = (), +) -> Tuple[float, float]: + """Gets absolute and relative to be used for numeric comparisons. + + If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of + :func:`default_tolerances` is used. + + Raises: + ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified. + + Returns: + (Tuple[float, float]): Valid absolute and relative tolerances. + """ + if (rtol is None) ^ (atol is None): + # We require both tolerance to be omitted or specified, because specifying only one might lead to surprising + # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0. + raise ErrorMeta( + ValueError, + f"Both 'rtol' and 'atol' must be either specified or omitted, " + f"but got no {'rtol' if rtol is None else 'atol'}.", + id=id, + ) + elif rtol is not None and atol is not None: + return rtol, atol + else: + return default_tolerances(*inputs) + + +def _make_mismatch_msg( + *, + default_identifier: str, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + extra: Optional[str] = None, + abs_diff: float, + abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + atol: float, + rel_diff: float, + rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + rtol: float, +) -> str: + """Makes a mismatch error message for numeric values. + + Args: + default_identifier (str): Default description of the compared values, e.g. "Tensor-likes". + identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides + ``default_identifier``. Can be passed as callable in which case it will be called with + ``default_identifier`` to create the description at runtime. + extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics. + abs_diff (float): Absolute difference. + abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference. + atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are + ``> 0``. + rel_diff (float): Relative difference. + rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference. + rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are + ``> 0``. 
+ """ + equality = rtol == 0 and atol == 0 + + def make_diff_msg( + *, + type: str, + diff: float, + idx: Optional[Union[int, Tuple[int, ...]]], + tol: float, + ) -> str: + if idx is None: + msg = f"{type.title()} difference: {diff}" + else: + msg = f"Greatest {type} difference: {diff} at index {idx}" + if not equality: + msg += f" (up to {tol} allowed)" + return msg + "\n" + + if identifier is None: + identifier = default_identifier + elif callable(identifier): + identifier = identifier(default_identifier) + + msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n" + + if extra: + msg += f"{extra.strip()}\n" + + msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol) + msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol) + + return msg.strip() + + +def make_scalar_mismatch_msg( + actual: Union[bool, int, float, complex], + expected: Union[bool, int, float, complex], + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, Callable[[str], str]]] = None, +) -> str: + """Makes a mismatch error message for scalars. + + Args: + actual (Union[bool, int, float, complex]): Actual scalar. + expected (Union[bool, int, float, complex]): Expected scalar. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Scalars". + """ + abs_diff = abs(actual - expected) + rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected) + return _make_mismatch_msg( + default_identifier="Scalars", + identifier=identifier, + extra=f"Expected {expected} but got {actual}.", + abs_diff=abs_diff, + atol=atol, + rel_diff=rel_diff, + rtol=rtol, + ) + + +def make_tensor_mismatch_msg( + actual: torch.Tensor, + expected: torch.Tensor, + matches: torch.Tensor, + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, Callable[[str], str]]] = None, +): + """Makes a mismatch error message for tensors. + + Args: + actual (torch.Tensor): Actual tensor. + expected (torch.Tensor): Expected tensor. + matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the + location of matches. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Tensor-likes". 
+ """ + + def unravel_flat_index(flat_index: int) -> Tuple[int, ...]: + if not matches.shape: + return () + + inverse_index = [] + for size in matches.shape[::-1]: + div, mod = divmod(flat_index, size) + flat_index = div + inverse_index.append(mod) + + return tuple(inverse_index[::-1]) + + number_of_elements = matches.numel() + total_mismatches = number_of_elements - int(torch.sum(matches)) + extra = ( + f"Mismatched elements: {total_mismatches} / {number_of_elements} " + f"({total_mismatches / number_of_elements:.1%})" + ) + + actual_flat = actual.flatten() + expected_flat = expected.flatten() + matches_flat = matches.flatten() + + if not actual.dtype.is_floating_point and not actual.dtype.is_complex: + # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid + # overflow + actual_flat = actual_flat.to(torch.int64) + expected_flat = expected_flat.to(torch.int64) + + abs_diff = torch.abs(actual_flat - expected_flat) + # Ensure that only mismatches are used for the max_abs_diff computation + abs_diff[matches_flat] = 0 + max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0) + + rel_diff = abs_diff / torch.abs(expected_flat) + # Ensure that only mismatches are used for the max_rel_diff computation + rel_diff[matches_flat] = 0 + max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0) + return _make_mismatch_msg( + default_identifier="Tensor-likes", + identifier=identifier, + extra=extra, + abs_diff=max_abs_diff.item(), + abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)), + atol=atol, + rel_diff=max_rel_diff.item(), + rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)), + rtol=rtol, + ) + + +class UnsupportedInputs(Exception): # noqa: B903 + """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs.""" + + +class Pair(abc.ABC): + """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`. + + Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison. + + Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the + super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to + handle the inputs and the next pair type will be tried. + + All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can + be used to automatically handle overwriting the message with a user supplied one and id handling. + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] = (), + **unknown_parameters: Any, + ) -> None: + self.actual = actual + self.expected = expected + self.id = id + self._unknown_parameters = unknown_parameters + + @staticmethod + def _inputs_not_supported() -> NoReturn: + raise UnsupportedInputs() + + @staticmethod + def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]): + """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise.""" + if not all(isinstance(input, cls) for input in inputs): + Pair._inputs_not_supported() + + def _fail( + self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = () + ) -> NoReturn: + """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id. + + .. warning:: + + If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id`` + explicitly. 
+ """ + raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id) + + @abc.abstractmethod + def compare(self) -> None: + """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch.""" + + def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]: + """Returns extra information that will be included in the representation. + + Should be overwritten by all subclasses that use additional options. The representation of the object will only + be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of + key-value-pairs or attribute names. + """ + return [] + + def __repr__(self) -> str: + head = f"{type(self).__name__}(" + tail = ")" + body = [ + f" {name}={value!s}," + for name, value in [ + ("id", self.id), + ("actual", self.actual), + ("expected", self.expected), + *[ + (extra, getattr(self, extra)) if isinstance(extra, str) else extra + for extra in self.extra_repr() + ], + ] + ] + return "\n".join((head, *body, *tail)) + + +class ObjectPair(Pair): + """Pair for any type of inputs that will be compared with the `==` operator. + + .. note:: + + Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs + couldn't handle the inputs. + + """ + + def compare(self) -> None: + try: + equal = self.actual == self.expected + except Exception as error: + # We are not using `self._raise_error_meta` here since we need the exception chaining + raise ErrorMeta( + ValueError, + f"{self.actual} == {self.expected} failed with:\n{error}.", + id=self.id, + ) from error + + if not equal: + self._fail(AssertionError, f"{self.actual} != {self.expected}") + + +class NonePair(Pair): + """Pair for ``None`` inputs.""" + + def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None: + if not (actual is None or expected is None): + self._inputs_not_supported() + + super().__init__(actual, expected, **other_parameters) + + def compare(self) -> None: + if not (self.actual is None and self.expected is None): + self._fail( + AssertionError, f"None mismatch: {self.actual} is not {self.expected}" + ) + + +class BooleanPair(Pair): + """Pair for :class:`bool` inputs. + + .. note:: + + If ``numpy`` is available, also handles :class:`numpy.bool_` inputs. + + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...], + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, **other_parameters) + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls: List[Type] = [bool] + if NUMPY_AVAILABLE: + cls.append(np.bool_) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] 
+ ) -> Tuple[bool, bool]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_bool(bool_like, id=id) for bool_like in (actual, expected) + ) + return actual, expected + + def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool: + if isinstance(bool_like, bool): + return bool_like + elif isinstance(bool_like, np.bool_): + return bool_like.item() + else: + raise ErrorMeta( + TypeError, f"Unknown boolean type {type(bool_like)}.", id=id + ) + + def compare(self) -> None: + if self.actual is not self.expected: + self._fail( + AssertionError, + f"Booleans mismatch: {self.actual} is not {self.expected}", + ) + + +class NumberPair(Pair): + """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs. + + .. note:: + + If ``numpy`` is available, also handles :class:`numpy.number` inputs. + + Kwargs: + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. + check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``. + + The following table displays correspondence between Python number type and the ``torch.dtype``'s. See + :func:`assert_close` for the corresponding tolerances. + + +------------------+-------------------------------+ + | ``type`` | corresponding ``torch.dtype`` | + +==================+===============================+ + | :class:`int` | :attr:`~torch.int64` | + +------------------+-------------------------------+ + | :class:`float` | :attr:`~torch.float64` | + +------------------+-------------------------------+ + | :class:`complex` | :attr:`~torch.complex64` | + +------------------+-------------------------------+ + """ + + _TYPE_TO_DTYPE = { + int: torch.int64, + float: torch.float64, + complex: torch.complex128, + } + _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys()) + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] = (), + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_dtype: bool = False, + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, id=id, **other_parameters) + + self.rtol, self.atol = get_tolerances( + *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)], + rtol=rtol, + atol=atol, + id=id, + ) + self.equal_nan = equal_nan + self.check_dtype = check_dtype + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls = list(self._NUMBER_TYPES) + if NUMPY_AVAILABLE: + cls.append(np.number) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] + ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_number(number_like, id=id) for number_like in (actual, expected) + ) + return actual, expected + + def _to_number( + self, number_like: Any, *, id: Tuple[Any, ...] 
+ ) -> Union[int, float, complex]: + if NUMPY_AVAILABLE and isinstance(number_like, np.number): + return number_like.item() + elif isinstance(number_like, self._NUMBER_TYPES): + return number_like # type: ignore[return-value] + else: + raise ErrorMeta( + TypeError, f"Unknown number type {type(number_like)}.", id=id + ) + + def compare(self) -> None: + if self.check_dtype and type(self.actual) is not type(self.expected): + self._fail( + AssertionError, + f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.", + ) + + if self.actual == self.expected: + return + + if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected): + return + + abs_diff = abs(self.actual - self.expected) + tolerance = self.atol + self.rtol * abs(self.expected) + + if cmath.isfinite(abs_diff) and abs_diff <= tolerance: + return + + self._fail( + AssertionError, + make_scalar_mismatch_msg( + self.actual, self.expected, rtol=self.rtol, atol=self.atol + ), + ) + + def extra_repr(self) -> Sequence[str]: + return ( + "rtol", + "atol", + "equal_nan", + "check_dtype", + ) + + +class TensorLikePair(Pair): + """Pair for :class:`torch.Tensor`-like inputs. + + Kwargs: + allow_subclasses (bool): + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the type are selected. See :func:assert_close: for details. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the type are selected. See :func:assert_close: for details. + equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. + check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same + :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different + :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. + check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this + check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`) before being compared. + check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this + check is disabled, tensors with different ``layout``'s are converted to strided tensors before being + compared. + check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] 
= (), + allow_subclasses: bool = True, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_device: bool = True, + check_dtype: bool = True, + check_layout: bool = True, + check_stride: bool = False, + **other_parameters: Any, + ): + actual, expected = self._process_inputs( + actual, expected, id=id, allow_subclasses=allow_subclasses + ) + super().__init__(actual, expected, id=id, **other_parameters) + + self.rtol, self.atol = get_tolerances( + actual, expected, rtol=rtol, atol=atol, id=self.id + ) + self.equal_nan = equal_nan + self.check_device = check_device + self.check_dtype = check_dtype + self.check_layout = check_layout + self.check_stride = check_stride + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool + ) -> Tuple[torch.Tensor, torch.Tensor]: + directly_related = isinstance(actual, type(expected)) or isinstance( + expected, type(actual) + ) + if not directly_related: + self._inputs_not_supported() + + if not allow_subclasses and type(actual) is not type(expected): + self._inputs_not_supported() + + actual, expected = (self._to_tensor(input) for input in (actual, expected)) + for tensor in (actual, expected): + self._check_supported(tensor, id=id) + return actual, expected + + def _to_tensor(self, tensor_like: Any) -> torch.Tensor: + if isinstance(tensor_like, torch.Tensor): + return tensor_like + + try: + return torch.as_tensor(tensor_like) + except Exception: + self._inputs_not_supported() + + def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None: + if tensor.layout not in { + torch.strided, + torch.sparse_coo, + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + raise ErrorMeta( + ValueError, f"Unsupported tensor layout {tensor.layout}", id=id + ) + + def compare(self) -> None: + actual, expected = self.actual, self.expected + + self._compare_attributes(actual, expected) + if any(input.device.type == "meta" for input in (actual, expected)): + return + + actual, expected = self._equalize_attributes(actual, expected) + self._compare_values(actual, expected) + + def _compare_attributes( + self, + actual: torch.Tensor, + expected: torch.Tensor, + ) -> None: + """Checks if the attributes of two tensors match. + + Always checks + + - the :attr:`~torch.Tensor.shape`, + - whether both inputs are quantized or not, + - and if they use the same quantization scheme. + + Checks for + + - :attr:`~torch.Tensor.layout`, + - :meth:`~torch.Tensor.stride`, + - :attr:`~torch.Tensor.device`, and + - :attr:`~torch.Tensor.dtype` + + are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair. 
+ """ + + def raise_mismatch_error( + attribute_name: str, actual_value: Any, expected_value: Any + ) -> NoReturn: + self._fail( + AssertionError, + f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.", + ) + + if actual.shape != expected.shape: + raise_mismatch_error("shape", actual.shape, expected.shape) + + if actual.is_quantized != expected.is_quantized: + raise_mismatch_error( + "is_quantized", actual.is_quantized, expected.is_quantized + ) + elif actual.is_quantized and actual.qscheme() != expected.qscheme(): + raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme()) + + if actual.layout != expected.layout: + if self.check_layout: + raise_mismatch_error("layout", actual.layout, expected.layout) + elif ( + actual.layout == torch.strided + and self.check_stride + and actual.stride() != expected.stride() + ): + raise_mismatch_error("stride()", actual.stride(), expected.stride()) + + if self.check_device and actual.device != expected.device: + raise_mismatch_error("device", actual.device, expected.device) + + if self.check_dtype and actual.dtype != expected.dtype: + raise_mismatch_error("dtype", actual.dtype, expected.dtype) + + def _equalize_attributes( + self, actual: torch.Tensor, expected: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Equalizes some attributes of two tensors for value comparison. + + If ``actual`` and ``expected`` are ... + + - ... not on the same :attr:`~torch.Tensor.device`, they are moved CPU memory. + - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`). + - ... not of the same ``layout``, they are converted to strided tensors. + + Args: + actual (Tensor): Actual tensor. + expected (Tensor): Expected tensor. + + Returns: + (Tuple[Tensor, Tensor]): Equalized tensors. + """ + # The comparison logic uses operators currently not supported by the MPS backends. + # See https://github.com/pytorch/pytorch/issues/77144 for details. 
+ # TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend + if actual.is_mps or expected.is_mps: # type: ignore[attr-defined] + actual = actual.cpu() + expected = expected.cpu() + + if actual.device != expected.device: + actual = actual.cpu() + expected = expected.cpu() + + if actual.dtype != expected.dtype: + actual_dtype = actual.dtype + expected_dtype = expected.dtype + # For uint64, this is not sound in general, which is why promote_types doesn't + # allow it, but for easy testing, we're unlikely to get confused + # by large uint64 overflowing into negative int64 + if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]: + actual_dtype = torch.int64 + if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]: + expected_dtype = torch.int64 + dtype = torch.promote_types(actual_dtype, expected_dtype) + actual = actual.to(dtype) + expected = expected.to(dtype) + + if actual.layout != expected.layout: + # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided + actual = actual.to_dense() if actual.layout != torch.strided else actual + expected = ( + expected.to_dense() if expected.layout != torch.strided else expected + ) + + return actual, expected + + def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None: + if actual.is_quantized: + compare_fn = self._compare_quantized_values + elif actual.is_sparse: + compare_fn = self._compare_sparse_coo_values + elif actual.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + compare_fn = self._compare_sparse_compressed_values + else: + compare_fn = self._compare_regular_values_close + + compare_fn( + actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan + ) + + def _compare_quantized_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness. + + .. note:: + + A detailed discussion about why only the dequantized variant is checked for closeness rather than checking + the individual quantization parameters for closeness and the integer representation for equality can be + found in https://github.com/pytorch/pytorch/issues/68548. + """ + return self._compare_regular_values_close( + actual.dequantize(), + expected.dequantize(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}", + ) + + def _compare_sparse_coo_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares sparse COO tensors by comparing + + - the number of sparse dimensions, + - the number of non-zero elements (nnz) for equality, + - the indices for equality, and + - the values for closeness. 
+ """ + if actual.sparse_dim() != expected.sparse_dim(): + self._fail( + AssertionError, + ( + f"The number of sparse dimensions in sparse COO tensors does not match: " + f"{actual.sparse_dim()} != {expected.sparse_dim()}" + ), + ) + + if actual._nnz() != expected._nnz(): + self._fail( + AssertionError, + ( + f"The number of specified values in sparse COO tensors does not match: " + f"{actual._nnz()} != {expected._nnz()}" + ), + ) + + self._compare_regular_values_equal( + actual._indices(), + expected._indices(), + identifier="Sparse COO indices", + ) + self._compare_regular_values_close( + actual._values(), + expected._values(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier="Sparse COO values", + ) + + def _compare_sparse_compressed_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares sparse compressed tensors by comparing + + - the number of non-zero elements (nnz) for equality, + - the plain indices for equality, + - the compressed indices for equality, and + - the values for closeness. + """ + format_name, compressed_indices_method, plain_indices_method = { + torch.sparse_csr: ( + "CSR", + torch.Tensor.crow_indices, + torch.Tensor.col_indices, + ), + torch.sparse_csc: ( + "CSC", + torch.Tensor.ccol_indices, + torch.Tensor.row_indices, + ), + torch.sparse_bsr: ( + "BSR", + torch.Tensor.crow_indices, + torch.Tensor.col_indices, + ), + torch.sparse_bsc: ( + "BSC", + torch.Tensor.ccol_indices, + torch.Tensor.row_indices, + ), + }[actual.layout] + + if actual._nnz() != expected._nnz(): + self._fail( + AssertionError, + ( + f"The number of specified values in sparse {format_name} tensors does not match: " + f"{actual._nnz()} != {expected._nnz()}" + ), + ) + + # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formates can be `torch.int32` _or_ + # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it + # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will + # fail. 
+ actual_compressed_indices = compressed_indices_method(actual) + expected_compressed_indices = compressed_indices_method(expected) + indices_dtype = torch.promote_types( + actual_compressed_indices.dtype, expected_compressed_indices.dtype + ) + + self._compare_regular_values_equal( + actual_compressed_indices.to(indices_dtype), + expected_compressed_indices.to(indices_dtype), + identifier=f"Sparse {format_name} {compressed_indices_method.__name__}", + ) + self._compare_regular_values_equal( + plain_indices_method(actual).to(indices_dtype), + plain_indices_method(expected).to(indices_dtype), + identifier=f"Sparse {format_name} {plain_indices_method.__name__}", + ) + self._compare_regular_values_close( + actual.values(), + expected.values(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier=f"Sparse {format_name} values", + ) + + def _compare_regular_values_equal( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + equal_nan: bool = False, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + ) -> None: + """Checks if the values of two tensors are equal.""" + self._compare_regular_values_close( + actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier + ) + + def _compare_regular_values_close( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + ) -> None: + """Checks if the values of two tensors are close up to a desired tolerance.""" + matches = torch.isclose( + actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan + ) + if torch.all(matches): + return + + if actual.shape == torch.Size([]): + msg = make_scalar_mismatch_msg( + actual.item(), + expected.item(), + rtol=rtol, + atol=atol, + identifier=identifier, + ) + else: + msg = make_tensor_mismatch_msg( + actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier + ) + self._fail(AssertionError, msg) + + def extra_repr(self) -> Sequence[str]: + return ( + "rtol", + "atol", + "equal_nan", + "check_device", + "check_dtype", + "check_layout", + "check_stride", + ) + + +def originate_pairs( + actual: Any, + expected: Any, + *, + pair_types: Sequence[Type[Pair]], + sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,), + mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,), + id: Tuple[Any, ...] = (), + **options: Any, +) -> List[Pair]: + """Originates pairs from the individual inputs. + + ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or + :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs. + First successful pair will be used. + sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. + mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. + id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message. + **options (Any): Options passed to each pair during construction. + + Raises: + ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their + length does not match. 
+ ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of + keys do not match. + ErrorMeta: With :class`TypeError`, if no pair is able to handle the inputs. + ErrorMeta: With any expected exception that happens during the construction of a pair. + + Returns: + (List[Pair]): Originated pairs. + """ + # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop: + # "a" == "a"[0][0]... + if ( + isinstance(actual, sequence_types) + and not isinstance(actual, str) + and isinstance(expected, sequence_types) + and not isinstance(expected, str) + ): + actual_len = len(actual) + expected_len = len(expected) + if actual_len != expected_len: + raise ErrorMeta( + AssertionError, + f"The length of the sequences mismatch: {actual_len} != {expected_len}", + id=id, + ) + + pairs = [] + for idx in range(actual_len): + pairs.extend( + originate_pairs( + actual[idx], + expected[idx], + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + id=(*id, idx), + **options, + ) + ) + return pairs + + elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types): + actual_keys = set(actual.keys()) + expected_keys = set(expected.keys()) + if actual_keys != expected_keys: + missing_keys = expected_keys - actual_keys + additional_keys = actual_keys - expected_keys + raise ErrorMeta( + AssertionError, + ( + f"The keys of the mappings do not match:\n" + f"Missing keys in the actual mapping: {sorted(missing_keys)}\n" + f"Additional keys in the actual mapping: {sorted(additional_keys)}" + ), + id=id, + ) + + keys: Collection = actual_keys + # Since the origination aborts after the first failure, we try to be deterministic + with contextlib.suppress(Exception): + keys = sorted(keys) + + pairs = [] + for key in keys: + pairs.extend( + originate_pairs( + actual[key], + expected[key], + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + id=(*id, key), + **options, + ) + ) + return pairs + + else: + for pair_type in pair_types: + try: + return [pair_type(actual, expected, id=id, **options)] + # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the + # inputs. Thus, we try the next pair type. + except UnsupportedInputs: + continue + # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This + # is only in a separate branch, because the one below would also except it. + except ErrorMeta: + raise + # Raising any other exception during origination is unexpected and will give some extra information about + # what happened. If applicable, the exception should be expected in the future. + except Exception as error: + raise RuntimeError( + f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n" + f"{type(actual).__name__}(): {actual}\n\n" + f"and\n\n" + f"{type(expected).__name__}(): {expected}\n\n" + f"resulted in the unexpected exception above. " + f"If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. " + "If you are a developer and working on the comparison functions, " + "please except the previous error and raise an expressive `ErrorMeta` instead." 
+ ) from error + else: + raise ErrorMeta( + TypeError, + f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.", + id=id, + ) + + +def not_close_error_metas( + actual: Any, + expected: Any, + *, + pair_types: Sequence[Type[Pair]] = (ObjectPair,), + sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,), + mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,), + **options: Any, +) -> List[ErrorMeta]: + """Asserts that inputs are equal. + + ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or + :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the + inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`. + sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. + mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. + **options (Any): Options passed to each pair during construction. + """ + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + try: + pairs = originate_pairs( + actual, + expected, + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + **options, + ) + except ErrorMeta as error_meta: + # Explicitly raising from None to hide the internal traceback + raise error_meta.to_error() from None + + error_metas: List[ErrorMeta] = [] + for pair in pairs: + try: + pair.compare() + except ErrorMeta as error_meta: + error_metas.append(error_meta) + # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information + # about what happened. If applicable, the exception should be expected in the future. + except Exception as error: + raise RuntimeError( + f"Comparing\n\n" + f"{pair}\n\n" + f"resulted in the unexpected exception above. " + f"If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. " + "If you are a developer and working on the comparison functions, " + "please except the previous error and raise an expressive `ErrorMeta` instead." + ) from error + + # [ErrorMeta Cycles] + # ErrorMeta objects in this list capture + # tracebacks that refer to the frame of this function. + # The local variable `error_metas` refers to the error meta + # objects, creating a reference cycle. Frames in the traceback + # would not get freed until cycle collection, leaking cuda memory in tests. + # We break the cycle by removing the reference to the error_meta objects + # from this frame as it returns. + error_metas = [error_metas] + return error_metas.pop() + + +def assert_close( + actual: Any, + expected: Any, + *, + allow_subclasses: bool = True, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_device: bool = True, + check_dtype: bool = True, + check_layout: bool = True, + check_stride: bool = False, + msg: Optional[Union[str, Callable[[str], str]]] = None, +): + r"""Asserts that ``actual`` and ``expected`` are close. + + If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if + + .. 
math:: + + \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert + + Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are + only considered equal to each other if ``equal_nan`` is ``True``. + + In addition, they are only considered close if they have the same + + - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``), + - ``dtype`` (if ``check_dtype`` is ``True``), + - ``layout`` (if ``check_layout`` is ``True``), and + - stride (if ``check_stride`` is ``True``). + + If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed. + + If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are + checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR, + or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively, + are always checked for equality whereas the values are checked for closeness according to the definition above. + + If ``actual`` and ``expected`` are quantized, they are considered close if they have the same + :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the + definition above. + + ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which + :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types + have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s + or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all + their elements are considered close according to the above definition. + + .. note:: + + Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e. + :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus, + Python scalars of different types can be checked, but require ``check_dtype=False``. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types + are allowed. Otherwise type equality is required. + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. + equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal. + check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same + :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different + :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. + check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this + check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`) before being compared. 
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this + check is disabled, tensors with different ``layout``'s are converted to strided tensors before being + compared. + check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. + msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during + the comparison. Can also passed as callable in which case it will be called with the generated message and + should return the new message. + + Raises: + ValueError: If no :class:`torch.Tensor` can be constructed from an input. + ValueError: If only ``rtol`` or ``atol`` is specified. + AssertionError: If corresponding inputs are not Python scalars and are not directly related. + AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have + different types. + AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match. + AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match. + AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`. + AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same + :attr:`~torch.Tensor.layout`. + AssertionError: If only one of corresponding tensors is quantized. + AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s. + AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same + :attr:`~torch.Tensor.device`. + AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``. + AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride. + AssertionError: If the values of corresponding tensors are not close according to the definition above. + + The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching + ``dtype``'s, the maximum of both tolerances is used. 
+ + +---------------------------+------------+----------+ + | ``dtype`` | ``rtol`` | ``atol`` | + +===========================+============+==========+ + | :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | other | ``0.0`` | ``0.0`` | + +---------------------------+------------+----------+ + + .. note:: + + :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged + to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might + define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default: + + >>> import functools + >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) + >>> assert_equal(1e-9, 1e-10) + Traceback (most recent call last): + ... + AssertionError: Scalars are not equal! + + Expected 1e-10 but got 1e-09. + Absolute difference: 9.000000000000001e-10 + Relative difference: 9.0 + + Examples: + >>> # tensor to tensor comparison + >>> expected = torch.tensor([1e0, 1e-1, 1e-2]) + >>> actual = torch.acos(torch.cos(expected)) + >>> torch.testing.assert_close(actual, expected) + + >>> # scalar to scalar comparison + >>> import math + >>> expected = math.sqrt(2.0) + >>> actual = 2.0 / math.sqrt(2.0) + >>> torch.testing.assert_close(actual, expected) + + >>> # numpy array to numpy array comparison + >>> import numpy as np + >>> expected = np.array([1e0, 1e-1, 1e-2]) + >>> actual = np.arccos(np.cos(expected)) + >>> torch.testing.assert_close(actual, expected) + + >>> # sequence to sequence comparison + >>> import numpy as np + >>> # The types of the sequences do not have to match. They only have to have the same + >>> # length and their elements have to match. + >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)] + >>> actual = tuple(expected) + >>> torch.testing.assert_close(actual, expected) + + >>> # mapping to mapping comparison + >>> from collections import OrderedDict + >>> import numpy as np + >>> foo = torch.tensor(1.0) + >>> bar = 2.0 + >>> baz = np.array(3.0) + >>> # The types and a possible ordering of mappings do not have to match. They only + >>> # have to have the same set of keys and their elements have to match. 
+ >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)]) + >>> actual = {"baz": baz, "bar": bar, "foo": foo} + >>> torch.testing.assert_close(actual, expected) + + >>> expected = torch.tensor([1.0, 2.0, 3.0]) + >>> actual = expected.clone() + >>> # By default, directly related instances can be compared + >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected) + >>> # This check can be made more strict with allow_subclasses=False + >>> torch.testing.assert_close( + ... torch.nn.Parameter(actual), expected, allow_subclasses=False + ... ) + Traceback (most recent call last): + ... + TypeError: No comparison pair was able to handle inputs of type + and . + >>> # If the inputs are not directly related, they are never considered close + >>> torch.testing.assert_close(actual.numpy(), expected) + Traceback (most recent call last): + ... + TypeError: No comparison pair was able to handle inputs of type + and . + >>> # Exceptions to these rules are Python scalars. They can be checked regardless of + >>> # their type if check_dtype=False. + >>> torch.testing.assert_close(1.0, 1, check_dtype=False) + + >>> # NaN != NaN by default. + >>> expected = torch.tensor(float("Nan")) + >>> actual = expected.clone() + >>> torch.testing.assert_close(actual, expected) + Traceback (most recent call last): + ... + AssertionError: Scalars are not close! + + Expected nan but got nan. + Absolute difference: nan (up to 1e-05 allowed) + Relative difference: nan (up to 1.3e-06 allowed) + >>> torch.testing.assert_close(actual, expected, equal_nan=True) + + >>> expected = torch.tensor([1.0, 2.0, 3.0]) + >>> actual = torch.tensor([1.0, 4.0, 5.0]) + >>> # The default error message can be overwritten. + >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!") + Traceback (most recent call last): + ... + AssertionError: Argh, the tensors are not close! + >>> # If msg is a callable, it can be used to augment the generated message with + >>> # extra information + >>> torch.testing.assert_close( + ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter" + ... ) + Traceback (most recent call last): + ... + AssertionError: Header + + Tensor-likes are not close! + + Mismatched elements: 2 / 3 (66.7%) + Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed) + Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed) + + Footer + """ + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + error_metas = not_close_error_metas( + actual, + expected, + pair_types=( + NonePair, + BooleanPair, + NumberPair, + TensorLikePair, + ), + allow_subclasses=allow_subclasses, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=check_device, + check_dtype=check_dtype, + check_layout=check_layout, + check_stride=check_stride, + msg=msg, + ) + + if error_metas: + # TODO: compose all metas into one AssertionError + raise error_metas[0].to_error(msg) + + +def assert_allclose( + actual: Any, + expected: Any, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = True, + msg: str = "", +) -> None: + """ + .. warning:: + + :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release. + Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions + `here `_. + """ + warnings.warn( + "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. 
" + "Please use `torch.testing.assert_close()` instead. " + "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.", + FutureWarning, + stacklevel=2, + ) + + if not isinstance(actual, torch.Tensor): + actual = torch.tensor(actual) + if not isinstance(expected, torch.Tensor): + expected = torch.tensor(expected, dtype=actual.dtype) + + if rtol is None and atol is None: + rtol, atol = default_tolerances( + actual, + expected, + dtype_precisions={ + torch.float16: (1e-3, 1e-3), + torch.float32: (1e-4, 1e-5), + torch.float64: (1e-5, 1e-8), + }, + ) + + torch.testing.assert_close( + actual, + expected, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=True, + check_dtype=False, + check_stride=False, + msg=msg or None, + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_creation.py b/venv/lib/python3.10/site-packages/torch/testing/_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..0b01b172a4774119043acbf3c458d41d82f97a30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_creation.py @@ -0,0 +1,267 @@ +""" +This module contains tensor creation utilities. +""" + +import collections.abc +import math +import warnings +from typing import cast, List, Optional, Tuple, Union + +import torch + +_INTEGRAL_TYPES = [ + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.uint16, + torch.uint32, + torch.uint64, +] +_FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64] +_FLOATING_8BIT_TYPES = [ + torch.float8_e4m3fn, + torch.float8_e5m2, + torch.float8_e4m3fnuz, + torch.float8_e5m2fnuz, +] +_COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128] +_BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES] +_FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES] + + +def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor: + # uniform_ requires to-from <= std::numeric_limits::max() + # Work around this by scaling the range before and after the PRNG + if high - low >= torch.finfo(t.dtype).max: + return t.uniform_(low / 2, high / 2).mul_(2) + else: + return t.uniform_(low, high) + + +def make_tensor( + *shape: Union[int, torch.Size, List[int], Tuple[int, ...]], + dtype: torch.dtype, + device: Union[str, torch.device], + low: Optional[float] = None, + high: Optional[float] = None, + requires_grad: bool = False, + noncontiguous: bool = False, + exclude_zero: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> torch.Tensor: + r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with + values uniformly drawn from ``[low, high)``. + + If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable + finite values then they are clamped to the lowest or highest representable finite value, respectively. + If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`, + which depend on :attr:`dtype`. 
+ + +---------------------------+------------+----------+ + | ``dtype`` | ``low`` | ``high`` | + +===========================+============+==========+ + | boolean type | ``0`` | ``2`` | + +---------------------------+------------+----------+ + | unsigned integral type | ``0`` | ``10`` | + +---------------------------+------------+----------+ + | signed integral types | ``-9`` | ``10`` | + +---------------------------+------------+----------+ + | floating types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + | complex types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + + Args: + shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor. + dtype (:class:`torch.dtype`): The data type of the returned tensor. + device (Union[str, torch.device]): The device of the returned tensor. + low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is + clamped to the least representable finite value of the given dtype. When ``None`` (default), + this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is + clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value + is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + + .. deprecated:: 2.1 + + Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated + since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead. + + requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``. + noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is + ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``. + exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value + depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating + point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the + :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number + whose real and imaginary parts are both the smallest positive normal number representable by the complex + type. Default ``False``. + memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive + with ``noncontiguous``. + + Raises: + ValueError: If ``requires_grad=True`` is passed for integral `dtype` + ValueError: If ``low >= high``. + ValueError: If either :attr:`low` or :attr:`high` is ``nan``. + ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed. + TypeError: If :attr:`dtype` isn't supported by this function. 
+ + Examples: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> from torch.testing import make_tensor + >>> # Creates a float tensor with values in [-1, 1) + >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1) + >>> # xdoctest: +SKIP + tensor([ 0.1205, 0.2282, -0.6380]) + >>> # Creates a bool tensor on CUDA + >>> make_tensor((2, 2), device='cuda', dtype=torch.bool) + tensor([[False, False], + [False, True]], device='cuda:0') + """ + + def modify_low_high( + low: Optional[float], + high: Optional[float], + *, + lowest_inclusive: float, + highest_exclusive: float, + default_low: float, + default_high: float, + ) -> Tuple[float, float]: + """ + Modifies (and raises ValueError when appropriate) low and high values given by the user (input_low, input_high) + if required. + """ + + def clamp(a: float, l: float, h: float) -> float: + return min(max(a, l), h) + + low = low if low is not None else default_low + high = high if high is not None else default_high + + if any(isinstance(value, float) and math.isnan(value) for value in [low, high]): + raise ValueError( + f"`low` and `high` cannot be NaN, but got {low=} and {high=}" + ) + elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES: + warnings.warn( + "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types " + "is deprecated since 2.1 and will be removed in 2.3. " + "Use torch.full(...) instead.", + FutureWarning, + ) + elif low >= high: + raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}") + elif high < lowest_inclusive or low >= highest_exclusive: + raise ValueError( + f"The value interval specified by `low` and `high` is [{low}, {high}), " + f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})" + ) + + low = clamp(low, lowest_inclusive, highest_exclusive) + high = clamp(high, lowest_inclusive, highest_exclusive) + + if dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + # 1. `low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval + # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of + # `torch.randint` is exclusive, and thus we need to ceil here as well. + return math.ceil(low), math.ceil(high) + + return low, high + + if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence): + shape = shape[0] # type: ignore[assignment] + shape = cast(Tuple[int, ...], tuple(shape)) + + if noncontiguous and memory_format is not None: + raise ValueError( + f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, " + f"but got {noncontiguous=} and {memory_format=}" + ) + + if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + raise ValueError( + f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}" + ) + + if dtype is torch.bool: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=0, + highest_exclusive=2, + default_low=0, + default_high=2, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=torch.iinfo(dtype).min, + highest_exclusive=torch.iinfo(dtype).max + # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint` + # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot + # sample 2**63 - 1, i.e. 
the maximum value of `torch.int64` and we need to account for that here. + + (1 if dtype is not torch.int64 else 0), + # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`, + # _after_ we use the default value, we don't need to special case it here + default_low=-9, + default_high=10, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _FLOATING_OR_COMPLEX_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=dtype) + _uniform_random_( + torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high + ) + elif dtype in _FLOATING_8BIT_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=torch.float32) + _uniform_random_(result, low, high) + result = result.to(dtype) + else: + raise TypeError( + f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()." + " To request support, file an issue at: https://github.com/pytorch/pytorch/issues" + ) + + if noncontiguous and result.numel() > 1: + result = torch.repeat_interleave(result, 2, dim=-1) + result = result[..., ::2] + elif memory_format is not None: + result = result.clone(memory_format=memory_format) + + if exclude_zero: + result[result == 0] = ( + 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny + ) + + if dtype in _FLOATING_OR_COMPLEX_TYPES: + result.requires_grad = requires_grad + + return result diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..2a9055597f732982094f24c39c30e7ce02327ac6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py @@ -0,0 +1,270 @@ +# mypy: ignore-errors + +r"""This file is allowed to initialize CUDA context when imported.""" + +import functools +import torch +import torch.cuda +from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS +import inspect +import contextlib +import os + + +CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized() + + +TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2 +CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None +# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN +if TEST_WITH_ROCM: + TEST_CUDNN = LazyVal(lambda: TEST_CUDA) +else: + TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))) + +TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0) + +SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3)) +SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0)) +SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0)) +SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)) +SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)) +SM90OrLater = 
LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0)) + +def evaluate_gfx_arch_exact(matching_arch): + if not torch.cuda.is_available(): + return False + gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName + arch = os.environ.get('PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', gcn_arch_name) + return arch == matching_arch + +GFX90A_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-')) +GFX942_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')) + +def evaluate_platform_supports_flash_attention(): + if TEST_WITH_ROCM: + return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-') + if TEST_CUDA: + return not IS_WINDOWS and SM80OrLater + return False + +PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention()) +PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM) +# TODO(eqy): gate this against a cuDNN version +PLATFORM_SUPPORTS_CUDNN_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM and + torch.backends.cuda.cudnn_sdp_enabled()) +# This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate +PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION) + +PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM + +if TEST_NUMBA: + try: + import numba.cuda + TEST_NUMBA_CUDA = numba.cuda.is_available() + except Exception as e: + TEST_NUMBA_CUDA = False + TEST_NUMBA = False +else: + TEST_NUMBA_CUDA = False + +# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and +# RNG have been initialized. +__cuda_ctx_rng_initialized = False + + +# after this call, CUDA context and RNG must have been initialized on each GPU +def initialize_cuda_context_rng(): + global __cuda_ctx_rng_initialized + assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng' + if not __cuda_ctx_rng_initialized: + # initialize cuda context and rng for memory tests + for i in range(torch.cuda.device_count()): + torch.randn(1, device=f"cuda:{i}") + __cuda_ctx_rng_initialized = True + + +# Test whether hardware TF32 math mode enabled. 
It is enabled only on: +# - CUDA >= 11 +# - arch >= Ampere +def tf32_is_not_fp32(): + if not torch.cuda.is_available() or torch.version.cuda is None: + return False + if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: + return False + if int(torch.version.cuda.split('.')[0]) < 11: + return False + return True + + +@contextlib.contextmanager +def tf32_off(): + old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 + try: + torch.backends.cuda.matmul.allow_tf32 = False + with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False): + yield + finally: + torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul + + +@contextlib.contextmanager +def tf32_on(self, tf32_precision=1e-5): + old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 + old_precision = self.precision + try: + torch.backends.cuda.matmul.allow_tf32 = True + self.precision = tf32_precision + with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True): + yield + finally: + torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul + self.precision = old_precision + + +# This is a wrapper that wraps a test to run this test twice, one with +# allow_tf32=True, another with allow_tf32=False. When running with +# allow_tf32=True, it will use reduced precision as specified by the +# argument. For example: +# @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128) +# @tf32_on_and_off(0.005) +# def test_matmul(self, device, dtype): +# a = ...; b = ...; +# c = torch.matmul(a, b) +# self.assertEqual(c, expected) +# In the above example, when testing torch.float32 and torch.complex64 on CUDA +# on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at +# TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced +# precision to check values. +# +# This decorator can be used for function with or without device/dtype, such as +# @tf32_on_and_off(0.005) +# def test_my_op(self) +# @tf32_on_and_off(0.005) +# def test_my_op(self, device) +# @tf32_on_and_off(0.005) +# def test_my_op(self, device, dtype) +# @tf32_on_and_off(0.005) +# def test_my_op(self, dtype) +# if neither device nor dtype is specified, it will check if the system has ampere device +# if device is specified, it will check if device is cuda +# if dtype is specified, it will check if dtype is float32 or complex64 +# tf32 and fp32 are different only when all the three checks pass +def tf32_on_and_off(tf32_precision=1e-5): + def with_tf32_disabled(self, function_call): + with tf32_off(): + function_call() + + def with_tf32_enabled(self, function_call): + with tf32_on(self, tf32_precision): + function_call() + + def wrapper(f): + params = inspect.signature(f).parameters + arg_names = tuple(params.keys()) + + @functools.wraps(f) + def wrapped(*args, **kwargs): + for k, v in zip(arg_names, args): + kwargs[k] = v + cond = tf32_is_not_fp32() + if 'device' in kwargs: + cond = cond and (torch.device(kwargs['device']).type == 'cuda') + if 'dtype' in kwargs: + cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64}) + if cond: + with_tf32_disabled(kwargs['self'], lambda: f(**kwargs)) + with_tf32_enabled(kwargs['self'], lambda: f(**kwargs)) + else: + f(**kwargs) + + return wrapped + return wrapper + + +# This is a wrapper that wraps a test to run it with TF32 turned off. 
+# This wrapper is designed to be used when a test uses matmul or convolutions +# but the purpose of that test is not testing matmul or convolutions. +# Disabling TF32 will enforce torch.float tensors to be always computed +# at full precision. +def with_tf32_off(f): + @functools.wraps(f) + def wrapped(*args, **kwargs): + with tf32_off(): + return f(*args, **kwargs) + + return wrapped + +def _get_magma_version(): + if 'Magma' not in torch.__config__.show(): + return (0, 0) + position = torch.__config__.show().find('Magma ') + version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0] + return tuple(int(x) for x in version_str.split(".")) + +def _get_torch_cuda_version(): + if torch.version.cuda is None: + return (0, 0) + cuda_version = str(torch.version.cuda) + return tuple(int(x) for x in cuda_version.split(".")) + +def _get_torch_rocm_version(): + if not TEST_WITH_ROCM: + return (0, 0) + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + return tuple(int(x) for x in rocm_version.split(".")) + +def _check_cusparse_generic_available(): + return not TEST_WITH_ROCM + +def _check_hipsparse_generic_available(): + if not TEST_WITH_ROCM: + return False + + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) + return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1)) + + +TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available() +TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available() + +# Shared by test_torch.py and test_multigpu.py +def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): + # Create a module+optimizer that will use scaling, and a control module+optimizer + # that will not use scaling, against which the scaling-enabled module+optimizer can be compared. 
+ mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) + mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) + with torch.no_grad(): + for c, s in zip(mod_control.parameters(), mod_scaling.parameters()): + s.copy_(c) + + kwargs = {"lr": 1.0} + if optimizer_kwargs is not None: + kwargs.update(optimizer_kwargs) + opt_control = optimizer_ctor(mod_control.parameters(), **kwargs) + opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs) + + return mod_control, mod_scaling, opt_control, opt_scaling + +# Shared by test_torch.py, test_cuda.py and test_multigpu.py +def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): + data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))] + + loss_fn = torch.nn.MSELoss().to(device) + + skip_iter = 2 + + return _create_scaling_models_optimizers( + device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, + ) + (data, loss_fn, skip_iter) + + +# Importing this module should NOT eagerly initialize CUDA +if not CUDA_ALREADY_INITIALIZED_ON_IMPORT: + assert not torch.cuda.is_initialized() diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py new file mode 100644 index 0000000000000000000000000000000000000000..f63f6b4a334ce41edcbdd80137c28a31006da008 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py @@ -0,0 +1,1525 @@ +# mypy: ignore-errors + +import copy +import gc +import inspect +import runpy +import sys +import threading +from collections import namedtuple +from enum import Enum +from functools import wraps, partial +from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set +import unittest +import os +import torch +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \ + skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \ + IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, \ + _TestParametrizer, compose_parametrize_fns, dtype_name, \ + TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \ + get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE, \ + TEST_WITH_TORCHINDUCTOR +from torch.testing._internal.common_cuda import _get_torch_cuda_version, \ + TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version +from torch.testing._internal.common_dtype import get_all_dtypes + +try: + import psutil # type: ignore[import] + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + +# Note [Writing Test Templates] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# This note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# PyTorch has its own framework for instantiating test templates. 
That is, for +# taking test classes that look similar to unittest or pytest +# compatible test classes and optionally doing the following: +# +# - instantiating a version of the test class for each available device type +# (often the CPU, CUDA, and META device types) +# - further instantiating a version of each test that's always specialized +# on the test class's device type, and optionally specialized further +# on datatypes or operators +# +# This functionality is similar to pytest's parametrize functionality +# (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable +# additional logic that specializes the instantiated test classes for their +# device types (see CPUTestBase and CUDATestBase below), supports a variety +# of composable decorators that allow for test filtering and setting +# tolerances, and allows tests parametrized by operators to instantiate +# only the subset of device type x dtype that operator supports. +# +# This framework was built to make it easier to write tests that run on +# multiple device types, multiple datatypes (dtypes), and for multiple +# operators. It's also useful for controlling which tests are run. For example, +# only tests that use a CUDA device can be run on platforms with CUDA. +# Let's dive in with an example to get an idea for how it works: +# +# -------------------------------------------------------- +# A template class (looks like a regular unittest TestCase) +# class TestClassFoo(TestCase): +# +# # A template test that can be specialized with a device +# # NOTE: this test case is not runnable by unittest or pytest because it +# # accepts an extra positional argument, "device", that they do not understand +# def test_bar(self, device): +# pass +# +# # Function that instantiates a template class and its tests +# instantiate_device_type_tests(TestCommon, globals()) +# -------------------------------------------------------- +# +# In the above code example we see a template class and a single test template +# that can be instantiated with a device. The function +# instantiate_device_type_tests(), called at file scope, instantiates +# new test classes, one per available device type, and new tests in those +# classes from these templates. It actually does this by removing +# the class TestClassFoo and replacing it with classes like TestClassFooCPU +# and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase +# and CUDATestBase respectively. Additional device types, like XLA, +# (see https://github.com/pytorch/xla) can further extend the set of +# instantiated test classes to create classes like TestClassFooXLA. +# +# The test template, test_bar(), is also instantiated. In this case the template +# is only specialized on a device, so (depending on the available device +# types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda() +# in TestClassFooCUDA. 
We can think of the instantiated test classes as +# looking like this: +# +# -------------------------------------------------------- +# # An instantiated test class for the CPU device type +# class TestClassFooCPU(CPUTestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cpu(self): +# test_bar(self, 'cpu') +# +# # An instantiated test class for the CUDA device type +# class TestClassFooCUDA(CUDATestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cuda(self): +# test_bar(self, 'cuda:0') +# -------------------------------------------------------- +# +# These instantiated test classes ARE discoverable and runnable by both +# unittest and pytest. One thing that may be confusing, however, is that +# attempting to run "test_bar" will not work, despite it appearing in the +# original template code. This is because "test_bar" is no longer discoverable +# after instantiate_device_type_tests() runs, as the above snippet shows. +# Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both +# can be run with the option "-k test_bar". +# +# Removing the template class and adding the instantiated classes requires +# passing "globals()" to instantiate_device_type_tests(), because it +# edits the file's Python objects. +# +# As mentioned, tests can be additionally parametrized on dtypes or +# operators. Datatype parametrization uses the @dtypes decorator and +# require a test template like this: +# +# -------------------------------------------------------- +# # A template test that can be specialized with a device and a datatype (dtype) +# @dtypes(torch.float32, torch.int64) +# def test_car(self, device, dtype) +# pass +# -------------------------------------------------------- +# +# If the CPU and CUDA device types are available this test would be +# instantiated as 4 tests that cover the cross-product of the two dtypes +# and two device types: +# +# - test_car_cpu_float32 +# - test_car_cpu_int64 +# - test_car_cuda_float32 +# - test_car_cuda_int64 +# +# The dtype is passed as a torch.dtype object. +# +# Tests parametrized on operators (actually on OpInfos, more on that in a +# moment...) use the @ops decorator and require a test template like this: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and OpInfo +# @ops(op_db) +# def test_car(self, device, dtype, op) +# pass +# -------------------------------------------------------- +# +# See the documentation for the @ops decorator below for additional details +# on how to use it and see the note [OpInfos] in +# common_methods_invocations.py for more details on OpInfos. +# +# A test parametrized over the entire "op_db", which contains hundreds of +# OpInfos, will likely have hundreds or thousands of instantiations. The +# test will be instantiated on the cross-product of device types, operators, +# and the dtypes the operator supports on that device type. The instantiated +# tests will have names like: +# +# - test_car_add_cpu_float32 +# - test_car_sub_cuda_int64 +# +# The first instantiated test calls the original test_car() with the OpInfo +# for torch.add as its "op" argument, the string 'cpu' for its "device" argument, +# and the dtype torch.float32 for is "dtype" argument. 
The second instantiated +# test calls the test_car() with the OpInfo for torch.sub, a CUDA device string +# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype +# torch.int64 for its "dtype argument." +# +# In addition to parametrizing over device, dtype, and ops via OpInfos, the +# @parametrize decorator is supported for arbitrary parametrizations: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and value for x +# @parametrize("x", range(5)) +# def test_car(self, device, dtype, x) +# pass +# -------------------------------------------------------- +# +# See the documentation for @parametrize in common_utils.py for additional details +# on this. Note that the instantiate_device_type_tests() function will handle +# such parametrizations; there is no need to additionally call +# instantiate_parametrized_tests(). +# +# Clever test filtering can be very useful when working with parametrized +# tests. "-k test_car" would run every instantiated variant of the test_car() +# test template, and "-k test_car_add" runs every variant instantiated with +# torch.add. +# +# It is important to use the passed device and dtype as appropriate. Use +# helper functions like make_tensor() that require explicitly specifying +# the device and dtype so they're not forgotten. +# +# Test templates can use a variety of composable decorators to specify +# additional options and requirements, some are listed here: +# +# - @deviceCountAtLeast() +# Passes a list of strings representing all available devices of +# the test class's device type as the test template's "device" argument. +# If there are fewer devices than the value passed to the decorator +# the test is skipped. +# - @dtypes() +# In addition to accepting multiple dtypes, the @dtypes decorator +# can accept a sequence of tuple pairs of dtypes. The test template +# will be called with each tuple for its "dtype" argument. +# - @onlyNativeDeviceTypes +# Skips the test if the device is not a native device type (currently CPU, CUDA, Meta) +# - @onlyCPU +# Skips the test if the device is not a CPU device +# - @onlyCUDA +# Skips the test if the device is not a CUDA device +# - @onlyMPS +# Skips the test if the device is not a MPS device +# - @skipCPUIfNoLapack +# Skips the test if the device is a CPU device and LAPACK is not installed +# - @skipCPUIfNoMkl +# Skips the test if the device is a CPU device and MKL is not installed +# - @skipCUDAIfNoMagma +# Skips the test if the device is a CUDA device and MAGMA is not installed +# - @skipCUDAIfRocm +# Skips the test if the device is a CUDA device and ROCm is being used + + +# Note [Adding a Device Type] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# To add a device type: +# +# (1) Create a new "TestBase" extending DeviceTypeTestBase. +# See CPUTestBase and CUDATestBase below. +# (2) Define the "device_type" attribute of the base to be the +# appropriate string. +# (3) Add logic to this file that appends your base class to +# device_type_test_bases when your device type is available. +# (4) (Optional) Write setUpClass/tearDownClass class methods that +# instantiate dependencies (see MAGMA in CUDATestBase). +# (5) (Optional) Override the "instantiate_test" method for total +# control over how your class creates tests. +# +# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF +# they are run. This makes it useful for initializing devices and dependencies. 
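[Editor's note] Taken together, the notes above describe a workflow that, in rough outline, looks like the sketch below. This is a hedged, minimal illustration only: the class and test names are made up, and it simply assumes the decorators, `TestCase`, `run_tests`, and `instantiate_device_type_tests` are imported the way PyTorch's own test files import them from these internal modules.

```python
# Illustrative only: a made-up test template combining the pieces described above.
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    dtypes,
    onlyNativeDeviceTypes,
)


class TestExampleTemplate(TestCase):  # hypothetical template class
    # Instantiated as e.g. test_double_cpu_float32, test_double_cuda_int64, ...
    @onlyNativeDeviceTypes
    @dtypes(torch.float32, torch.int64)
    def test_double(self, device, dtype):
        # Use the passed device and dtype, as the note above recommends.
        t = make_tensor((3, 3), device=device, dtype=dtype)
        self.assertEqual(t + t, 2 * t)


# Replaces TestExampleTemplate with per-device classes in this module's scope,
# making them discoverable by unittest/pytest.
instantiate_device_type_tests(TestExampleTemplate, globals())

if __name__ == "__main__":
    run_tests()
```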
+ + +# Note [Overriding methods in generic tests] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Device generic tests look a lot like normal test classes, but they differ +# from ordinary classes in some important ways. In particular, overriding +# methods in generic tests doesn't work quite the way you expect. +# +# class TestFooDeviceType(TestCase): +# # Intention is to override +# def assertEqual(self, x, y): +# # This DOESN'T WORK! +# super().assertEqual(x, y) +# +# If you try to run this code, you'll get an error saying that TestFooDeviceType +# is not in scope. This is because after instantiating our classes, we delete +# it from the parent scope. Instead, you need to hardcode a direct invocation +# of the desired subclass call, e.g., +# +# class TestFooDeviceType(TestCase): +# # Intention is to override +# def assertEqual(self, x, y): +# TestCase.assertEqual(x, y) +# +# However, a less error-prone way of customizing the behavior of TestCase +# is to either (1) add your functionality to TestCase and make it toggled +# by a class attribute, or (2) create your own subclass of TestCase, and +# then inherit from it for your generic test. + + +def _dtype_test_suffix(dtypes): + """ Returns the test suffix for a dtype, sequence of dtypes, or None. """ + if isinstance(dtypes, (list, tuple)): + if len(dtypes) == 0: + return '' + return '_' + '_'.join(dtype_name(d) for d in dtypes) + elif dtypes: + return f'_{dtype_name(dtypes)}' + else: + return '' + + +def _update_param_kwargs(param_kwargs, name, value): + """ Adds a kwarg with the specified name and value to the param_kwargs dict. """ + # Make name plural (e.g. devices / dtypes) if the value is composite. + plural_name = f'{name}s' + + # Clear out old entries of the arg if any. + if name in param_kwargs: + del param_kwargs[name] + if plural_name in param_kwargs: + del param_kwargs[plural_name] + + if isinstance(value, (list, tuple)): + param_kwargs[plural_name] = value + elif value is not None: + param_kwargs[name] = value + + # Leave param_kwargs as-is when value is None. + + +class DeviceTypeTestBase(TestCase): + device_type: str = 'generic_device_type' + + # Flag to disable test suite early due to unrecoverable error such as CUDA error. + _stop_test_suite = False + + # Precision is a thread-local setting since it may be overridden per test + _tls = threading.local() + _tls.precision = TestCase._precision + _tls.rel_tol = TestCase._rel_tol + + @property + def precision(self): + return self._tls.precision + + @precision.setter + def precision(self, prec): + self._tls.precision = prec + + @property + def rel_tol(self): + return self._tls.rel_tol + + @rel_tol.setter + def rel_tol(self, prec): + self._tls.rel_tol = prec + + # Returns a string representing the device that single device tests should use. + # Note: single device tests use this device exclusively. + @classmethod + def get_primary_device(cls): + return cls.device_type + + @classmethod + def _init_and_get_primary_device(cls): + try: + return cls.get_primary_device() + except Exception: + # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available + # until setUpClass() sets it. Call that manually here if needed. + if hasattr(cls, 'setUpClass'): + cls.setUpClass() + return cls.get_primary_device() + + # Returns a list of strings representing all available devices of this + # device type. The primary device must be the first string in the list + # and the list must contain no duplicates. + # Note: UNSTABLE API. 
Will be replaced once PyTorch has a device generic + # mechanism of acquiring all available devices. + @classmethod + def get_all_devices(cls): + return [cls.get_primary_device()] + + # Returns the dtypes the test has requested. + # Prefers device-specific dtype specifications over generic ones. + @classmethod + def _get_dtypes(cls, test): + if not hasattr(test, 'dtypes'): + return None + + default_dtypes = test.dtypes.get('all') + msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it" + assert default_dtypes is not None, msg + + return test.dtypes.get(cls.device_type, default_dtypes) + + def _get_precision_override(self, test, dtype): + if not hasattr(test, 'precision_overrides'): + return self.precision + return test.precision_overrides.get(dtype, self.precision) + + def _get_tolerance_override(self, test, dtype): + if not hasattr(test, 'tolerance_overrides'): + return self.precision, self.rel_tol + return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol)) + + def _apply_precision_override_for_test(self, test, param_kwargs): + dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None + dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype + if dtype: + self.precision = self._get_precision_override(test, dtype) + self.precision, self.rel_tol = self._get_tolerance_override(test, dtype) + + # Creates device-specific tests. + @classmethod + def instantiate_test(cls, name, test, *, generic_cls=None): + + def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []): + # Add the device param kwarg if the test needs device or devices. + param_kwargs = {} if param_kwargs is None else param_kwargs + test_sig_params = inspect.signature(test).parameters + if 'device' in test_sig_params or 'devices' in test_sig_params: + device_arg: str = cls._init_and_get_primary_device() + if hasattr(test, 'num_required_devices'): + device_arg = cls.get_all_devices() + _update_param_kwargs(param_kwargs, 'device', device_arg) + + # Apply decorators based on param kwargs. + for decorator in decorator_fn(param_kwargs): + test = decorator(test) + + # Constructs the test + @wraps(test) + def instantiated_test(self, param_kwargs=param_kwargs): + # Sets precision and runs test + # Note: precision is reset after the test is run + guard_precision = self.precision + guard_rel_tol = self.rel_tol + try: + self._apply_precision_override_for_test(test, param_kwargs) + result = test(self, **param_kwargs) + except RuntimeError as rte: + # check if rte should stop entire test suite. + self._stop_test_suite = self._should_stop_test_suite() + # Check if test has been decorated with `@expectedFailure` + # Using `__unittest_expecting_failure__` attribute, see + # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164 + # In that case, make it fail with "unexpected success" by suppressing exception + if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite: + import sys + print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr) + return + # raise the runtime error as is for the test suite to record. + raise rte + finally: + self.precision = guard_precision + self.rel_tol = guard_rel_tol + + return result + + assert not hasattr(cls, name), f"Redefinition of test {name}" + setattr(cls, name, instantiated_test) + + def default_parametrize_fn(test, generic_cls, device_cls): + # By default, no parametrization is needed. 
+ yield (test, '', {}, lambda _: []) + + # Parametrization decorators set the parametrize_fn attribute on the test. + parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn) + + # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it. + dtypes = cls._get_dtypes(test) + if dtypes is not None: + + def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes): + for dtype in dtypes: + param_kwargs: Dict[str, Any] = {} + _update_param_kwargs(param_kwargs, "dtype", dtype) + + # Note that an empty test suffix is set here so that the dtype can be appended + # later after the device. + yield (test, '', param_kwargs, lambda _: []) + + parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn) + + # Instantiate the parametrized tests. + for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls): # noqa: B020 + test_suffix = '' if test_suffix == '' else '_' + test_suffix + device_suffix = '_' + cls.device_type + + # Note: device and dtype suffix placement + # Special handling here to place dtype(s) after device according to test name convention. + dtype_kwarg = None + if 'dtype' in param_kwargs or 'dtypes' in param_kwargs: + dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype'] + test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}' + + instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs, + decorator_fn=decorator_fn) + + def run(self, result=None): + super().run(result=result) + # Early terminate test if _stop_test_suite is set. + if self._stop_test_suite: + result.stop() + + +class CPUTestBase(DeviceTypeTestBase): + device_type = 'cpu' + + # No critical error should stop CPU test suite + def _should_stop_test_suite(self): + return False + +class CUDATestBase(DeviceTypeTestBase): + device_type = 'cuda' + _do_cuda_memory_leak_check = True + _do_cuda_non_default_stream = True + primary_device: ClassVar[str] + cudnn_version: ClassVar[Any] + no_magma: ClassVar[bool] + no_cudnn: ClassVar[bool] + + def has_cudnn(self): + return not self.no_cudnn + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + primary_device_idx = int(cls.get_primary_device().split(':')[1]) + num_devices = torch.cuda.device_count() + + prim_device = cls.get_primary_device() + cuda_str = 'cuda:{0}' + non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx] + return [prim_device] + non_primary_devices + + @classmethod + def setUpClass(cls): + # has_magma shows up after cuda is initialized + t = torch.ones(1).cuda() + cls.no_magma = not torch.cuda.has_magma + + # Determines if cuDNN is available and its version + cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t) + cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version() + + # Acquires the current device as the primary (test) device + cls.primary_device = f'cuda:{torch.cuda.current_device()}' + +# See Note [Lazy Tensor tests in device agnostic testing] +lazy_ts_backend_init = False +class LazyTestBase(DeviceTypeTestBase): + device_type = 'lazy' + + def _should_stop_test_suite(self): + return False + + @classmethod + def setUpClass(cls): + import torch._lazy + import torch._lazy.metrics + import torch._lazy.ts_backend + global lazy_ts_backend_init + if not lazy_ts_backend_init: + # Need to connect the TS backend to lazy key before 
running tests + torch._lazy.ts_backend.init() + lazy_ts_backend_init = True + +class MPSTestBase(DeviceTypeTestBase): + device_type = 'mps' + primary_device: ClassVar[str] + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + # currently only one device is supported on MPS backend + prim_device = cls.get_primary_device() + return [prim_device] + + @classmethod + def setUpClass(cls): + cls.primary_device = 'mps:0' + + def _should_stop_test_suite(self): + return False + +class PrivateUse1TestBase(DeviceTypeTestBase): + primary_device: ClassVar[str] + device_mod = None + device_type = 'privateuse1' + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + primary_device_idx = int(cls.get_primary_device().split(':')[1]) + num_devices = cls.device_mod.device_count() + prim_device = cls.get_primary_device() + device_str = f'{cls.device_type}:{{0}}' + non_primary_devices = [device_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx] + return [prim_device] + non_primary_devices + + @classmethod + def setUpClass(cls): + cls.device_type = torch._C._get_privateuse1_backend_name() + cls.device_mod = getattr(torch, cls.device_type, None) + assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register + a module by `torch._register_device_module`.''' + cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}' + +# Adds available device-type-specific test base classes +def get_device_type_test_bases(): + # set type to List[Any] due to mypy list-of-union issue: + # https://github.com/python/mypy/issues/3351 + test_bases: List[Any] = list() + + if IS_SANDCASTLE or IS_FBCODE: + if IS_REMOTE_GPU: + # Skip if sanitizer is enabled + if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN: + test_bases.append(CUDATestBase) + else: + test_bases.append(CPUTestBase) + else: + test_bases.append(CPUTestBase) + if torch.cuda.is_available(): + test_bases.append(CUDATestBase) + device_type = torch._C._get_privateuse1_backend_name() + device_mod = getattr(torch, device_type, None) + if hasattr(device_mod, "is_available") and device_mod.is_available(): + test_bases.append(PrivateUse1TestBase) + # Disable MPS testing in generic device testing temporarily while we're + # ramping up support. + # elif torch.backends.mps.is_available(): + # test_bases.append(MPSTestBase) + + return test_bases + +device_type_test_bases = get_device_type_test_bases() + + +def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None): + # device type cannot appear in both except_for and only_for + intersect = set(except_for if except_for else []) & set(only_for if only_for else []) + assert not intersect, f"device ({intersect}) appeared in both except_for and only_for" + + if except_for: + device_type_test_bases = filter( + lambda x: x.device_type not in except_for, device_type_test_bases) + if only_for: + device_type_test_bases = filter( + lambda x: x.device_type in only_for, device_type_test_bases) + + return list(device_type_test_bases) + + +# Note [How to extend DeviceTypeTestBase to add new test device] +# The following logic optionally allows downstream projects like pytorch/xla to +# add more test devices. +# Instructions: +# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project. 
+# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define +# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of +# `instantiate_test` method. +# - DO NOT import common_device_type inside the file. +# `runpy.run_path` with `globals()` already properly setup the context so that +# `DeviceTypeTestBase` is already available. +# - Set a top-level variable `TEST_CLASS` equal to your new class. +# E.g. TEST_CLASS = XLATensorBase +# - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path +# to this file. Multiple paths can be separated by `:`. +# See pytorch/xla/test/pytorch_test_base.py for a more detailed example. +_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None) +if _TORCH_TEST_DEVICES: + for path in _TORCH_TEST_DEVICES.split(':'): + # runpy (a stdlib module) lacks annotations + mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value] + device_type_test_bases.append(mod['TEST_CLASS']) + + +PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1' + +PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR' +PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR' + + +def get_desired_device_type_test_bases(except_for=None, only_for=None, include_lazy=False, allow_mps=False): + # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy` + test_bases = device_type_test_bases.copy() + if allow_mps and TEST_MPS and MPSTestBase not in test_bases: + test_bases.append(MPSTestBase) + # Filter out the device types based on user inputs + desired_device_type_test_bases = filter_desired_device_types(test_bases, except_for, only_for) + if include_lazy: + # Note [Lazy Tensor tests in device agnostic testing] + # Right now, test_view_ops.py runs with LazyTensor. + # We don't want to opt every device-agnostic test into using the lazy device, + # because many of them will fail. + # So instead, the only way to opt a specific device-agnostic test file into + # lazy tensor testing is with include_lazy=True + if IS_FBCODE: + print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr) + else: + desired_device_type_test_bases.append(LazyTestBase) + + def split_if_not_empty(x: str): + return x.split(",") if x else [] + + # Filter out the device types based on environment variables if available + # Usage: + # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu + # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla + env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, '')) + env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, '')) + + return filter_desired_device_types(desired_device_type_test_bases, env_except_for, env_only_for) + + + +# Adds 'instantiated' device-specific test cases to the given scope. +# The tests in these test cases are derived from the generic tests in +# generic_test_class. This function should be used instead of +# instantiate_parametrized_tests() if the test class contains +# device-specific tests (NB: this supports additional @parametrize usage). +# +# See note "Writing Test Templates" +def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False): + # Removes the generic test class from its enclosing scope so its tests + # are not discoverable. 
+ del scope[generic_test_class.__name__] + + # Creates an 'empty' version of the generic_test_class + # Note: we don't inherit from the generic_test_class directly because + # that would add its tests to our test classes and they would be + # discovered (despite not being runnable). Inherited methods also + # can't be removed later, and we can't rely on load_tests because + # pytest doesn't support it (as of this writing). + empty_name = generic_test_class.__name__ + "_base" + empty_class = type(empty_name, generic_test_class.__bases__, {}) + + # Acquires members names + # See Note [Overriding methods in generic tests] + generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys()) + generic_tests = [x for x in generic_members if x.startswith('test')] + + # Creates device-specific test cases + for base in get_desired_device_type_test_bases(except_for, only_for, include_lazy, allow_mps): + class_name = generic_test_class.__name__ + base.device_type.upper() + + # type set to Any and suppressed due to unsupport runtime class: + # https://github.com/python/mypy/wiki/Unsupported-Python-Features + device_type_test_class: Any = type(class_name, (base, empty_class), {}) + + for name in generic_members: + if name in generic_tests: # Instantiates test member + test = getattr(generic_test_class, name) + # XLA-compat shim (XLA's instantiate_test takes doesn't take generic_cls) + sig = inspect.signature(device_type_test_class.instantiate_test) + if len(sig.parameters) == 3: + # Instantiates the device-specific tests + device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class) + else: + device_type_test_class.instantiate_test(name, copy.deepcopy(test)) + else: # Ports non-test member + assert name not in device_type_test_class.__dict__, f"Redefinition of directly defined member {name}" + nontest = getattr(generic_test_class, name) + setattr(device_type_test_class, name, nontest) + + # Mimics defining the instantiated class in the caller's file + # by setting its module to the given class's and adding + # the module to the given scope. + # This lets the instantiated class be discovered by unittest. + device_type_test_class.__module__ = generic_test_class.__module__ + scope[class_name] = device_type_test_class + + +# Category of dtypes to run an OpInfo-based test for +# Example use: @ops(dtype=OpDTypes.supported) +# +# There are 5 categories: +# - supported: Every dtype supported by the operator. Use for exhaustive +# testing of all dtypes. +# - unsupported: Run tests on dtypes not supported by the operator. e.g. for +# testing the operator raises an error and doesn't crash. +# - supported_backward: Every dtype supported by the operator's backward pass. +# - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass. +# - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the +# operator supports in both forward and backward. +# - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test +# when this is selected. 
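[Editor's note] As a concrete illustration of the dtype categories listed above, here is a sketch of an OpInfo-parametrized smoke test. It is illustrative only: the class and test names are hypothetical, and it assumes `op_db` is imported from `common_methods_invocations` as the earlier notes in this file describe.

```python
# Illustrative only: run one supported dtype per operator to keep runtime down.
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    ops,
    OpDTypes,
)
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_utils import TestCase, run_tests


class TestOpSmoke(TestCase):  # hypothetical template class
    # OpDTypes.any_one picks a single dtype each op supports (preferring one
    # that also supports backward), so each op yields exactly one test variant.
    @ops(op_db, dtypes=OpDTypes.any_one)
    def test_runs(self, device, dtype, op):
        for sample in op.sample_inputs(device, dtype):
            # Each OpInfo knows how to produce valid sample inputs for itself.
            op(sample.input, *sample.args, **sample.kwargs)


instantiate_device_type_tests(TestOpSmoke, globals())

if __name__ == "__main__":
    run_tests()
```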
+class OpDTypes(Enum): + supported = 0 # Test all supported dtypes (default) + unsupported = 1 # Test only unsupported dtypes + supported_backward = 2 # Test all supported backward dtypes + unsupported_backward = 3 # Test only unsupported backward dtypes + any_one = 4 # Test precisely one supported dtype + none = 5 # Instantiate no dtype variants (no dtype kwarg needed) + any_common_cpu_cuda_one = 6 # Test precisely one supported dtype that is common to both cuda and cpu + + +# Arbitrary order +ANY_DTYPE_ORDER = ( + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, + torch.float16, + torch.bfloat16, + torch.long, + torch.int32, + torch.int16, + torch.int8, + torch.uint8, + torch.bool +) + +def _serialize_sample(sample_input): + # NB: For OpInfos, SampleInput.summary() prints in a cleaner way. + if getattr(sample_input, "summary", None) is not None: + return sample_input.summary() + return str(sample_input) + +# Decorator that defines the OpInfos a test template should be instantiated for. +# +# Example usage: +# +# @ops(unary_ufuncs) +# def test_numerics(self, device, dtype, op): +# +# +# This will instantiate variants of test_numerics for each given OpInfo, +# on each device the OpInfo's operator supports, and for every dtype supported by +# that operator. There are a few caveats to the dtype rule, explained below. +# +# The @ops decorator can accept two +# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified +# then the test variants are instantiated for those dtypes, regardless of +# what the operator supports. If given "allowed_dtypes" then test variants +# are instantiated only for the intersection of allowed_dtypes and the dtypes +# they would otherwise be instantiated with. That is, allowed_dtypes composes +# with the options listed above and below. +# +# The "dtypes" argument can also accept additional values (see OpDTypes above): +# OpDTypes.supported - the test is instantiated for all dtypes the operator +# supports +# OpDTypes.unsupported - the test is instantiated for all dtypes the operator +# doesn't support +# OpDTypes.supported_backward - the test is instantiated for all dtypes the +# operator's gradient formula supports +# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the +# operator's gradient formula doesn't support +# OpDTypes.any_one - the test is instantiated for one dtype the +# operator supports. The dtype supports forward and backward if possible. +# OpDTypes.none - the test is instantiated without any dtype. The test signature +# should not include a dtype kwarg in this case. +# +# These options allow tests to have considerable control over the dtypes +# they're instantiated for. + +class ops(_TestParametrizer): + def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported, + allowed_dtypes: Optional[Sequence[torch.dtype]] = None, skip_if_dynamo=True): + self.op_list = list(op_list) + self.opinfo_dtypes = dtypes + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + self.skip_if_dynamo = skip_if_dynamo + + def _parametrize_test(self, test, generic_cls, device_cls): + """ Parameterizes the given test function across each op and its associated dtypes. 
""" + if device_cls is None: + raise RuntimeError('The @ops decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + op = check_exhausted_iterator = object() + for op in self.op_list: + # Determine the set of dtypes to use. + dtypes: Union[Set[torch.dtype], Set[None]] + if isinstance(self.opinfo_dtypes, Sequence): + dtypes = set(self.opinfo_dtypes) + elif self.opinfo_dtypes == OpDTypes.unsupported_backward: + dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported_backward: + dtypes = op.supported_backward_dtypes(device_cls.device_type) + elif self.opinfo_dtypes == OpDTypes.unsupported: + dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported: + dtypes = op.supported_dtypes(device_cls.device_type) + elif self.opinfo_dtypes == OpDTypes.any_one: + # Tries to pick a dtype that supports both forward or backward + supported = op.supported_dtypes(device_cls.device_type) + supported_backward = op.supported_backward_dtypes(device_cls.device_type) + supported_both = supported.intersection(supported_backward) + dtype_set = supported_both if len(supported_both) > 0 else supported + for dtype in ANY_DTYPE_ORDER: + if dtype in dtype_set: + dtypes = {dtype} + break + else: + dtypes = {} + elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one: + # Tries to pick a dtype that supports both CPU and CUDA + supported = op.dtypes.intersection(op.dtypesIfCUDA) + if supported: + dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)} + else: + dtypes = {} + + elif self.opinfo_dtypes == OpDTypes.none: + dtypes = {None} + else: + raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}") + + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = op.formatted_name + + for dtype in dtypes: + # Construct parameter kwargs to pass to the test. + param_kwargs = {'op': op} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + + # NOTE: test_wrapper exists because we don't want to apply + # op-specific decorators to the original test. + # Test-specific decorators are applied to the original test, + # however. + try: + @wraps(test) + def test_wrapper(*args, **kwargs): + try: + return test(*args, **kwargs) + except unittest.SkipTest as e: + raise e + except Exception as e: + tracked_input = get_tracked_input() + if PRINT_REPRO_ON_FAILURE and tracked_input is not None: + raise Exception( + f"Caused by {tracked_input.type_desc} " + f"at index {tracked_input.index}: " + f"{_serialize_sample(tracked_input.val)}") from e + raise e + finally: + clear_tracked_input() + + if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR: + test_wrapper = skipIfTorchDynamo("Policy: we don't run OpInfo tests w/ Dynamo")(test_wrapper) + + # Initialize info for the last input seen. This is useful for tracking + # down which inputs caused a test failure. Note that TrackedInputIter is + # responsible for managing this. 
+ test.tracked_input = None + + decorator_fn = partial(op.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for op {op.name}!") + raise ex + if op is check_exhausted_iterator: + raise ValueError('An empty op_list was passed to @ops. ' + 'Note that this may result from reuse of a generator.') + +# Decorator that skips a test if the given condition is true. +# Notes: +# (1) Skip conditions stack. +# (2) Skip conditions can be bools or strings. If a string the +# test base must have defined the corresponding attribute to be False +# for the test to run. If you want to use a string argument you should +# probably define a new decorator instead (see below). +# (3) Prefer the existing decorators to defining the 'device_type' kwarg. +class skipIf: + + def __init__(self, dep, reason, device_type=None): + self.dep = dep + self.reason = reason + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def dep_fn(slf, *args, **kwargs): + if self.device_type is None or self.device_type == slf.device_type: + if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep): + raise unittest.SkipTest(self.reason) + + return fn(slf, *args, **kwargs) + return dep_fn + + +# Skips a test on CPU if the condition is true. +class skipCPUIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='cpu') + + +# Skips a test on CUDA if the condition is true. +class skipCUDAIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='cuda') + +# Skips a test on Lazy if the condition is true. +class skipLazyIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='lazy') + +# Skips a test on Meta if the condition is true. +class skipMetaIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='meta') + +# Skips a test on MPS if the condition is true. +class skipMPSIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='mps') + +# Skips a test on XLA if the condition is true. 
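A hedged sketch of how this skip family is typically applied (the test class and body are illustrative, and the standard suite helpers are assumed). A bool dependency skips unconditionally when true; a string dependency names an attribute on the test class that must be falsy for the test to run, which is how `skipCUDAIfNoMagma` further below uses the `no_magma` attribute:

import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, skipCUDAIf, skipMetaIf)
from torch.testing._internal.common_utils import TestCase, run_tests


class TestSkipExamples(TestCase):
    # Bool dependency: skipped on CUDA whenever cuDNN is unavailable.
    @skipCUDAIf(not torch.backends.cudnn.is_available(), "cuDNN is required")
    # Unconditional skip for the meta device variant.
    @skipMetaIf(True, "convolution is not meaningful on meta tensors")
    def test_conv2d_runs(self, device):
        x = torch.randn(1, 1, 4, 4, device=device)
        w = torch.randn(1, 1, 3, 3, device=device)
        torch.nn.functional.conv2d(x, w)


instantiate_device_type_tests(TestSkipExamples, globals())

if __name__ == "__main__":
    run_tests()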
+class skipXLAIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='xla') + +class skipPRIVATEUSE1If(skipIf): + + def __init__(self, dep, reason): + device_type = torch._C._get_privateuse1_backend_name() + super().__init__(dep, reason, device_type=device_type) + +def _has_sufficient_memory(device, size): + if torch.device(device).type == 'cuda': + if not torch.cuda.is_available(): + return False + gc.collect() + torch.cuda.empty_cache() + # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU + if device == 'cuda': + device = 'cuda:0' + return torch.cuda.memory.mem_get_info(device)[0] >= size + + if device == 'xla': + raise unittest.SkipTest('TODO: Memory availability checks for XLA?') + + if device != 'cpu': + raise unittest.SkipTest('Unknown device type') + + # CPU + if not HAS_PSUTIL: + raise unittest.SkipTest('Need psutil to determine if memory is sufficient') + + # The sanitizers have significant memory overheads + if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN: + effective_size = size * 10 + else: + effective_size = size + + if psutil.virtual_memory().available < effective_size: + gc.collect() + return psutil.virtual_memory().available >= effective_size + + +def largeTensorTest(size, device=None): + """Skip test if the device has insufficient memory to run the test + + size may be a number of bytes, a string of the form "N GB", or a callable + + If the test is a device generic test, available memory on the primary device will be checked. + It can also be overriden by the optional `device=` argument. + In other tests, the `device=` argument needs to be specified. + """ + if isinstance(size, str): + assert size.endswith(('GB', 'gb')), "only bytes or GB supported" + size = 1024 ** 3 * int(size[:-2]) + + def inner(fn): + @wraps(fn) + def dep_fn(self, *args, **kwargs): + size_bytes = size(self, *args, **kwargs) if callable(size) else size + _device = device if device is not None else self.get_primary_device() + if not _has_sufficient_memory(_device, size_bytes): + raise unittest.SkipTest(f'Insufficient {_device} memory') + + return fn(self, *args, **kwargs) + return dep_fn + return inner + + +class expectedFailure: + + def __init__(self, device_type): + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def efail_fn(slf, *args, **kwargs): + if self.device_type is None or self.device_type == slf.device_type: + try: + fn(slf, *args, **kwargs) + except Exception: + return + else: + slf.fail('expected test to fail, but it passed') + + return fn(slf, *args, **kwargs) + return efail_fn + + +class onlyOn: + + def __init__(self, device_type): + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def only_fn(slf, *args, **kwargs): + if self.device_type != slf.device_type: + reason = f"Only runs on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(slf, *args, **kwargs) + + return only_fn + + +# Decorator that provides all available devices of the device type to the test +# as a list of strings instead of providing a single device string. +# Skips the test if the number of available devices of the variant's device +# type is less than the 'num_required_devices' arg. 
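A short sketch of the memory- and device-count-gated decorators in use (the `deviceCountAtLeast` class described above is defined next); the class, test names, and the 12 GB figure are illustrative assumptions, and the standard suite helpers are assumed available:

import torch
from torch.testing._internal.common_device_type import (
    deviceCountAtLeast, instantiate_device_type_tests, largeTensorTest, onlyCUDA)
from torch.testing._internal.common_utils import TestCase, run_tests


class TestDeviceGatedExamples(TestCase):
    @onlyCUDA
    @deviceCountAtLeast(2)  # the test receives a list of device strings
    def test_copy_across_devices(self, devices):
        x = torch.randn(8, device=devices[0])
        self.assertEqual(x.to(devices[1]).cpu(), x.cpu())

    @onlyCUDA
    @largeTensorTest("12GB")  # skipped when the primary device has less than 12 GB free
    def test_large_allocation(self, device):
        # 3 * 2**30 float32 elements == 12 GiB
        torch.empty(3 * 1024 ** 3, dtype=torch.float32, device=device)


instantiate_device_type_tests(TestDeviceGatedExamples, globals())

if __name__ == "__main__":
    run_tests()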
+class deviceCountAtLeast: + + def __init__(self, num_required_devices): + self.num_required_devices = num_required_devices + + def __call__(self, fn): + assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}" + fn.num_required_devices = self.num_required_devices + + @wraps(fn) + def multi_fn(slf, devices, *args, **kwargs): + if len(devices) < self.num_required_devices: + reason = f"fewer than {self.num_required_devices} devices detected" + raise unittest.SkipTest(reason) + + return fn(slf, devices, *args, **kwargs) + + return multi_fn + +# Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1) +def onlyNativeDeviceTypes(fn): + @wraps(fn) + def only_fn(self, *args, **kwargs): + if self.device_type not in NATIVE_DEVICES: + reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return only_fn + +# Specifies per-dtype precision overrides. +# Ex. +# +# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4}) +# @dtypes(torch.half, torch.float, torch.double) +# def test_X(self, device, dtype): +# ... +# +# When the test is instantiated its class's precision will be set to the +# corresponding override, if it exists. +# self.precision can be accessed directly, and it also controls the behavior of +# functions like self.assertEqual(). +# +# Note that self.precision is a scalar value, so if you require multiple +# precisions (or are working with multiple dtypes) they should be specified +# explicitly and computed using self.precision (e.g. +# self.precision *2, max(1, self.precision)). +class precisionOverride: + + def __init__(self, d): + assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!" + for dtype in d.keys(): + assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}" + + self.d = d + + def __call__(self, fn): + fn.precision_overrides = self.d + return fn + +# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over +# precisionOverride. +# Ex. +# +# @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3}, +# torch.double : tol{atol=1e-4, rtol = 0}) +# @dtypes(torch.half, torch.float, torch.double) +# def test_X(self, device, dtype): +# ... +# +# When the test is instantiated its class's tolerance will be set to the +# corresponding override, if it exists. +# self.rtol and self.precision can be accessed directly, and they also control +# the behavior of functions like self.assertEqual(). +# +# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and +# atol = 1e-4 and rtol = 0 for torch.double. +tol = namedtuple('tol', ['atol', 'rtol']) + +class toleranceOverride: + def __init__(self, d): + assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!" + for dtype, prec in d.items(): + assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}" + assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!" + + self.d = d + + def __call__(self, fn): + fn.tolerance_overrides = self.d + return fn + +# Decorator that instantiates a variant of the test for each given dtype. +# Notes: +# (1) Tests that accept the dtype argument MUST use this decorator. +# (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU +# or dtypesIfCUDA. +# (3) Can accept an iterable of dtypes or an iterable of tuples +# of dtypes. 
+# Examples: +# @dtypes(torch.float32, torch.float64) +# @dtypes((torch.long, torch.float32), (torch.int, torch.float64)) +class dtypes: + + def __init__(self, *args, device_type="all"): + if len(args) > 0 and isinstance(args[0], (list, tuple)): + for arg in args: + assert isinstance(arg, (list, tuple)), \ + "When one dtype variant is a tuple or list, " \ + "all dtype variants must be. " \ + f"Received non-list non-tuple dtype {str(arg)}" + assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}" + else: + assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}" + + self.args = args + self.device_type = device_type + + def __call__(self, fn): + d = getattr(fn, 'dtypes', {}) + assert self.device_type not in d, f"dtypes redefinition for {self.device_type}" + d[self.device_type] = self.args + fn.dtypes = d + return fn + + +# Overrides specified dtypes on the CPU. +class dtypesIfCPU(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='cpu') + + +# Overrides specified dtypes on CUDA. +class dtypesIfCUDA(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='cuda') + +class dtypesIfMPS(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='mps') + +class dtypesIfPRIVATEUSE1(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name()) + +def onlyCPU(fn): + return onlyOn('cpu')(fn) + + +def onlyCUDA(fn): + return onlyOn('cuda')(fn) + + +def onlyMPS(fn): + return onlyOn('mps')(fn) + +def onlyPRIVATEUSE1(fn): + device_type = torch._C._get_privateuse1_backend_name() + device_mod = getattr(torch, device_type, None) + if device_mod is None: + reason = f"Skip as torch has no module of {device_type}" + return unittest.skip(reason)(fn) + return onlyOn(device_type)(fn) + +def onlyCUDAAndPRIVATEUSE1(fn): + @wraps(fn) + def only_fn(self, *args, **kwargs): + if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()): + reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return only_fn + +def disablecuDNN(fn): + + @wraps(fn) + def disable_cudnn(self, *args, **kwargs): + if self.device_type == 'cuda' and self.has_cudnn(): + with torch.backends.cudnn.flags(enabled=False): + return fn(self, *args, **kwargs) + return fn(self, *args, **kwargs) + + return disable_cudnn + +def disableMkldnn(fn): + + @wraps(fn) + def disable_mkldnn(self, *args, **kwargs): + if torch.backends.mkldnn.is_available(): + with torch.backends.mkldnn.flags(enabled=False): + return fn(self, *args, **kwargs) + return fn(self, *args, **kwargs) + + return disable_mkldnn + + +def expectedFailureCPU(fn): + return expectedFailure('cpu')(fn) + + +def expectedFailureCUDA(fn): + return expectedFailure('cuda')(fn) + +def expectedFailureMeta(fn): + return skipIfTorchDynamo()(expectedFailure('meta')(fn)) + +def expectedFailureXLA(fn): + return expectedFailure('xla')(fn) + +# Skips a test on CPU if LAPACK is not available. +def skipCPUIfNoLapack(fn): + return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn) + + +# Skips a test on CPU if FFT is not available. +def skipCPUIfNoFFT(fn): + return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn) + + +# Skips a test on CPU if MKL is not available. 
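A brief sketch combining the dtype decorators above with the per-dtype precision override defined earlier in this file; the test class and arithmetic are illustrative, and the standard suite helpers are assumed. On CUDA the `dtypesIfCUDA` list wins, and `self.precision` reflects the override for the active dtype:

import torch
from torch.testing._internal.common_device_type import (
    dtypes, dtypesIfCUDA, instantiate_device_type_tests, precisionOverride)
from torch.testing._internal.common_utils import TestCase, run_tests


class TestDtypeVariants(TestCase):
    @precisionOverride({torch.half: 1e-2, torch.float: 1e-4})
    @dtypes(torch.float, torch.double)
    @dtypesIfCUDA(torch.half, torch.float, torch.double)
    def test_addition(self, device, dtype):
        a = torch.ones(4, device=device, dtype=dtype)
        # assertEqual consults self.precision, i.e. the override above.
        self.assertEqual(a + a, torch.full((4,), 2.0, device=device, dtype=dtype))


instantiate_device_type_tests(TestDtypeVariants, globals())

if __name__ == "__main__":
    run_tests()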
+def skipCPUIfNoMkl(fn): + return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn) + + +# Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows). +def skipCPUIfNoMklSparse(fn): + return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn) + + +# Skips a test on CPU if mkldnn is not available. +def skipCPUIfNoMkldnn(fn): + return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn) + + +# Skips a test on CUDA if MAGMA is not available. +def skipCUDAIfNoMagma(fn): + return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn)) + +def has_cusolver(): + return not TEST_WITH_ROCM + +def has_hipsolver(): + rocm_version = _get_torch_rocm_version() + # hipSOLVER is disabled on ROCM < 5.3 + return rocm_version >= (5, 3) + +# Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available +def skipCUDAIfNoCusolver(fn): + return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn) + + +# Skips a test if both cuSOLVER and MAGMA are not available +def skipCUDAIfNoMagmaAndNoCusolver(fn): + if has_cusolver(): + return fn + else: + # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA + return skipCUDAIfNoMagma(fn) + +# Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available +def skipCUDAIfNoMagmaAndNoLinalgsolver(fn): + if has_cusolver() or has_hipsolver(): + return fn + else: + # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA + return skipCUDAIfNoMagma(fn) + +# Skips a test on CUDA when using ROCm. +def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): + def dec_fn(fn): + reason = f"skipCUDAIfRocm: {msg}" + return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn) + if func: + return dec_fn(func) + return dec_fn + +# Skips a test on CUDA when not using ROCm. +def skipCUDAIfNotRocm(fn): + return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn) + +# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested. +def skipCUDAIfRocmVersionLessThan(version=None): + + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if self.device_type == 'cuda': + if not TEST_WITH_ROCM: + reason = "ROCm not available" + raise unittest.SkipTest(reason) + rocm_version_tuple = _get_torch_rocm_version() + if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): + reason = f"ROCm {rocm_version_tuple} is available but {version} required" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA when using ROCm. +def skipCUDAIfNotMiopenSuggestNHWC(fn): + return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn) + +# Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s. +def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + version = _get_torch_cuda_version() + if version == (0, 0): # cpu or rocm + return fn(self, *args, **kwargs) + if version in (versions or []): + reason = f"test skipped for CUDA version {version}" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test for CUDA versions less than specified, given in the form of [major, minor]. 
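A hedged sketch of the version-gating helpers above in use; the test body and the (11, 2) toolkit version are illustrative assumptions, and the standard suite helpers are assumed:

import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, skipCUDAIfRocm, skipCUDAVersionIn)
from torch.testing._internal.common_utils import TestCase, run_tests


class TestVersionGatedExamples(TestCase):
    @skipCUDAIfRocm(msg="exercises a CUDA-only library path")
    @skipCUDAVersionIn([(11, 2)])  # skip on a specific toolkit release
    def test_matmul(self, device):
        a = torch.randn(8, 8, device=device)
        torch.matmul(a, a)


instantiate_device_type_tests(TestVersionGatedExamples, globals())

if __name__ == "__main__":
    run_tests()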
+def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + version = _get_torch_cuda_version() + if version == (0, 0): # cpu or rocm + return fn(self, *args, **kwargs) + if version < versions: + reason = f"test skipped for CUDA versions < {version}" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested. +def skipCUDAIfCudnnVersionLessThan(version=0): + + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if self.device_type == 'cuda': + if self.no_cudnn: + reason = "cuDNN not available" + raise unittest.SkipTest(reason) + if self.cudnn_version is None or self.cudnn_version < version: + reason = f"cuDNN version {self.cudnn_version} is available but {version} required" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA if cuSparse generic API is not available +def skipCUDAIfNoCusparseGeneric(fn): + return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn) + +def skipCUDAIfNoHipsparseGeneric(fn): + return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn) + +def skipCUDAIfNoSparseGeneric(fn): + return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn) + +def skipCUDAIfNoCudnn(fn): + return skipCUDAIfCudnnVersionLessThan(0)(fn) + +def skipCUDAIfMiopen(fn): + return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn) + +def skipCUDAIfNoMiopen(fn): + return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn)) + +def skipLazy(fn): + return skipLazyIf(True, "test doesn't work with lazy tensors")(fn) + +def skipMeta(fn): + return skipMetaIf(True, "test doesn't work with meta tensors")(fn) + +def skipXLA(fn): + return skipXLAIf(True, "Marked as skipped for XLA")(fn) + +def skipMPS(fn): + return skipMPSIf(True, "test doesn't work on MPS backend")(fn) + +def skipPRIVATEUSE1(fn): + return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn) + +# TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now. +# This should probably enumerate all available device type test base classes. 
+def get_all_device_types() -> List[str]: + return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda'] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py new file mode 100644 index 0000000000000000000000000000000000000000..e7bce5c37f3d95fc664b50764e9b9756b89c6cd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py @@ -0,0 +1,111 @@ +# mypy: ignore-errors + +# Owner(s): ["oncall: distributed"] + +from typing import Tuple + +import torch +import torch.nn as nn + + +class UnitModule(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l1 = nn.Linear(100, 100, device=device) + self.seq = nn.Sequential( + nn.ReLU(), + nn.Linear(100, 100, device=device), + nn.ReLU(), + ) + self.l2 = nn.Linear(100, 100, device=device) + + def forward(self, x): + return self.l2(self.seq(self.l1(x))) + + +class CompositeModel(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l1 = nn.Linear(100, 100, device=device) + self.u1 = UnitModule(device) + self.u2 = UnitModule(device) + self.l2 = nn.Linear(100, 100, device=device) + + def forward(self, x): + return self.l2(self.u2(self.u1(self.l1(x)))) + + +class UnitParamModule(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l = nn.Linear(100, 100, device=device) + self.seq = nn.Sequential( + nn.ReLU(), + nn.Linear(100, 100, device=device), + nn.ReLU(), + ) + self.p = nn.Parameter(torch.randn((100, 100), device=device)) + + def forward(self, x): + return torch.mm(self.seq(self.l(x)), self.p) + + +class CompositeParamModel(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l = nn.Linear(100, 100, device=device) + self.u1 = UnitModule(device) + self.u2 = UnitModule(device) + self.p = nn.Parameter(torch.randn((100, 100), device=device)) + self.register_buffer( + "buffer", torch.randn((100, 100), device=device), persistent=True + ) + + def forward(self, x): + a = self.u2(self.u1(self.l(x))) + b = self.p + return torch.mm(a, b) + + +class FakeSequential(nn.Module): + # Define this class to achieve a desired nested wrapping using the module + # wrap policy with `nn.Sequential` + def __init__(self, *modules: Tuple[nn.Module, ...]) -> None: + super().__init__() + self._module_sequence = list(modules) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + for module in self._module_sequence: + x = module(x) + return x + + +class NestedSequentialModel(nn.Module): + def __init__(self, device: torch.device) -> None: + super().__init__() + # This nested structure exercises traversal order to catch differences + # between valid traversals (e.g. BFS and DFS variations). 
+ self.seq1 = nn.Sequential( + nn.Linear(1, 1, device=device), + FakeSequential( + nn.Linear(1, 1, device=device), + nn.ReLU(), + FakeSequential( + nn.Linear(1, 1, device=device), + ), + nn.ReLU(), + ), + nn.Linear(1, 2, device=device), + ) + self.lin = nn.Linear(2, 2, device=device) + self.seq2 = nn.Sequential( + nn.ReLU(), + nn.Linear(2, 3, device=device), + FakeSequential( + nn.Linear(3, 2, bias=False, device=device), + nn.Linear(2, 4, bias=False, device=device), + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.seq2(self.lin(self.seq1(x))) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..4c3f8484ed31097fda2e7058fe73866e2cd4ff96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py @@ -0,0 +1,134 @@ +# mypy: ignore-errors + +from typing import List + +import torch + + +# Functions and classes for describing the dtypes a function supports +# NOTE: these helpers should correspond to PyTorch's C++ dispatch macros + +# Verifies each given dtype is a torch.dtype +def _validate_dtypes(*dtypes): + for dtype in dtypes: + assert isinstance(dtype, torch.dtype) + return dtypes + +# class for tuples corresponding to a PyTorch dispatch macro +class _dispatch_dtypes(tuple): + def __add__(self, other): + assert isinstance(other, tuple) + return _dispatch_dtypes(tuple.__add__(self, other)) + +_empty_types = _dispatch_dtypes(()) +def empty_types(): + return _empty_types + +_floating_types = _dispatch_dtypes((torch.float32, torch.float64)) +def floating_types(): + return _floating_types + +_floating_types_and_half = _floating_types + (torch.half,) +def floating_types_and_half(): + return _floating_types_and_half + +def floating_types_and(*dtypes): + return _floating_types + _validate_dtypes(*dtypes) + +_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble) +def floating_and_complex_types(): + return _floating_and_complex_types + +def floating_and_complex_types_and(*dtypes): + return _floating_and_complex_types + _validate_dtypes(*dtypes) + +_double_types = _dispatch_dtypes((torch.float64, torch.complex128)) +def double_types(): + return _double_types + +# NB: Does not contain uint16/uint32/uint64 for BC reasons +_integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)) +def integral_types(): + return _integral_types + +def integral_types_and(*dtypes): + return _integral_types + _validate_dtypes(*dtypes) + +_all_types = _floating_types + _integral_types +def all_types(): + return _all_types + +def all_types_and(*dtypes): + return _all_types + _validate_dtypes(*dtypes) + +_complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble)) +def complex_types(): + return _complex_types + +def complex_types_and(*dtypes): + return _complex_types + _validate_dtypes(*dtypes) + +_all_types_and_complex = _all_types + _complex_types +def all_types_and_complex(): + return _all_types_and_complex + +def all_types_and_complex_and(*dtypes): + return _all_types_and_complex + _validate_dtypes(*dtypes) + +_all_types_and_half = _all_types + (torch.half,) +def all_types_and_half(): + return _all_types_and_half + +def custom_types(*dtypes): + """Create a list of arbitrary dtypes""" + return _empty_types + _validate_dtypes(*dtypes) + +# The functions below are used for convenience in our test suite and thus have no 
corresponding C++ dispatch macro + +# See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. +def get_all_dtypes(include_half=True, + include_bfloat16=True, + include_bool=True, + include_complex=True, + include_complex32=False, + include_qint=False, + ) -> List[torch.dtype]: + dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16) + if include_bool: + dtypes.append(torch.bool) + if include_complex: + dtypes += get_all_complex_dtypes(include_complex32) + if include_qint: + dtypes += get_all_qint_dtypes() + return dtypes + +def get_all_math_dtypes(device) -> List[torch.dtype]: + return get_all_int_dtypes() + get_all_fp_dtypes(include_half=device.startswith('cuda'), + include_bfloat16=False) + get_all_complex_dtypes() + +def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]: + return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128] + + +def get_all_int_dtypes() -> List[torch.dtype]: + return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64] + + +def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]: + dtypes = [torch.float32, torch.float64] + if include_half: + dtypes.append(torch.float16) + if include_bfloat16: + dtypes.append(torch.bfloat16) + return dtypes + + +def get_all_qint_dtypes() -> List[torch.dtype]: + return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4] + + +float_to_corresponding_complex_type_map = { + torch.float16: torch.complex32, + torch.float32: torch.complex64, + torch.float64: torch.complex128, +} diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e428d59454a1d246eafd1fbeaa118ceac59a2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py @@ -0,0 +1,4400 @@ +# mypy: ignore-errors + +import torch +import unittest +from copy import deepcopy +from enum import Enum +from functools import wraps, partial +from itertools import chain, product +import itertools +import math +import torch.nn.functional as F +from torch.nn.utils.rnn import pack_padded_sequence +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import TEST_CUDNN +from torch.testing._internal.common_dtype import ( + floating_types, floating_and_complex_types_and, get_all_fp_dtypes) +from torch.testing._internal.common_device_type import ( + _TestParametrizer, _update_param_kwargs, toleranceOverride, tol, + skipCUDAIfCudnnVersionLessThan, skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS, skipCUDAVersionIn) +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_nn import ( + cosineembeddingloss_reference, cross_entropy_loss_reference, ctcloss_reference, + hingeembeddingloss_reference, huberloss_reference, kldivloss_reference, + marginrankingloss_reference, multimarginloss_reference, multilabelmarginloss_reference, + nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference, get_reduction) +from torch.testing._internal.common_utils import ( + freeze_rng_state, set_single_threaded_if_parallel_tbb, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS, + skipIfTorchDynamo) +from types import ModuleType +from typing import List, Tuple, Type, Set, Dict + +# List 
of all namespaces containing modules to test. +MODULE_NAMESPACES: List[ModuleType] = [ + torch.nn.modules, + torch.ao.nn.qat.modules, + torch.ao.nn.quantizable.modules, + torch.ao.nn.quantized.modules, + torch.ao.nn.quantized.modules, +] + +# Modules that shouldn't be tested for one reason or another. +MODULES_TO_SKIP: Set[Type] = { + torch.nn.Module, # abstract base class + torch.nn.Container, # deprecated + torch.nn.NLLLoss2d, # deprecated + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d +} + +# List of all module classes to test. +MODULE_CLASSES: List[Type] = list(chain(*[ + [getattr(namespace, module_name) for module_name in namespace.__all__] # type: ignore[attr-defined] + for namespace in MODULE_NAMESPACES])) +MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP] + +# Dict of module class -> common name. Useful for making test names more intuitive. +# Example: torch.nn.modules.linear.Linear -> "nn.Linear" +MODULE_CLASS_NAMES: Dict[Type, str] = {} +for namespace in MODULE_NAMESPACES: + for module_name in namespace.__all__: # type: ignore[attr-defined] + module_cls = getattr(namespace, module_name) + namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '') + + # Deal with any aliases by preferring earlier names. + if module_cls not in MODULE_CLASS_NAMES: + MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}' + + +# Specifies the modes (i.e. train, eval) to test over. +TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval')) + + +class modules(_TestParametrizer): + """ PROTOTYPE: Decorator for specifying a list of modules over which to run a test. """ + + def __init__(self, module_info_iterable, allowed_dtypes=None, + train_eval_mode=TrainEvalMode.train_and_eval, skip_if_dynamo=True): + self.module_info_list = list(module_info_iterable) + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + self.train_eval_mode = train_eval_mode + self.skip_if_dynamo = skip_if_dynamo + + def _get_training_flags(self, module_info): + training_flags = [] + if (self.train_eval_mode == TrainEvalMode.train_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(True) + + if (self.train_eval_mode == TrainEvalMode.eval_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(False) + + # If train and eval modes don't differ for the module, don't bother using more than one. + if not module_info.train_and_eval_differ: + training_flags = training_flags[:1] + + return training_flags + + def _parametrize_test(self, test, generic_cls, device_cls): + if device_cls is None: + raise RuntimeError('The @modules decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + for module_info in self.module_info_list: + dtypes = set(module_info.supported_dtypes(device_cls.device_type)) + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + training_flags = self._get_training_flags(module_info) + for (training, dtype) in product(training_flags, dtypes): + # Construct the test name; device / dtype parts are handled outside. 
+ # See [Note: device and dtype suffix placement] + test_name = module_info.formatted_name + if len(training_flags) > 1: + test_name += f"_{'train_mode' if training else 'eval_mode'}" + + # Construct parameter kwargs to pass to the test. + param_kwargs = {'module_info': module_info} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + _update_param_kwargs(param_kwargs, 'training', training) + + try: + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR: + test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper) + + decorator_fn = partial(module_info.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for module {module_info.name}!") + raise ex + + +def get_module_common_name(module_cls): + if module_cls in MODULE_CLASS_NAMES: + # Example: "nn.Linear" + return MODULE_CLASS_NAMES[module_cls] + else: + return module_cls.__name__ + + +class FunctionInput: + """ Contains args and kwargs to pass as input to a function. """ + __slots__ = ['args', 'kwargs'] + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +class ModuleInput: + """ Contains args / kwargs for module instantiation + forward pass. """ + __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn'] + + def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None): + self.constructor_input = constructor_input # Inputs to pass during construction + self.forward_input = forward_input # Inputs to pass to forward() + self.desc = desc # Description for this set of inputs + self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs) + + if reference_fn is not None: + + @wraps(reference_fn) + def copy_reference_fn(m, *args, **kwargs): + # Copy inputs to avoid undesired side effects from calling the reference. + args, kwargs = deepcopy(args), deepcopy(kwargs) + + # Note that module parameters are passed in for convenience. + return reference_fn(m, list(m.parameters()), *args, **kwargs) + + self.reference_fn = copy_reference_fn + +class ModuleErrorEnum(Enum): + """ Enumerates when error is raised when testing modules. """ + CONSTRUCTION_ERROR = 0 + FORWARD_ERROR = 1 + +class ErrorModuleInput: + """ + A ModuleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["module_error_input", "error_on", "error_type", "error_regex"] + + def __init__(self, + module_error_input, + *, + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=RuntimeError, + error_regex): + self.module_error_input = module_error_input + self.error_on = error_on + self.error_type = error_type + self.error_regex = error_regex + + +class ModuleInfo: + """ Module information to be used in testing. 
""" + + def __init__(self, + module_cls, # Class object for the module under test + *, + module_inputs_func, # Function to generate module inputs + skips=(), # Indicates which tests to skip + decorators=None, # Additional decorators to apply to generated tests + dtypes=floating_types(), # dtypes this function is expected to work with + dtypesIfMPS=(torch.float16, torch.float32,), # dtypes this function is expected to work with on MPS + supports_gradgrad=True, # whether the op supports second order gradients + gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck + module_memformat_affects_out=False, # whether converting module to channels last will generate + # channels last output + train_and_eval_differ=False, # whether the module has differing behavior between train and eval + module_error_inputs_func=None, # Function to generate module inputs that error + ): + self.module_cls = module_cls + self.module_inputs_func = module_inputs_func + self.decorators = (*(decorators if decorators else []), *(skips if skips else [])) + self.dtypes = dtypes + self.dtypesIfMPS = dtypesIfMPS + self.supports_gradgrad = supports_gradgrad + self.gradcheck_nondet_tol = gradcheck_nondet_tol + self.module_memformat_affects_out = module_memformat_affects_out + self.train_and_eval_differ = train_and_eval_differ + self.module_error_inputs_func = module_error_inputs_func + self.is_lazy = issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + result = [set_single_threaded_if_parallel_tbb] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active(test_class, test_name, device, dtype, param_kwargs): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + if device_type == 'mps': + return self.dtypesIfMPS + else: + return self.dtypes + + @property + def name(self): + return get_module_common_name(self.module_cls) + + @property + def formatted_name(self): + return self.name.replace('.', '_') + +# Start of module inputs functions. 
+ +def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(10, 8), + forward_input=FunctionInput(input=make_input((4, 10))), + reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)), + ModuleInput(constructor_input=FunctionInput(10, 8, bias=False), + forward_input=FunctionInput(make_input((4, 10))), + desc='no_bias', + reference_fn=lambda m, p, i: torch.mm(i, p[0].t())), + ModuleInput(constructor_input=FunctionInput(3, 5), + forward_input=FunctionInput(make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1]) + ] + + return module_inputs + + +def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def bilinear_reference_fn(m, p, x1, x2, bias=True): + result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2) + if bias: + if x1.shape[0] == 1: + result = result.view(-1) + p[1] + else: + result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0]) + return result + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + reference_fn=bilinear_reference_fn), + ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + desc='no_bias', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)), + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input(2), make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))), + ] + + return module_inputs + + +def module_inputs_torch_nn_KLDivLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_batchmean', {'reduction': 'batchmean'}), + ('reduction_none', {'reduction': 'none'}), + ('log_target', {'log_target': True}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return kldivloss_reference(i, t, **constructor_kwargs) + + input = make_input((10, 10)).log() + target = make_input((10, 10)) if kwargs.get('log_target', False) else make_input((10, 10)).log() + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(input, target), + desc=desc, + reference_fn=reference_fn) + ) + + scalar_input = make_input(()).log() + scalar_target = make_input(()) if kwargs.get('log_target', False) else make_input(()).log() + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(scalar_input, scalar_input), + desc='scalar_' + desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad): + return make_tensor(shape, 
device=device, dtype=dtype, + requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_none', {'reduction': 'none'}), + ('ignore_index', {'ignore_index': 2}), + ('weights', {'weight': make_weight(4).abs()}), + ('weights_ignore_index', {'weight': make_weight(4).abs(), 'ignore_index': 2}), + ('weights_ignore_index_neg', {'weight': make_weight(4).abs(), 'ignore_index': -1}) + ] + + # TODO: Uncomment when negative weights is supported. + # negative_weight = make_weight(10) + # negative_weight[0] = -1 + # cases.append(('weights_negative', {'weight': negative_weight})) + module_inputs = [] + for desc, constructor_kwargs in cases: + + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return nllloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 4)), + torch.empty(15, device=device).uniform_().mul(4).floor().long()), + desc=desc, + reference_fn=reference_fn) + ) + + def nd_reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return nlllossNd_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5, 5)), + torch.empty(2, 5, 5, device=device).uniform_().mul(4).floor().long()), + desc=f"nd_{desc}", + reference_fn=nd_reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5, 5, 2, 2)), + torch.empty(2, 5, 5, 2, 2, device=device).uniform_().mul(4).floor().long()), + desc=f"higher_dim_{desc}", + reference_fn=nd_reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput( + make_input((2, 4, 5)), + torch.empty(2, 5, device=device).uniform_().mul(4).floor().long()), + desc=f"3d_{desc}", + reference_fn=nd_reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(3), + make_target(3), + make_input(1).abs()), + desc=desc, + reference_fn=no_batch_dim_reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_PoissonNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('full', {'full': 
True}), + ('no_log_input', {'log_input': False}), + ('full_no_log_input', {'full': True, 'log_input': False}), + ] + + def poissonnllloss_reference_fn(i, t, log_input=True, full=False, reduction='mean', eps=1e-8): + if log_input: + result = i.exp() - t.mul(i) + else: + result = i - t.mul((i + eps).log()) + + if full: + result += (t.mul(t.log()) - t + 0.5 * (2. * math.pi * t).log()).masked_fill(t <= 1, 0) + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return poissonnllloss_reference_fn(i, t, **constructor_kwargs) + + log_input = constructor_kwargs.get('log_input', True) + input = make_input((2, 3, 4, 5)) if log_input else make_input((2, 3, 4, 5)).abs().add(0.001) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(input, + make_target((2, 3, 4, 5)).floor_().abs_()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MSELoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + def mse_loss_reference_fn(m, p, i, t, reduction='mean'): + if reduction == 'none': + return (i - t).pow(2) + elif reduction == 'mean': + return (i - t).pow(2).sum() / i.numel() + else: + return (i - t).pow(2).sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 4, 5)), + make_target((2, 3, 4, 5))), + desc=desc, + reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_target(())), + desc=f'{desc}_scalar', + reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def no_batch_dim_reference_fn(m, p, *args, **kwargs): + """Reference function for modules supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + + Currently it only supports modules which return a single Tensor as output. + You can bind the following kwargs. + Kwargs: + batch_first[bool] : If True, all the Tensors in `args` while be unsqueezed at dim `0` . + and output will be squeezed at dim `0` else dim `1` for both. + kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and dimension to unsqueeze. + Useful if there are few arguments whose batch dimension are different + from the ones selected by `batch_first`. + is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly. 
+ """ + def get_and_pop(key, default): + v = kwargs.get(key, default) + if key in kwargs: + kwargs.pop(key) + return v + + batch_dim = 0 if get_and_pop('batch_first', True) else 1 + kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None) + is_criterion = get_and_pop('is_criterion', False) + + if kwargs_to_batchify is not None: + assert isinstance(kwargs_to_batchify, dict) + for k, v in kwargs.items(): + if k in kwargs_to_batchify and v is not None: + bdim = kwargs_to_batchify[k] + kwargs[k] = v.unsqueeze(bdim) + + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim) + + if is_criterion: + reduction = get_reduction(m) + if reduction == 'none': + return output.squeeze(0) + return output + + +def no_batch_dim_reference_mha(m, p, *args, **kwargs): + """Reference function for MultiheadAttention supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + batch_dim = 0 if kwargs.get('batch_first', True) else 1 + if 'batch_first' in kwargs: + kwargs.pop('batch_first') + if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None: + kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0) + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(0)) + + +def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs): + """Reference function for RNN and GRU supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = h.unsqueeze(1) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(1)) + + +def no_batch_dim_reference_lstm(m, p, *args, **kwargs): + """Reference function for LSTM supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = (h[0].unsqueeze(1), h[1].unsqueeze(1)) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1))) + + +def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs): + """Reference function for LSTMCell supporting no batch dimensions. + + The module is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. 
+ """ + inp, (h, c) = args + single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0))) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(0), output[1].squeeze(0)) + + +def generate_regression_criterion_inputs(make_input): + return [ + ModuleInput( + constructor_input=FunctionInput(reduction=reduction), + forward_input=FunctionInput(make_input((4, )), make_input(4,)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True), + desc=f'no_batch_dim_{reduction}' + ) for reduction in ['none', 'mean', 'sum']] + + +def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(kernel_size=2), + forward_input=FunctionInput(make_input((3, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((2, 3, 6)))), + ModuleInput(constructor_input=FunctionInput((2,), (2,)), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, 1), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride_pad')] + + +def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((3, 6, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride_pad')] + + + +def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((3, 4, 4, 4))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 
5))), + desc='stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride_pad_gpu_input_nooverlap'), + ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='divisor_stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride_pad_gpu_input_nooverlap')] + + + +def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='one_output')] + + +def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single_1x1output'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + +def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 2, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((None, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput((3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))), + desc='last_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + + +def module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))), + desc='single_nonatomic'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))), + desc='tuple_nonatomic')] + + +def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(10,), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine'), + ModuleInput(constructor_input=FunctionInput(5,), + 
forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, None), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine_simple_average'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input_not_affine'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 9))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='2d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='momentum'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='3d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='momentum'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs): + N = kwargs['N'] + lazy = kwargs.get('lazy', False) + transposed = kwargs.get('transposed', False) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}] + kernel_size, C_in, C_out = 3, 4, 5 + input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N)) + input_batch_shape = (2,) + input_no_batch_shape + return [ + ModuleInput(constructor_input=(FunctionInput(C_out, 
kernel_size, **conv_kwargs) if lazy else + FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)), + forward_input=FunctionInput(make_input( + input_batch_shape if with_batch else input_no_batch_shape)), + desc=('' if with_batch else 'no_batch_dim'), + reference_fn=(None if with_batch else no_batch_dim_reference_fn)) + for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list) + ] + + +def module_inputs_torch_nn_CosineEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.7}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return cosineembeddingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10)), make_input((15, 10)), + make_target((15,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input')] + + +def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. 
* ((.5 * i).exp() - 1)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((5, 6, 7))), + desc='dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((4,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + 
reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='with_negval'), + ModuleInput(constructor_input=FunctionInput(0.0), + forward_input=FunctionInput(make_input((10, 10))), + desc='with_zero_negval'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input(())), + desc='with_negval_scalar')] + + +def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d_multiparam')] + + +def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar')] + + +def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x))] + + +def 
module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20))), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(), + desc='multiparam'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(), + desc='multiparam_scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 5, 10))), + desc='multidim'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((3, 4, 10))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.log(1 + torch.exp(i))), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((10, 20))), + 
reference_fn=lambda m, p, i: 1. / 2. * torch.log(1 + torch.exp(2 * i)), + desc='beta'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input(())), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='lambda'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input(())), + desc='lambda_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + + +def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='threshold_value'), + ModuleInput(constructor_input=FunctionInput(2., 10.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='large_value'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(())), + desc='threshold_value_scalar'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4)), + make_input((2, 3, 4))), + reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum() + for a, b in zip(i, t))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(()), make_input(())), + reference_fn=lambda m, p, i, t: 1. 
/ i.numel() * (i - t).abs().sum(), + desc='scalar')] + generate_regression_criterion_inputs(make_input) + + +def module_inputs_torch_nn_SmoothL1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return smoothl1loss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_input(())), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + + +def module_inputs_torch_nn_BCELoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ] + + def bce_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = -(t * i.log() + (1 - t) * (1 - i).log()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_loss_reference_fn, **constructor_kwargs)) + ) + + scalar_weight = make_weight(()) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(weight=scalar_weight), + forward_input=FunctionInput(make_input((), low=1e-2, high=1 - 1e-2), + make_target(()).gt(0).to(dtype)), + desc='scalar_weight', + reference_fn=partial(bce_loss_reference_fn, weight=scalar_weight)) + ) + + return module_inputs + + +def module_inputs_torch_nn_BCEWithLogitsLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ('scalar_weights', {'weight': make_weight(())}) + ] + + def bce_withlogitsloss_reference_fn(m, p, i, t, reduction='mean', weight=None): + # TODO: add 
pos_weight to the definition here and corresponding SampleInputs + max_val = (-i).clamp(min=0) + result = (1 - t).mul_(i).add_(max_val).add_((-max_val).exp_().add_((-i - max_val).exp_()).log_()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_withlogitsloss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + reductions: List[str] = ['mean', 'sum', 'none'] + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('weights', {'weight': make_weight((3,))}), + ('ignore_index', {'ignore_index': 1}), + ('label_smoothing', {'label_smoothing': 0.15}), + ('ignore_index_label_smoothing', {'ignore_index': 1, 'label_smoothing': 0.15}) + ] + + module_inputs = [] + for reduction, (desc, constructor_kwargs) in product(reductions, cases): + def reference_fn(m, p, i, t, reduction=reduction, constructor_kwargs=constructor_kwargs): + return cross_entropy_loss_reference(i, t, reduction=reduction, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5)), + make_target((2, 5, 5), low=0, high=3)), + desc=f"4d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5)), + make_target((2, 5), low=0, high=3)), + desc=f"3d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3)), + make_target((2), low=0, high=3)), + desc=f"2d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_target((2, 5, 5, 2, 2), low=0, high=3)), + desc=f"higher_dim_{desc}_{reduction}", + reference_fn=reference_fn) + ) + + if constructor_kwargs.get('ignore_index', None) is None: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4, 2)), + make_input((5, 3, 4, 2)).softmax(dim=1)), + desc=f"4d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4)), + make_input((5, 3, 4)).softmax(dim=1)), + desc=f"3d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + 
ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3)), + make_input((5, 3)).softmax(dim=1)), + desc=f"2d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_input((2, 3, 5, 5, 2, 2)).softmax(dim=1)), + desc=f"higher_dim_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((3,)), + make_target((), low=0, high=3)), + desc=f"no_batch_dim_{desc}_{reduction}", + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + + return module_inputs + + + +def module_inputs_torch_nn_CTCLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('blank', {'blank': 14}) + ] + target_dtypes = [torch.int, torch.long] + + module_inputs = [] + for target_dtype, (desc, constructor_kwargs) in product(target_dtypes, cases): + def reference_fn(m, p, i, t, il, tl, constructor_kwargs=constructor_kwargs): + return ctcloss_reference(i, t, il, tl, **constructor_kwargs) + + blank = constructor_kwargs.get('blank', 0) + low = 0 if blank == 14 else 1 + high = 14 if blank == 14 else 15 + + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_lengths_tensors', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_1d_target_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_1d_target_lengths_tensors', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3, 
6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 5))), + desc='1d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 12, 1e-3), + forward_input=FunctionInput(make_input((4, 12))), + desc='1d_affine_GN'), + ModuleInput( + constructor_input=FunctionInput(1, 6, 1e-3), + forward_input=FunctionInput(make_input((150, 6))), + desc='1d_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput(5, 5, 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 10, 1e-3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='1d_no_affine_LN'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 2, 3))), + desc='2d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 28, 28))), + desc='2d_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 51, 1e-5, False), + forward_input=FunctionInput(make_input((2, 51, 28, 28))), + desc='2d_no_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_LN'), + ] + + +def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input((4, 3, 2, 4))), + ), + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input(())), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input') + ] + + +def module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_HingeEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return hingeembeddingloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10,)).gt(0).to(dtype).mul_(2).sub_(1)), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_target(()).gt(0).to(dtype).mul_(2).sub_(1)), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_HuberLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return huberloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lazy = kwargs.get('lazy', False) + N = kwargs['N'] + num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True + input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)} + input_no_batch_shape = input_no_batch_shape_dict[N] + input_batch_shape = (4,) + input_no_batch_shape + + return [ + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_batch_shape))), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_batch_shape)), + desc='tracking_stats'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='tracking_stats_no_batch_dim'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim') + ] + +def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine_no_bias'), + ] + + +def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 5, 7))), + desc='1d'), + ModuleInput( + constructor_input=FunctionInput(2,), + forward_input=FunctionInput(make_input((1, 5, 7, 7))), + desc='2d_uneven_pad'), + ModuleInput( + constructor_input=FunctionInput(1, 1., 0.5, 2.), + forward_input=FunctionInput(make_input((1, 5, 7, 7, 7))), + desc='3d_custom_params'), + ] + + +def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7))), + desc='norm'), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((1, 3, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((3, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ] + + + +def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_LPPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 
7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput(4, 4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(4, return_indices=True), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((3, 7, 7))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='4d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='return_indices'), + ] + +def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5)))), + ModuleInput( + constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_padding'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1), return_indices=True), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 7, 6))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 5, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + 
constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 7, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))), + desc='asymsize'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 5, 5, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format' + ) + ] + + +def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.sigmoid().log(), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: i.sigmoid().log(), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ] + + +def module_inputs_torch_nn_MarginRankingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, 
device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return marginrankingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50,)), make_input((50,)), + make_target((50,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multilabelmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10), low=0, high=10)), + desc=f'1d_{desc}', + reference_fn=reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('p', {'p': 2}), + ('margin', {'margin': 0.5}), + ('weights', {'weight': make_weight(10)}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multimarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelSoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: 
List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weight', {'weight': make_weight(10)}), + ] + + def multilabelsoftmargin_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log() + if weight is not None: + result *= weight + result = (-result).sum(i.dim() - 1) / i.size(-1) + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.mean() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=2)), + desc=desc, + reference_fn=partial(multilabelsoftmargin_loss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_SoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return softmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 5)), + make_target((5, 5)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs): + # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same. + samples = [] + for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer( + None, device, dtype, requires_grad, training): + # Construct a TransformerEncoderLayer object to pass to TransformerEncoder. + l_args, l_kwargs = (layer_module_input.constructor_input.args, + layer_module_input.constructor_input.kwargs) + l_kwargs['device'] = device + l_kwargs['dtype'] = dtype + encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs) + num_layers = 2 + # Note: TransformerEncoderLayer takes a "src_mask" while + # TransformerEncoder takes a "mask"; rename kwarg appropriately. 
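+ # For example, a layer-level sample whose forward kwargs are {'src_mask': m, 'src_key_padding_mask': kpm}
+ # becomes {'mask': m, 'src_key_padding_mask': kpm} after the rename below, which matches
+ # TransformerEncoder.forward(src, mask=None, src_key_padding_mask=None, ...).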
+ forward_input = layer_module_input.forward_input + if 'src_mask' in forward_input.kwargs: + forward_input.kwargs['mask'] = forward_input.kwargs['src_mask'] + del forward_input.kwargs['src_mask'] + samples.append(ModuleInput( + constructor_input=FunctionInput(encoder_layer, num_layers), + forward_input=forward_input, + desc=layer_module_input.desc + )) + return samples + +def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='no_bias' + ),] + + # Samples below are for validating the no-batch-dim support. + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for src_mask, src_key_padding_mask, norm_first, batch_first, bias in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, kwargs_to_batchify={'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + # Samples below where we pass reference_fn are for validating the fast path, + # since the fast path requires no_grad mode, we run the fast path in .eval() + # and no_grad() in the reference_fn and verify that against the results in train mode. 
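+ # Roughly, each fastpath sample is expected to satisfy a check along these lines (a sketch of
+ # what the harness does through reference_fn; dropout=0.0 keeps both paths deterministic):
+ #     layer.train(); train_out = layer(src)        # regular (autograd) path
+ #     layer.eval()
+ #     with torch.no_grad():
+ #         eval_out = layer(src)                    # fused fast path
+ #     torch.testing.assert_close(train_out, eval_out)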
+ def fast_path_reference_fn(module, parameters, *args, **kwargs): + assert module.training + module.train(False) + with torch.no_grad(): + output = module(*args, **kwargs) + module.train(True) + return output + + if training: + for norm_first, bias in itertools.product((True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput( + 4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias + ), + forward_input=FunctionInput( + make_input((2, 3, 4)), + ), + # fastpath doesn't run when bias=False + reference_fn=fast_path_reference_fn if bias else None, + desc=f'fastpath_{bias}_norm_first_{norm_first}' + ) + ) + + return samples + + +def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='no_bias' + ), ] + + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for tgt_mask, tgt_key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + memory_mask = tgt_mask + memory_key_padding_mask = tgt_key_padding_mask + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src, tgt = src.transpose(0, 1), tgt.transpose(0, 1) + if tgt_key_padding_mask is not None: + memory_key_padding_mask, tgt_key_padding_mask = (tgt_key_padding_mask.expand(2, 3),) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + desc=f'norm_first_{norm_first}_batch_first_{batch_first}_bias_{bias}' + )) + + return samples + + +def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + # Samples below are for validating the 
no-batch-dim support. + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for mask, key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + src_mask , tgt_mask = (mask,) * 2 + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src = src.transpose(0, 1) + tgt = tgt.transpose(0, 1) + if key_padding_mask is not None: + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask.expand(2, 3),) * 2 + + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + )) + return samples + + +def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs): + make_empty = partial(torch.empty, device=device, dtype=torch.long, requires_grad=False) + return [ + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(2, 3).random_(4)) + ), + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)), + desc='discontiguous' + ), + ] + + +def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
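+ # The no-batch-dim comparison (performed by no_batch_dim_reference_mha) amounts to roughly the
+ # following sketch for the batch_first=True case:
+ #     out_unbatched, _ = mha(q, k, v)                    # q, k, v: (L, E) == (3, 3)
+ #     out_batched, _ = mha(q[None], k[None], v[None])    # same inputs with a batch dim of 1
+ #     torch.testing.assert_close(out_unbatched, out_batched.squeeze(0))
+ # The batch_first=False samples exercise the same idea with the batch dim at position 1.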
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + bool_vals = (True, False) + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3))) + products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks) + for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products: + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=no_batch_dim_reference_mha, + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=partial(no_batch_dim_reference_mha, batch_first=False), + ) + ) + + return samples + + +def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ] + + is_rnn = kwargs.get('is_rnn', False) + if is_rnn: + # RNN also supports `nonlinearity` argument. + # `tanh` is the default, so we check with `relu` + samples.append( + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = ( + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ) + + return samples + +def make_packed_sequence(inp, batch_sizes): + required_grad = inp.requires_grad + inp.requires_grad_(False) # user won't have access to inp so won't be able to get its grads + seq = pack_padded_sequence(inp, batch_sizes) + seq.data.requires_grad_(required_grad) + return seq + + +def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
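+ # When with_packed_sequence is True, additional samples below wrap a padded (T, B, *) tensor with
+ # make_packed_sequence defined above; e.g. make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))
+ # packs two sequences of lengths 5 and 3 into a torch.nn.utils.rnn.PackedSequence whose .data carries
+ # the requires_grad flag.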
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + is_rnn = kwargs['is_rnn'] + nonlinearity = ('relu', 'tanh') + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + + samples = [] + if is_rnn: + prod_gen = product(nonlinearity, bias, batch_first, bidirectional) + else: + prod_gen = product(bias, batch_first, bidirectional) + + for args in prod_gen: + if is_rnn: + nl, b, b_f, bidir = args + else: + b, b_f, bidir = args + + cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + if is_rnn: + cons_args['nonlinearity'] = nl + cons_args_hidden['nonlinearity'] = nl + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((3, 2))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + if with_packed_sequence: + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
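+ # Shape note for the hidden-state samples constructed below: for unbatched input with num_layers=2,
+ # h_0 has shape (num_layers * num_directions, h_out) and c_0 has shape (num_layers * num_directions, hidden_size),
+ # which is why hx uses a leading dim of 4 if bidir else 2 and h_out falls back to hidden_size when proj_size == 0.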
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + proj_sizes = (0, 2) + + samples = [] + prod_gen = product(bias, batch_first, bidirectional, proj_sizes) + + for args in prod_gen: + b, b_f, bidir, proj_size = args + hidden_size = 3 + cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((2, 2))), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + h_out = proj_size if proj_size > 0 else hidden_size + hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size))) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), hx), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + + return samples + + + +def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + 
forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))), + ), + ] + +def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 2), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2), 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4), 5), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + 
reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7), + forward_input=FunctionInput(make_input((1, 2, 1, 2, 1))), + ), + ] + +def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding1d_circular_ref(inp, pad): + r""" input: + [[[0., 1., 2.], + [3., 4., 5.]]] + pad: (1, 2) + output: + [[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.]]] + """ + return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 1)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding2d_circular_ref(inp, pad): + r"""input: + [[[[0., 1., 2], + [3., 4., 5.]]]] + pad: (1, 2, 2, 1) + output: + [[[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.]]]] + """ + inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2) + return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 2, 1)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((2, 3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 3, 1)), + forward_input=FunctionInput(make_input((1, 1, 3, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + def padding3d_circular_ref(inp, pad): + r"""input: + [[[[[ 0., 1., 2.], + [ 3., 4., 5.]], + [[ 6., 7., 8.], + [ 9., 10., 11.]]]]] + pad: (1, 2, 2, 1, 1, 2) + output: [[[[[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 
8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]]]]] + """ + inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2) + inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3) + return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 2, 2, 1, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 2, 1, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ] + + +# All these operators share similar issues on cuDNN and MIOpen +rnn_gru_lstm_module_info_decorators = ( + # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward. + # We could not generate a fallback + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_grad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented. + # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_gradgrad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # CUDNN GRU doesn't accept non-contiguous hx + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float). + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestExpandedWeightModule", "test_module", + device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestDecomp", "test_rnn_decomp_module", + device_type='cuda' + ) +) + +# Start of module error inputs functions. 
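+# A hand-written illustration of what one of the error-input entries below encodes. The helper name
+# _example_rnncell_error_check is hypothetical and not part of the harness, which drives these checks
+# through ErrorModuleInput / ModuleErrorEnum instead.
+def _example_rnncell_error_check():
+    import re
+    import torch
+    # Mirrors the first module_error_inputs_torch_nn_RNN_GRU_Cell entry: an RNNCell built with
+    # input_size=10 must reject a forward input whose trailing dim is 11.
+    cell = torch.nn.RNNCell(10, 20)
+    try:
+        cell(torch.randn(3, 11), torch.randn(3, 20))
+    except RuntimeError as e:
+        assert re.search("input has inconsistent input_size: got 11 expected 10", str(e))
+    else:
+        raise AssertionError("expected RuntimeError for mismatched input_size")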
+ +def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), make_input(3, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(5, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 1, 1, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hidden to be 1D or 2D, got 4D instead" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'relu'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'tanh'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ] + return samples + +def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), (make_input(3, 20), make_input(3, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 21), make_input(3, 21))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(5, 20), make_input(5, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 1, 1, 20), make_input(3, 1, 1, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hx\\[0\\] to be 1D or 2D, got 4D instead" + ), + ] + return 
samples + + +def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs): + samples = [ + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 0, 1)), + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="hidden_size must be greater than zero" + ), + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 10, 0)), + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="num_layers must be greater than zero" + ), + ] + return samples + +def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 2D or 3D input \(got 4D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 3D or 4D input \(got 2D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 4D or 5D input \(got 2D input\)", + + ), + ] + + +# Database of ModuleInfo entries in alphabetical order. 
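+# Each entry below pairs an nn.Module class with the module_inputs_func that generates its ModuleInput
+# samples, plus optional dtypes/skips/decorators metadata. A consumer iterates roughly like this
+# (sketch only; module_cls is assumed to be the ModuleInfo attribute holding the wrapped class, and the
+# real driver lives in the @modules decorator and the TestModule suite, not here):
+#     for module_info in module_db:
+#         samples = module_info.module_inputs_func(module_info, device='cpu', dtype=torch.float32,
+#                                                  requires_grad=False, training=True)
+#         for sample in samples:
+#             m = module_info.module_cls(*sample.constructor_input.args, **sample.constructor_input.kwargs)
+#             m(*sample.forward_input.args, **sample.forward_input.kwargs)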
+module_db: List[ModuleInfo] = [ + ModuleInfo(torch.nn.AdaptiveAvgPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS), + # Fails on backward check if output size is 1x1 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + ),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AvgPool1d, + module_inputs_func=module_inputs_torch_nn_AvgPool1d, + ), + ModuleInfo(torch.nn.AvgPool2d, + module_inputs_func=module_inputs_torch_nn_AvgPool2d, + skips=( + # The difference between channels last backward and + # channels first backward of AvgPool2d on CUDA is too large + # See https://github.com/pytorch/pytorch/issues/107201 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='cuda', + ), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),), + ), + ModuleInfo(torch.nn.AvgPool3d, + module_inputs_func=module_inputs_torch_nn_AvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # No channels_last support for AvgPool1d as it does not take 4D inputs + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.BatchNorm1d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm1d, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + )) + ), + ModuleInfo(torch.nn.BatchNorm2d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm2d, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + ),) + ), + ModuleInfo(torch.nn.BatchNorm3d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm3d, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + ),) + ), + ModuleInfo(torch.nn.CELU, + module_inputs_func=module_inputs_torch_nn_CELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Conv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv2d, + 
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Conv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]),), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Fails on backward check because ViewAsRealBackward applies contiguous for grad + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + # This was wrongly being skipped before and needs investigation.
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64, torch.complex128]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True), + dtypes=floating_and_complex_types_and(torch.chalf), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # ConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + # These fail only on ROCm + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.CosineEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_CosineEmbeddingLoss, + skips=( + # No channels_last support for loss functions.
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ELU, + module_inputs_func=module_inputs_torch_nn_ELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.FractionalMaxPool2d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.FractionalMaxPool3d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.L1Loss, + module_inputs_func=module_inputs_torch_nn_L1Loss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.SmoothL1Loss, + module_inputs_func=module_inputs_torch_nn_SmoothL1Loss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LazyConv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. 
+ DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. 
+ DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Linear, + module_inputs_func=module_inputs_torch_nn_Linear, + skips=( + # No channels_last support for Linear currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Bilinear, + module_inputs_func=module_inputs_torch_nn_Bilinear, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-4, rtol=1e-4), + torch.float64: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_forward', device_type='cpu'), + ], + skips=( + # No channels_last support for Bilinear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LPPool1d, + module_inputs_func=module_inputs_torch_nn_LPPool1d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.LPPool2d, + module_inputs_func=module_inputs_torch_nn_LPPool2d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LPPool3d, + module_inputs_func=module_inputs_torch_nn_LPPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps),) + ), + ModuleInfo(torch.nn.MaxPool1d, + module_inputs_func=module_inputs_torch_nn_MaxPool1d, + ), + ModuleInfo(torch.nn.MaxPool2d, + module_inputs_func=module_inputs_torch_nn_MaxPool2d, + ), + ModuleInfo(torch.nn.MaxPool3d, + module_inputs_func=module_inputs_torch_nn_MaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.KLDivLoss, + module_inputs_func=module_inputs_torch_nn_KLDivLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # https://github.com/pytorch/pytorch/issues/115588 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MSELoss, + module_inputs_func=module_inputs_torch_nn_MSELoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MarginRankingLoss, + module_inputs_func=module_inputs_torch_nn_MarginRankingLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiLabelMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multilabel_margin_loss_forward' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::multilabel_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MultiMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multi_margin_loss' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # RuntimeError: derivative for aten::multi_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.SoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_SoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MultiLabelSoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelSoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.NLLLoss, + module_inputs_func=module_inputs_torch_nn_NLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GaussianNLLLoss, + module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.PoissonNLLLoss, + module_inputs_func=module_inputs_torch_nn_PoissonNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.HingeEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_HingeEmbeddingLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.HuberLoss, + module_inputs_func=module_inputs_torch_nn_HuberLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: seemingly incorrect output dtype + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCELoss, + module_inputs_func=module_inputs_torch_nn_BCELoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCEWithLogitsLoss, + module_inputs_func=module_inputs_torch_nn_BCEWithLogitsLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # see #119108: tolerance issue + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.CrossEntropyLoss, + module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss, + dtypes=get_all_fp_dtypes(include_half=True, include_bfloat16=False), + decorators=( + # No channels_last support for loss functions. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'), + # Expect failures for tests that rely on torch.half implementation on CPU + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", dtypes=[torch.float16], device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_if_train_and_eval_modes_differ", + dtypes=[torch.float16], device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_save_load", dtypes=[torch.float16], + device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", dtypes=[torch.float16], + device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_multiple_device_transfer", dtypes=[torch.float16], + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16], + device_type='cuda'),), + ), + ModuleInfo(torch.nn.CTCLoss, + module_inputs_func=module_inputs_torch_nn_CTCLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # The operator aten::_ctc_loss is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # https://github.com/pytorch/pytorch/issues/115585 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),) + ), + ModuleInfo(torch.nn.GELU, + module_inputs_func=module_inputs_torch_nn_GELU, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GLU, + module_inputs_func=module_inputs_torch_nn_GLU, + ), + ModuleInfo(torch.nn.GroupNorm, + module_inputs_func=module_inputs_torch_nn_GroupNorm, + dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True), + skips=( + # Tracking at https://github.com/pytorch/pytorch/issues/98089 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_memory_format', device_type='cpu'), + # No channels_last support for GroupNorm currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'), + DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad", + active_if=TEST_WITH_ROCM, device_type='cuda'),) + ), + ModuleInfo(torch.nn.Hardshrink, + module_inputs_func=module_inputs_torch_nn_Hardshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),), + ), + ModuleInfo(torch.nn.Hardswish, + module_inputs_func=module_inputs_torch_nn_Hardswish, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),), + supports_gradgrad=False), + ModuleInfo(torch.nn.Hardtanh, + module_inputs_func=module_inputs_torch_nn_Hardtanh, + ), + ModuleInfo(torch.nn.InstanceNorm1d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm1d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm2d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm2d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm3d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3), + train_and_eval_differ=True, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # No channels_last support for InstanceNorm3d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LocalResponseNorm, + module_inputs_func=module_inputs_torch_nn_LocalResponseNorm, + skips=( + # uses avg_pool3d which is not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.LayerNorm, + module_inputs_func=module_inputs_torch_nn_LayerNorm, + skips=( + # No channels_last support for LayerNorm currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + # TransformerEncoder takes the same inputs as TransformerEncoderLayer + ModuleInfo(torch.nn.TransformerEncoder, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoder, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # Doesn't support device / dtype kwargs directly because it is just a + # container of TransformerEncoderLayers. 
+ DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'),) + ), + ModuleInfo(torch.nn.TransformerEncoderLayer, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='cpu', active_if=IS_WINDOWS), + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.TransformerDecoderLayer, + module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerDecoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Transformer, + module_inputs_func=module_inputs_torch_nn_Transformer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for Transformer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiheadAttention, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_MultiheadAttention, + skips=( + # No channels_last support for MultiheadAttention currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Embedding, + module_inputs_func=module_inputs_torch_nn_Embedding, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='mps')], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ReLU, + module_inputs_func=module_inputs_torch_nn_ReLU, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LeakyReLU, + module_inputs_func=module_inputs_torch_nn_LeakyReLU, + ), + ModuleInfo(torch.nn.ReLU6, + module_inputs_func=module_inputs_torch_nn_ReLU6, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.PReLU, + module_inputs_func=module_inputs_torch_nn_PReLU, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNNCell, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.GRUCell, + module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.LSTMCell, + module_inputs_func=module_inputs_torch_nn_LSTMCell, + module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell, + ), + ModuleInfo(torch.nn.Sigmoid, + module_inputs_func=module_inputs_torch_nn_Sigmoid, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LogSigmoid, + module_inputs_func=module_inputs_torch_nn_LogSigmoid, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.SiLU, + module_inputs_func=module_inputs_torch_nn_SiLU, + ), + ModuleInfo(torch.nn.Softmax, + module_inputs_func=module_inputs_torch_nn_Softmax, + ), + ModuleInfo(torch.nn.Softmax2d, + module_inputs_func=module_inputs_torch_nn_Softmax2d, + skips=( + # no channels last support for Softmax2d currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LogSoftmax, + module_inputs_func=module_inputs_torch_nn_LogSoftmax, + skips=( + # no channels last support for LogSoftmax currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: inf nan error + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Softmin, + module_inputs_func=module_inputs_torch_nn_Softmin, + skips=( + # no channels last support for Softmin currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Softplus, + module_inputs_func=module_inputs_torch_nn_Softplus, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softshrink, + module_inputs_func=module_inputs_torch_nn_Softshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softsign, + module_inputs_func=module_inputs_torch_nn_Softsign, + ), + ModuleInfo(torch.nn.Tanh, + module_inputs_func=module_inputs_torch_nn_Tanh, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Tanhshrink, + module_inputs_func=module_inputs_torch_nn_Tanhshrink, + skips=( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Threshold, + module_inputs_func=module_inputs_torch_nn_Threshold, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Mish, + module_inputs_func=module_inputs_torch_nn_Mish, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNN, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators + ), + ModuleInfo(torch.nn.GRU, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.LSTM, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_LSTM, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + # LSTM with projections is not currently supported with MPS + DecorateInfo(skipMPS),), + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.ReflectionPad1d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad1d, + ), + ModuleInfo(torch.nn.ReflectionPad2d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReflectionPad3d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReplicationPad1d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad1d, + ), + ModuleInfo(torch.nn.ReplicationPad2d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + 
ModuleInfo(torch.nn.ReplicationPad3d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.SELU, + module_inputs_func=module_inputs_torch_nn_SELU, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.ZeroPad1d, + module_inputs_func=module_inputs_torch_nn_ZeroPad1d, + ), + ModuleInfo(torch.nn.ZeroPad2d, + module_inputs_func=module_inputs_torch_nn_ZeroPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ZeroPad3d, + module_inputs_func=module_inputs_torch_nn_ZeroPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.CircularPad1d, + module_inputs_func=module_inputs_torch_nn_CircularPad1d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad1d, + ), + ModuleInfo(torch.nn.CircularPad2d, + module_inputs_func=module_inputs_torch_nn_CircularPad2d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad2d, + ), + ModuleInfo(torch.nn.CircularPad3d, + module_inputs_func=module_inputs_torch_nn_CircularPad3d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),) + ), + ModuleInfo(torch.nn.ConstantPad1d, + module_inputs_func=module_inputs_torch_nn_ConstantPad1d, + ), + ModuleInfo(torch.nn.ConstantPad2d, + module_inputs_func=module_inputs_torch_nn_ConstantPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ConstantPad3d, + module_inputs_func=module_inputs_torch_nn_ConstantPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ) +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py new file mode 100644 index 0000000000000000000000000000000000000000..b38a4be89141af10e215d5953190b4d56b8826d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py @@ -0,0 +1,2033 @@ +# mypy: ignore-errors + +import functools +import itertools +import unittest +from copy import deepcopy +from enum import Enum +from typing import Any, Dict, List, Tuple, Union + +import torch +from torch import Tensor +from torch.nn import Parameter +from torch.optim import ( + Adadelta, + Adagrad, + Adam, + Adamax, + AdamW, + ASGD, + LBFGS, + NAdam, + Optimizer, + RAdam, + RMSprop, + Rprop, + SGD, + SparseAdam, +) +from torch.testing._internal.common_device_type import tol, toleranceOverride +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_utils import ( + _TestParametrizer, + set_single_threaded_if_parallel_tbb, + skipIfMps, + 
skipIfTorchDynamo, + TEST_WITH_TORCHDYNAMO, +) +from torch.utils._foreach_utils import ( + _get_foreach_kernels_supported_devices, + _get_fused_kernels_supported_devices, +) + + +class OptimizerInput: + """Contains args / kwargs to be passed to an optimizer constructor.""" + + __slots__ = ["params", "kwargs", "desc"] + + def __init__( + self, + params: Union[List[Parameter], List[Tensor], Dict[Any, Any]], + kwargs: Dict[str, Any], + desc: str = "", + ): + # params can be a list of Tensors OR param_groups OR None + self.params = params + self.kwargs = kwargs + self.desc = desc + + def __repr__(self): + return f"params={self.params}, kwargs={self.kwargs}, desc={self.desc}" + + +class OptimizerErrorEnum(Enum): + """Enumerates when an error is raised when testing optimizers.""" + + CONSTRUCTION_ERROR = 0 + STEP_ERROR = 1 + + +class ErrorOptimizerInput: + """ + An OptimizerInput that will cause the optimizer to throw an error when constructed. + Includes the type and string of the resulting error. + """ + + __slots__ = ["optimizer_error_input", "error_on", "error_type", "error_regex"] + + def __init__( + self, + optimizer_error_input, + *, + error_on=OptimizerErrorEnum.CONSTRUCTION_ERROR, + error_type=RuntimeError, + error_regex="", + ): + self.optimizer_error_input = optimizer_error_input + self.error_on = error_on + self.error_type = error_type + self.error_regex = error_regex + + +class OptimizerInfo: + """Optimizer information to be used in testing.""" + + def __init__( + self, + optim_cls: Optimizer, # Class object for the Optimizer under test + *, + # Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility + # to the test using the OptimizerInfo. OptimizerInput.params is likely None. + # Can optionally take in device to filter out certain unsupported configs + optim_inputs_func, + # A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer + # supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means. 
+ supported_impls: Tuple[str] = ("foreach", "differentiable"), + # the devices on which the optim supports sparse tensors for params and grads, see SGD + supports_sparse_on: Tuple[str] = (), + # the optim only supports one config: sparse grads w/ dense params, see SparseAdam + only_supports_sparse_grads: bool = False, + # the optim supports complex parameters + supports_complex: bool = True, + # whether the optimizer.step() function requires a closure to be passed + step_requires_closure: bool = False, + # whether the optimizer supports per-param options with parameter groups + supports_param_groups: bool = True, + # whether the optimizer supports parameters on multiple devices + supports_multiple_devices: bool = True, + skips=(), # Indicates which tests to skip + decorators=None, # Additional decorators to apply to generated tests + optim_error_inputs_func=None, # Function to generate optim inputs that error + ): + self.optim_cls = optim_cls + self.optim_inputs_func = optim_inputs_func + self.supported_impls = supported_impls + self.supports_sparse_on = supports_sparse_on + self.only_supports_sparse_grads = only_supports_sparse_grads + self.supports_complex = supports_complex + self.step_requires_closure = step_requires_closure + self.supports_param_groups = supports_param_groups + self.supports_multiple_devices = supports_multiple_devices + self.decorators = ( + *(decorators if decorators else []), + *(skips if skips else []), + ) + self.optim_error_inputs_func = optim_error_inputs_func + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + result = [set_single_threaded_if_parallel_tbb] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + @property + def name(self): + return self.optim_cls.__name__ + + +class optims(_TestParametrizer): + """Decorator for specifying a list of optimizers over which to run a test.""" + + def __init__(self, optim_info_iterable, dtypes=None): + self.optim_info_list = list(optim_info_iterable) + + # optimizers aren't limited to be one dtype as parameters can have different dtypes + # We default to torch.float32, but dtypes should be specified through passed in + # parameters. + self.dtypes = dtypes if dtypes is not None else [torch.float32] + + def _parametrize_test(self, test, generic_cls, device_cls): + if device_cls is None: + raise RuntimeError( + "The @optims decorator is only intended to be used in a device-specific " + "context; use it with instantiate_device_type_tests() instead of " + "instantiate_parametrized_tests()" + ) + + for optim_info, dtype in itertools.product(self.optim_info_list, self.dtypes): + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = optim_info.name + + # Construct parameter kwargs to pass to the test. 
+ param_kwargs = {"optim_info": optim_info, "dtype": dtype} + + try: + + @functools.wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorator_fn = functools.partial( + optim_info.get_decorators, + generic_cls.__name__, + test.__name__, + device_cls.device_type, + dtype, + ) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print( + f"Failed to instantiate {test_name} for module {optim_info.name}!" + ) + raise ex + + +# Helper function for generating error inputs for all optimizers, used below. +def get_error_inputs_for_all_optims(device, dtype): + if str(device) == "cpu": + sample_param = Parameter(torch.randn(1, device=device, dtype=dtype)) + return [ + ErrorOptimizerInput( + OptimizerInput( + params=sample_param, + kwargs={}, + desc="invalid param type", + ), + error_type=TypeError, + error_regex="params argument given to the optimizer should be an iterable of Tensors or dicts", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[sample_param, sample_param], + kwargs={}, + desc="a param group cannot have duplicate parameters", + ), + error_type=UserWarning, + error_regex=".*a parameter group with duplicate parameters.*", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[{"params": sample_param}, {"params": sample_param}], + kwargs={}, + desc="duplicate parameters should not occur across param groups either", + ), + error_type=ValueError, + error_regex="some parameters appear in more than one parameter group", + ), + ] + else: + return [] + + +# ------------------------------------------------------------------------------------------ +# NOTE: [optimizer kwarg categories] +# We categorize optimizer kwargs as 3 types: +# 1. optimizer-specific flags are like amsgrad or rho or beta, flags that are specific to +# algorithms and thus only show up for certain optimizers. There are many of these, so I +# do not bother gathering them all and listing them here. The converse to these would be +# global flags that every optimizer ideally _should_ support. We break global flags into +# 2 further categories and list them all below. +# 2. global-friendly = ["lr", "weight_decay", "maximize", "capturable"] +# global-friendly flags are global flags who play nicely with all other global flags, +# i.e., are mutually exclusive in function. This means that any pair of the following +# flags can be toggled at once (e.g., maximize and weight_decay). Furthermore, any of the +# following flags theoretically can be enabled with ANY other global flag, including the +# cliquey ones (e.g, capturable and foreach). +# 3. global-cliquey = ["foreach", "fused", "differentiable"] +# global-cliquey flags are global flags that do NOT coexist with other cliquey flags, +# usually because they contradict each other in function. For example, one should not flip +# both foreach AND fused to True, because they are two differing performance optimizations +# in which you can only opt into one. +# +# The following optim_inputs_func_* sampling functions only return constructor combinations of +# optimizer-specific and global-friendly flags. This is because we are confident they would mesh +# well with additional kwargs. On the flip side of the same coin, we reserve setting the +# global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs. 
+ + +def optim_inputs_func_adadelta(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput( + params=None, kwargs={"rho": 0.95, "weight_decay": 0.9}, desc="rho" + ), + ] + + +def optim_error_inputs_func_adadelta(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, rho=1.1), + desc="rho should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid rho value: 1.1", + ), + ] + return error_inputs + + +def optim_inputs_func_adagrad(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), + OptimizerInput( + params=None, + kwargs={"initial_accumulator_value": 0.1, "weight_decay": 0.1}, + desc="initial_accumulator_value", + ), + OptimizerInput( + params=None, + kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1}, + desc="lr_decay", + ), # TODO: Move out to testing in param_group? + ] + + +def optim_error_inputs_func_adagrad(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, lr_decay=-0.5), + desc="lr_decay must be bigger than 0", + ), + error_type=ValueError, + error_regex="Invalid lr_decay value: -0.5", + ), + ] + return error_inputs + + +# TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work +# with all implementation code paths... 
+def optim_inputs_func_adam(device): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "amsgrad": True, "capturable": True}, + desc="capturable, amsgrad", + ), + OptimizerInput( + params=None, + kwargs={"lr": torch.tensor(0.001), "amsgrad": True, "capturable": True}, + desc="Tensor lr with capturable and amsgrad", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1, "amsgrad": True}, desc="amsgrad" + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_adam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-1), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -1", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=torch.tensor(0.001), foreach=True), + desc="lr as Tensor doesn't work with foreach & not capturable", + ), + error_type=ValueError, + error_regex="lr as a Tensor is not supported for capturable=False and foreach=True", + ), + ] + if "cuda" in str(device): + sample_tensor = torch.empty((), device=device, dtype=dtype) + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=[sample_tensor], + kwargs={"foreach": True, "fused": True}, + desc="`fused` and `foreach` cannot be `True` together", + ), + error_type=RuntimeError, + error_regex="`fused` and `foreach` cannot be `True` together", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[sample_tensor], + kwargs={"fused": True, "differentiable": True}, + desc="`fused` does not support `differentiable`", + ), + error_type=RuntimeError, + error_regex="`fused` does not support `differentiable`", + ), + ] + return error_inputs + + +def optim_inputs_func_adamax(device): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "maximize": True, "capturable": True}, + desc="capturable, maximize, weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0, "maximize": True, "capturable": True}, + desc="capturable, maximize", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True}, + desc="capturable, weight_decay", + ), + ] + + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def 
optim_error_inputs_func_adamax(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(0.0, 1.0)), + desc="beta2 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 1: 1.0", + ), + ] + return error_inputs + + +def optim_inputs_func_adamw(device): + return optim_inputs_func_adam(device) + + +def optim_error_inputs_func_adamw(device, dtype): + return optim_error_inputs_func_adam(device, dtype) + + +def optim_inputs_func_asgd(device): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"maximize": True, "capturable": True}, + desc="maximize, capturable", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "capturable": True}, + desc="weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True}, + desc="maximize, weight_decay, capturable", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.02}, desc="non-default lr"), + OptimizerInput(params=None, kwargs={"t0": 100}, desc="t0"), + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize, nonzero weight_decay", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_asgd(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-0.5), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_lbfgs(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"tolerance_grad": 1e-6}, desc="tolerance_grad" + ), + OptimizerInput( + params=None, + kwargs={"line_search_fn": "strong_wolfe"}, + desc="strong_wolfe", + ), + ] + + +def optim_error_inputs_func_lbfgs(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + return error_inputs + + +# Weird story bro, NAdam and RAdam do not have maximize. 
+def optim_inputs_func_nadam(device): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.9, "momentum_decay": 6e-3, "capturable": True}, + desc="weight_decay, capturable", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.9, + "momentum_decay": 6e-3, + "decoupled_weight_decay": True, + "capturable": True, + }, + desc="decoupled_weight_decay, capturable", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"), + OptimizerInput( + params=None, + kwargs={"momentum_decay": 6e-3}, + desc="non-zero momentum_decay", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "momentum_decay": 6e-3}, + desc="weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.1, + "momentum_decay": 6e-3, + "decoupled_weight_decay": True, + }, + desc="decoupled_weight_decay", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_nadam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum_decay=-0.2), + desc="momentum_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid momentum_decay value: -0.2", + ), + ] + return error_inputs + + +# Weird story bro, NAdam and RAdam do not have maximize. 
+def optim_inputs_func_radam(device=None): + cuda_supported_configs = [ + OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), + OptimizerInput( + params=None, + kwargs={ + "capturable": True, + "weight_decay": 0.1, + }, + desc="capturable, weight_decay", + ), + OptimizerInput( + params=None, + kwargs={ + "capturable": True, + "weight_decay": 0.1, + "decoupled_weight_decay": True, + }, + desc="capturable, weight_decay, decoupled_weight_decay", + ), + ] + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 2e-3}, desc="non-default lr"), + OptimizerInput(params=None, kwargs={"eps": 1e-6}, desc="non-default eps"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "decoupled_weight_decay": True}, + desc="decoupled_weight_decay", + ), + ] + (cuda_supported_configs if "cuda" in str(device) else []) + + +def optim_error_inputs_func_radam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, weight_decay=-1), + desc="weight_decay should > 0", + ), + error_type=ValueError, + error_regex="Invalid weight_decay value: -1", + ), + ] + return error_inputs + + +def optim_inputs_func_rmsprop(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "centered": True}, + desc="centered", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "centered": True, "momentum": 0.1}, + desc="momentum", + ), + OptimizerInput( + params=None, + kwargs={ + "weight_decay": 0.1, + "centered": True, + "momentum": 0.1, + "maximize": True, + }, + desc="maximize", + ), + ] + + +def optim_error_inputs_func_rmsprop(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum=-1.0), + desc="momentum should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid momentum value: -1.0", + ), + ] + return error_inputs + + +def optim_inputs_func_rprop(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 2e-4}, desc="non-default lr"), + OptimizerInput( + params=None, kwargs={"etas": (0.5, 1.5)}, desc="non-default etas" + ), + OptimizerInput( + params=None, + kwargs={"step_sizes": (2e-6, 100)}, + desc="non-default step_sizes", + ), + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + ] + + +def optim_error_inputs_func_rprop(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, etas=(1.0, 0.5)), + desc="0 < eta1 < 1 < eta2", + ), + error_type=ValueError, + error_regex="Invalid eta values: 1.0, 
0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_sgd(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"), + OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "dampening": 0.5}, + desc="dampening", + ), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "weight_decay": 0.1}, + desc="non-zero weight_decay", + ), + OptimizerInput( + params=None, + kwargs={"momentum": 0.9, "nesterov": True, "weight_decay": 0.1}, + desc="nesterov", + ), + OptimizerInput( + params=None, + kwargs={"weight_decay": 0.1, "maximize": True}, + desc="maximize", + ), + ] + + +def optim_error_inputs_func_sgd(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + if str(device) == "cpu": + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, momentum=-0.5), + desc="momentum should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid momentum value: -0.5", + ), + ] + return error_inputs + + +def optim_inputs_func_sparseadam(device): + return [ + OptimizerInput(params=None, kwargs={}, desc="default"), + OptimizerInput( + params=None, kwargs={"lr": 0.01}, desc="non-default lr" + ), # TODO: Move out to testing in param_group? + OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), + ] + + +def optim_error_inputs_func_sparseadam(device, dtype): + error_inputs = get_error_inputs_for_all_optims(device, dtype) + + if str(device) == "cpu": + # SparseAdam raises a warning and not an error for the first entry. We + # update it here: + error_inputs[0].error_type = FutureWarning + error_inputs[ + 0 + ].error_regex = "Passing in a raw Tensor as ``params`` to SparseAdam" + + error_inputs += [ + ErrorOptimizerInput( + OptimizerInput( + params=None, + kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), + desc="beta1 should be between 0 and 1", + ), + error_type=ValueError, + error_regex="Invalid beta parameter at index 0: 1.0", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[ + torch.zeros( + 3, layout=torch.sparse_coo, device=device, dtype=dtype + ) + ], + kwargs={}, + desc="dense params required", + ), + error_type=ValueError, + error_regex="SparseAdam requires dense parameter tensors", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[ + { + "params": [ + torch.zeros( + 3, + layout=torch.sparse_coo, + device=device, + dtype=dtype, + ) + ] + } + ], + kwargs={}, + desc="dense params required in param_groups", + ), + error_type=ValueError, + error_regex="SparseAdam requires dense parameter tensors", + ), + ErrorOptimizerInput( + OptimizerInput( + params=[torch.rand(2, 3, device=device, dtype=torch.complex64)], + kwargs=dict(), + desc="complex not supported", + ), + error_type=ValueError, + error_regex="SparseAdam does not support complex parameters", + ), + ] + return error_inputs + + +def _get_device_type(device: Union[str, torch.device]) -> str: + # Returns the device type as a string, e.g., "cpu" or "cuda" + if isinstance(device, torch.device): + device = str(device.type) + assert isinstance(device, str) + return device.split(":")[0] + + +def _get_optim_inputs_including_global_cliquey_kwargs( + device, dtype, optim_info, skip=() +) -> List[OptimizerInput]: + """ + Return a list of all configs for a given optimizer as a list of OptimizerInputs, + including configs that have supported global cliquey kwargs (foreach, fused, + 
differentiable) based on optim_info.supported_impls. + + The configs (optim_inputs) returned by optim_info.optim_inputs_func(...) + intentionally do NOT include global cliquey kwargs to give flexibility to tests. + For example, testing correctness between toggling foreach on and off is now + trivial. That said, we sometimes want to test for all possible configs on an + optimizer including all supported flags, so this helper returns all optim inputs. + """ + assert all( + x in ["foreach", "fused", "differentiable"] for x in skip + ), "skip must be a subset of ['foreach', 'fused', 'differentiable']" + + optim_inputs = optim_info.optim_inputs_func(device) + + supported_impls = tuple( + x + for x in optim_info.supported_impls + if x not in skip + and ( + _get_device_type(device) in _get_fused_kernels_supported_devices() + or x != "fused" + ) + and ( + _get_device_type(device) in _get_foreach_kernels_supported_devices() + or x != "foreach" + ) + ) + + all_optim_inputs = [] + for optim_input in optim_inputs: + # Add the base config where all the flags are False + base_kwargs = deepcopy(optim_input.kwargs) + if len(supported_impls) != 0: + for flag in supported_impls: + base_kwargs[flag] = False + all_optim_inputs.append( + OptimizerInput(params=None, kwargs=base_kwargs, desc=optim_input.desc) + ) + else: + all_optim_inputs.append(optim_input) + # Add a config for when each of the global cliquey kwargs is True + # Note that in [optimizer kwarg categories], these kwargs are mutually + # exclusive, so we do not need to product them together. + for flag in supported_impls: + new_kwargs = deepcopy(base_kwargs) + new_kwargs[flag] = True + all_optim_inputs.append( + OptimizerInput( + params=None, kwargs=new_kwargs, desc=f"{optim_input.desc} & {flag}" + ) + ) + return all_optim_inputs + + +# Database of OptimizerInfo entries in alphabetical order. 
+optim_db: List[OptimizerInfo] = [ + OptimizerInfo( + Adadelta, + optim_inputs_func=optim_inputs_func_adadelta, + optim_error_inputs_func=optim_error_inputs_func_adadelta, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679 and #116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + # Note on tolerances: + # test_correctness_Adadelta_cuda_float32 + # Mismatched elements: 10 / 100 (10.0%) + # Greatest absolute difference: 4.838220775127411e-05 at index (7, 4) (up to 1e-05 allowed) + # Greatest relative difference: 0.007270356640219688 at index (7, 2) (up to 1e-05 allowed) + # This is due to floating point ordering error + usage of sqrt + DecorateInfo( + toleranceOverride( + { + torch.float32: tol( + rtol=5.5e-4, + atol=5e-5, + ) + } + ), + "CompiledOptimizerParityTests", + "test_correctness", + ), + ), + ), + OptimizerInfo( + Adagrad, + optim_inputs_func=optim_inputs_func_adagrad, + optim_error_inputs_func=optim_error_inputs_func_adagrad, + supported_impls=("foreach", "differentiable"), + supports_sparse_on=("cpu"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115607" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See 
https://github.com/pytorch/pytorch/issues/115607 and #116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + Adam, + optim_inputs_func=optim_inputs_func_adam, + optim_error_inputs_func=optim_error_inputs_func_adam, + supported_impls=("foreach", "differentiable", "fused"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Fixing #115607 should fix this test. fused is correct, but forloop is not." + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + Adamax, + optim_inputs_func=optim_inputs_func_adamax, + optim_error_inputs_func=optim_error_inputs_func_adamax, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo("Mismatched _foreach_addcdiv_ types, see #118159"), + "TestOptimRenewed", + "test_complex", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115607" + ), + "TestOptimRenewed", + 
"test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115607 and #116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + unittest.skip("Uses too much memory, even for H100, surprisingly."), + "TestOptimRenewed", + "test_foreach_large_tensor", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + DecorateInfo( + skipIfTorchDynamo("cpu fails due to #115607"), + "TestOptimRenewed", + "test_can_load_older_state_dict", + device_type="cpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "capturable path no longer called after hitting cache limit, see #121178" + ), + "TestOptimRenewed", + "test_save_load_equality_with_weights_only", + ), + DecorateInfo( + skipIfTorchDynamo( + "capturable path no longer called after hitting cache limit, see #121178" + ), + "TestOptimRenewed", + "test_load_nontensor_step", + ), + DecorateInfo( + skipIfTorchDynamo( + "capturable path no longer called after hitting cache limit, see #121178" + ), + "TestOptimRenewed", + "test_param_groups_lr", + ), + ), + ), + OptimizerInfo( + AdamW, + optim_inputs_func=optim_inputs_func_adamw, + optim_error_inputs_func=optim_error_inputs_func_adamw, + supported_impls=("foreach", "differentiable", "fused"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Fixing #115607 should fix this test. fused is correct, but forloop is not." 
+ ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + ASGD, + optim_inputs_func=optim_inputs_func_asgd, + optim_error_inputs_func=optim_error_inputs_func_asgd, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "See discrepancy in https://github.com/pytorch/pytorch/issues/115607" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1.5e-5, rtol=1e-5), + } + ), + "TestOptimRenewed", + "test_step_is_noop_for_zero_grads", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + LBFGS, + optim_inputs_func=optim_inputs_func_lbfgs, + optim_error_inputs_func=optim_error_inputs_func_lbfgs, + supported_impls=(), + step_requires_closure=True, + supports_param_groups=False, + supports_multiple_devices=False, + skips=( + # Fails on MacOS 13.2.1 in CI https://github.com/pytorch/pytorch/issues/117094 + DecorateInfo( + skipIfMps, "TestOptimRenewed", "test_can_load_older_state_dict" + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + DecorateInfo( + unittest.skip("Does not support param groups"), + "TestOptimRenewed", + "test_param_groups_lr", + ), + DecorateInfo( + unittest.skip("Does not support param groups"), + "TestOptimRenewed", + "test_param_groups_weight_decay", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + 
"TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + unittest.skip("LBFGS doesn't support multidevice"), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + ), + ), + OptimizerInfo( + NAdam, + optim_inputs_func=optim_inputs_func_nadam, + optim_error_inputs_func=optim_error_inputs_func_nadam, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116499" + ), + "TestOptimRenewed", + "test_can_load_older_state_dict", + device_type="cuda", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors, https://github.com/pytorch/pytorch/issues/117150" + ), + "TestOptimRenewed", + "test_load_nontensor_step", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors, see https://github.com/pytorch/pytorch/issues/117150" + ), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + RAdam, + optim_inputs_func=optim_inputs_func_radam, + optim_error_inputs_func=optim_error_inputs_func_radam, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + 
skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115607" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + toleranceOverride( + { + # previously atol=1e-7, rtol=1e-7 + torch.float64: tol(atol=1.5e-7, rtol=1.1e-7) + } + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "Should be fixed by https://github.com/pytorch/pytorch/issues/115607" + ), + "TestOptimRenewed", + "test_can_load_older_state_dict", + device_type="cpu", + ), + ), + ), + OptimizerInfo( + RMSprop, + optim_inputs_func=optim_inputs_func_rmsprop, + optim_error_inputs_func=optim_error_inputs_func_rmsprop, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679 and #116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 + torch.float32: tol(atol=5e-04, rtol=0.01), + } + ), + "TestOptimRenewed", + "test_mixed_device_dtype", + active_if=TEST_WITH_TORCHDYNAMO, + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + Rprop, + optim_inputs_func=optim_inputs_func_rprop, + optim_error_inputs_func=optim_error_inputs_func_rprop, + supported_impls=("foreach", "differentiable"), + skips=( + DecorateInfo( + skipIfMps, # Rprop doesn't update for non-contiguous, see #118117 + "TestOptimRenewed", + "test_forloop_goes_right_direction", + active_if=lambda kwargs: not kwargs["contiguous"], + ), + DecorateInfo( + 
skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_foreach_matches_forloop", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679 and #116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "See https://github.com/pytorch/pytorch/issues/115679" + ), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + SGD, + optim_inputs_func=optim_inputs_func_sgd, + optim_error_inputs_func=optim_error_inputs_func_sgd, + supported_impls=("foreach", "differentiable", "fused"), + supports_sparse_on=("cpu", "cuda"), + skips=( + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046" + ), + "TestOptimRenewed", + "test_peak_memory_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" + ), + "TestOptimRenewed", + "test_set_default_dtype_works_with_foreach", + ), + DecorateInfo( + skipIfTorchDynamo( + "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" + ), + "TestOptimRenewed", + "test_complex_2d", + ), + DecorateInfo( + toleranceOverride( + { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 + torch.float32: tol(atol=5e-04, rtol=0.007), + } + ), + "TestOptimRenewed", + "test_mixed_device_dtype", + active_if=TEST_WITH_TORCHDYNAMO, + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" + ), + "TestOptimRenewed", + "test_step_is_noop_for_zero_grads", + device_type="cpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "No closure handling, https://github.com/pytorch/pytorch/issues/116494" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" + ), + 
"TestOptimRenewed", + "test_param_groups_weight_decay", + device_type="cpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" + ), + "TestOptimRenewed", + "test_param_groups_lr", + device_type="cpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" + ), + "TestOptimRenewed", + "test_load_nontensor_step", + device_type="cpu", + ), + DecorateInfo( + skipIfTorchDynamo( + "momentum_buffer inconsistency, https://github.com/pytorch/pytorch/issues/117147" + ), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo( + "fails, https://github.com/pytorch/pytorch/issues/117165" + ), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), + OptimizerInfo( + SparseAdam, + optim_inputs_func=optim_inputs_func_sparseadam, + optim_error_inputs_func=optim_error_inputs_func_sparseadam, + supported_impls=(), + only_supports_sparse_grads=True, + supports_complex=False, # Missing complex support, see #118153 + skips=( + DecorateInfo( + skipIfMps, # SparseAdam does not support MPS + "TestOptimRenewed", + ), + DecorateInfo( + unittest.skip( + "SparseAdam does not support dense gradients, see #116507" + ), + "TestOptimRenewed", + "test_state_dict_deterministic", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_param_groups_lr", + ), + DecorateInfo( + unittest.skip( + "SparseAdam does not support dense gradients, see #116507" + ), + "TestOptimRenewed", + "test_can_load_older_state_dict", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_load_nontensor_step", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_forloop_goes_right_direction", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_forloop_goes_right_direction_multigpu", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_state_dict_with_cuda_params", + ), + DecorateInfo( + skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), + "TestOptimRenewed", + "test_deepcopy_copies_all_public_attrs", + ), + ), + ), +] + + +class TensorTracker: + """ + A utility to track tensor clones in a list, with the expectation of popping them later (in + order) to make fair comparisons between two multi-step computation. The intended use case is + usually when comparing two supposed equal computations, such as an optimizer step that each + individually consists of multiple steps, where numerical deviation could multiply. + + The goal is to be able to compare and align numbers at every milestone so as to minimize + numerical discrepancies, and so when the test fails, it is likely a real problem. + """ + + def __init__(self): + self.tensors = [] + + def add(self, tensor): + """ + Add a clone().detach()'d version of the tensor + """ + self.tensors.append(tensor.clone().detach()) + + # pops from beginning, like a queue and not a stack! + def pop_check_set(self, tensor_to_set, testcase): + """ + Pop the first element in the tensor tracker, assert equality between the popped tensor and + the input tensor, and then set the input tensor to have the same values as the popped tensor + (with copy_). 
+ """ + testcase.assertGreater(len(self.tensors), 0, "no tensors to pop") + ref = self.tensors.pop(0) + + testcase.assertTrue(isinstance(ref, Tensor), f"{type(ref)=}") + testcase.assertEqual(tensor_to_set, ref) + + with torch.no_grad(): + tensor_to_set.copy_(ref) + + def all_popped(self): + return len(self.tensors) == 0 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..259ecd20a9b32cf34a00360c2b54ece639bc503f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py @@ -0,0 +1,2835 @@ +# mypy: ignore-errors + +r"""Importing this file includes common utility methods and base clases for +checking quantization api and properties of resulting modules. +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +from torch.ao.nn.intrinsic import _FusedModule +import torch.distributed as dist +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM + +from torch._export import capture_pre_autograd_graph +from torch.ao.quantization import ( + QuantType, + default_dynamic_qat_qconfig, + default_embedding_qat_qconfig, + default_symmetric_qnnpack_qat_qconfig, +) +from torch.ao.quantization.quantize_pt2e import ( + _convert_to_reference_decomposed_fx, + convert_pt2e, + prepare_pt2e, + prepare_qat_pt2e, +) +from torch.ao.quantization.backend_config import ( + get_executorch_backend_config, +) +from torch.ao.quantization.quantizer.xnnpack_quantizer import ( + XNNPACKQuantizer, + get_symmetric_quantization_config, +) +from torch.ao.quantization import QuantWrapper, QuantStub, DeQuantStub, \ + default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \ + propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \ + get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize, \ + QConfigMapping, get_default_qconfig_mapping, get_default_qat_qconfig_mapping +from torch.ao.quantization.quantization_mappings import ( + get_default_dynamic_quant_module_mappings, + get_default_qconfig_propagation_list, + get_default_qat_module_mappings, +) +from torch.testing._internal.common_quantized import ( + override_quantized_engine, +) +from torch.jit.mobile import _load_for_lite_interpreter + +try: + # graph mode quantization based on fx + from torch.ao.quantization.quantize_fx import ( + prepare_fx, + prepare_qat_fx, + convert_fx, + convert_to_reference_fx, + ) + from torch.ao.ns.fx.ns_types import NSSingleResultValuesType, NSSubgraph + from torch.fx.graph import Node + from torch.fx import GraphModule + HAS_FX = True +except ImportError: + HAS_FX = False + +import copy +import io +import functools +import time +import os + +import unittest +import numpy as np +from torch.testing import FileCheck +from typing import Callable, Tuple, Dict, Any, Union, Type, Optional +import torch._dynamo as torchdynamo + +class NodeSpec: + ''' Used for checking GraphModule Node + ''' + def __init__(self, op, target): + ''' + op: call_function | call_module + target: + for call_function, target would be a function + for call_module, target would be the type of PyTorch module 
+ ''' + self.op = op + self.target = target + + @classmethod + def call_function(cls, target): + return NodeSpec('call_function', target) + + @classmethod + def call_method(cls, target): + return NodeSpec('call_method', target) + + @classmethod + def call_module(cls, target): + return NodeSpec('call_module', target) + + def __hash__(self): + return hash((self.op, self.target)) + + def __eq__(self, other): + if not isinstance(other, NodeSpec): + return NotImplemented + + return self.op == other.op and self.target == other.target + + def __repr__(self): + return repr(self.op) + " " + repr(self.target) + +def get_supported_device_types(): + return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu'] + +def test_only_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for inp in calib_data: + output = model(*inp) + +_default_loss_fn = torch.nn.CrossEntropyLoss() +def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn): + r""" + Default train function takes a torch.utils.data.Dataset and train the model + on the dataset + """ + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + train_loss, correct, total = 0, 0, 0 + for i in range(10): + model.train() + + for data, target in train_data: + optimizer.zero_grad() + output = model(data) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + train_loss += loss.item() + _, predicted = torch.max(output, 1) + total += target.size(0) + correct += (predicted == target).sum().item() + return train_loss, correct, total + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + +def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches): + model.train() + cnt = 0 + for image, target in data_loader: + start_time = time.time() + print('.', end='') + cnt += 1 + image, target = image.to(device), target.to(device) + output = model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + optimizer.step() + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + if cnt >= ntrain_batches: + return + return + +def ddp_setup(rank, world_size): + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '12355' + + # initialize the process group + dist.init_process_group("gloo", rank=rank, world_size=world_size) + +def ddp_cleanup(): + dist.destroy_process_group() + +def run_ddp(rank, world_size, prepared): + ddp_setup(rank, world_size) + prepared.cuda() + prepared = 
torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank]) + prepared.to(rank) + model_with_ddp = prepared + optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001) + train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1) # noqa: F821 + ddp_cleanup() + + +def convert_dynamic(module): + convert(module, get_default_dynamic_quant_module_mappings(), inplace=True) + +def prepare_dynamic(model, qconfig_dict=None): + propagate_qconfig_(model, qconfig_dict) + +def _make_conv_test_input( + batch_size, in_channels_per_group, input_feature_map_size, + out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale, + W_zero_point, use_bias, use_channelwise, +): + in_channels = in_channels_per_group * groups + out_channels = out_channels_per_group * groups + + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, X_value_max, + (batch_size, in_channels,) + input_feature_map_size) + X = X_scale * (X_init - X_zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8) + + W_scale = W_scale * out_channels + W_zero_point = W_zero_point * out_channels + # Resize W_scale and W_zero_points arrays equal to out_channels + W_scale = W_scale[:out_channels] + W_zero_point = W_zero_point[:out_channels] + # For testing, we use small values for weights and for activations so that + # no overflow occurs in vpmaddubsw instruction. If the overflow occurs in + # qconv implementation and if there is no overflow. + # In reference we can't exactly match the results with reference. + # Please see the comment in qconv implementation file + # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details. + (W_value_min, W_value_max) = (-5, 5) + # The operator expects them in the format + # (out_channels, in_channels/groups,) + kernel_size + W_init = torch.randint( + W_value_min, W_value_max, + (out_channels, in_channels_per_group,) + kernel_size) + b_init = torch.randint(0, 10, (out_channels,)) + + if use_channelwise: + W_shape = (-1, 1) + (1,) * len(kernel_size) + W_scales_tensor = torch.tensor(W_scale, dtype=torch.float) + W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float) + W = W_scales_tensor.reshape(*W_shape) * ( + W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float() + b = X_scale * W_scales_tensor * b_init.float() + W_q = torch.quantize_per_channel( + W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0, + dtype=torch.qint8) + else: + W = W_scale[0] * (W_init - W_zero_point[0]).float() + b = X_scale * W_scale[0] * b_init.float() + W_q = torch.quantize_per_tensor( + W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8) + + return (X, X_q, W, W_q, b if use_bias else None) + +def _make_conv_add_extra_input_tensor(scale, zero_point, sizes): + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, + X_value_max, + sizes # Infer the size of tensor to do the add + ) + X = scale * (X_init - zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=scale, zero_point=zero_point, dtype=torch.quint8) + return X, X_q + +def skipIfNoFBGEMM(fn): + reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.' 
+ if isinstance(fn, type): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoQNNPACK(fn): + reason = 'Quantized operations require QNNPACK.' + if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.onnx._CAFFE2_ATEN_FALLBACK: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def withQNNPACKBackend(fn): + # TODO(future PR): consider combining with skipIfNoQNNPACK, + # will require testing of existing callsites + reason = 'Quantized operations require QNNPACK.' + if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + with override_quantized_engine('qnnpack'): + fn(*args, **kwargs) + + return wrapper + +def skipIfNoONEDNN(fn): + reason = 'Quantized operations require ONEDNN.' + if isinstance(fn, type): + if 'onednn' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'onednn' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoONEDNNBF16(fn): + reason = 'Quantized operations require BF16 support.' + if isinstance(fn, type): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoX86(fn): + reason = 'Quantized operations require X86.' + if isinstance(fn, type): + if 'x86' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'x86' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoDynamoSupport(fn): + reason = "dynamo doesn't support." + if isinstance(fn, type): + if not torchdynamo.is_dynamo_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_dynamo_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoInductorSupport(fn): + reason = "inductor doesn't support." 
+ if isinstance(fn, type): + if not torchdynamo.is_inductor_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_inductor_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +try: + import torchvision # noqa: F401 + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False +skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") + +def get_script_module(model, tracing, data): + return torch.jit.trace(model, data) if tracing else torch.jit.script(model) + +def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True): + """ + Convert lengths to offsets for embedding_bag + """ + tt = np.zeros((t.shape[0] + 1,), dtype=offset_type) + tt[1:] = t + tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type)) + if use_begin_offset: + return tt[:-1] + return tt[1:] + +# QuantizationTestCase used as a base class for testing quantization on modules +class QuantizationTestCase(TestCase): + def setUp(self): + super().setUp() + self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)] + self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)] + self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)] + for _ in range(2)] + self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + + self.img_data_dict = {1 : self.img_data_1d, + 2 : self.img_data_2d, + 3 : self.img_data_3d} + + # Quant types that produce statically quantized ops + self.static_quant_types = [QuantType.STATIC, QuantType.QAT] + # All quant types for (fx based) graph mode quantization + self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT] + + def checkNoPrepModules(self, module): + r"""Checks the module does not contain child + modules for quantization preparation, e.g. + quant, dequant and observer + """ + self.assertFalse(hasattr(module, 'quant')) + self.assertFalse(hasattr(module, 'dequant')) + + def checkNoQconfig(self, module): + r"""Checks the module does not contain qconfig + """ + self.assertFalse(hasattr(module, 'qconfig')) + + for child in module.children(): + self.checkNoQconfig(child) + + def checkHasPrepModules(self, module): + r"""Checks the module contains child + modules for quantization preparation, e.g. 
+ quant, dequant and observer + """ + self.assertTrue(hasattr(module, 'module')) + self.assertTrue(hasattr(module, 'quant')) + self.assertTrue(hasattr(module, 'dequant')) + + def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None): + r"""Checks the module or module's leaf descendants + have observers in preparation for quantization + """ + if propagate_qconfig_list is None: + propagate_qconfig_list = get_default_qconfig_propagation_list() + if prepare_custom_config_dict is None: + prepare_custom_config_dict = {} + float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {}) + + # check if a module is a leaf module, ignoring activation_post_process attribute + def is_leaf_module(module): + submodule_name_count = 0 + for name, _ in module.named_children(): + if name != 'activation_post_process': + submodule_name_count += 1 + return submodule_name_count == 0 + + if hasattr(module, 'qconfig') and module.qconfig is not None and \ + ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential) + and type(module) in propagate_qconfig_list) or + type(module) in float_to_observed_module_class_mapping.keys()) and \ + not isinstance(module, torch.ao.quantization.DeQuantStub): + self.assertTrue(hasattr(module, 'activation_post_process'), + 'module: ' + str(type(module)) + ' do not have observer') + # we don't need to check observers for child modules of the + # qat modules + if type(module) not in get_default_qat_module_mappings().values() and \ + type(module) not in float_to_observed_module_class_mapping.values() and \ + not isinstance(module, _FusedModule): + for child in module.children(): + if type(child) in [nn.Dropout]: + continue + self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict) + + def checkQuantDequant(self, mod): + r"""Checks that mod has nn.Quantize and + nn.DeQuantize submodules inserted + """ + self.assertEqual(type(mod.quant), nnq.Quantize) + self.assertEqual(type(mod.dequant), nnq.DeQuantize) + + def checkWrappedQuantizedLinear(self, mod): + r"""Checks that mod has been swapped for an nnq.Linear + module, the bias is qint32, and that the module + has Quantize and DeQuantize submodules + """ + self.assertEqual(type(mod.module), nnq.Linear) + self.checkQuantDequant(mod) + + def checkQuantizedLinear(self, mod): + self.assertEqual(type(mod), nnq.Linear) + + def checkDynamicQuantizedLinear(self, mod, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. + """ + self.assertEqual(type(mod), nnqd.Linear) + self.assertEqual(mod._packed_params.dtype, dtype) + + def checkDynamicQuantizedLinearRelu(self, mod, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. 
+ """ + self.assertEqual(type(mod), nniqd.LinearReLU) + self.assertEqual(mod._packed_params.dtype, dtype) + + def check_eager_serialization(self, ref_model, loaded_model, x): + # Check state dict serialization and torch.save APIs + model_dict = ref_model.state_dict() + b = io.BytesIO() + torch.save(model_dict, b) + b.seek(0) + loaded_dict = torch.load(b) + loaded_model.load_state_dict(loaded_dict) + ref_out = ref_model(*x) + load_out = loaded_model(*x) + + def check_outputs(ref_out, load_out): + self.assertEqual(ref_out[0], load_out[0]) + if isinstance(ref_out[1], tuple): + self.assertEqual(ref_out[1][0], load_out[1][0]) + self.assertEqual(ref_out[1][1], load_out[1][1]) + else: + self.assertEqual(ref_out[1], load_out[1]) + + check_outputs(ref_out, load_out) + b = io.BytesIO() + torch.save(ref_model, b) + b.seek(0) + loaded = torch.load(b) + load_out = loaded(*x) + check_outputs(ref_out, load_out) + + def check_weight_bias_api(self, ref_model, weight_keys, bias_keys): + weight = ref_model.get_weight() + bias = ref_model.get_bias() + self.assertEqual(weight_keys ^ weight.keys(), set()) + self.assertEqual(bias_keys ^ bias.keys(), set()) + + def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.LSTM type + module, the bias is float. + """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkLinear(self, mod): + self.assertEqual(type(mod), torch.nn.Linear) + + def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. 
+ """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + if hasattr(mod, '_all_weight_values'): + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkScriptable(self, orig_mod, calib_data, check_save_load=False): + scripted = torch.jit.script(orig_mod) + self._checkScriptable(orig_mod, scripted, calib_data, check_save_load) + + # Use first calib_data entry as trace input + traced = torch.jit.trace(orig_mod, calib_data[0]) + self._checkScriptable(orig_mod, traced, calib_data, check_save_load) + + # Call this twice: once for a scripted module and once for a traced module + def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load): + self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data) + + # Test save/load + buffer = io.BytesIO() + torch.jit.save(script_mod, buffer) + + buffer.seek(0) + loaded_mod = torch.jit.load(buffer) + # Pending __get_state_ and __set_state__ support + # See tracking task https://github.com/pytorch/pytorch/issues/23984 + if check_save_load: + self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data) + + def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data): + for inp in calib_data: + ref_output = orig_mod(*inp) + scripted_output = test_mod(*inp) + self.assertEqual(scripted_output, ref_output) + + + def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False, + check=True, eval_mode=True, dynamic=False, qconfig=None): + if debug: + print('Testing:', str(module)) + qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)} + + if eval_mode: + module = module.eval() + if dynamic: + qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig} + model = get_script_module(module, tracing, inputs[0]).eval() + if debug: + print('input graph:', model.graph) + models = {} + outputs = {} + for debug in [True, False]: + if dynamic: + models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug) + # make sure it runs + outputs[debug] = models[debug](inputs) + else: + # module under test can contain in-place ops, and we depend on + # input data staying constant for comparisons + inputs_copy = copy.deepcopy(inputs) + models[debug] = quantize_jit( + model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False, + debug=debug) + # make sure it runs + outputs[debug] = models[debug](*inputs[0]) + + if debug: + print('debug graph:', models[True].graph) + print('non debug graph:', models[False].graph) + + if check: + # debug and non-debug option should have the same numerics + self.assertEqual(outputs[True], outputs[False]) + + # non debug graph should produce quantized op + FileCheck().check(quantized_op) \ + .run(models[False].graph) + + return models[False] + + def checkGraphModuleNodes( + self, graph_module, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None): + """ Check if GraphModule contains the target node + Args: + graph_module: the GraphModule instance we want to check + expected_node, expected_node_occurrence, expected_node_list: + see docs for checkGraphModeFxOp + """ + nodes_in_graph = {} + node_list = [] + modules = dict(graph_module.named_modules(remove_duplicate=False)) + for node in graph_module.graph.nodes: + n = None + if node.op == 'call_function' or node.op == 'call_method': + n = NodeSpec(node.op, node.target) + elif 
node.op == 'call_module': + n = NodeSpec(node.op, type(modules[node.target])) + + if n is not None: + node_list.append(n) + if n in nodes_in_graph: + nodes_in_graph[n] += 1 + else: + nodes_in_graph[n] = 1 + + if expected_node is not None: + self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) + + ' not found in the graph module') + + if expected_node_occurrence is not None: + for expected_node, occurrence in expected_node_occurrence.items(): + if occurrence != 0: + self.assertTrue( + expected_node in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' not found') + self.assertTrue( + nodes_in_graph[expected_node] == occurrence, + 'Check failed for node:' + str(expected_node) + + ' Expected occurrence:' + str(occurrence) + + ' Found occurrence:' + str(nodes_in_graph[expected_node])) + else: + self.assertTrue( + expected_node not in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' expected no occurrence but found') + + if expected_node_list is not None: + cur_index = 0 + for n in node_list: + if cur_index == len(expected_node_list): + return + if n == expected_node_list[cur_index]: + cur_index += 1 + self.assertTrue( + cur_index == len(expected_node_list), + "Check failed for graph:" + + self.printGraphModule(graph_module, print_str=False) + + "Expected ordered list:" + + str(expected_node_list)) + + def printGraphModule(self, graph_module, print_str=True): + modules = dict(graph_module.named_modules(remove_duplicate=False)) + node_infos = [] + for n in graph_module.graph.nodes: + node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs])) + if n.op == 'call_module': + node_info += ' module type: ' + repr(type(modules[n.target])) + node_infos.append(node_info) + str_to_print = '\n'.join(node_infos) + if print_str: + print(str_to_print) + return str_to_print + + if HAS_FX: + + def assert_types_for_matched_subgraph_pairs( + self, + matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]], + expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]], + gm_a: GraphModule, + gm_b: GraphModule, + ) -> None: + """ + Verifies that the types specified in expected_types match + the underlying objects pointed to by the nodes in matched_subgraph_pairs. + + An example successful test case: + + matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)} + expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)} + + The function tests for key equivalence, and verifies types with + instance checks. 
+ """ + + def _get_underlying_op_type( + node: Node, gm: GraphModule + ) -> Union[Callable, str]: + if node.op == 'call_module': + mod = getattr(gm, node.target) + return type(mod) + else: + assert node.op in ('call_function', 'call_method') + return node.target + + self.assertTrue( + len(matched_subgraph_pairs) == len(expected_types), + f'Expected length of results to match, but got {len(matched_subgraph_pairs)} and {len(expected_types)}' + ) + for k, v in expected_types.items(): + expected_types_a, expected_types_b = v + exp_type_start_a, exp_type_end_a = expected_types_a + exp_type_start_b, exp_type_end_b = expected_types_b + subgraph_a, subgraph_b = matched_subgraph_pairs[k] + + act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a) + act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b) + act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a) + act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b) + types_match = (exp_type_start_a is act_type_start_a) and \ + (exp_type_end_a is act_type_end_a) and \ + (exp_type_start_b is act_type_start_b) and \ + (exp_type_end_b is act_type_end_b) + self.assertTrue( + types_match, + 'Type mismatch at {}: expected {}, got {}'.format( + k, + (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b), + (act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b)) + ) + + def assert_ns_compare_dict_valid( + self, + act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]], + ) -> None: + """ + Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid: + 1. for each layer, results are recorded for two models + 2. number of seen tensors match + 3. shapes of each pair of seen tensors match + """ + for layer_name, result_type_to_data in act_compare_dict.items(): + for result_type, layer_data in result_type_to_data.items(): + self.assertTrue( + len(layer_data) == 2, + f"Layer {layer_name} does not have exactly two model results.") + model_name_0, model_name_1 = layer_data.keys() + for res_idx in range(len(layer_data[model_name_0])): + layer_data_0 = layer_data[model_name_0][res_idx] + layer_data_1 = layer_data[model_name_1][res_idx] + self.assertTrue( + layer_data_0['type'] == layer_data_0['type'], + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.") + + self.assertTrue( + len(layer_data_0['values']) == + len(layer_data_1['values']), + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.") + + # F.conv1d weight has rank 3, and toq.conv1d unpacked weight + # has rank 4. For now, skip the length check for conv1d only. 
+ is_weight_functional_conv1d = ( + result_type == NSSingleResultValuesType.WEIGHT.value and + ( + 'conv1d' in layer_data_0['prev_node_target_type'] or + 'conv1d' in layer_data_1['prev_node_target_type'] + ) + ) + if not is_weight_functional_conv1d: + for idx in range(len(layer_data_0['values'])): + values_0 = layer_data_0['values'][idx] + values_1 = layer_data_1['values'][idx] + if isinstance(values_0, torch.Tensor): + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + elif isinstance(values_0, list): + values_0 = values_0[0] + values_1 = values_1[0] + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + else: + assert isinstance(values_0, tuple), \ + f"unhandled type {type(values_0)}" + assert len(values_0) == 2 + assert len(values_0[1]) == 2 + assert values_0[0].shape == values_1[0].shape + assert values_0[1][0].shape == values_1[1][0].shape + assert values_0[1][1].shape == values_1[1][1].shape + + # verify that ref_node_name is valid + ref_node_name_0 = layer_data_0['ref_node_name'] + ref_node_name_1 = layer_data_1['ref_node_name'] + prev_node_name_0 = layer_data_0['prev_node_name'] + prev_node_name_1 = layer_data_1['prev_node_name'] + if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value: + self.assertTrue(ref_node_name_0 == prev_node_name_0) + self.assertTrue(ref_node_name_1 == prev_node_name_1) + elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value: + self.assertTrue(ref_node_name_0 != prev_node_name_0) + self.assertTrue(ref_node_name_1 != prev_node_name_1) + + def checkGraphModeFxOp( + self, + model, + inputs, + quant_type, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None, + is_reference=False, + print_debug_info=False, + custom_qconfig_dict=None, + prepare_expected_node=None, + prepare_expected_node_occurrence=None, + prepare_expected_node_list=None, + prepare_custom_config=None, + backend_config=None): + """ Quantizes model with graph mode quantization on fx and check if the + quantized model contains the quantized_node + + Args: + model: floating point torch.nn.Module + inputs: one positional sample input arguments for model + expected_node: NodeSpec + e.g. NodeSpec.call_function(torch.quantize_per_tensor) + expected_node_occurrence: a dict from NodeSpec to + expected number of occurrences (int) + e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1, + NodeSpec.call_method('dequantize'): 1} + expected_node_list: a list of NodeSpec, used to check the order + of the occurrence of Node + e.g. 
[NodeSpec.call_function(torch.quantize_per_tensor), + NodeSpec.call_module(nnq.Conv2d), + NodeSpec.call_function(F.hardtanh_), + NodeSpec.call_method('dequantize')] + is_reference: if True, enables reference mode + print_debug_info: if True, prints debug info + custom_qconfig_dict: overrides default qconfig_dict + prepare_expected_node: same as expected_node, but for prepare + prepare_expected_node_occurrence: same as + expected_node_occurrence, but for prepare + prepare_expected_node_list: same as expected_node_list, but + for prepare + + Returns: + A dictionary with the following structure: + { + "prepared": ..., # the prepared model + "quantized": ..., # the quantized non-reference model + "quantized_reference": ..., # the quantized reference model + "result": ..., # the result for either quantized or + # quantized_reference model depending on the + # is_reference argument + } + """ + # TODO: make img_data a single example instead of a list + if type(inputs) == list: + inputs = inputs[0] + + if quant_type == QuantType.QAT: + qconfig_mapping = get_default_qat_qconfig_mapping(torch.backends.quantized.engine) + model.train() + elif quant_type == QuantType.STATIC: + qconfig_mapping = get_default_qconfig_mapping(torch.backends.quantized.engine) + model.eval() + else: + qconfig = default_dynamic_qconfig + qconfig_mapping = QConfigMapping().set_global(qconfig) + model.eval() + + if quant_type == QuantType.QAT: + prepare = prepare_qat_fx + else: + prepare = prepare_fx + + # overwrite qconfig_dict with custom_qconfig_dict + if custom_qconfig_dict is not None: + assert type(custom_qconfig_dict) in (QConfigMapping, dict), \ + 'custom_qconfig_dict should be a QConfigMapping or a dict' + if isinstance(custom_qconfig_dict, QConfigMapping): + qconfig_mapping = custom_qconfig_dict + else: + qconfig_mapping = QConfigMapping.from_dict(custom_qconfig_dict) + prepared = prepare( + model, qconfig_mapping, + example_inputs=inputs, + prepare_custom_config=prepare_custom_config, + backend_config=backend_config) + if not quant_type == QuantType.DYNAMIC: + prepared(*inputs) + + if print_debug_info: + print() + print('quant type:\n', quant_type) + print('original model:\n', model) + print() + print('prepared model:\n', prepared) + + self.checkGraphModuleNodes( + prepared, prepare_expected_node, + prepare_expected_node_occurrence, prepare_expected_node_list) + + prepared_copy = copy.deepcopy(prepared) + qgraph = convert_fx(copy.deepcopy(prepared)) + qgraph_reference = convert_to_reference_fx(copy.deepcopy(prepared)) + result = qgraph(*inputs) + result_reference = qgraph_reference(*inputs) + qgraph_copy = copy.deepcopy(qgraph) + qgraph_reference_copy = copy.deepcopy(qgraph_reference) + + qgraph_to_check = qgraph_reference if is_reference else qgraph + if print_debug_info: + print() + print('quantized model:\n', qgraph_to_check) + self.printGraphModule(qgraph_to_check) + print() + self.checkGraphModuleNodes( + qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list) + return {"prepared": prepared_copy, + "quantized": qgraph_copy, + "quantized_reference": qgraph_reference_copy, + "quantized_output": result, + "quantized_reference_output": result_reference} + + + def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets, + set_qconfig, is_emb_bag, dtype=torch.quint8): + # Test serialization of dynamic EmbeddingBag module using state_dict + if is_emb_bag: + inputs = [indices, offsets] + else: + inputs = [indices] + emb_dict = qemb.state_dict() + b = io.BytesIO() + 
torch.save(emb_dict, b) + b.seek(0) + loaded_dict = torch.load(b) + embedding_unpack = torch.ops.quantized.embedding_bag_unpack + # Check unpacked weight values explicitly + for key in emb_dict: + if isinstance(emb_dict[key], torch._C.ScriptObject): + assert isinstance(loaded_dict[key], torch._C.ScriptObject) + emb_weight = embedding_unpack(emb_dict[key]) + loaded_weight = embedding_unpack(loaded_dict[key]) + self.assertEqual(emb_weight, loaded_weight) + + # Check state dict serialization and torch.save APIs + if is_emb_bag: + loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, + include_last_offset=True, mode='sum', dtype=dtype) + else: + loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype) + self.check_eager_serialization(qemb, loaded_qemb, inputs) + + loaded_qemb.load_state_dict(loaded_dict) + self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight), + embedding_unpack(loaded_qemb._packed_params._packed_weight)) + + + # Test JIT serialization + self.checkScriptable(qemb, [inputs], check_save_load=True) + + # Test from_float call + if is_emb_bag: + float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, + include_last_offset=True, scale_grad_by_freq=False, mode='sum') + else: + float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim) + + if set_qconfig: + float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype, + qscheme=torch.per_channel_affine_float_qparams, + ch_axis=0) + float_embedding.qconfig = QConfig(activation=default_dynamic_quant_observer, + weight=float_qparams_observer) + + prepare_dynamic(float_embedding) + + float_embedding(*inputs) + if is_emb_bag: + q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding) + expected_name = "QuantizedEmbeddingBag" + else: + q_embeddingbag = nnq.Embedding.from_float(float_embedding) + expected_name = "QuantizedEmbedding" + + q_embeddingbag(*inputs) + + self.assertTrue(expected_name in str(q_embeddingbag)) + +class QuantizationLiteTestCase(QuantizationTestCase): + def _create_quantized_model(self, model_class: Type[torch.nn.Module], **kwargs): + # Creates quantized model for testing mobile script modules + qengine = "qnnpack" + with override_quantized_engine(qengine): + qconfig = torch.ao.quantization.get_default_qconfig(qengine) + model = model_class(**kwargs) + model = quantize(model, test_only_eval_fn, [self.calib_data]) + + return model + + def _compare_script_and_mobile(self, + model: torch.nn.Module, + input: torch.Tensor): + # Compares the numerical outputs for script and lite modules + qengine = "qnnpack" + with override_quantized_engine(qengine): + script_module = torch.jit.script(model) + script_module_result = script_module(input) + + max_retry = 5 + for retry in range(1, max_retry + 1): + # retries `max_retry` times; breaks iff succeeds else throws exception + try: + buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter()) + buffer.seek(0) + mobile_module = _load_for_lite_interpreter(buffer) + + mobile_module_result = mobile_module(input) + + torch.testing.assert_close(script_module_result, mobile_module_result) + mobile_module_forward_result = mobile_module.forward(input) + torch.testing.assert_close(script_module_result, mobile_module_forward_result) + + mobile_module_run_method_result = mobile_module.run_method("forward", input) + torch.testing.assert_close(script_module_result, 
mobile_module_run_method_result) + except AssertionError as e: + if retry == max_retry: + raise e + else: + continue + break + + +class PT2EQuantizationTestCase(QuantizationTestCase): + """ + Base QuantizationTestCase for PT2 with some helper methods. + """ + _MAP_TO_FX_TRACED_OPS = { + torch.ops.quantized_decomposed.quantize_per_tensor: torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.quantized_decomposed.quantize_per_channel: torch.ops.quantized_decomposed.quantize_per_channel.default, + torch.ops.quantized_decomposed.dequantize_per_channel: torch.ops.quantized_decomposed.dequantize_per_channel.default, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor.tensor, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, + } + + def _test_quantizer( + self, + model, + example_inputs, + quantizer, + expected_node_occurrence, + expected_node_list=None, + check_against_fx_quant=False, + fx_qconfig_mapping=None, + export_with_dynamic_shape=False, + is_qat=False, + ): + # resetting dynamo cache + torch._dynamo.reset() + m_eager = model.eval() + + # program capture + m = copy.deepcopy(m_eager) + dynamic_shapes = tuple( + {0: torch.export.Dim("dim")} if i == 0 else None + for i in range(len(example_inputs)) + ) + m = capture_pre_autograd_graph( + m, + example_inputs, + dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None, + ) + + if is_qat: + m = prepare_qat_pt2e(m, quantizer) + else: + m = prepare_pt2e(m, quantizer) + # Calibrate + m(*example_inputs) + m = convert_pt2e(m) + + pt2_quant_output = m(*example_inputs) + ns = NodeSpec + node_occurrence = { + ns.call_function(k): v for k, v in expected_node_occurrence.items() + } + if expected_node_list is None: + expected_node_list = [] + node_list = [ns.call_function(n) for n in expected_node_list] + self.checkGraphModuleNodes( + m, expected_node_occurrence=node_occurrence, expected_node_list=node_list + ) + if check_against_fx_quant: + qconfig_mapping = fx_qconfig_mapping + backend_config = get_executorch_backend_config() + m_copy = copy.deepcopy(m_eager) + m_fx = prepare_fx( + m_copy, qconfig_mapping, example_inputs, backend_config=backend_config + ) + m_fx(*example_inputs) + m_fx = _convert_to_reference_decomposed_fx( + m_fx, backend_config=backend_config + ) + m_fx = capture_pre_autograd_graph( + m_fx, + example_inputs, + dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None, + ) + node_occurrence = {} + for k, v in PT2EQuantizationTestCase._MAP_TO_FX_TRACED_OPS.items(): + if k in expected_node_occurrence: + node_occurrence[ns.call_function(v)] = expected_node_occurrence[k] + self.checkGraphModuleNodes(m_fx, expected_node_occurrence=node_occurrence) + fx_quant_output = m_fx(*example_inputs) + self.assertEqual(fx_quant_output, pt2_quant_output) + + def _quantize(self, m, quantizer, example_inputs): + # resetting dynamo cache + torch._dynamo.reset() + + m = capture_pre_autograd_graph( + m, + example_inputs, + ) + m = prepare_pt2e(m, quantizer) + m(*example_inputs) + m = convert_pt2e(m) + return m + + def _get_pt2e_quantized_linear(self, is_per_channel=False) -> torch.fx.GraphModule: + class M(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(2, 2) + + def forward(self, x): + return self.linear(x) + + quantizer = 
XNNPACKQuantizer() + operator_config = get_symmetric_quantization_config(is_per_channel=is_per_channel) + quantizer.set_global(operator_config) + example_inputs = (torch.randn(2, 2),) + m = M().eval() + return self._quantize(m, quantizer, example_inputs) + +# Below are a series of toy models to use in testing quantization + +class SingleLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class AnnotatedSingleLayerLinearModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class SingleLayerLinearDynamicModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearAddModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = torch.add(x, 5) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class RNNDynamicModel(torch.nn.Module): + def __init__(self, mod_type): + super().__init__() + self.qconfig = default_dynamic_qconfig + if mod_type == 'GRU': + self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float) + if mod_type == 'LSTM': + self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float) + + def forward(self, x): + x = self.mod(x) + return x + +class RNNCellDynamicModel(torch.nn.Module): + def __init__(self, mod_type): + super().__init__() + self.qconfig = default_dynamic_qconfig + if mod_type == 'GRUCell': + self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float) + if mod_type == 'LSTMCell': + self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float) + if mod_type == 'RNNReLU': + self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float) + if mod_type == 'RNNTanh': + self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float) + + def forward(self, x): + x = self.mod(x) + return x + +class LSTMwithHiddenDynamicModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float) + + def forward(self, x, hid): + x, hid = self.lstm(x, hid) + return x, hid + +class ConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvTransposeModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + return x + + def 
get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvModel(torch.nn.Module): + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvTransposeModel(torch.nn.Module): + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvBnModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvBnModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.qconfig = default_qconfig + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.bn(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvBnReLUModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvBnReLUModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=True) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + x = self.dequant(x) + return x + + def fuse_model(self): + # TODO: remove this check and define two fuse_modules function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, [['conv', 'bn', 'relu']], inplace=True) + else: + torch.ao.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True) + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class TwoLayerConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float) 
+ + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class TwoLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearModelWithSubmodule(nn.Module): + def __init__(self): + super().__init__() + self.subm = TwoLayerLinearModel() + self.fc = nn.Linear(5, 5) + + def forward(self, x): + x = self.subm(x) + x = self.fc(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.subm.get_example_inputs() + +class AnnotatedTwoLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float)) + self.fc2.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class ActivationsTestModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") + self.quant = torch.ao.quantization.QuantStub() + self.hardswish = torch.nn.Hardswish().to(dtype=torch.float) + self.elu = torch.nn.ELU().to(dtype=torch.float) + self.dequant = torch.ao.quantization.DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.hardswish(x) + x = self.elu(x) + x = self.dequant(x) + return x + +class LinearReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + + +class LinearReluLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearReluAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = torch.add(x, 5) + x = self.fc2(x) + self.relu = torch.nn.ReLU() + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearBnLeakyReluModel(torch.nn.Module): + def __init__(self, with_bn=True): + super().__init__() + self.linear = nn.Linear(5, 5) + self.bn1d = nn.BatchNorm1d(5) + self.leaky_relu = nn.LeakyReLU(0.01) + self.with_bn = with_bn + + def forward(self, x): + x = self.linear(x) + if self.with_bn: + x = self.bn1d(x) + x = self.leaky_relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearTanhModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = nn.Linear(5, 5) + self.tanh = 
nn.Tanh() + + def forward(self, x): + x = self.linear(x) + x = self.tanh(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class ConvBnAddReluModel(torch.nn.Module): + def __init__(self, + with_bn=True, + with_relu=True, + left_conv=True, + two_conv=True, + use_torch_add=True): + super().__init__() + self.conv = nn.Conv2d(5, 5, (2, 2)) + self.conv2 = nn.Conv2d(5, 5, (2, 2)) + self.bn = nn.BatchNorm2d(5) + self.relu = nn.ReLU() + self.with_bn = with_bn + self.with_relu = with_relu + self.two_conv = two_conv + self.left_conv = left_conv + self.use_torch_add = use_torch_add + + def forward(self, x1, x2): + if self.two_conv: + if self.use_torch_add: + if self.with_bn: + x = torch.add(self.bn(self.conv(x1)), self.conv2(x1)) + else: + x = torch.add(self.conv(x1), self.conv2(x1)) + else: + if self.with_bn: + x = self.bn(self.conv(x1)) + self.conv2(x1) + else: + x = self.conv(x1) + self.conv2(x1) + else: + if self.use_torch_add: + if self.left_conv: + if self.with_bn: + x = torch.add(self.bn(self.conv(x1)), x2) + else: + x = torch.add(self.conv(x1), x2) + else: + if self.with_bn: + x = torch.add(x2, self.bn(self.conv(x1))) + else: + x = torch.add(x2, self.conv(x1)) + else: + if self.left_conv: + if self.with_bn: + x = self.bn(self.conv(x1)) + x2 + else: + x = self.conv(x1) + x2 + else: + if self.with_bn: + x = x2 + self.bn(self.conv(x1)) + else: + x = x2 + self.conv(x1) + if self.with_relu: + x = self.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5, 3, 3), torch.rand(1, 5, 2, 2)) + +# TODO: self.fc should be self.conv +class ConvReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +# TODO: self.fc should be self.conv +class ConvReluConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +# TODO: self.fc should be self.conv +class ConvReluAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = torch.add(x, 5) + x = self.fc2(x) + self.relu = torch.nn.ReLU() + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class NormalizationTestModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.quant = torch.ao.quantization.QuantStub() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.layer_norm = torch.nn.LayerNorm(8) + self.group_norm = torch.nn.GroupNorm(2, 8) + self.instance_norm1d = torch.nn.InstanceNorm1d(8) + self.instance_norm2d = torch.nn.InstanceNorm2d(8) + self.instance_norm3d = torch.nn.InstanceNorm3d(8) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.layer_norm(x) + x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3)) + x = self.instance_norm1d(x) + x = 
self.instance_norm2d(x.unsqueeze(-1)) + x = self.instance_norm3d(x.unsqueeze(-1)) + return x + +class NestedModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedNestedModel(torch.nn.Module): + def __init__(self, qengine): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.fc1 = QuantWrapper(self.sub2.fc1) + if qengine == 'fbgemm': + self.sub2.fc1.qconfig = default_per_channel_qconfig + else: + self.sub2.fc1.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedSubNestedModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = QuantWrapper(TwoLayerLinearModel()) + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedCustomConfigNestedModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.qconfig = default_qconfig + + custom_options = { + 'dtype': torch.quint8, + 'qscheme': torch.per_tensor_affine + } + custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options), + weight=default_weight_observer) + self.sub2.fc1.qconfig = custom_qconfig + + self.sub2.fc1 = QuantWrapper(self.sub2.fc1) + self.sub2.fc2 = QuantWrapper(self.sub2.fc2) + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class QuantSubModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = QuantWrapper(TwoLayerLinearModel()) + self.sub2.qconfig = default_qconfig + self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.fc3.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class InnerModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.relu1 = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + self.relu2 = torch.nn.ReLU() + + def forward(self, x): + return self.relu2(self.fc2(self.relu1(self.fc1(x)))) + + def fuse_modules(self): + fusable_layers = [] + named_children = list(self.named_children()) + for idx, (current_name, layer) in enumerate(named_children): + if isinstance(layer, torch.nn.Linear): + if idx >= len(named_children) - 1: + break + if isinstance(named_children[idx + 1][1], torch.nn.ReLU): + fusable_layers.append([current_name, + named_children[idx + 1][0]]) + # TODO: remove this check and define two fuse_modules function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, fusable_layers, inplace=True) + else: + torch.ao.quantization.fuse_modules(self, fusable_layers, inplace=True) + +class FunctionalLinear(torch.nn.Module): + def __init__(self): + 
super().__init__() + self.weight = torch.rand((5, 5)) + self.bias = torch.zeros(5) + + def forward(self, x): + return F.linear(x, self.weight, self.bias) + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class SingleLayerFunctionalLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class TwoLayerFunctionalLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalLinearAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = torch.add(x, 5) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalLinearReluModel(nn.Module): + def __init__(self): + super().__init__() + self.linear = FunctionalLinear() + + def forward(self, x): + x = self.linear(x) + x = F.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear.get_example_inputs() + +class FunctionalLinearReluLinearModel(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.relu = nn.ReLU() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = self.relu(x) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalConv2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.weight = torch.rand(3, 3, 3, 3) + self.bias = torch.rand(3) + self.stride = (1, 1) + self.padding = (0, 0) + self.dilation = (1, 1) + self.groups = 1 + + def forward(self, x): + return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class SingleLayerFunctionalConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class TwoLayerFunctionalConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class FunctionalConvReluModel(nn.Module): + def __init__(self): + super().__init__() + self.conv = FunctionalConv2d() + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv.get_example_inputs() + +class FunctionalConvReluConvModel(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.relu = nn.ReLU() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = 
self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class SkipQuantModel(torch.nn.Module): + r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self): + super().__init__() + self.sub = InnerModule() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.fuse_modules() + +class AnnotatedSkipQuantModel(torch.nn.Module): + r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.sub = QuantWrapper(InnerModule()) + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + # don't quantize this fc + self.fc.qconfig = None + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.module.fuse_modules() + +class QuantStubModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc(x) + return self.dequant(x) + +class ManualLinearQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualDropoutQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.dropout = torch.nn.Dropout(0.5) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.dropout(x) + return self.dequant(x) + +class ManualLinearDynamicQATModel(torch.nn.Module): + r"""A Module that uses a dynamic QAT by default. 
+ """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig or default_dynamic_qat_qconfig + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + +class ManualConvLinearQATModel(torch.nn.Module): + r"""A module with manually inserted `QuantStub` and `DeQuantStub` + and contains both linear and conv modules + """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig if qconfig else torch.ao.quantization.get_default_qat_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float) + self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = x.view(-1, 64).contiguous() + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualConvLinearSymmQATModel(ManualConvLinearQATModel): + r"""Same as ManualConvLinearQATModule but with Symmetric Quantization. + Supported only with qnnpack. + """ + def __init__(self): + super().__init__(default_symmetric_qnnpack_qat_qconfig) + +class ManualEmbeddingBagLinear(nn.Module): + def __init__(self): + super().__init__() + self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum') + self.emb.qconfig = default_embedding_qat_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None, + per_sample_weights: Optional[torch.Tensor] = None): + x = self.emb(input, offsets, per_sample_weights) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class DeFusedEmbeddingBagLinear(nn.Module): + r"""A module to simulate QAT embedding bag with a linear layer, + this module uses a separate embedding and bagging op, similar + to that which is described in the EmbeddingBag documentation. 
+ + https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html + """ + def __init__(self) -> None: + super().__init__() + self.emb = nn.Embedding(num_embeddings=10, embedding_dim=12) + self.emb.qconfig = default_embedding_qat_qconfig + self.bagging_op = torch.sum + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor) -> torch.Tensor: + x = self.bagging_op(self.emb(input), dim=1) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class SubModelForFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) + self.bn = nn.BatchNorm2d(2).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class SubModelWithoutFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=False).to(dtype=torch.float) + + def forward(self, x): + return self.relu(self.conv(x)) + +class ModelForFusion(nn.Module): + def __init__(self, qconfig): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.sub1 = SubModelForFusion() + self.sub2 = SubModelWithoutFusion() + self.fc = nn.Linear(36, 10).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.qconfig = qconfig + self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float) + self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) + self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float) + self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float) + self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float) + self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float) + # don't quantize sub2 + self.sub2.qconfig = None + self.fc.qconfig = None + + def forward(self, x): + x = x.squeeze(2) + x = self.quant(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu4(x) + x = x.unsqueeze(2) + y = x.unsqueeze(2) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.sub1(x) + x = self.dequant(x) + x = self.sub2(x) + x = x.reshape(-1, 36).contiguous() + x = self.fc(x) + y = self.conv2(y) + y = self.relu2(y) + y = self.bn2(y) + y = self.relu3(y) + y = self.dequant(y) + return x + +class ConvBNReLU(nn.Sequential): + def __init__(self): + super().__init__( + nn.Conv2d(3, 3, 1, 1, bias=False), + nn.BatchNorm2d(3), + nn.ReLU(inplace=False) + ) + +class ModelWithSequentialFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, 1) + self.relu1 = nn.ReLU(inplace=False) + layers = [] + for i in range(3): + layers.append(ConvBNReLU()) + self.features = nn.Sequential(*layers) + head = [nn.Linear(300, 10), nn.ReLU(inplace=False)] + self.classifier = nn.Sequential(*head) + self.seq = nn.Sequential() + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.relu1(x) + x = self.features(x) + x = torch.reshape(x, (-1, 3 * 10 * 10)) + x = self.classifier(x) + x = self.seq(x) + x = self.dequant(x) + return x + +class ModelForFusionWithBias(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 5, 
bias=True).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float) + self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.dequant(x) + return x + +class ModelForLinearBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.fc = nn.Linear(20, 10) + self.bn = nn.BatchNorm1d(10) + nn.init.uniform_(self.bn.weight) + nn.init.uniform_(self.bn.bias) + + def forward(self, x): + return self.bn(self.fc(x)) + +class DummyObserver(torch.nn.Module): + def calculate_qparams(self): + return 1.0, 0 + + def forward(self, x): + return x + + +class ModelForConvTransposeBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.ConvTranspose1d(3, 3, 1) + self.bn1 = nn.BatchNorm1d(3) + self.conv2 = nn.ConvTranspose2d(3, 3, 1) + self.bn2 = nn.BatchNorm2d(3) + self.conv3 = nn.ConvTranspose3d(3, 3, 1) + self.bn3 = nn.BatchNorm3d(3) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = x.unsqueeze(2) + x = self.conv2(x) + x = self.bn2(x) + x = x.unsqueeze(2) + x = self.conv3(x) + x = self.bn3(x) + return x + + +class ModelWithFunctionals(torch.nn.Module): + def __init__(self): + super().__init__() + self.mycat = nnq.FloatFunctional() + self.myadd = nnq.FloatFunctional() + self.myadd_relu = nnq.FloatFunctional() + self.mymatmul = nnq.FloatFunctional() + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # self.my_scalar_add = nnq.FloatFunctional() + # self.my_scalar_mul = nnq.FloatFunctional() + + def forward(self, x): + y = self.mycat.cat([x, x, x]) + z = self.myadd.add(y, y) + w = self.myadd_relu.add_relu(z, z) + u = self.mymatmul.matmul(w, w.T) + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # w = self.my_scalar_add.add_scalar(w, -0.5) + # w = self.my_scalar_mul.mul_scalar(w, 0.5) + return u + + +class ResNetBase(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.myop = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = torch.nn.Linear(inplanes, 1) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.myop.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = torch.flatten(out, 1) + out = self.fc(out) + return out + + def fuse_model(self): + # TODO: remove this check and define two fuse_model function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, [['conv1', 'bn1', 'relu1']], inplace=True) + else: + torch.ao.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True) + +class ModelMultipleOps(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = 
norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.skip_add.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +# Model to ensure consistency of fake quant with true quant +# Average pooling and mean operations are not modelled +# accurately with fake-quant so this model does not +# contain those operations +class ModelMultipleOpsNoAvgPool(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.maxpool = nn.MaxPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + skip = self.conv2(x) + out = self.skip_add.add(out, skip) + out = self.relu2(out) + out = self.maxpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +class EmbeddingBagModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, + include_last_offset=True, scale_grad_by_freq=False, mode='sum') + + def forward(self, indices, offsets, per_sample_weights): + return self.emb(indices, offsets, per_sample_weights) + +class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + +class EmbeddingWithStaticLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12) + self.fc = torch.nn.Linear(4, 2) + self.emb.qconfig = float_qparams_weight_only_qconfig + self.qconfig = default_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, indices, offsets, linear_in): + emb = self.emb(indices, offsets) + q_x = self.quant(linear_in) + fc = self.fc(q_x) + fc = self.dequant(fc) + features = torch.cat([fc] + [emb], dim=1) + return features + +class DenseTopMLP(nn.Module): + + def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None: + super().__init__() + + self.dense_mlp = nn.Sequential( + nn.Linear(dense_dim, dense_out), + ) + self.top_mlp = nn.Sequential( + nn.Linear(dense_out + embedding_dim, top_out_in), + nn.Linear(top_out_in, top_out_out), + ) + + def forward( + self, + sparse_feature: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + dense_feature = self.dense_mlp(dense) + features = torch.cat([dense_feature] + [sparse_feature], dim=1) + + out = self.top_mlp(features) + return out + +# thin wrapper around embedding bag, because tracing 
inside nn.Embedding +# bag is not supported at the moment and this is top level +class EmbBagWrapper(nn.Module): + def __init__(self, num_embeddings, embedding_dim): + super().__init__() + self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum') + + def forward(self, indices, offsets): + return self.emb_bag(indices, offsets) + +class SparseNNModel(nn.Module): + _NUM_EMBEDDINGS = 10 + _EMBEDDING_DIM = 5 + _DENSE_DIM = 4 + _DENSE_OUTPUT = 2 + _TOP_OUT_IN = 2 + _TOP_OUT_OUT = 2 + _TOP_MLP_DIM = 1 + + def __init__(self) -> None: + super().__init__() + + self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM) + self.dense_top = DenseTopMLP( + self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN, + self._TOP_OUT_OUT) + + def forward( + self, + sparse_indices: torch.Tensor, + sparse_offsets: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + + sparse_feature = self.model_sparse(sparse_indices, sparse_offsets) + out = self.dense_top(sparse_feature, dense) + + return out + +class TestHelperModules: + class Conv2dPropAnnotaton(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.linear = torch.nn.Linear(3, 3) + + def forward(self, x): + x = self.conv(x) + x = x.view(-1, 3) + x = torch.nn.functional.hardtanh(x, -0.5, 0.5) + x = self.linear(x) + return x + + class Conv2dWithObsSharingOps(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.hardtanh = torch.nn.Hardtanh() + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = self.adaptive_avg_pool2d(x) + x = self.hardtanh(x) + x = torch.mean(x) + return x + + class Conv2dWithTwoLinearPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(16, 8, bias=False) + self.linear2 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear2(self.linear1(permute_out)) + + class Conv2dWithTwoLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(64, 8, bias=False) + self.linear2 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + reshape_out = torch.reshape(conv_out, (2, 64)) + return self.linear2(self.linear1(reshape_out)) + + class ConvLinearWPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 8, 3) + self.linear1 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear1(permute_out) + + class TwoLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = torch.nn.Linear(8, 16, bias=False) + self.linear2 = torch.nn.Linear(16, 8) + + def forward(self, x): + return self.linear2(self.linear1(x)) + + class ConvMaxPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(2, 2, 1) + self.pool = torch.nn.MaxPool2d(1, 1) + + def forward(self, x): + x = self.conv(x) + x = self.pool(x) + return x + + class ConvWithAdaptiveAvgPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = 
self.adaptive_avg_pool2d(x) + return x + + class ConvWithBNRelu(torch.nn.Module): + def __init__(self, relu, dim=2, bn=True, bias=True): + super().__init__() + convs = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d} + bns = {1: torch.nn.BatchNorm1d, 2: torch.nn.BatchNorm2d} + self.conv = convs[dim](3, 3, 3, bias=bias) + + if bn: + self.bn = bns[dim](3) + else: + self.bn = torch.nn.Identity() + if relu: + self.relu = torch.nn.ReLU() + else: + self.relu = torch.nn.Identity() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + class Conv2dThenConv1d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1d = torch.nn.Conv1d(3, 3, 3) + self.conv2d = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x): + x = self.conv2d(x) + x = x.squeeze(0) + x = self.conv1d(x) + return x + + def example_inputs(self): + return (torch.randn(1, 3, 5, 5),) + + class Conv2dWithCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x, y): + x = self.conv1(x) + y = self.conv2(y) + z = torch.cat([x, y], dim=1) + return z + + class Conv2dWithTwoCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x1, x2, x3, x4): + x1 = self.conv1(x1) + x2 = self.conv2(x2) + y = torch.cat([x1, x2], dim=1) + z = x3 + x4 + w = torch.cat([z, y]) + return w + + class ThreeAdd(torch.nn.Module): + def forward(self, x1, x2, x3, x4): + y = x1 + x2 + z = x3 + x4 + w = y + z + return w + + class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + + class EmbeddingConvLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=8) + self.conv = torch.nn.Conv2d(8, 16, (1, 3)) + self.linear = torch.nn.Linear(16, 8) + + def forward(self, indices): + embeddings = self.emb(indices) + embeddings = torch.unsqueeze(embeddings, dim=0) + embeddings = torch.permute(embeddings, (0, 3, 1, 2)) + conv_out = self.conv(embeddings) + conv_out = torch.permute(conv_out, (0, 2, 3, 1)) + conv_out = torch.squeeze(conv_out, dim=0) + return self.linear(conv_out) + + class AddInplaceAdd(torch.nn.Module): + def forward(self, x, y): + x = x + y + x += y + return x + + class MulInplaceMul(torch.nn.Module): + def forward(self, x, y): + x = x * y + x *= y + return x + + class AddMulScalar(torch.nn.Module): + def forward(self, x): + x = x + 3 + x = x * 3 + x += 3 + x *= 3 + return x + + class ConvBnReLU2dAndLinearReLU(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv_bn_relu = TestHelperModules.ConvWithBNRelu(relu=True) + self.linear = torch.nn.Linear(3, 8, bias=False) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.conv_bn_relu(x) + permute_out = torch.permute(x, (0, 2, 3, 1)) + linear_out = self.linear(permute_out) + return linear_out + + class GroupwiseConv2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(4, 4, 3, groups=2) + + def forward(self, x): + return self.conv(x) + + def example_inputs(self): + return (torch.randn(2, 4, 10, 10),) + + class LinearReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = 
torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c3bd4a130e08d24bb10845ff5b8774f731ff1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py @@ -0,0 +1,581 @@ +# mypy: ignore-errors + +import torch +from torch import Tensor +import itertools + +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from torch.utils import _pytree as pytree +from functools import partial +from torch.utils._mode_utils import no_dispatch, all_same_mode +import torch.autograd.forward_ad as fwAD +from typing import Callable +import re + + +def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor): + elem = wrapper_tensor.elem + metadata_wrapper_tensor = metadata_accessor(wrapper_tensor) + metadata_elem = metadata_accessor(elem) + if metadata_wrapper_tensor == metadata_elem: + return + raise RuntimeError( + f"This operator is not Composite Compliant: the " + f"{metadata_name} of the tensor was modified directly without " + f"going through the PyTorch dispatcher.") + +def check_metadata_consistency(wrapper_tensor, CCT): + # CCT: CompositeCompliantTensor class which is generated using generate_cct + if not isinstance(wrapper_tensor, CCT): + return + things_to_check = { + 'shape': Tensor.size, + 'dtype': lambda x: x.dtype, + 'device': lambda x: x.device, + 'numel': Tensor.numel, + 'stride': Tensor.stride, + 'storage_offset': Tensor.storage_offset, + } + for metadata_name, metadata_accessor in things_to_check.items(): + check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor) + +def is_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided', + 'detach', + 'diagonal', + 'expand', + 'expand_as', + 'movedim', + 'narrow', + 'permute', + 'select', + 'squeeze', + 'transpose', + 't', + 'real', + 'imag', + 'view_as_real', + 'view_as_complex', + 'unflatten', + 'unfold', + 'unsqueeze', + 'view', + 'view_as', + 'unbind', + 'split', + 'split_with_sizes', + 'vsplit', + 'hsplit', + 'tensor_split', + 'chunk', + 'swapaxes', + 'slice', + '_reshape_alias', + '_unsafe_view', + '_conj', + 'alias', + } + +# manually populated from native_functions that have inplace_view: True. +# In the future we will probably be able to grab that list directly +def is_inplace_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided_', + 'detach_', + 'squeeze_', + 'swapaxes_', + 'swapdims_', + 't_', + 'transpose_', + 'unsqueeze_', + } + + +# Introspection please save us +def is_inplace(func): + name = func.overloadpacket.__name__ + if re.match('__i.+__', name): + return True + if re.match('__.+__', name): + return False + return name[-1] == '_' + + +def generate_cct_and_mode(autograd_view_consistency=True): + # This function returns a new class CompositeCompliantTensor + # The two arguments control the behaviour described below. + + # autograd_view_consistency: + # If True, alias result using `set_` if func returns a view + # (See Note [Alias Result]). + # Since Forward AD doesn't work with `set_` + # we disable it by setting alias to False. 
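+    # Descriptive note: this factory returns a (CompositeCompliantTensor,
+    # CompositeCompliantTensorMode()) pair -- the wrapper subclass plus the
+    # dispatch-mode instance that its __torch_dispatch__ re-enters for every op.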
+ + class CompositeCompliantTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + @staticmethod + def __new__(cls, elem, mode, *args, **kwargs): + assert type(elem) is not cls, \ + "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported" + + # The storage of CompositeCompliantTensor should never be used directly + # by a Composite operation; if the Composite + # operator attempts to read from the storage without dispatching then it'll + # raise a RuntimeError due to it being a meta storage. + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=elem.requires_grad, + strides=elem.stride(), storage_offset=elem.storage_offset()) + + if elem.requires_grad: + # CompositeCompliantTensor steals the "requires_grad"-ness. + # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests... + tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype, + device=elem.device, layout=elem.layout, + requires_grad=False) + tmp.copy_(elem.detach()) + r.elem = tmp + else: + r.elem = elem + + assert r.stride() == r.elem.stride() + + # Propagate conjugate bits to the wrapper tensor + # Ref: https://github.com/albanD/subclass_zoo/issues/24 + # Ref: https://github.com/albanD/subclass_zoo/issues/21 + torch._C._set_conj(r, r.elem.is_conj()) + torch._C._set_neg(r, r.elem.is_neg()) + + r.mode = mode + return r + + def __repr__(self): + return f"CompositeCompliantTensor({self.elem})" + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + all_args = pytree.arg_tree_leaves(*args, **(kwargs or {})) + modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor)) + if not all_same_mode(modes): + raise RuntimeError("Multiple CompositeCompliantTensorModes NYI") + with modes[0]: + return func(*args, **kwargs) + + class CompositeCompliantTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, CompositeCompliantTensor) else e + + def wrap(e): + return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e + + if func == torch.ops.aten._local_scalar_dense.default: + raise RuntimeError( + ".item() is not allowed to be called inside of composite " + "functions in the PyTorch library because not all backends " + "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.") + + if func.overloadpacket.__name__ in ('set_', 'resize_'): + raise RuntimeError( + f"{func.__name__} is not allowed to be called inside of " + f"Composite operators.") + + if is_inplace(func): + # NB: We are making an assumption that if the function is in-place, + # then the first argument is being written to. Introspection please save us! + mutated_argument = args[0] + if not isinstance(mutated_argument, CompositeCompliantTensor) and \ + any(isinstance(a, CompositeCompliantTensor) for a in args[1:]): + raise RuntimeError( + 'Not composite compliant: performing in-place operation ' + f'{func.__name__} where the Tensor being written to is ' + 'regular Tensor but the other tensors are Tensor Subclasses. 
' + 'Please try to avoid this in-place operation.') + + unwrapped_args = tree_map(unwrap, args) + unwrapped_kwargs = tree_map(unwrap, kwargs) + unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs) + rs = tree_map(wrap, unwrapped_rs) + + if is_view_fn(func) and autograd_view_consistency: + # Note [Alias Result] + # Autograd asserts that for B = A.view_fn(...), B and A's storages + # are the same. Here we try to make B alias A to avoid those asserts. + # See https://github.com/pytorch/pytorch/issues/65339 for more information + # about the issue. + with no_dispatch(): + # Idea: this is a weird way of getting a storage that aliases the input. + # This is a workaround for #65339. + # 1. under no_dispatch, all of the wrapper tensors look like regular + # tensors with special storage (the storage is nullptr and + # advertises CPU/CUDA device. + # 2. we run func, which ends up running the view operation + # 3. All view operations reuse the input's storage and return + # result Tensor(s) with new sizes/strides/offset that alias + # the input. + # 4. we set the storage (and sizes/strides/offset) of the wrapper + # tensor results to be that of the tensors that alias the input + result = func(*args, **kwargs) + if isinstance(result, (tuple, list)): + for a, b in zip(rs, result): + a.set_(b) + else: + rs.set_(result) + + # Some operations are allowed to in-place modify the metadata of the + # inputs. The only ones are the "inplace view functions"; when we + # run into these, we manually modify the metadata of the input. + with no_dispatch(): + if is_inplace_view_fn(func): + func(*args, **kwargs) + + # For each CompositeCompliantTensor t, we check that t and t.elem + # have consistent metadata. If they don't have consistent metadata, + # that means the operator did something fishy. + check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor) + pytree.tree_map_(check, args) + pytree.tree_map_(check, kwargs) + pytree.tree_map_(check, rs) + return rs + + return CompositeCompliantTensor, CompositeCompliantTensorMode() + +def is_tensorlist(lst): + if not isinstance(lst, list) and not isinstance(lst, tuple): + return False + if len(lst) == 0: + return False + all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst) + if all_tensors: + return True + exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst) + if exists_one_tensor: + raise RuntimeError('This test assumes that PyTorch APIs cannot take ' + 'mixed lists of Tensor and other things') + return False + + +def maybe_map(fn, should_map, arg): + return fn(arg) if should_map else arg + + +def wrap(arg, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + if isinstance(arg, torch.Tensor): + return CCT(arg, cct_mode) + if is_tensorlist(arg): + return [CCT(a, cct_mode) for a in arg] + raise RuntimeError("wrap assumes that the input can be wrapped") + + +# Given a list of flat arguments, some of which may be Tensors, return all +# possible ways some of the arguments could be CompositeCompliantTensors (CCT). +# For example, given Tensors A, B, C and flat_args = [A, 1, B], +# We would return the following 4 options: +# [CCT(A), 1, CCT(B)] +# [CCT(A), 1, B] +# [A, 1, CCT(B)] +# [A, 1, B] +# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops +# don't accept that many input Tensors. 
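+# Editor's illustrative sketch (not part of the upstream file): the enumeration
+# defined below is consumed roughly like this, using helpers from this module:
+#
+#     CCT, cct_mode = generate_cct_and_mode()
+#     flat_args = [torch.randn(3), 1, torch.randn(3)]
+#     for wrapped_args, which_wrapped in generate_subclass_choices(flat_args, CCT, cct_mode):
+#         pass  # call the operator under test with `wrapped_args`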
+def generate_subclass_choices(flat_args, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args] + subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes] + + for which_args_are_wrapped in itertools.product(*subclass_options): + + result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg) + for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)] + yield result, which_args_are_wrapped + + +# For an operation f(*args, **kwargs), each Tensor argument may either be +# a regular Tensor or a Tensor Subclass. This iterator iterates through +# all of those options. +def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + flat_kwargs, spec = tree_flatten(kwargs) + flat_args_kwargs = list(args) + list(flat_kwargs) + for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode): + new_args = choice[:len(args)] + new_kwargs = tree_unflatten(choice[len(args):], spec) + which_args_are_wrapped = debug_metadata[:len(args)] + which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec) + yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped + + +def raise_composite_compliance_error(err, additional_info=''): + raise RuntimeError( + "Composite compliance check failed with " + "the above error.\n" + f"{additional_info}" + "If you are adding an OpInfo of an " + "existing operator, please feel free to skip this test " + "because the problem was pre-existing and file an issue. " + "Otherwise, if you added a new operator, please read " + "through the Composite Compliance section in " + "aten/src/ATen/native/README.md for how to resolve this. " + ) from err + + +# This test checks ALL possible permutations of calling `op` with arguments +# that are individually either a regular Tensor or a Tensor subclass. +# +# The general strategy is to wrap some Tensor args and kwargs in +# CompositeCompliantTensor wrappers and call the operation. + +# If some composite operation does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_all_permutations(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + expected = op(*args, **kwargs) + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + try: + actual = op(*new_args, **new_kwargs) + # NOTE: [What errors are Composite Compliance trying to catch?] + # + # There's two things we want to catch: + # - errors that would raise within the torch_dispatch impl + # - data_ptr accesses + # The first is easy to filter for (we could make the error a different + # error class), the second is always going to be a RuntimeError due to + # how it is implemented (if you try to access the data_ptr of thex + # wrapper Tensor, it raises you some internal RuntimeError). + # + # So the most general thing to catch here was RuntimeError. If you + # are here and debugging why your test failed, it's plausible that + # the operator itself is broken and that there are other tests failing. 
+ except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +# Checks via the usage of torch dispatch mode certain anti-patterns that +# are not composite compliant. +# +# In particular, the anti-pattern we are trying to prevent is a user +# creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps +# here because all factory functions will create tensors that are +# CompositeCompliantTensor. +# +# The general strategy is to wrap all Tensor args and kwargs in +# CompositeCompliantTensor wrappers. If an operator that is +# Composite does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_with_mode(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + + def wrap(e): + return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e + + expected = op(*args, **kwargs) + + args = tree_map(wrap, args) + kwargs = tree_map(wrap, kwargs) + try: + with cct_mode: + actual = op(*args, **kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error(err) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +def gather_leaf_tensors(args, kwargs): + leaf_tensors = [] + args, args_spec = tree_flatten(args) + kwargs, kwargs_spec = tree_flatten(kwargs) + args = args + kwargs + for arg in args: + if not isinstance(arg, torch.Tensor): + continue + if arg.requires_grad: + leaf_tensors.append(arg) + return leaf_tensors + + +def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None): + if gradcheck_wrapper is None: + results = op(*args, **kwargs) + else: + results = gradcheck_wrapper(op, *args, **kwargs) + + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] + leaf_tensors = gather_leaf_tensors(args, kwargs) + assert len(leaf_tensors) > 0 + return torch.autograd.grad(flat_diff_results, leaf_tensors, + grads, allow_unused=True, retain_graph=True) + + +# Checks if the backward formula is composite compliant by testing +# all possible permutations of {inputs, grad_outputs} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_backward_formula to things that aren't OpInfos +# while debugging. 
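+# Editor's illustrative sketch (assumption, not part of the upstream file): a
+# minimal direct invocation while debugging, using a plain callable and
+# torch.testing.assert_close as the comparison function:
+#
+#     x = torch.randn(3, requires_grad=True)
+#     check_backward_formula(torch.square, (x,), {},
+#                            assert_equal_fn=torch.testing.assert_close)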
+def check_backward_formula(op: Callable, args, kwargs, + output_process_fn_grad=None, + gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode() + + expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper) + + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + leaf_tensors = gather_leaf_tensors(new_args, new_kwargs) + assert len(leaf_tensors) > 0 + + try: + if gradcheck_wrapper is None: + results = op(*new_args, **new_kwargs) + else: + results = gradcheck_wrapper(op, *new_args, **new_kwargs) + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + # NB: ones, not ones_like, so we get a regular Tensor here + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) + for r in flat_diff_results] + for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode): + try: + actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads, + allow_unused=True, retain_graph=True) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_grads: {which_grad_is_batched}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True) + +# Checks if the forward AD formula is composite compliant by testing +# all possible permutations of {primals, tangents} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_forward_ad_formula to things that aren't OpInfos +# while debugging. +def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False) + + def maybe_tangent(t): + assert type(t) is not CCT + # Generate `tangent` tensor + # if given object is a Tensor and requires grad is set. + if isinstance(t, torch.Tensor) and t.requires_grad: + return torch.randn_like(t) + elif is_tensorlist(t): + return [torch.randn_like(e) if e.requires_grad else None for e in t] + return None + + tangent_args = tuple(maybe_tangent(arg) for arg in args) + flat_kwargs, spec = tree_flatten(kwargs) + flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs) + tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec) + + with fwAD.dual_level(): + def maybe_make_dual(dual): + # Returns dual tensor if primal is a tensor/tensor subclass + # with requires_grad set. 
+ primal, tangent = dual + if isinstance(primal, torch.Tensor) and primal.requires_grad: + return fwAD.make_dual(primal.detach(), tangent) + elif is_tensorlist(primal): + return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri + for pri, tang in zip(primal, tangent)) + return primal + + def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs): + op_args = tuple(map(maybe_make_dual, zip(args, tangent_args))) + op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()} + + if gradcheck_wrapper is None: + return op(*op_args, **op_kwargs) + return gradcheck_wrapper(op, *op_args, **op_kwargs) + + expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs) + expected = tree_map(fwAD.unpack_dual, expected) + expected_primals = tree_map(lambda x: x.primal, expected) + expected_tangents = tree_map(lambda x: x.tangent, expected) + + # Permutations of arg and kwargs in CCT. + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + # Permutations tangent arg and tangent kwargs in CCT. + for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode): + new_tang_args, new_tang_kwargs, \ + which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice + + op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args))) + op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()} + + try: + if gradcheck_wrapper is None: + actual = op(*op_args, **op_kwargs) + else: + actual = gradcheck_wrapper(op, *op_args, **op_kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n" + f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + actual = tree_map(fwAD.unpack_dual, actual) + actual_primals = tree_map(lambda x: unwrap(x.primal), actual) + actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual) + assert_equal_fn(actual_primals, expected_primals, equal_nan=True) + assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py new file mode 100644 index 0000000000000000000000000000000000000000..6cd17627939e848d2195ef51b2159c1b1b5af424 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py @@ -0,0 +1,77 @@ +# mypy: ignore-errors + +import torch +import functools +from torch.testing import make_tensor +from functorch.experimental.control_flow import map +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and + +def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)], + args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2))) + +def inner_f(x, y0, y1): + return [x[0].cos().add_(1.) 
* y0, (x[1] + y1.sin()).cos_().view(x[1].size())] + +def simple_map(xs, y0, y1): + def f(x, y0, y1): + return inner_f(x, y0, y1) + return map(f, xs, y0, y1) + +def nested_map(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + +def triple_nested_map(xs, y0, y1): + def f0(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + return map(f0, xs, y0, y1) + +control_flow_opinfo_db = [ + OpInfo( + "MapControlflowOp", + op=simple_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + "NestedMapControlflowOp", + op=nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + "TripleNestedMapControlflowOp", + op=triple_nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ) +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py new file mode 100644 index 0000000000000000000000000000000000000000..68a5f90d6b2d97ca349d2e68b63fec63fdc53793 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py @@ -0,0 +1,456 @@ +# mypy: ignore-errors + +import torch +import functools +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np +from torch.testing._internal.autograd_function_db import ( + sample_inputs_numpy_cube, + sample_inputs_numpy_mul, + sample_inputs_numpy_sort, + sample_inputs_numpy_take, +) +from torch import Tensor +from torch.types import Number +from typing import * # noqa: F403 +import torch._custom_ops as custom_ops + +# Note: [custom op db] +# +# This is a collection of custom operator test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if subsystems +# support them correctly. 
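+# Editor's illustrative sketch (hypothetical op name; not part of the upstream
+# file): each entry below follows the same registration pattern -- a schema-only
+# declaration plus separate eager, abstract (meta), and autograd registrations:
+#
+#     @custom_ops.custom_op('_torch_testing::numpy_identity')
+#     def numpy_identity(x: Tensor) -> Tensor:
+#         raise NotImplementedError()
+#
+#     @custom_ops.impl('_torch_testing::numpy_identity')
+#     def numpy_identity_impl(x):
+#         return torch.tensor(to_numpy(x), device=x.device)
+#
+#     @custom_ops.impl_abstract('_torch_testing::numpy_identity')
+#     def numpy_identity_abstract(x):
+#         return torch.empty_like(x)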
+
+def to_numpy(tensor):
+    return tensor.cpu().numpy()
+
+@custom_ops.custom_op('_torch_testing::numpy_cube')
+def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
+    raise NotImplementedError()
+
+@custom_ops.impl('_torch_testing::numpy_cube')
+def numpy_cube_impl(x):
+    x_np = to_numpy(x)
+    dx = torch.tensor(3 * x_np ** 2, device=x.device)
+    return torch.tensor(x_np ** 3, device=x.device), dx
+
+@custom_ops.impl_abstract('_torch_testing::numpy_cube')
+def numpy_cube_abstract(x):
+    return x.clone(), x.clone()
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_cube')
+def numpy_cube_save_for_backward(inputs, output):
+    return (inputs.x, output[1])
+
+@custom_ops.impl_backward('_torch_testing::numpy_cube')
+def numpy_cube_backward(ctx, saved, grad_out, grad_dx):
+    x, dx = saved
+    grad_x = torch.ops._torch_testing.numpy_mul(grad_out, dx) + 6 * torch.ops._torch_testing.numpy_mul(grad_dx, x)
+    return {'x': grad_x}
+
+@custom_ops.custom_op('_torch_testing::numpy_mul')
+def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
+    raise NotImplementedError()
+
+@custom_ops.impl('_torch_testing::numpy_mul')
+def numpy_mul_impl(x, y):
+    return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
+
+@custom_ops.impl_abstract('_torch_testing::numpy_mul')
+def numpy_mul_abstract(x, y):
+    assert x.device == y.device
+    return (x * y).contiguous()
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_mul')
+def numpy_mul_save_for_backward(inputs, output):
+    saved = {}
+    saved['x_requires_grad'] = inputs.x.requires_grad
+    saved['y_requires_grad'] = inputs.y.requires_grad
+    # Optimization: only save what is necessary
+    saved['y'] = inputs.y if inputs.x.requires_grad else None
+    saved['x'] = inputs.x if inputs.y.requires_grad else None
+    return saved
+
+@custom_ops.impl_backward('_torch_testing::numpy_mul')
+def numpy_mul_backward(ctx, saved, grad_out):
+    grad_x = grad_out * saved['y'] if saved['x_requires_grad'] else None
+    grad_y = grad_out * saved['x'] if saved['y_requires_grad'] else None
+    return {'y': grad_y, 'x': grad_x}
+
+@custom_ops.custom_op('_torch_testing::numpy_sort')
+def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]:
+    raise NotImplementedError()
+
+@custom_ops.impl("_torch_testing::numpy_sort")
+def numpy_sort_impl(x, dim):
+    device = x.device
+    x = to_numpy(x)
+    ind = np.argsort(x, axis=dim)
+    ind_inv = np.argsort(ind, axis=dim)
+    result = np.take_along_axis(x, ind, axis=dim)
+    return (
+        torch.tensor(result, device=device),
+        torch.tensor(ind, device=device),
+        torch.tensor(ind_inv, device=device),
+    )
+
+@custom_ops.impl_abstract('_torch_testing::numpy_sort')
+def numpy_sort_abstract(x, dim):
+    return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long)
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_sort')
+def numpy_sort_save_for_backward(inputs, output):
+    out, ind, ind_inv = output
+    return [inputs.dim, ind, ind_inv]
+
+@custom_ops.impl_backward('_torch_testing::numpy_sort', output_differentiability=[True, False, False])
+def numpy_sort_backward(ctx, saved, grad_out, grad_ind, grad_ind_inv):
+    dim, ind, ind_inv = saved
+    return {'x': torch.ops._torch_testing.numpy_take(grad_out, ind_inv, ind, dim)}
+
+@custom_ops.custom_op('_torch_testing::numpy_take')
+def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor:
+    raise NotImplementedError()
+
+@custom_ops.impl("_torch_testing::numpy_take")
+def numpy_take_impl(x, ind, ind_inv, dim):
+    device = x.device
+    x = to_numpy(x)
+    ind =
to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + +@custom_ops.impl_abstract('_torch_testing::numpy_take') +def numpy_take_abstract(x, ind, ind_inv, dim): + assert x.device == ind.device + assert x.device == ind_inv.device + assert ind.dtype == torch.long + assert ind_inv.dtype == torch.long + return torch.empty_like(x) + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_take') +def numpy_take_save_for_backward(inputs, output): + return { + 'dim': inputs.dim, + 'ind': inputs.ind, + 'ind_inv': inputs.ind_inv, + } + +@custom_ops.impl_backward('_torch_testing::numpy_take') +def numpy_take_backward(ctx, saved, grad_out): + return { + 'x': torch.ops._torch_testing.numpy_take(grad_out, saved['ind_inv'], saved['ind'], saved['dim']), + 'ind': None, + 'ind_inv': None, + } + +@custom_ops.custom_op('_torch_testing::numpy_nonzero') +def numpy_nonzero(x: Tensor) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_nonzero') +def numpy_nonzero_impl(x): + x_np = to_numpy(x) + res = np.stack(np.nonzero(x_np), axis=1) + if res.shape[0] <= 1: + raise RuntimeError("not supported") + return torch.tensor(res, device=x.device) + +@custom_ops.impl_abstract('_torch_testing::numpy_nonzero') +def numpy_nonzero_abstract(x): + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + shape = [i0, x.dim()] + result = x.new_empty(shape, dtype=torch.long) + return result + +def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = 10 + result = make_arg(shape, low=0.9, high=2) + mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long) + with torch.no_grad(): + result *= mask + + yield SampleInput(result, args=()) + +@custom_ops.custom_op('_torch_testing::numpy_view_copy') +def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_view_copy') +def numpy_view_copy_impl(x, shape) -> Tensor: + return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device) + +@custom_ops.impl_abstract('_torch_testing::numpy_view_copy') +def numpy_view_copy_abstract(x, shape) -> Tensor: + return x.clone().view(shape).clone() + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_view_copy') +def numpy_view_copy_save_for_backward(inputs, output) -> Tensor: + return inputs.x.shape + +@custom_ops.impl_backward('_torch_testing::numpy_view_copy') +def numpy_view_copy_backward(ctx, x_shape, grad_out) -> Tensor: + return {'x': torch.ops._torch_testing.numpy_view_copy(grad_out, x_shape)} + +def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + result = make_arg(2, 3, 4, low=0.9, high=2) + yield SampleInput(result, args=([2, 12],)) + +@custom_ops.custom_op('_torch_testing::numpy_cat') +def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_cat') +def numpy_cat_impl(xs, dim): + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + np_xs = [to_numpy(x) for x in xs] + np_out = np.concatenate(np_xs, axis=dim) + return torch.tensor(np_out, device=xs[0].device) + +@custom_ops.impl_abstract('_torch_testing::numpy_cat') +def numpy_cat_abstract(xs, dim): + 
assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + return torch.cat(xs, dim=dim) + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_cat') +def numpy_cat_save_for_backward(inputs, output): + dim_sizes = [x.shape[inputs.dim] for x in inputs.xs] + return dim_sizes, inputs.dim + +@custom_ops.impl_backward('_torch_testing::numpy_cat') +def numpy_cat_backward(ctx, saved, grad_out): + dim_sizes, dim = saved + splits = list(np.cumsum(dim_sizes)[:-1]) + grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim) + return {'xs': grad_xs} + +def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + r0 = make_arg(2, 3, 4, low=0.9, high=2) + r1 = make_arg(4, 3, 4, low=0.9, high=2) + r2 = make_arg(5, 3, 4, low=0.9, high=2) + yield SampleInput([r0, r1, r2], args=(0,)) + +@custom_ops.custom_op('_torch_testing::numpy_split_copy') +def numpy_split_copy(x: Tensor, sections: Sequence[int], dim: int) -> List[Tensor]: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_split_copy') +def numpy_split_copy_impl(x, splits, dim): + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs] + +@custom_ops.impl_abstract('_torch_testing::numpy_split_copy') +def numpy_split_copy_abstract(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_split_copy') +def numpy_split_copy_save_for_backward(inputs, output): + return inputs.dim + +@custom_ops.impl_backward('_torch_testing::numpy_split_copy') +def numpy_split_copy_backward(ctx, saved, grad_out): + dim = saved + return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)} + +def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg(2, 9, low=0.9, high=2) + yield SampleInput(x, args=([1, 3, 6], 1)) + +@custom_ops.custom_op('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int(x: Tensor, sections: Sequence[int], dim: int) -> Tuple[List[Tensor], int]: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_impl(x, splits, dim): + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits) + +@custom_ops.impl_abstract('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_abstract(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits) + +@custom_ops.impl_save_for_backward( + '_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_save_for_backward(inputs, output): + return inputs.dim + +@custom_ops.impl_backward( + '_torch_testing::numpy_split_copy_with_int', + output_differentiability=[True, False]) +def numpy_split_copy_with_int_backward(ctx, saved, grad_out, _): + dim = saved + return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)} + +@custom_ops.custom_op('_torch_testing::numpy_nms') +def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_nms') +def 
numpy_nms_impl(boxes, scores, iou_threshold): + # Adapted from Ross Girshick's fast-rcnn implementation at + # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py + assert boxes.device == scores.device + device = boxes.device + + boxes = to_numpy(boxes) + scores = to_numpy(scores) + + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= iou_threshold)[0] + order = order[inds + 1] + + result = torch.tensor(np.stack(keep), device=device) + # Needed for data-dependent condition :( + assert result.size(0) >= 2 + return result + +@custom_ops.impl_abstract('_torch_testing::numpy_nms') +def numpy_nms_abstract(boxes, scores, iou_threshold): + assert boxes.device == scores.device + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + result = boxes.new_empty([i0], dtype=torch.int64) + return result + +def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype) + N = 64 + xs = make_arg([N], low=0, high=28) + dx = make_arg([N], low=0, high=4) + ys = make_arg([N], low=0, high=28) + dy = make_arg([N], low=0, high=4) + boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad) + scores = make_arg([N], low=0, high=1, requires_grad=requires_grad) + iou_threshold = make_arg([], low=0, high=1).item() + + yield SampleInput(boxes, args=(scores, iou_threshold)) + +custom_op_db = [ + OpInfo( + 'NumpyCubeCustomOp', + op=torch.ops._torch_testing.numpy_cube, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulCustomOp', + op=torch.ops._torch_testing.numpy_mul, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortCustomOp', + op=torch.ops._torch_testing.numpy_sort, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyTakeCustomOp', + op=torch.ops._torch_testing.numpy_take, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyNonzeroCustomOp', + op=torch.ops._torch_testing.numpy_nonzero, + sample_inputs_func=sample_inputs_numpy_nonzero, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyNMSCustomOp', + op=torch.ops._torch_testing.numpy_nms, + sample_inputs_func=sample_inputs_numpy_nms, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyViewCopyCustomOp', + op=torch.ops._torch_testing.numpy_view_copy, + sample_inputs_func=sample_inputs_numpy_view_copy, + dtypes=all_types_and(torch.bool, torch.half), + 
supports_autograd=True, + supports_out=False, + ), + OpInfo( + 'NumpyCatCustomOp', + op=torch.ops._torch_testing.numpy_cat, + sample_inputs_func=sample_inputs_numpy_cat, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyCustomOp', + op=torch.ops._torch_testing.numpy_split_copy, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyWithIntCustomOp', + op=torch.ops._torch_testing.numpy_split_copy_with_int, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0], + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ced8671404e26bdc655a552f9ec868cda02d31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py @@ -0,0 +1,206 @@ +# mypy: ignore-errors + +import re +import sys +import time +from functools import partial, wraps +from typing import Tuple + +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch.distributed.rpc import _rref_context_get_debug_info +from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN + + +if not dist.is_available(): + print("c10d not available, skipping tests", file=sys.stderr) + sys.exit(0) + + +INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}" + +def dist_init( + old_test_method=None, + setup_rpc: bool = True, + clean_shutdown: bool = True, + faulty_messages=None, + messages_to_delay=None, +): + """ + We use this decorator for setting up and tearing down state since + MultiProcessTestCase runs each `test*` method in a separate process and + each process just runs the `test*` method without actually calling + 'setUp' and 'tearDown' methods of unittest. + + Note: pass the string representation of MessageTypes that should be used + with the faulty agent's send function. By default, all retriable messages + ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is + set from faulty_rpc_agent_test_fixture.py). + """ + # If we use dist_init without arguments (ex: @dist_init), old_test_method is + # appropriately set and we return the wrapper appropriately. On the other + # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)), + # old_test_method is None and we return a functools.partial which is the real + # decorator that is used and as a result we recursively call dist_init with + # old_test_method and the rest of the arguments appropriately set. + if old_test_method is None: + return partial( + dist_init, + setup_rpc=setup_rpc, + clean_shutdown=clean_shutdown, + faulty_messages=faulty_messages, + messages_to_delay=messages_to_delay, + ) + + @wraps(old_test_method) + def new_test_method(self, *arg, **kwargs): + # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted + # in tests. 
+ import torch.distributed.rpc.api as api + + api._ignore_rref_leak = False + self.worker_id = self.rank + self.setup_fault_injection(faulty_messages, messages_to_delay) + + rpc_backend_options = self.rpc_backend_options + if setup_rpc: + if TEST_WITH_TSAN: + # TSAN runs much slower. + rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5 + rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60 + + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + return_value = old_test_method(self, *arg, **kwargs) + + if setup_rpc: + rpc.shutdown(graceful=clean_shutdown) + + return return_value + + return new_test_method + + +def noop() -> None: + pass + + +def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str: + """ + Loops until an RPC to the given rank fails. This is used to + indicate that the node has failed in unit tests. + Args: + rank (int): Rank of the node expected to fail + expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure + occurs, not just any. + """ + while True: + try: + rpc.rpc_sync(f"worker{rank}", noop, args=()) + time.sleep(0.1) + except Exception as e: + if re.search(pattern=expected_error_regex, string=str(e)): + return str(e) + + +def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None: + """ + The RRef protocol holds forkIds of rrefs in a map until those forks are + confirmed by the owner. The message confirming the fork may arrive after + our tests check whether this map is empty, which leads to failures and + flaky tests. to_here also does not guarantee that we have finished + processind the owner's confirmation message for the RRef. This function + loops until the map is empty, which means the messages have been received + as processed. Call this function before asserting the map returned by + _get_debug_info is empty. + """ + start = time.time() + while True: + debug_info = _rref_context_get_debug_info() + num_pending_futures = int(debug_info["num_pending_futures"]) + num_pending_users = int(debug_info["num_pending_users"]) + if num_pending_futures == 0 and num_pending_users == 0: + break + time.sleep(0.1) + if time.time() - start > timeout: + raise ValueError( + "Timed out waiting to flush pending futures and users, had {} pending futures and {} pending users".format( + num_pending_futures, num_pending_users + ) + ) + + +def get_num_owners_and_forks() -> Tuple[str, str]: + """ + Retrieves number of OwnerRRefs and forks on this node from + _rref_context_get_debug_info. + """ + rref_dbg_info = _rref_context_get_debug_info() + num_owners = rref_dbg_info["num_owner_rrefs"] + num_forks = rref_dbg_info["num_forks"] + return num_owners, num_forks + + +def wait_until_owners_and_forks_on_rank( + num_owners: int, num_forks: int, rank: int, timeout: int = 20 +) -> None: + """ + Waits until timeout for num_forks and num_owners to exist on the rank. Used + to ensure proper deletion of RRefs in tests. 
+ """ + start = time.time() + while True: + num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync( + worker_name(rank), get_num_owners_and_forks, args=(), timeout=5 + ) + num_owners_on_rank = int(num_owners_on_rank) + num_forks_on_rank = int(num_forks_on_rank) + if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks: + return + time.sleep(1) + if time.time() - start > timeout: + raise ValueError( + "Timed out waiting {} sec for {} owners and {} forks on rank, had {} owners and {} forks".format( + timeout, + num_owners, + num_forks, + num_owners_on_rank, + num_forks_on_rank, + ) + ) + + +def initialize_pg(init_method, rank: int, world_size: int) -> None: + # This is for tests using `dist.barrier`. + if not dist.is_initialized(): + dist.init_process_group( + backend="gloo", + init_method=init_method, + rank=rank, + world_size=world_size, + ) + + +def worker_name(rank: int) -> str: + return f"worker{rank}" + + +def get_function_event(function_events, partial_event_name): + """ + Returns the first event that matches partial_event_name in the provided + function_events. These function_events should be the output of + torch.autograd.profiler.function_events(). + + Args: + function_events: function_events returned by the profiler. + event_name (str): partial key that the event was profiled with. + """ + event = [event for event in function_events if partial_event_name in event.name][0] # noqa: RUF015 + return event diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e3572cfc4c6a0ddc3d8fa2e1b056415204acdfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py @@ -0,0 +1 @@ +# mypy: ignore-errors diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4d125d2bd0955ab28d5ea4987f482a656cf77b01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py @@ -0,0 +1,66 @@ +# mypy: ignore-errors + +import copy +import random +import torch +from torch.distributed._shard import sharded_tensor + +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, +) + +PLACEMENTS = [ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", +] + +DEFAULT_GPU_NUM = 4 + + +def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0): + spec_list = [] + for i in range(len(sharding_dims)): + random.Random(seed + i).shuffle(PLACEMENTS) + spec_list.append( + ChunkShardingSpec( + dim=sharding_dims[i], + placements=copy.deepcopy(PLACEMENTS), + ) + ) + return spec_list + +class MyShardedModel2(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor2 = sharded_tensor.rand( + spec, 10, 20, process_group=group, 
init_rrefs=init_rrefs + ) + else: + self.sharded_tensor2 = None + self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2)) + + +class MyShardedModel1(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor1 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor1 = None + self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2)) + self.submodule = MyShardedModel2(spec, group, init_rrefs) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..6dd034eb0f45aeea3906e60530b97289ec74ceac --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py @@ -0,0 +1,42 @@ +# mypy: ignore-errors + +import torch +import torch.nn as nn + +from torch.distributed._shard.sharded_tensor import ShardedTensor + + +class SimpleMegatronLM(nn.Module): + def __init__(self, linear_size, rank=None, dtype=torch.float32): + super().__init__() + self.fc1 = nn.Linear(*linear_size[0], dtype=dtype) + self.gelu = nn.GELU() + self.fc2 = nn.Linear(*linear_size[1], dtype=dtype) + if rank is not None: + self.fc1.cuda(rank) + self.fc2.cuda(rank) + + def forward(self, inp): + return self.fc2(self.gelu(self.fc1(inp))) + + def get_weights(self): + if isinstance(self.fc1.weight, ShardedTensor): + weight1 = self.fc1.weight.local_tensor() + else: + weight1 = self.fc1.weight + + if isinstance(self.fc2.weight, ShardedTensor): + weight2 = self.fc2.weight.local_tensor() + else: + weight2 = self.fc2.weight + + return (weight1, weight2) + + def get_biases(self): + return (self.fc1.bias, self.fc2.bias) + + def get_weight_grads(self): + return (self.fc1.weight.grad, self.fc2.weight.grad) + + def get_bias_grads(self): + return (self.fc1.bias.grad, self.fc2.bias.grad) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec7499575d81380b3513b344826e55d29aa738e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py @@ -0,0 +1,51 @@ +# mypy: ignore-errors + +# Copyright (c) Meta Platforms, Inc. and affiliates + +import os +import shutil +import tempfile +from functools import wraps +from typing import Any, Callable, Dict, Optional, Tuple + +import torch.distributed as dist + + +def with_temp_dir( + func: Optional[Callable] = None, +) -> Optional[Callable]: + """ + Wrapper to initialize temp directory for distributed checkpoint. 
+ """ + assert func is not None + + @wraps(func) + def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None: + if dist.is_initialized(): + # Only create temp_dir when rank is 0 + if dist.get_rank() == 0: + temp_dir = tempfile.mkdtemp() + print(f"Using temp directory: {temp_dir}") + else: + temp_dir = "" + object_list = [temp_dir] + + # Broadcast temp_dir to all the other ranks + os.sync() + dist.broadcast_object_list(object_list) + self.temp_dir = object_list[0] + os.sync() + else: + temp_dir = tempfile.mkdtemp() + print(f"No process group initialized, using temp directory: {temp_dir}") + self.temp_dir = temp_dir + + try: + func(self, *args, **kwargs) + finally: + if dist.is_initialized() and dist.get_rank() == 0: + shutil.rmtree(self.temp_dir, ignore_errors=True) + else: + shutil.rmtree(self.temp_dir, ignore_errors=True) + + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..aa05792dc176ff4a8e9f6845669536978bab7a3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py @@ -0,0 +1,113 @@ +# mypy: ignore-errors + +# Owner(s): ["oncall: distributed"] + +import copy +from itertools import chain +from typing import Any, Dict + +import torch +import torch.nn as nn + +from torch.distributed._sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint.state_dict import ( + PG, + set_state_dict, + STATE, + StateDictOptions, +) + + +class VerifyStateDictMixin: + def _compare_tensor(self, orig_tensor, dist_tensor): + if isinstance(dist_tensor, (DTensor, ShardedTensor)): + dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey") + self.assertTrue(isinstance(dist_tensor, torch.Tensor)) + self.assertTrue(torch.allclose(orig_tensor, dist_tensor)) + + def _verify_msd( + self, + msd: Dict[str, Any], + dist_msd: Dict[str, Any], + options: StateDictOptions = StateDictOptions(), + ) -> None: + if not options.ignore_frozen_params: + self.assertEqual(len(msd), len(dist_msd)) + for fqn, param in msd.items(): + dist_param = dist_msd.get(fqn, None) + if not options.ignore_frozen_params: + self.assertIsNotNone(dist_param, f"{fqn=}") + self._compare_tensor(param, dist_param) + elif dist_param is None: + self.assertFalse(param.requires_grad, f"{fqn=}") + + def _verify_osd( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + osd: Dict[str, Any], + dist_osd: Dict[str, Any], + ) -> None: + params = list(chain.from_iterable(g["params"] for g in optim.param_groups)) + param_pid_mapping = dict(zip(params, range(len(params)))) + fqn_pid_mapping = {} + for fqn, param in model.named_parameters(): + pid = param_pid_mapping[param] + fqn_pid_mapping[fqn] = pid + fqn_pid_mapping[pid] = fqn + # Check optimizer_state_dict state + + self.assertEqual(len(osd[STATE]), len(dist_osd[STATE])) + for pid, states in osd[STATE].items(): + fqn = fqn_pid_mapping[pid] + dist_states = dist_osd[STATE].get(fqn, None) + self.assertIsNotNone(dist_states, fqn) + self.assertEqual(len(states), len(dist_states)) + for key, state in states.items(): + dist_state = states.get(key, None) + self.assertIsNotNone(dist_state) + self._compare_tensor(state, dist_state) + + # Check optimizer_state_dict param_group + 
old_dist_osd_pg = dist_osd[PG] + if len(osd[PG]) != len(dist_osd[PG]): + self.assertTrue(len(dist_osd[PG]) > len(osd[PG])) + new_pg = copy.deepcopy(dist_osd[PG][0]) + new_pg["params"] = [] + for dist_group in dist_osd[PG]: + new_pg["params"].extend(dist_group["params"]) + dist_osd[PG] = [new_pg] + + self.assertEqual(len(osd[PG]), len(dist_osd[PG])) + for group, dist_group in zip(osd[PG], dist_osd[PG]): + self.assertEqual(len(group), len(dist_group)) + for key, value in group.items(): + # Below doesn't work because param_groups can have None + # values. + # dist_value = dist_group.get(key, None) + # self.assertIsNotNone(dist_value, (dist_group, group)) + dist_value = dist_group[key] + if key == "params": + fqns = [fqn_pid_mapping[pid] for pid in value] + self.assertEqual(sorted(fqns), sorted(dist_value)) + else: + self.assertEqual(value, dist_value) + dist_osd[PG] = old_dist_osd_pg + + def _verify_osd_by_load( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + new_optim: torch.optim.Optimizer, + dist_osd: Dict[str, Any], + ) -> None: + new_dist_osd = _gather_state_dict(dist_osd) + set_state_dict( + model, + optimizers=new_optim, + model_state_dict={}, + optim_state_dict=new_dist_osd, + ) + self.assertEqual(optim.state_dict(), new_optim.state_dict()) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..e13c814b12a5ffffd3649148d5ce54fa1d4804d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py @@ -0,0 +1,733 @@ +# mypy: ignore-errors + +import contextlib +import enum +import logging +import os +import threading +from typing import NamedTuple + +import torch +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.nn as nn +from torch.distributed import rpc +from torch.distributed.nn import RemoteModule +from torch.nn.parallel import DistributedDataParallel +from torch.testing._internal.common_distributed import ( + requires_gloo, + requires_nccl, + skip_if_lt_x_gpu, + skip_if_rocm, +) +from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +NUM_EM_ROW = 2 +D_SPARSE = 3 +D_DENSE = 2 +D_HID = 3 +D_OUT = 1 +NUM_TRAINERS = 4 +# Trainers + the master + the remote worker +WORLD_SIZE = NUM_TRAINERS + 2 +TRAINER_RANKS = list(range(NUM_TRAINERS)) +REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1 +MASTER_RANK = REMOTE_WORKER_RANK + 1 + + +class DdpMode(enum.Enum): + # Don't apply DDP + NONE = enum.auto() + # Apply DDP to the top level nn.Module + OUTSIDE = enum.auto() + # Embed DDP inside the top level nn.Module + INSIDE = enum.auto() + + +def init_logger(): + logger = logging.getLogger(__name__) + level = logging.DEBUG if "debug" in os.environ else logging.INFO + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + console.setFormatter(formatter) + console.setLevel(level) + # add the handlers to the logger + logger.addHandler(console) + logger.propagate = False + return logger + + +gLogger = init_logger() + + +class FeatureSet(NamedTuple): + """ A feature set has 2 types of 
features""" + + dense_features: torch.Tensor + sparse_features: torch.LongTensor + values: torch.Tensor + + +def _call_method(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def _remote_method(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +def _remote_method_async(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +class RemoteEM(nn.Module): + def __init__(self, num_embeddings: int, embedding_dim: int): + gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim) + super().__init__() + init_em = [0.5] * embedding_dim + self.em = nn.EmbeddingBag( + num_embeddings, + embedding_dim, + _weight=torch.tensor([init_em] * num_embeddings), + ) + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteEM.forward() on: %s", input) + return self.em(input, offsets=torch.LongTensor(range(input.shape[0]))) + + +# Return a linear module with predefined parameters. +def getLinear(d_in, d_out): + l = nn.Linear(d_in, d_out, bias=False) + w = torch.ones((d_out, d_in)) + w[0][0] = -1 + w.requires_grad_() + l.weight.data = w + return l + + +class RemoteNet(nn.Module): + def __init__(self, d_in: int, d_out: int): + gLogger.info("Initing RemoteNet with %s %s", d_in, d_out) + super().__init__() + self.fc = getLinear(d_in, d_out) + self.relu = nn.ReLU() + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteNet.forward() on: %s", input) + return self.relu(self.fc(input)) + + +class HybridModel(nn.Module): + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + process_group_for_ddp: dist.ProcessGroup = None, + ): + super().__init__() + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.fc1 = getLinear(D_DENSE, D_DENSE) + self.fc2 = getLinear(D_HID, D_OUT) + + self.non_ddp_params = tuple(self.fc1.parameters()) + tuple( + self.fc2.parameters() + ) + self.ddp_params = () + + if process_group_for_ddp is not None: + self.non_ddp_params, self.ddp_params = ( + tuple(self.fc1.parameters()), + tuple(self.fc2.parameters()), + ) + gLogger.info("Use DDP for the second local net.") + self.fc2 = DistributedDataParallel( + self.fc2, check_reduction=True, process_group=process_group_for_ddp + ) + + gLogger.info( + "HybridModel has %s groups of parameters.", len(list(self.parameters())) + ) + + def forward(self, input: FeatureSet): + gLogger.debug("Running HybridModel.forward on %s", input) + sparse = _remote_method( + RemoteEM.forward, self.remote_em_rref, input.sparse_features + ) + # The same size of mini batch. 
+ assert sparse.shape[0] == input.dense_features.shape[0] + dense = self.fc1(input.dense_features) + x = torch.cat((dense, sparse), 1) + gLogger.debug("Concatenated feature: %s", x) + x = _remote_method(RemoteNet.forward, self.remote_net_rref, x) + return self.fc2(x) + + +class Trainer: + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ddp_mode: DdpMode, + rank: int, + ): + self.rank = rank + self.trainer_group = ( + dist.new_group(TRAINER_RANKS) + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE) + else None + ) + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.hybrid_module = HybridModel( + self.remote_em_rref, + self.remote_net_rref, + self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None, + ) + self.ddp_params, self.non_ddp_params = ( + self.hybrid_module.ddp_params, + self.hybrid_module.non_ddp_params, + ) + if ddp_mode == DdpMode.OUTSIDE: + gLogger.info("Wrapping the whole hybrid module into DDP.") + self.ddp_params += self.non_ddp_params + self.non_ddp_params = () + self.hybrid_module = DistributedDataParallel( + self.hybrid_module, + check_reduction=True, + process_group=self.trainer_group, + ) + gLogger.info( + "Succeeded in creating a HybridModel instance with " + "%s ddp params and %s other local params.", + len(self.ddp_params), len(self.non_ddp_params) + ) + + def destroy_pg(self): + if self.trainer_group: + dist.destroy_process_group(self.trainer_group) + + def train_batch( + self, + mini_batch: FeatureSet, + trainer_has_less_inputs: bool, + simulate_uneven_inputs: bool, + ): + grads_dict = None + + if not simulate_uneven_inputs: + input_batches = [mini_batch] + else: + # Split into microbatches, and trim to simulate uneven inputs. + dense_features = mini_batch.dense_features + sparse_features = mini_batch.sparse_features + values = mini_batch.values + + dense_microbatch = torch.split(dense_features, 2) + sparse_microbatch = torch.split(sparse_features, 2) + values_microbatch = torch.split(values, 2) + batches = [] + for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch): + feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v) + batches.append(feature_set) + + if trainer_has_less_inputs: + input_batches = batches[: len(batches) // 2] + gLogger.info( + "Trainer reduced input patches from %s " + "to %s to simulate uneven inputs.", + len(batches), len(input_batches) + ) + else: + input_batches = batches + + with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext(): + for b in input_batches: + with dist_autograd.context() as context_id: + output = self.hybrid_module.forward(b) + loss = (output * mini_batch.values).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info( + "Loss is %s for mini batch: %s. " + "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict + ) + return ( + tuple(grads_dict[param] for param in self.ddp_params), + tuple(grads_dict[param] for param in self.non_ddp_params), + ) + + +def get_training_examples(): + n = 16 + training_examples = FeatureSet( + dense_features=torch.zeros((n, D_DENSE)), + sparse_features=torch.zeros(n, dtype=torch.long), + values=torch.zeros(n), + ) + idx = 0 + # Every example has another one that has exactly the same features but an + # opposite value. Therefore, their grads cancel each other in all-reduce. 
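+    # Concretely: each example contributes loss_i = output(features_i) * value_i,
+    # so its gradient contribution is value_i * d output(features_i) / d theta.
+    # A pair with identical features and values +v / -v contributes +v * g and
+    # -v * g for the same g, which sums to zero once the per-trainer gradients
+    # are all-reduced.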
+ for value in (-1, 1): + for x in (-1.0 * value, 1.0 * value): + for y in (1.0 * value, -1.0 * value): + for z in (0, 1): + training_examples.dense_features[idx, :] = torch.tensor((x, y)) + training_examples.sparse_features[idx] = z + training_examples.values[idx] = value + idx += 1 + + # Split the examples among NUM_TRAINERS trainers + assert 0 == (n % NUM_TRAINERS) + examples_per_trainer = int(n / NUM_TRAINERS) + return [ + FeatureSet( + dense_features=training_examples.dense_features[ + start : start + examples_per_trainer, : + ], + sparse_features=training_examples.sparse_features[ + start : start + examples_per_trainer + ], + values=training_examples.values[start : start + examples_per_trainer], + ) + for start in range(0, n, examples_per_trainer) + ] + + +shutdown_signal = threading.Condition() + + +def set_shutdown_signal(): + global shutdown_signal + with shutdown_signal: + shutdown_signal.notify() + + +class DdpUnderDistAutogradTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return WORLD_SIZE + + def remote_worker_name(self) -> str: + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{REMOTE_WORKER_RANK}" + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + def _remote_worker_process(self, ddp_mode): + gLogger.info("The remote worker is running.") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting remote worker.") + dist.destroy_process_group() + + def _trainer_process(self, rank: int): + gLogger.info("Running the trainer #%s...", rank) + gLogger.info( + "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS + ) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + gLogger.info("Waiting for shutdown signal on trainer #%s...", rank) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting the trainer #%s...", rank) + dist.destroy_process_group() + + def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool): + gLogger.info("Running the master process...") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_em_rref = rpc.remote( + self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE) + ) + remote_net_rref = rpc.remote( + self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID) + ) + gLogger.info("Created remote rrefs on master") + self.do_test_on_master( + ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref + ) + + def do_test_on_master( + self, + ddp_mode: DdpMode, + simulate_uneven_inputs: bool, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ): + if simulate_uneven_inputs: + gLogger.info( + "Running DDP + RPC test with simulating uneven inputs across trainers." 
+ ) + + trainer_rrefs = [] + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + trainer_rrefs.append( + rpc.remote( + trainer, + Trainer, + args=(remote_em_rref, remote_net_rref, ddp_mode, rank), + ) + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + training_examples = get_training_examples() + for _ in range(3): + futures = [] + num_trainers = len(trainer_rrefs) + for idx, trainer_rref in enumerate(trainer_rrefs): + # Half the trainers will deplete inputs earlier than the rest. + trainer_has_less_inputs = ( + simulate_uneven_inputs and idx < num_trainers // 2 + ) + futures.append( + _remote_method_async( + Trainer.train_batch, + trainer_rref, + training_examples[idx], + trainer_has_less_inputs, + simulate_uneven_inputs, + ) + ) + + for future in futures: + ddp_grads, non_ddp_grads = future.wait() + # When there are uneven inputs, it is not necessary that grads + # cancel each other out, since some trainers contribute 0 grad. + if not simulate_uneven_inputs: + for grad in ddp_grads: + self.assertEqual( + grad, + torch.zeros_like(grad), + msg=f"The grad for any ddp parameter should be zeros, because " + "the training examples' grads cancel each other. Received " + f"gradient {grad}", + ) + for grad in non_ddp_grads: + self.assertNotEqual( + grad, + torch.zeros_like(grad), + msg="The grad for any non-ddp parameter shouldn't be zeros", + ) + + # Destroy process groups + for idx, trainer_rref in enumerate(trainer_rrefs): + _remote_method_async(Trainer.destroy_pg, trainer_rref).wait() + + # Send shutdown signals. + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + rpc.rpc_sync(trainer, set_shutdown_signal, args=()) + + rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=()) + + def _do_test(self, ddp_mode, simulate_uneven_inputs=False): + if self.rank == MASTER_RANK: + self._master_process(ddp_mode, simulate_uneven_inputs) + elif self.rank == REMOTE_WORKER_RANK: + self._remote_worker_process(ddp_mode) + elif self.rank in TRAINER_RANKS: + self._trainer_process(self.rank) + else: + raise RuntimeError(f"Unknown process rank: {self.rank}") + + @requires_gloo() + @dist_init + def test_backward_no_ddp(self): + self._do_test(DdpMode.NONE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside(self): + self._do_test(DdpMode.OUTSIDE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside_uneven_inputs(self): + self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_backward_ddp_inside(self): + self._do_test(DdpMode.INSIDE) + + +# Common utils for both CPU and CUDA test suites +class CommonDdpComparisonTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return NUM_TRAINERS + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + @staticmethod + def get_remote_grads(rref, context_id): + return dist_autograd.get_gradients(context_id)[rref.local_value().weight] + + +class DdpComparisonTest(CommonDdpComparisonTest): + def _run_test_ddp_comparision(self, simulate_uneven_inputs=False): + gLogger.info("Running trainer rank: %s", self.rank) + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
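+        # (With identical grads on every rank, the all-reduce would be a no-op,
+        # so the comparison below could not tell whether the dist_autograd
+        # backward pass actually went through DDP's gradient averaging.)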
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + # Postfix file_name with "pg" since file_name is also used by RPC agent + init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"), + world_size=self.world_size, + rank=self.rank, + ) + net = nn.Linear(2, 3) + ddp_net = DistributedDataParallel(net) + + # Odd ranks join early if simulate_uneven_inputs. + num_inputs = 1 + if simulate_uneven_inputs: + if self.rank % 2 == 0: + num_inputs += 2 + inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)] + + if simulate_uneven_inputs: + gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list)) + + # Use distributed autograd. The gradients will be in RPC context map. + grads_dict = {} + with ddp_net.join(simulate_uneven_inputs): + for i, inputs in enumerate(inputs_list): + with dist_autograd.context() as context_id: + loss = ddp_net(inputs).norm() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict) + + # Use local autograd. The gradients will be in each variable's '.grad'. + ddp_net.zero_grad() + loss = ddp_net(inputs).norm() + loss.backward() + + # The gradients should be the same + for param in net.parameters(): + self.assertTrue( + param in grads_dict, + msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}", + ) + self.assertEqual( + grads_dict[param], + param.grad, + msg=f"The grads for param {param} are different under local " + f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}", + ) + dist.destroy_process_group() + + @requires_gloo() + @dist_init + def test_ddp_comparison(self): + self._run_test_ddp_comparision() + + @requires_gloo() + @dist_init + def test_ddp_comparison_uneven_inputs(self): + # test with simulating uneven inputs in DDP + self._run_test_ddp_comparision(simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_sparse_grads(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + model = nn.EmbeddingBag(10, 3, sparse=True) + ddp_model = DistributedDataParallel(model) + + # Different inputs for each + input = torch.LongTensor(10).random_(0, 10) + offsets = torch.LongTensor([0, 4]) + + # Run local. + loss = ddp_model(input, offsets).sum() + loss.backward() + + with dist_autograd.context() as context_id: + loss = ddp_model(input, offsets).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads_dict)) + self.assertEqual(model.weight.grad, grads_dict[model.weight]) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_local_vs_remote(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
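+        # The local path below accumulates grads into param.grad via regular
+        # autograd, while the remote path runs inside a dist_autograd context
+        # and fetches the grads of the RPC-owned layer from its owner with
+        # dist_autograd.get_gradients (see get_remote_grads above).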
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + # Use two different remote device input string, w/ and w/o the default + # device string "cpu", respectively. + for remote_device in ["worker0/cpu", "worker0"]: + remote_layer1 = RemoteModule( + remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False) + ) + layer1 = nn.Linear(10, 5, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + # Run local case. + layer2 = nn.Linear(5, 1) + inputs = torch.rand((10, 10)) + ddp_model = DistributedDataParallel(layer2) + loss = ddp_model(layer1(inputs)).sum() + loss.backward() + + # Run remote case. + with dist_autograd.context() as context_id: + loss = ddp_model(remote_layer1(inputs)).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + + +class CudaDdpComparisonTest(CommonDdpComparisonTest): + @skip_if_lt_x_gpu(NUM_TRAINERS) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_ddp_dist_autograd_local_vs_remote_gpu(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_layer1 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False) + ) + layer1 = nn.Linear(10, 7, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + layer2 = nn.Linear(7, 5).cuda(self.rank) + ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank]) + + remote_layer3 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False) + ) + layer3 = nn.Linear(5, 3, False) + # Start with the same parameters for remote and local + layer3.weight = remote_layer3.module_rref.to_here().weight + + layer4 = nn.Linear(3, 1).cuda(self.rank) + ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank]) + + # Run local case. + inputs = torch.rand((10, 10)) + loss = ddp_layer4( + layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank) + ).sum() + loss.backward() + + # Run remote case. 
+ with dist_autograd.context() as context_id: + loss = ddp_layer4( + remote_layer3( + ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu() + ).cuda(self.rank) + ).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer3.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer3.module_rref, context_id), + ), + ) + self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight]) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c0e4339e126cf856a4e04e5e2f2b8a591f43751a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py @@ -0,0 +1,10062 @@ +# mypy: ignore-errors + +import copy +import itertools +import math +import os +import random +import sys +import tempfile +import time +from collections import namedtuple, OrderedDict +from contextlib import contextmanager, nullcontext +from dataclasses import dataclass +from datetime import timedelta +from functools import reduce +from typing import Union, NamedTuple, Callable, Any +import unittest +import numpy as np +import torch +import torch.cuda +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.averagers as averagers +import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD +import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils +import torch.nn as nn +import torch.nn.functional as F +from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR +from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT +from torch.autograd import DeviceType +from torch.cuda.amp import GradScaler, autocast + +from torch.distributed.algorithms.ddp_comm_hooks import ( + post_localSGD_hook as post_localSGD, + powerSGD_hook as powerSGD, + default_hooks as default, + quantization as quantization_hooks, +) +from torch.distributed.optim import _apply_optimizer_in_backward + +from torch.distributed.distributed_c10d import ( + get_world_size, + _get_default_group, + _get_pg_config, +) +from torch.distributed.utils import ( + _verify_param_shape_across_processes, + _sync_module_states, +) + +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars, _MixedPrecision +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + init_multigpu_helper, + initialize_temp_directories, + cleanup_temp_dir, + simple_sparse_reduce_tests, + skip_if_rocm, + skip_if_small_worldsize, + skip_if_odd_worldsize, + skip_if_lt_x_gpu, + nccl_skip_if_lt_x_gpu, + skip_if_no_gpu, + require_n_gpus_for_nccl_backend, + requires_nccl_version, + captured_output, + with_nccl_blocking_wait, + with_dist_debug_levels, + verify_ddp_error_logged, + DistTestCases, +) +from torch.testing._internal.common_utils import ( + instantiate_parametrized_tests, + IS_MACOS, + IS_WINDOWS, + FILE_SCHEMA, + IS_FBCODE, + NO_MULTIPROCESSING_SPAWN, + IS_SANDCASTLE, + 
skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, +) + +import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer + +from torch.utils.data.distributed import DistributedSampler +import operator + +try: + import torchvision + + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False + +if sys.platform == "win32": + import msvcrt +else: + import fcntl + + +class NetWithBuffers(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + self.buffer.add_(1) + return self.b(self.a(x)) + + +class Foo: + def __init__(self, x): + # Can be tensor or int + self.x = x + + def __eq__(self, other): + def eq(value, other): + if isinstance(value, torch.Tensor): + return torch.equal(value, other) + return value == other + + for attr, value in self.__dict__.items(): + other_value = other.__dict__[attr] + if not eq(value, other_value): + return False + return True + + +f = Foo(10) +f.bar = 1 + +foo_cpu_tensor = Foo(torch.randn(3, 3)) + + +COLLECTIVES_OBJECT_TEST_LIST = [ + {"key1": 3, "key2": 4, "key3": {"nested": True}}, + f, + foo_cpu_tensor, + "foo", + [1, 2, True, "string", [4, 5, "nested"]], +] + +# Allowlist of distributed backends where profiling collectives is supported. +PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.NCCL, + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported with use_cuda=True +CUDA_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported for p2p ops +SEND_RECV_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.MPI, + dist.Backend.GLOO, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Dummy NamedTuple data structures to test DDP support for NamedTuple types. +EXPECTED_FIELDS = ("a", "b") +TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS) + + +class TestNamedTupleInput_1(NamedTuple): + a: torch.tensor + b: torch.tensor + + +skipIfNoTorchVision = skip_but_pass_in_sandcastle_if( + not HAS_TORCHVISION, "no torchvision" +) + +BACKEND = os.environ["BACKEND"] +INIT_METHOD = os.getenv("INIT_METHOD", "env://") + +DEFAULT_TIMEOUT = 300 +CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500} + + +def get_profiling_event(event_name, profiler, dedup_gpu_user_annotation=False): + event_list = ( + profiler.events() + if isinstance(profiler, torch.profiler.profile) + else profiler.function_events + ) + return [ + event for event in event_list + if ( + (event.name.endswith(event_name) or event.name.startswith(event_name)) + and (not dedup_gpu_user_annotation or event.device_type != DeviceType.CUDA) + ) + ] + + +# Base error message substring on unfinished reductions. 
+ddp_prev_reduction_unfinished_str = ( + "Expected to have finished reduction in the prior iteration" +) +# Error message substring when find_unused_parameters=True has not been passed +ddp_recommend_find_unused_params_str = ( + "passing the keyword argument `find_unused_parameters=True`" +) +# Error message substring when find_unused_parameters=True is enabled +ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled" +# Error message substring for possibility of not all model outputs being used +# in loss computation +ddp_outputs_not_used_in_loss_str = ( + "`forward` function outputs participate in calculating loss" +) +# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG +ddp_suggest_debug_mode_str = ( + "set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL" +) + + +class DDPUnevenTestInput(NamedTuple): + name: str + model: nn.Module + inp: Union[torch.tensor, tuple] + sync_interval: int + throw_on_early_termination: bool = False + hook: Callable = None + state: Any = None + + +class _FC2(nn.Module): + def __init__(self): + super().__init__() + self.fc = nn.Linear(10, 50, bias=True) + self.fc.bias.requires_grad = False + + def forward(self, x): + x = self.fc(x) + return x + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(2, 10, bias=False) + self.fc2 = _FC2() + self.fc3 = nn.Linear(50, 4, bias=False) + self.relu = nn.ReLU() + self.no_grad_param = nn.Parameter( + torch.tensor([2, 2]).long(), requires_grad=False + ) + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return F.softmax(x, dim=1) + + +class LargeNet(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(1000, 2000, bias=False) + self.fc2 = nn.Linear(2000, 500, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + +class Task(nn.Module): + def __init__(self): + super().__init__() + self.p = nn.Parameter(torch.ones(2, 2)) + + def forward(self, x): + return self.p + x + + +class BatchNormNet(nn.Module): + def __init__(self, affine=True): + super().__init__() + self.fc1 = nn.Linear(2, 40, bias=False) + self.bn = nn.BatchNorm1d(4, affine=affine) + self.fc2 = nn.Linear(40, 4, bias=False) + + def forward(self, x): + x = torch.reshape(self.fc1(x), (-1, 4, 10)) + x = self.bn(x) + x = torch.reshape(x, (-1, 40)) + x = self.fc2(x) + return F.softmax(x, dim=1) + + +class UnusedParamTwoLinLayerNet(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 10, bias=False) + self.c = nn.Linear(5, 5, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class DictOutputModule(nn.Module): + def __init__(self): + super().__init__() + self.module = UnusedParamTwoLinLayerNet() + + def forward(self, x): + predictions = self.module(x) + loss = (predictions[0] + predictions[1]).sum() + return { + "predictions": predictions, + "loss": loss, + } + + +class TwoLinLayerNet(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class EmbeddingNetDifferentParams(nn.Module): + """ + A module containing an embedding with different dimension or different # of + parameters depending on the rank. 
+ """ + + def __init__(self, rank, diff_num_params=False): + super().__init__() + embedding_dim = 500 if diff_num_params or rank == 0 else 50 + self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim) + self.lin = nn.Linear(embedding_dim, 1) + if diff_num_params: + self.lin2 = nn.Linear(1, 1, bias=False) + + def forward(self, x): + x = self.embedding(x) + return self.lin(x) + + +class ControlFlowToyModel(nn.Module): + def __init__(self): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + # Second layer is used dependent on input x. + use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device)) + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + +DDP_NET = Net() +BN_NET = BatchNormNet() +BN_NET_NO_AFFINE = BatchNormNet(affine=False) +ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99) + + +def get_timeout(test_id): + test_name = test_id.split(".")[-1] + if test_name in CUSTOMIZED_TIMEOUT: + return CUSTOMIZED_TIMEOUT[test_name] + else: + return DEFAULT_TIMEOUT + + +default_pg_timeout = 60 + +CUSTOM_PG_TIMEOUT = { + # This test runs slowly and needs additional time to complete, otherwise can + # be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING + "test_ddp_uneven_inputs": 300, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_model_diff_across_ranks": 5, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_has_finalized": 5, +} + +def require_backend_is_available(backends): + def check(backend): + if backend == dist.Backend.GLOO: + return dist.is_gloo_available() + if backend == dist.Backend.NCCL: + return dist.is_nccl_available() + if backend == dist.Backend.MPI: + return dist.is_mpi_available() + if backend == dist.Backend.UCC: + return dist.is_ucc_available() + if backend in DistTestCases.backend_feature["plugin"]: + return True + return False + + if BACKEND not in backends: + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be one of {backends}" + ) + + if not check(dist.Backend(BACKEND)): + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be available" + ) + return lambda func: func + + +def require_world_size(world_size): + if int(os.environ["WORLD_SIZE"]) < world_size: + return skip_but_pass_in_sandcastle( + "Test requires world size of %d" % world_size + ) + return lambda func: func + + +@contextmanager +def _lock(): + TEMP_DIR = os.environ["TEMP_DIR"] + lockfile = os.path.join(TEMP_DIR, "lockfile") + with open(lockfile, "w") as lf: + try: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1) + yield + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_EX) + yield + finally: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_UN) + lf.close() + + +@contextmanager +def _rank_temp_file(): + if dist.get_rank() == 0: + fd, name = tempfile.mkstemp() + os.close(fd) + else: + name = None + object_list = [name] + dist.broadcast_object_list(object_list) + name = object_list[0] + try: + yield name + finally: + if dist.get_rank() == 0: + os.remove(name) + + +def _build_tensor(size, value=None, dtype=torch.float, device_id=None): + if value is None: + value = size + if device_id is None: + return 
torch.empty(size, size, size, dtype=dtype).fill_(value) + else: + return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id) + + +def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float): + if value is None: + value = dim + return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value) + + +def _create_autograd_profiler(): + return torch.autograd.profiler.profile(record_shapes=True) + + +def _create_torch_profiler(): + return torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + ], + record_shapes=True, + ) + + +class Barrier: + barrier_id = 0 + + @classmethod + def init(cls): + cls.barrier_id = 0 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + for f_name in os.listdir(barrier_dir): + os.unlink(os.path.join(barrier_dir, f_name)) + + @classmethod + def sync(cls, wait_for=None, timeout=10): + if wait_for is None: + wait_for = dist.get_world_size() + cls.barrier_id += 1 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + pid = str(os.getpid()) + barrier_file = os.path.join(barrier_dir, pid) + with _lock(): + with open(barrier_file, "w") as f: + f.write(str(cls.barrier_id)) + + start_time = time.time() + while True: + arrived = 0 + with _lock(): + for f_name in os.listdir(barrier_dir): + with open(os.path.join(barrier_dir, f_name)) as f: + data = f.read() + if int(data) >= cls.barrier_id: + arrived += 1 + if arrived == wait_for: + break + + if time.time() - start_time > timeout: + raise RuntimeError("barrier timeout") + time.sleep(0.1) + + +class TestDistBackend(MultiProcessTestCase): + @classmethod + def setUpClass(cls): + os.environ["MASTER_ADDR"] = str(MASTER_ADDR) + # Not setting MASTER_PORT and get a random free port + super().setUpClass() + + def setUp(self): + super().setUp() + # initialize temp directories + initialize_temp_directories() + # initialize Barrier + Barrier.init() + # Skip return code checking for following tests as they are expected to + # crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING. + self.skip_return_code_checks = [self.test_ddp_has_finalized.__wrapped__] + + def tearDown(self): + cleanup_temp_dir() + super().tearDown() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + @classmethod + def _run(cls, rank, test_name, file_name, pipe): + if BACKEND == "nccl" and not torch.cuda.is_available(): + sys.exit(TEST_SKIPS["no_cuda"].exit_code) + self = cls(test_name) + self.rank = rank + self.file_name = file_name + + if torch.cuda.is_available() and torch.cuda.device_count() < int( + self.world_size + ): + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + try: + pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout) + timeout = timedelta(seconds=pg_timeout_seconds) + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=int(self.world_size), + rank=self.rank, + timeout=timeout, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. + self._barrier() + + self.run_test(test_name, pipe) + self._barrier() + dist.destroy_process_group() + sys.exit(0) + + # Needed since MultiProcessTestCase assumes a world_size of 4, but we + # run these tests under other various world_sizes. 
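+    # Note that WORLD_SIZE is read from the environment as a string, so callers
+    # such as _run() convert it with int() before passing it to
+    # dist.init_process_group.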
+ @property + def world_size(self): + return os.environ["WORLD_SIZE"] + + +class DistributedTest: + class _DistTestBase: + def _barrier(self, *args, **kwargs): + Barrier.sync(*args, **kwargs) + + def _init_group_test(self, **kwargs): + group = [1, 2] + group_id = dist.new_group(group, **kwargs) + rank = dist.get_rank() + if rank not in group: + return ([], None, rank) + + return (group, group_id, rank) + + def _init_full_group_test(self, **kwargs): + group = list(range(0, dist.get_world_size())) + group_id = dist.new_group(**kwargs) + rank = dist.get_rank() + return (group, group_id, rank) + + def _init_global_test(self): + group = list(range(0, dist.get_world_size())) + group_id = dist.group.WORLD + rank = dist.get_rank() + return (group, group_id, rank) + + def _verify_buffers_equal(self, m1, m2): + # verify buffers across models + m1_buf_dict = dict(m1.module.named_buffers()) + for name, buf in m2.module.named_buffers(): + self.assertEqual(buf, m1_buf_dict[name]) + + # Verify buffers across ranks. + m1_buffers = list(m1.buffers()) + m2_buffers = list(m2.buffers()) + for (buf1, buf2) in zip(m1_buffers, m2_buffers): + gathered_bufs = [ + torch.empty_like(buf1) for _ in range(dist.get_world_size()) + ] + dist.all_gather(gathered_bufs, buf1) + gathered_bufs_m2 = [ + torch.empty_like(buf2) for _ in range(dist.get_world_size()) + ] + for b in gathered_bufs: + self.assertEqual(b, buf1) + dist.all_gather(gathered_bufs_m2, buf2) + for b in gathered_bufs_m2: + self.assertEqual(b, buf2) + + def test_dump_DDP_relevant_env_vars(self): + with captured_output() as (out, _): + _dump_DDP_relevant_env_vars() + lines = out.getvalue().splitlines() + + def format_line(var): + return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}" + + # Check relevant env vars + vars = [ + "MASTER_ADDR", + "MASTER_PORT", + "WORLD_SIZE", + "NCCL_TOPO_DUMP_FILE", # N/A + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + for var in vars: + line = format_line(var) + self.assertIn(line, lines) + # Check irrelevant env vars + vars = [ + "xxx", + "yyy", + "zzz", + ] + for var in vars: + line = format_line(var) + self.assertNotIn(line, lines) + + # GET RANK + def test_get_rank(self): + test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir") + pid = str(os.getpid()) + num_processes = dist.get_world_size() + with open(os.path.join(test_dir, pid), "w") as f: + f.write(str(dist.get_rank())) + + self._barrier() + + all_ranks = set() + for f_name in os.listdir(test_dir): + with open(os.path.join(test_dir, f_name)) as f: + all_ranks.add(int(f.read())) + self.assertEqual(len(all_ranks), num_processes) + + self._barrier() + + if dist.get_rank() == 0: + for f_name in os.listdir(test_dir): + os.unlink(os.path.join(test_dir, f_name)) + + self._barrier() + + def test_get_backend(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + backend_str = BACKEND.lower() + self.assertEqual(dist.get_backend(), backend_str) + if dist.get_rank() in group: + self.assertEqual(dist.get_backend(group_id), backend_str) + else: + with self.assertRaisesRegex( + ValueError, "Invalid process group specified" + ): + dist.get_backend(group_id) + + def test_Backend_enum_class(self): + # test parsing + backend = BACKEND.lower() + self.assertEqual(dist.Backend(BACKEND.upper()), backend) + self.assertEqual(dist.Backend(BACKEND), backend) + with self.assertRaises(ValueError): + dist.Backend(None) + with self.assertRaises(ValueError): + dist.Backend(3) + with self.assertRaises(ValueError): + 
dist.Backend(["gloo"]) + + # Test destroy + def test_destroy_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of group + def test_get_rank_size_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + if dist.get_rank() in group: + self.assertEqual(dist.get_world_size(group_id), 2) + self.assertTrue(dist.get_rank(group_id) in list(range(2))) + else: + self.assertEqual(dist.get_world_size(group_id), -1) + self.assertEqual(dist.get_rank(group_id), -1) + + # Test destroy full groups + def test_destroy_full_group(self): + _, group_id, _ = self._init_full_group_test() + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of full group + def test_get_rank_size_full_group(self): + _, group_id, _ = self._init_full_group_test() + self.assertEqual(dist.get_world_size(group_id), dist.get_world_size()) + self.assertEqual(dist.get_rank(group_id), dist.get_rank()) + + def _test_barrier_timeout(self, group_id, timeout): + local_rank = dist.get_rank(group_id) + + # Only execute barrier on rank == 0, causing it to timeout + if local_rank == 0: + expected_time = time.time() + timeout.total_seconds() + # In debug mode, we execute a monitored_barrier before the + # collective, so assert on that. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + exception_ctx = self.assertRaisesRegex( + Exception, "failed to pass monitoredBarrier" + ) + else: + exception_ctx = self.assertRaisesRegex( + Exception, " (Timed out|closed|timeout) " + ) + with exception_ctx: + dist.barrier(group_id) + self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1) + else: + pass + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + @skip_but_pass_in_sandcastle_if( + not INIT_METHOD.startswith("file://"), + "Requires file:// initialization method. " + + "Both tcp:// and env:// rely on the TCP store for which " + "reinitialization has proven racy.", + ) + def test_barrier_timeout_global(self): + dist.destroy_process_group() + + # Explicitly pass world size to the barrier because we've + # just destroyed any state in torch.distributed. + self._barrier(wait_for=int(os.environ["WORLD_SIZE"])) + + # Reinitialize global process group + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + self._test_barrier_timeout(dist.group.WORLD, timeout) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_group(self): + timeout = timedelta(seconds=5) + _, group_id, _ = self._init_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_full_group(self): + timeout = timedelta(seconds=1) + _, group_id, _ = self._init_full_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + # This test helper can only be used when using the Gloo or NCCL backend + # **and** both the Gloo and NCCL backends are available. + # See the @skip annotations below. 
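+        # The override works by forwarding a `backend` kwarg through the group
+        # initializers down to dist.new_group. A minimal sketch of the same idea
+        # outside this harness (assuming a 2-rank job already initialized with
+        # gloo and one CUDA device per rank) would be:
+        #
+        #     nccl_group = dist.new_group(ranks=[0, 1], backend="nccl")
+        #     tensor = torch.ones(2, device=f"cuda:{rank}")
+        #     dist.broadcast(tensor, src=0, group=nccl_group)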
+ def _test_group_override_backend(self, initializer): + if BACKEND == "gloo": + new_backend = "nccl" + elif BACKEND == "nccl": + new_backend = "gloo" + elif BACKEND in DistTestCases.backend_feature["plugin"]: + new_backend = "gloo" + + group, group_id, rank = initializer(backend=new_backend) + if group_id is None: + return + + if new_backend == "gloo": + self.assertTrue(group_id._get_backend_name(), "gloo") + if new_backend == "nccl": + self.assertTrue(group_id._get_backend_name(), "nccl") + + self.assertEqual(rank, group[dist.get_rank(group_id)]) + self.assertEqual(len(group), dist.get_world_size(group_id)) + + # Pin device (so we avoid NCCL race conditions/deadlocks). + group_rank = dist.get_rank(group_id) + torch.cuda.set_device(group_rank) + + # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL). + tensor = _build_tensor(2, value=group_rank).cuda() + dist.broadcast(tensor, src=group[0], group=group_id) + self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu")) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_world_size(3) + @skip_if_lt_x_gpu(2) + def test_backend_group(self): + self._test_group_override_backend(self._init_group_test) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @unittest.skipIf(BACKEND == "ucc", "broken, see https://github.com/pytorch/pytorch/pull/113620") + def test_backend_full_group(self): + self._test_group_override_backend(self._init_full_group_test) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(2) + def test_new_subgroups(self): + subgroup_size = 2 + cur_subgroup, subgroups = dist.new_subgroups(subgroup_size) + + world_size = dist.get_world_size() + self.assertEqual(cur_subgroup.size(), subgroup_size) + self.assertEqual(len(subgroups), world_size / subgroup_size) + self.assertFalse(dist._rank_not_in_group(cur_subgroup)) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_group_size_exceeds_world_size(self): + with self.assertRaisesRegex(ValueError, "must not exceed"): + dist.new_subgroups(100) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_world_size_not_divisible_by_group_size(self): + with self.assertRaisesRegex( + ValueError, "The world size must be divisible by 'group_size'" + ): + dist.new_subgroups(3) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + cur_subgroup, subgroups = dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 2], [1, 3]] + ) + if device_id >= 4: + self.assertIsNone(cur_subgroup) + else: + self.assertEqual(cur_subgroup.size(), 2) 
+ self.assertEqual(len(subgroups), 2) + if device_id == 0 or device_id == 2: + self.assertEqual(cur_subgroup, subgroups[0]) + else: + self.assertEqual(cur_subgroup, subgroups[1]) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + world_size = get_world_size(group_id) + + with self.assertRaisesRegex( + RuntimeError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 1], [world_size, 2]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_by_enumeration_negative_input_rank(self): + group, group_id, rank = self._init_global_test() + + with self.assertRaisesRegex( + ValueError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[-1, -2], [-3, -4]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_overlap_not_allowed(self): + with self.assertRaisesRegex( + ValueError, "Rank 1 has appeared in both subgroup" + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0], [1, 2], [1, 3]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_average_parameters(self): + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Sequential( + nn.Conv2d(3, 3, kernel_size=3, padding=1), + nn.ReLU(), + nn.Linear(1, 5, bias=False), + ).cuda(device_id) + # Test global model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=None + ) + # Every element will be the same as the input. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data)) + + # Test partial model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) * rank + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=group_nccl + ) + if not dist._rank_not_in_group(group_nccl): + # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * 0.5) + else: + # Every element on device not in the subgroup should remain the same. 
+ for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager_param_group(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + opt = torch.optim.SGD(model.parameters(), lr=0.1) + + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + for param_group in opt.param_groups: + for params in param_group["params"]: + # mock grad + params.grad = torch.ones_like(param.data) * rank + params.data = torch.ones_like(param.data) * rank + averager.average_parameters(opt.param_groups) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, + torch.ones_like(param.data) + * sum(range(world_size)) + / world_size, + ) + else: + # No model averaging, so the parameters are not updated. 
+ for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, torch.ones_like(param.data) * rank + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager( + self, + ): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = hierarchicalSGD.HierarchicalModelAverager( + # Run the global averaging at a period of 4, + # which is equivalent to the above periodic model averaging test case. + period_group_size_dict=OrderedDict([(period, world_size)]), + warmup_steps=warmup_steps, + ) + + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_3_level_hierarchical_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + # Set up such a hierarchical model averaging as follows: + # after the first 10 warmup steps, + # run model averaging every 2 steps within each subgroup of size 2, + # run model averaging every 4 steps within each subgroup of size 3, + # and run the global model averaging every 8 steps. + # If there is a conflict in model averaging at a step, only run the highest-level model averaging. 
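+            # With the values set below (warmup_steps=10, periods 2/4/8,
+            # subgroup sizes 2 and 4), the schedule the asserts check is:
+            #   step 16, 24         -> global average (period 8)
+            #   step 12, 20         -> size-4 subgroup average (period 4)
+            #   step 10, 14, 18, 22 -> size-2 subgroup average (period 2)
+            #   all other steps     -> no averaging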
+ warmup_steps = 10 + subgroup_size1 = 2 + subgroup_avg_period1 = 2 + subgroup_size2 = 4 + subgroup_avg_period2 = 4 + global_avg_period = 8 + period_group_size_dict = OrderedDict( + [ + (subgroup_avg_period1, subgroup_size1), + (subgroup_avg_period2, subgroup_size2), + (global_avg_period, world_size), + ] + ) + averager = hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps + ) + self.assertEqual(dist.get_pg_count(), len(period_group_size_dict)) + + subgroup1 = averager.period_process_group_dict[subgroup_avg_period1] + subgroup2 = averager.period_process_group_dict[subgroup_avg_period2] + real_group_ranks_res1 = _get_pg_config(subgroup1)['ranks'] + real_group_ranks_res2 = _get_pg_config(subgroup2)['ranks'] + + expect_group_ranks_res1 = ( + rank // subgroup_size1 * subgroup_size1 + + np.array(list(range(subgroup_size1))) + ).tolist() + expect_group_ranks_res2 = ( + rank // subgroup_size2 * subgroup_size2 + + np.array(list(range(subgroup_size2))) + ).tolist() + self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1) + self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2) + + expected_avg_tensor_within_subgroup1 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res1) + / subgroup_size1 + ) + expected_avg_tensor_within_subgroup2 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res2) + / subgroup_size2 + ) + expected_global_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + for step in range(0, 25): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step == 16 or step == 24: + # Run global model averaging when `step` can be divided by 8. + self.assertEqual(param.data, expected_global_avg_tensor) + elif step == 12 or step == 20: + # Run model averaging within subgroup when `step` can be divided by 4 but not by 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup2) + elif step == 10 or step == 14 or step == 18 or step == 22: + # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup1) + else: + # No model averaging, so the parameters are not updated. 
+ self.assertEqual(param.data, tensor) + + # Coalescing manager (sync mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(): + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # Coalescing manager (async mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager_async(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(async_ops=True) as cm: + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + cm.wait() + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # NCCL Batch SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_nccl(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + recv_tensors = [None for _ in range(world_size)] + expected_tensors = [None for _ in range(world_size)] + + for val in ["1", "0"]: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val + for src in range(0, world_size): + send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_( + src + ) + recv_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(-1) + expected_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(rank) + recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + for src in range(0, world_size): + self.assertEqual(recv_tensors[src], expected_tensors[src]) + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_ring_exchange_nccl(self): + self._barrier() + rank = 
dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + send_tensor = _build_tensor(world_size, device_id=device_id) + recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id) + send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size) + recv_op = dist.P2POp( + dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size + ) + reqs = dist.batch_isend_irecv([send_op, recv_op]) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_self_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + p2p_op_list = [] + + if rank == 0: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, 0) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, 0) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_no_rank_zero_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + if rank == 1: + peer = 2 + elif rank == 2: + peer = 1 + + if rank in [1, 2]: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, peer) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, peer) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU with provided tags + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo_tags(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + 
continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # NCCL Batch SEND RECV Op Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + with self.assertRaisesRegex(ValueError, "^Invalid ``op``"): + send_tensor = _build_tensor(rank + 1, device_id=device_id) + send_op = dist.P2POp(dist.broadcast, send_tensor, 1) + dist.batch_isend_irecv([send_op]) + + # NCCL Batch SEND RECV p2p_op_list Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_list_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"): + dist.batch_isend_irecv([1, 2]) + + # NCCL Batch SEND RECV Mixed Backend Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_mixed_backend_err(self): + self._barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + group_gloo = dist.new_group(ranks=[0, 1], backend="gloo") + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + if rank == 0: + with self.assertRaisesRegex( + ValueError, "All ops need to use the same group" + ): + send_tensor = _build_tensor(rank + 1) + send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo) + send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl) + dist.batch_isend_irecv([send_op_gloo, send_op_nccl]) + + # NCCL SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def _test_send_recv_nccl(self, profiler_ctx=None): + # TODO: now that nccl send/recv is supported, there does not seem to + # be a need to have nccl send/recv be tested separately. 
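+ # Pattern under test: ranks take turns as the sender; the sending rank issues + # dist.send to every peer while each peer posts a matching dist.recv and + # checks the received payload against the expected tensor.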
+ rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + tensor = _build_tensor(rank + 1, device_id=device_id) + profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext() + with profiler_cls as prof: + for src in range(0, world_size): + if src == rank: + # Send mode + for dst in range(0, world_size): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + expected_tensor = _build_tensor(src + 1) + output_tensor = _build_tensor( + src + 1, value=-1, device_id=device_id + ) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof, dedup_gpu_user_annotation=True) + self.assertTrue(events) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl(self): + self._test_send_recv_nccl() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl_autograd_profiler(self): + profiler_ctx = torch.autograd.profiler.profile(record_shapes=True) + self._test_send_recv_nccl(profiler_ctx) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_nccl_torch_profiler(self): + profiler_ctx = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + record_shapes=True, + ) + self._test_send_recv_nccl(profiler_ctx) + + # SEND RECV + def _test_send_recv(self, profiler_ctx): + rank = dist.get_rank() + send_size = rank + 1 + tensor = _build_tensor(send_size) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for src in range(0, dist.get_world_size()): + if src == rank: + # Send mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + recv_size = src + 1 + expected_tensor = _build_tensor(recv_size) + output_tensor = _build_tensor(recv_size, value=-1) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks. 
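+ # Hence exactly world_size - 1 matching profiler events are expected per rank.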
+ event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.is_async) + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl" + ) + def test_send_recv(self): + self._test_send_recv(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv(profiler_ctx=torch_profiler_ctx) + + # SEND RECV ANY SOURCE + def _test_send_recv_any_source(self, profiler_ctx): + rank = dist.get_rank() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + recv_ranks = list() + irecv_ranks = list() + + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, dist.get_world_size()): + if dst == rank: + # Recv mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + + for recv in ["recv", "irecv"]: + output_tensor = _build_tensor(send_recv_size, value=-1) + + if recv == "recv": + sender = dist.recv(output_tensor) + recv_ranks.append(sender) + elif recv == "irecv": + work = dist.irecv(output_tensor) + work.wait() + sender = work._source_rank() + irecv_ranks.append(sender) + + # Assert the scalar value "sender" that should be + # equal to the rank of the sender is equal to all + # values in the received tensor. + self.assertTrue(output_tensor.eq(sender).all()) + else: + # Send mode + dist.send(tensor, dst) # recv + dist.send(tensor, dst) # irecv + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from other rank twice. + self.assertEqual( + sum(event.count for event in events), + 2 * (dist.get_world_size() - 1), + ) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + # Each rank would have 2 * (world_size - 1) sends, verify that + # globally we receive the same amount on the other end. 
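+ # The check below all_gathers the sender ranks observed locally (via recv and + # irecv) and asserts that every rank appears exactly 2 * (world_size - 1) times + # in the combined list.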
+ recv_ranks_tensor = torch.cat( + (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0 + ) + global_recv_ranks = [ + torch.empty_like(recv_ranks_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(global_recv_ranks, recv_ranks_tensor) + global_recv_ranks_list = [] + for tensor in global_recv_ranks: + global_recv_ranks_list += tensor.tolist() + + from itertools import groupby + + global_recv_ranks_list.sort() + frequency = [ + len(list(group)) for key, group in groupby(global_recv_ranks_list) + ] + self.assertEqual(dist.get_world_size(), len(frequency)) + self.assertEqual( + [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source(self): + self._test_send_recv_any_source(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_any_source_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx) + + # SEND RECV WITH TAG + def _test_send_recv_with_tag(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, world_size): + if dst == rank: + # Recv mode + for src in range(0, world_size): + if src == rank: + continue + output_tensor = _build_tensor(send_recv_size, value=-1) + dist.recv(output_tensor, src, tag=src) + self.assertTrue(output_tensor.eq(src).all()) + else: + # Send mode + dist.send(tensor, dst, tag=rank) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks + event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, event_name) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag(self): + self._test_send_recv_with_tag(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + return 
self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_with_tag_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx) + + # ISEND + def _test_isend(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + if rank == 0: + requests = [ + dist.isend(_build_tensor(dest, 10), dest) + for dest in range(1, world_size) + ] + for request in requests: + request.wait() + self.assertTrue(request.is_completed()) + else: + tensor = _build_tensor(rank, -1) + dist.recv(tensor, 0) + self.assertEqual(tensor, _build_tensor(rank, 10)) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + expected_event_name = ( + f"{backend}:send" if rank == 0 else f"{backend}:recv" + ) + events = get_profiling_event(expected_event_name, prof) + event_count = sum(e.count for e in events) + expected_count = dist.get_world_size() - 1 if rank == 0 else 1 + self.assertEqual(expected_count, event_count) + # Event ordering is not guaranteed, so simply ensure the shapes are + # found in the following map. + expected_shapes = { + r: [[r] * 3] for r in range(1, dist.get_world_size()) + } + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, expected_event_name) + if rank == 0: + self.assertTrue( + event.input_shapes in expected_shapes.values() + ) + else: + self.assertEqual(event.input_shapes, expected_shapes[rank]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend(self): + self._test_isend(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_isend(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_isend_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + self._test_isend(profiler_ctx=torch_profiler_ctx) + + # IRECV + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support irecv" + ) + def test_irecv(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + + if rank == 0: + expected_tensors = [ + _build_tensor(src, -1) for src in range(1, world_size) + ] + requests = [ + dist.irecv(expected_tensors[src - 1], src) + for src in range(1, world_size) + ] + + for src in range(1, world_size): + requests[src - 1].wait() + self.assertTrue(requests[src - 1].is_completed()) + self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10)) + else: + tensor = _build_tensor(rank, 10) + dist.send(tensor, 0) + + self._barrier() + + # BROADCAST + def 
_test_broadcast_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + with_options=False, + ): + for dtype, value, requires_cuda in [ + (torch.float, -1e-10, False), + (torch.double, -1e-100, False), + (torch.half, -0.1, True), + (torch.int8, -2, False), + (torch.uint8, 129, False), + (torch.int, -1e5, False), + (torch.long, -1e15, False), + ]: + if requires_cuda and not cuda: + continue + for src in group: + expected_tensor = _build_tensor(src + 1, value, dtype) + if cuda: + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + if rank == src: + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", + True, + group_id.broadcast, + [expected_tensor], + opts, + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + expected_tensor, + src, + group_id, + ) + else: + tensor = _build_tensor(src + 1, -1, dtype) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", True, group_id.broadcast, [tensor], opts + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + tensor, + src, + group_id, + ) + self.assertEqual(tensor.size(), expected_tensor.size()) + self.assertEqual( + tensor.ne(expected_tensor).max(), torch.tensor(False) + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast(self): + group, group_id, rank = self._init_global_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and Nccl backend supports CUDA allReduce", + ) + @skip_if_no_gpu + def test_broadcast_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_group(self): + group, group_id, rank = self._init_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "Only NCCL backend supports high priority stream", + ) + @skip_if_no_gpu + def test_nccl_high_priority_stream(self): + group, _, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + new_port = str(MASTER_PORT + 1) + os.environ["MASTER_PORT"] = new_port + gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size()) + store, rank, size = next(gen_iterator) + store = dist.PrefixStore(new_port, store) + + opts = dist.ProcessGroupNCCL.Options() + opts.is_high_priority_stream = False + group_id = dist.ProcessGroupNCCL(store, rank, size, opts) + + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True) + + # REDUCE + def _test_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + 
worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensor = _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensor, + src, + op, + group_id, + tensor_shapes=[tensor.shape], + ) + if rank == src: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_product(self): + 
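+ # The source rank contributes 2 and each of the other len(group) - 1 ranks + # contributes a factor of 10, so the expected result is 2 * 10 ** (len(group) - 1).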
group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # REDUCE TWICE + def _test_reduce_twice_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensors = [ + _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + for i in range(2) + ] + if cuda: + for i in range(2): + tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensors[0], + src, + op, + group_id, + secondary_op_call=lambda: dist.reduce( + tensors[1], src, op, group_id + ), + tensor_shapes=[tensors[0].shape], + ) + if rank == src: + for tensor in tensors: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + 
self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum_twice(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda_twice(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports reduce_scatter_v" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_scatter_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + input_split_sizes = [] + for src in group: + input_split_sizes.append(src + 1) + start_len = sum(input_split_sizes[:rank]) + end_len = start_len + input_split_sizes[rank] + sum_len = sum(input_split_sizes) + master_value = 2 + worker_value = 10 + + for async_val in [True, False]: + tensor = _build_tensor(sum_len, worker_value, device_id=device_id) + tensor[start_len:end_len].fill_(master_value) + out_tensor = ( + torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(-1) + .cuda(device_id) + ) + + req = dist.reduce_scatter( + out_tensor, + list(torch.split(tensor, input_split_sizes)), + dist.ReduceOp.SUM, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = 2 + (10 * (len(group) - 1)) + expected_tensor = torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test reduce_scatter_tensor accepting single tensor as input + def _reduce_scatter_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + tensor_shapes = [tensor_out.shape] + self.call_dist_op( + ":reduce_scatter_tensor", + False, + dist.reduce_scatter_tensor, + tensor_out, + tensor_in, + dist.ReduceOp.SUM, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor" + ) + @skip_if_no_gpu + def test_reduce_scatter_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_out = torch.zeros(size, dtype=torch.int64) + + # Concatenated input + tensor_in = torch.arange(len(group) * size) + 
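+ # Every rank supplies the same arange(len(group) * size) input, so after the + # SUM reduce-scatter rank r should hold arange(r * size, (r + 1) * size) * len(group).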
tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + # Stacked input + tensor_in = torch.reshape(tensor_in, (len(group), size)) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + # Should be the same as the result in concatenated case + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def call_dist_op( + self, + profiling_title_postfix, + is_async, + op, + *args, + expect_event=True, + secondary_op_call=None, + profile_cuda=False, + tensor_shapes=None, + **kwargs, + ): + op_calls = [lambda: op(*args, **kwargs)] + if secondary_op_call is not None: + op_calls.append(secondary_op_call) + + autograd_profiler_ctx = torch.autograd.profiler.profile( + use_cuda=profile_cuda, record_shapes=True + ) + + # TODO: move this test to use torch.profiler once kineto issues are + # fixed internally. + with autograd_profiler_ctx as prof: + works = [op_call() for op_call in op_calls] + if is_async: + for work in works: + work.wait() + + if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS: + # We are only interested in the backend's implementation not the dispatcher wrapper. + events = get_profiling_event( + dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx + ) + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if dist.get_debug_level() != dist.DebugLevel.DETAIL: + self.assertEqual(len(events), len(op_calls)) + for e in events: + self.assertTrue(e.is_async) + self.assertEqual(e.count, 1) + self.assertGreaterEqual(e.cpu_time, 0) + # Verify tensor shapes if given + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if ( + tensor_shapes is not None + and dist.get_debug_level() != dist.DebugLevel.DETAIL + ): + self.assertEqual( + e.input_shapes, + tensor_shapes, + f"event shape: {e.input_shapes} vs tensor {tensor_shapes}", + ) + + # ALL REDUCE + def _test_all_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + async_op=False, + ): + for src in group: + curr_value = master_value if rank == src else worker_value + + tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if tensor.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor).shape] + else: + tensor_shapes = [tensor.shape] + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + tensor_shapes=tensor_shapes, + ) + # Currently, only Gloo backend has profiling tested with CUDA enabled. + # Only run cuda profiling test for one rank to speed up since + # running with different src_rank does not affect the correctness. 
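+ # The guarded re-run below repeats the same all_reduce with profile_cuda=True, + # so the CUDA profiling path is exercised at most once (for src == 0) per helper call.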
+ if ( + src == 0 + and cuda + and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS + ): + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + profile_cuda=True, + tensor_shapes=tensor_shapes, + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_async(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_async(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_complex_unsupported_ops(self): + unsupported_ops = [ + dist.ReduceOp.MAX, + dist.ReduceOp.MIN, + dist.ReduceOp.PRODUCT, + dist.ReduceOp.BAND, + dist.ReduceOp.BOR, + dist.ReduceOp.BXOR, + ] + group, group_id, rank = self._init_global_test() + for unsupported_op in unsupported_ops: + with self.assertRaisesRegex( + ValueError, "all_reduce does not support" + ): + dist.all_reduce( + _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_complex(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + 
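+ # Illustrative sketch of the collective these tests exercise (not executed by + # the suite; the tensor shape and values below are hypothetical): + #   t = torch.full((4,), float(rank), device="cuda")  # per-rank input + #   dist.all_reduce(t, op=dist.ReduceOp.SUM)  # in-place; every rank ends up with the element-wise sum over all ranks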
@skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # SPARSE ALL REDUCE + def _test_sparse_all_reduce_sum(self, fn): + group, group_id, rank = 
self._init_global_test() + + tests = simple_sparse_reduce_tests( + rank, dist.get_world_size(), num_inputs=1 + ) + for (inputs, outputs) in tests: + tensors = [fn(input) for input in inputs] + dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id) + self.assertEqual(tensors[0], outputs[0]) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + def test_sparse_all_reduce_sum(self): + self._test_sparse_all_reduce_sum(lambda t: t) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + @skip_if_no_gpu + def test_sparse_all_reduce_sum_cuda(self): + self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda()) + + # ALL REDUCE - COALESCED + @staticmethod + def _all_reduce_coalesced_sum_test_cases(group_size): + return ( + [2, 3, complex(2, 3)], + [10, 11, complex(10, 11)], + [ + 2 + 10 * (group_size - 1), + 3 + 11 * (group_size - 1), + complex(2, 3) + complex(10, 11) * (group_size - 1), + ], + [torch.float, torch.float, torch.cfloat], + ) + + @staticmethod + def _all_reduce_coalesced_product_test_cases(group_size): + return ( + [1, 2], + [3, 4], + [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_min_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [1, 3], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_max_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [2, 4], + [torch.float, torch.float], + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_coalesced_max_complex_unsupported(self): + group, group_id, rank = self._init_global_test() + with self.assertRaisesRegex(ValueError, "all_reduce does not support"): + dist.all_reduce_coalesced( + [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id + ) + + def _test_all_reduce_coalesced_helper( + self, + group, + group_id, + rank, + op, + cuda=False, + rank_to_GPU=None, + ): + test_case_func = { + dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases, + dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases, + dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases, + dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases, + }[op] + + master_values, worker_values, expected_values, dtypes = test_case_func( + len(group) + ) + + for src in group: + curr_values = master_values if rank == src else worker_values + tensors = [ + _build_tensor(src + 1, val, dtype=dtype) + for dtype, val in zip(dtypes, curr_values) + ] + if cuda: + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + tensor_shapes = [] + for tensor in tensors: + if tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(tensor).shape) + else: + tensor_shapes.append(tensor.shape) + self.call_dist_op( + ":all_reduce", + False, + dist.all_reduce_coalesced, + tensors, + op, + group_id, + tensor_shapes=tensor_shapes, + ) + expected_tensors = [ + _build_tensor(src + 1, expected_value, dtype=dtype) + for dtype, expected_value in zip(dtypes, expected_values) + ] + self.assertEqual(tensors, expected_tensors) + + self._barrier() + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + cuda=False, + rank_to_GPU=None, + ) + + 
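+ # Each *_test_cases helper above returns (master_values, worker_values, + # expected_values, dtypes); the coalesced helper builds one tensor per dtype + # and reduces them all in a single dist.all_reduce_coalesced call.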
@require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + # SCATTER + def _test_scatter_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, -1, dtype=dtype) + expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = ( + [_build_tensor(dest + 1, i, dtype=dtype) for i in group] + if rank == dest + else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) 
for t in tensors] + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(t).shape for t in tensors] + else: + tensor_shapes = [t.shape for t in tensors] + self.call_dist_op( + ":scatter", + False, + dist.scatter, + tensor, + src=dest, + scatter_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(tensor, expected_tensor) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify scatter_list argument only on source rank. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, src=0, scatter_list=scatter_list) + else: + dist.scatter(output, src=0) + self.assertEqual(output, one * rank) + + # Don't specify src argument. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, scatter_list=scatter_list) + else: + dist.scatter(output) + self.assertEqual(output, one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_complex(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_scatter_group(self): + group, group_id, rank = self._init_group_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_scatter_helper(group, group_id, rank) + + # GATHER + def _test_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank) + tensors = ( + [_build_tensor(dest + 1, -1) for i in 
group] if rank == dest else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + self.call_dist_op( + ":gather", + False, + dist.gather, + tensor, + dst=dest, + gather_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None, + ) + if rank == dest: + expected_tensors = [_build_tensor(dest + 1, i) for i in group] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify gather_list argument only on destination rank. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, dst=0, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank, dst=0) + + # Don't specify dst argument. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather(self): + group, group_id, rank = self._init_global_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_gather_helper(group, group_id, rank) + + # ALL GATHER + def _test_all_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group] + allgather = dist.all_gather + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if tensors[0].dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensors[0]).shape] + else: + tensor_shapes = [tensors[0].shape] + self.call_dist_op( + ":all_gather", + False, + allgather, + tensors, + tensor, + group_id, + False, + tensor_shapes=tensor_shapes, + ) + + expected_tensors = [ + _build_tensor(dest + 1, i, dtype=dtype) for i in group + ] + for t1, t2 in zip(tensors, 
expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports all_gather_v" + ) + @skip_if_no_gpu + def test_all_gather_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + output_split_sizes = [] + for dst in group: + output_split_sizes.append(dst + 1) + sum_len = sum(output_split_sizes) + value = 2 + + for async_val in [True, False]: + tensor = ( + torch.empty( + output_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(value) + .cuda(device_id) + ) + out_tensor = _build_tensor(sum_len, -1, device_id=device_id) + + req = dist.all_gather( + list(torch.split(out_tensor, output_split_sizes)), + tensor, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = value + expected_tensor = _build_tensor( + sum_len, expected_value, device_id=device_id + ) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test all_gather accepting single tensor as output + def _all_gather_into_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + if tensor_out.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor_in).shape] + else: + tensor_shapes = [tensor_in.shape] + self.call_dist_op( + ":all_gather_into_tensor", + False, + dist.all_gather_into_tensor, + tensor_out, + tensor_in, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA 
all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_cat_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Concatenated output + tensor_out = torch.ones([len(group) * size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Concatenate all blocks into a bigger tensor + expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_stack_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Stacked output + tensor_out = torch.ones([len(group), size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Stack all blocks into a bigger tensor + expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def _run_all_gather_coalesced_and_verify( + self, output_tensor_lists, input_tensors, expected_tensors, group_id + ): + """ + Helper that runs all_gather_coalesced and returns true if output + matches expectations. + """ + tensor_shapes = [] + for input_tensor in input_tensors: + if input_tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(input_tensor).shape) + else: + tensor_shapes.append(input_tensor.shape) + self.call_dist_op( + ":all_gather", + False, + dist.all_gather_coalesced, + output_tensor_lists, + input_tensors, + group_id, + tensor_shapes=tensor_shapes, + ) + + for l1, l2 in zip(output_tensor_lists, expected_tensors): + for t1, t2 in zip(l1, l2): + if not torch.equal(t1, t2): + return False + return True + + def _test_all_gather_coalesced_helper( + self, group, group_id, rank, dtype=torch.float + ): + # TODO: Instead we should probably go through _rank_not_in_group + # mechanism to disable sending tensors + if group_id is not None: + for test_case_id in range(2, 5): + # Make sure we create tensors of incompatible sizes, e.g. + # [1], [2x2], [3x3x3] ... 
to be sent in one batch + input_tensors = [ + _build_multidim_tensor( + tensor_id, tensor_id, rank + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + output_tensor_lists = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, -1, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for _ in group + ] + expected_tensors = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for rank_iter in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensor_lists, input_tensors, expected_tensors, group_id + ), "output tensors do not match expected outputs" + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_simple(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_with_empty(self): + group, group_id, rank = self._init_global_test() + input_tensors = [ + rank * torch.ones([2, 2]), + torch.ones([0]), + (rank + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + output_tensors_lists = [ + [ + -1 * torch.ones([2, 2]), + -1 * torch.ones([0]), + -1 * torch.ones([3, 3]), + -1 * torch.ones([0]), + -1 * torch.ones([0]), + ] + for _ in group + ] + expected_tensors = [ + [ + r * torch.ones([2, 2]), + torch.ones([0]), + (r + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + for r in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensors_lists, input_tensors, expected_tensors, group_id + ) + self._barrier() + + # AllToAll + def _test_all_to_all_single_equal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_tensor = torch.ones([size, size], dtype=dtype) * rank + expected_tensor = torch.cat( + [torch.ones([1, size], dtype=dtype) * i for i in group] + ) + out_tensor = torch.ones([size, size], dtype=dtype) * -1 + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) 
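+ # Note (assumption about the shape bookkeeping): complex inputs appear to be recorded through their + # real-valued view, so the expected shape handed to call_dist_op below is taken from + # torch.view_as_real(in_tensor) rather than from in_tensor itself, mirroring the other collective helpers above.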
+ if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(in_tensor).shape] + else: + tensor_shapes = [in_tensor.shape] + self.call_dist_op( + ":all_to_all", + False, + dist.all_to_all_single, + out_tensor, + in_tensor, + group=group_id, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_single_unequal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + out_splits = [rank + 1 for _ in group] + in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank + out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype) + expected_tensor = torch.cat( + [torch.ones([rank + 1, size], dtype=dtype) * i for i in group] + ) + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + dist.all_to_all_single( + out_tensor, in_tensor, out_splits, in_splits, group=group_id + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + in_tensors = [ + torch.ones([in_splits[i], size], dtype=dtype) * rank + for i, _ in enumerate(group) + ] + out_tensors = [ + torch.ones([(rank + 1), size], dtype=dtype) for _ in group + ] + expected_tensors = [ + torch.ones([rank + 1, size], dtype=dtype) * i for i in group + ] + if cuda: + in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors] + expected_tensors = [ + t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors + ] + out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors] + dist.all_to_all(out_tensors, in_tensors, group=group_id) + for t1, t2 in zip(out_tensors, expected_tensors): + self.assertEqual(t1, t2) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU 
all_to_all_single" + ) + def test_all_to_all_single_unequal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group(self): + group, group_id, rank = self._init_group_test() + 
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + @skip_if_small_worldsize + def test_all_to_all_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_small_worldsize + @skip_if_rocm + def test_all_to_all_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm + def test_all_to_all_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + # BARRIER + def _test_barrier_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + WAIT_TIME = 0.3 # seconds + + for dest in group: + expected_time = torch.DoubleTensor(1).fill_(0.0) + if cuda: + expected_time = expected_time.cuda(rank_to_GPU[rank][0]) + if dest == rank: + expected_time.fill_(time.time() + WAIT_TIME) + dist.broadcast(expected_time, 
dest, group_id) + time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer + dist.barrier(group_id) + else: + dist.broadcast(expected_time, dest, group_id) + dist.barrier(group_id) + self.assertGreaterAlmostEqual( + float(time.time()), + float(expected_time[0]), + "destination rank: %d, my rank: %d" % (dest, rank) + + " (if you see this failure, please report in #14554)", + ) + + # Use higher timeout for the instance where the test runs + # against a subgroup and uses a CUDA tensor for expected time. + # The CUDA initialization for the participating processes can + # take long enough for the barrier timeout to trigger on the + # process that doesn't participate in the group. + self._barrier(timeout=20) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + def test_barrier_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier(self): + group, group_id, rank = self._init_global_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_group(self): + group, group_id, rank = self._init_group_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_barrier_helper(group, group_id, rank) + + def _model_step(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad = None + + def _model_step_with_zero_grad(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad.requires_grad_(False) + param.grad.zero_() + + def _prepare_dummy_data(self, local_bs): + # global_bs for DDP should be divisible by WORLD_SIZE + world_size = int(os.environ["WORLD_SIZE"]) + global_bs = world_size * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + return global_bs, input_cpu, target, loss + + # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL + def _test_DDP_helper( + self, model, 
input_var, target, loss, scale_factor=1.0, memory_format=None + ): + model.train() + output = model(input_var) + l = loss(output, target) * scale_factor + l.backward() + if memory_format is not None: + self.assertTrue(output.is_contiguous(memory_format=memory_format)) + + def _assert_equal_param(self, param_gpu, param_DDP): + self.assertEqual(len(param_gpu), len(param_DDP)) + for p_gpu, p_DDP in zip(param_gpu, param_DDP): + self.assertEqual(p_gpu, p_DDP) + + def _test_DDP_niter( + self, + model_base, + model_DDP, + input, + target, + loss, + local_bs, + rank, + batch_size, + test_save, + offset=None, + world_size=0, + zero_grad=False, + memory_format=None, + n_iter=5, + ): + for idx in range(n_iter): + # single cpu/gpu training + self._test_DDP_helper( + model_base, input, target, loss, memory_format=memory_format + ) + + if offset is None: + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + world_size * local_bs / batch_size if world_size != 0 else 1, + memory_format=memory_format, + ) + + # Update weights and run a second iteration to shake out errors + if zero_grad: + self._model_step_with_zero_grad(model_base) + self._model_step_with_zero_grad(model_DDP) + else: + self._model_step(model_base) + self._model_step(model_DDP) + self._assert_equal_param( + list(model_base.parameters()), list(model_DDP.module.parameters()) + ) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + # save the model in the middle and reload + if test_save and idx == 2 and INIT_METHOD.startswith("file://"): + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + with tempfile.TemporaryFile() as tmp_file: + torch.save(model_DDP, tmp_file) + tmp_file.seek(0) + saved_model = torch.load(tmp_file) + for k in model_DDP.state_dict(): + self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k]) + + def _test_DistributedDataParallel( + self, + gpu_subset, + rank, + output_device=None, + gradient_as_bucket_view=False, + static_graph=False, + set_static_graph_twice=False, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = DDP_NET + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = copy.deepcopy(model) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=gpu_subset, + gradient_as_bucket_view=gradient_as_bucket_view, + static_graph=static_graph, + ) + + if set_static_graph_twice: + model_DDP._set_static_graph() + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + # dummy data initialization + local_bs = len(gpu_subset) + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + 
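+ # Trailing barrier keeps every rank alive until all peers have finished the parity checks above, + # so no process tears down its state while others are still comparing parameters.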
self._barrier() + + def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False): + # Run a simple end to end DDP-CPU model, use result of single node + # model as baseline + group, group_id, rank = self._init_global_test() + + # cpu training setup + model_base = DDP_NET + + # DDP-CPU training setup + model_DDP = copy.deepcopy(model_base) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, gradient_as_bucket_view=gradient_as_bucket_view + ) + + # dummy data initialization + local_bs = 2 + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_base, + model_DDP, + input_cpu, + target, + loss, + local_bs, + rank, + global_bs, + False, + zero_grad=True, + ) + self._barrier() + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU(self): + self._test_DistributedDataParallelCPU() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU_grad_is_view(self): + self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_DistributedDataParallel_requires_grad(self): + # a module without gradients shouldn't be accepted + self.assertRaises( + RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module()) + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_zero_output_features(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10) + self.relu = nn.ReLU() + self.net2 = nn.Linear(10, 0) + + model = ToyModel().to(self.rank) + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + + @skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test") + def test_ddp_create_graph(self): + class Model(nn.Module): + def __init__(self): + super().__init__() + self.p = nn.Parameter(torch.tensor(1.0)) + + def forward(self): + return self.p.pow(2) + + model = Model() + ddp_model = torch.nn.parallel.DistributedDataParallel(model) + for _ in range(6): + # Verify DDP doesn't throw when ran with create_graph=True. + # Although we do warn about potential issues, please see + # https://github.com/pytorch/pytorch/issues/63929 for details. + ddp_model().backward(create_graph=True) + # grad tensors should require grad. 
+ self.assertTrue( + all(param.requires_grad for param in ddp_model.parameters()) + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedDataParallel_non_default_stream(self): + stream = torch.cuda.Stream(self.rank) + rank = self.rank + with torch.cuda.stream(stream): + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank] + ) + for i in range(1000): + # Clear gradients manually + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net.module.weight.grad + avg = grad.clone() + # All-reducing the gradient averages should give us the gradient + # average. If not, then one of the workers has not correctly + # written back the averaged gradient before this all-reduce call. + dist.all_reduce(avg) + world_size = int(os.environ["WORLD_SIZE"]) + avg.div_(world_size) + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual( + avg[0, 0], + expected_grad, + msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_comm_hook_logging(self): + hooks = [ + default.allreduce_hook, + default.fp16_compress_hook, + powerSGD.powerSGD_hook, + powerSGD.batched_powerSGD_hook, + quantization_hooks.quantization_pertensor_hook, + quantization_hooks.quantization_perchannel_hook, + ] + + cpp_builtin_hooks = [ + dist.BuiltinCommHookType.ALLREDUCE, + dist.BuiltinCommHookType.FP16_COMPRESS, + ] + + for hook in hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model.register_comm_hook(None, hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__) + + for hook in cpp_builtin_hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model._register_builtin_comm_hook(hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook)) + + # No hook registered + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + # After second forward pass, hook should still be empty string + for i in range(2): + inp = torch.ones(1, 1, device=self.rank) + loss = 
ddp_model(inp).sum() + loss.backward() + + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. + self.assertEqual(ddp_logging_data.get("comm_hook", ""), "") + + def _test_ddp_hook_with_optimizer_parity( + self, + grad_as_bucket_view, + static_graph, + optim_cls, + optimize_subset, + *functional_optim_args, + **functional_optim_kwargs, + ): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + models_to_test = [ + (LargeNet(), torch.randn(1, 1000).cuda()), + ] + if HAS_TORCHVISION: + models_to_test.append( + (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda()) + ) + for (model, inp) in models_to_test: + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + # Create DDP model that runs optimizer in fused fashion. + ddp_model_with_optimizer_hook = ( + torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + ) + + # Create DDP model with no hook that does optimizer after + # backward. + ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + hook_params = ddp_model_with_optimizer_hook.parameters() + no_hook_params = ddp_model_with_no_hook.parameters() + if optimize_subset: + hook_params = list(hook_params) + no_hook_params = list(no_hook_params) + self.assertGreater(len(hook_params), 0) + hook_params = [hook_params[0]] + no_hook_params = [no_hook_params[0]] + + # Register a fused optimizer that will run optimizer in step + # with allreduce. + + if optimize_subset: + # API where optim_params is specified. + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + optim_params=hook_params, + **functional_optim_kwargs, + ) + else: + # API where optim_params is omitted + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + **functional_optim_kwargs, + ) + + optimizer_no_hook = optim_cls( + no_hook_params, + *functional_optim_args, + **functional_optim_kwargs, + ) + + # Verify parameters are equal initially. + for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Save old parameters to later verify optimizer modified them. + opt_hook_init_params = copy.deepcopy( + list(ddp_model_with_optimizer_hook.parameters()) + ) + + # Run optimizer with hook model. + for i in range(6): + ddp_model_with_optimizer_hook.zero_grad() + out = ddp_model_with_optimizer_hook(inp) + loss = out.sum() + loss.backward() + + dist.barrier() + + # Run regular model. + for i in range(6): + ddp_model_with_no_hook.zero_grad() + out = ddp_model_with_no_hook(inp) + loss = out.sum() + loss.backward() + optimizer_no_hook.step() + + dist.barrier() + + # Now verify parameters are equal. 
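+ # Both models consumed identical inputs and allreduced gradients, and the fused optimizer-in-backward + # applies the same update rule as the explicit optimizer_no_hook.step() above, so every parameter + # should match element-wise after the same number of iterations.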
+ for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Verify optimizer modified appropriate parameter set, + # otherwise they'd be trivially equal above. + if optimize_subset: + self.assertNotEqual( + opt_hook_init_params[0], + next(iter(ddp_model_with_optimizer_hook.parameters())), + ) + # Untouched params should be equal + self.assertEqual( + opt_hook_init_params[1:], + list(ddp_model_with_optimizer_hook.parameters())[1:], + ) + else: + self.assertNotEqual( + opt_hook_init_params, + list(ddp_model_with_optimizer_hook.parameters()), + ) + dist.barrier() + + """ + # Commenting out the following 3 tests as they cause Sandcastle jobs to fail + # Failure signature: + # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw + + from torch.testing._internal.common_utils import parametrize + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("grad_as_bucket_view", [True, False]) + @parametrize("static_graph", [True, False]) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adamw( + self, + grad_as_bucket_view, + static_graph, + optimize_subset, + ): + adamw_lr = 1e-2 + adamw_betas = (0.9, 0.99) + adamw_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + grad_as_bucket_view, + static_graph, + torch.optim.AdamW, + optimize_subset, + adamw_lr, + betas=adamw_betas, + eps=adamw_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset): + adam_lr = 1e-2 + adam_betas = (0.9, 0.99) + adam_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static graph + torch.optim.Adam, + optimize_subset, + adam_lr, + betas=adam_betas, + eps=adam_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset): + sgd_lr = 1e-2 + sgd_momentum = 0.9 + sgd_weight_decay = 0.01 + # Not testing grad_as_bucket_view and static_graph as they are + # tested in AdamW test above. 
+ self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static_graph + torch.optim.SGD, + optimize_subset, + sgd_lr, + momentum=sgd_momentum, + weight_decay=sgd_weight_decay, + ) + """ + + @skip_if_lt_x_gpu(2) + def test_get_data_parallel_params(self): + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params( + model, named_params=True + ) + for name, _ in dp_params: + self.assertNotEqual(f"module.{params_to_ignore[0]}", name) + + # test named_params=False, just check if returns the expected + # no of parameters. + num_ddp_params = len(list(model.parameters())) - 1 + count = 0 + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(model, named_params=False) + for _ in dp_params: + count += 1 + self.assertEqual(count, num_ddp_params) + + def _test_ddp_apply_optim_in_backward( + self, + optim_cls, + optim_kwargs, + init_before, + gradient_as_bucket_view=True, + ): + # Need to seed to ensure inputs are unique across rank. Otherwise, + # allreduce won't have any effect. + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + torch.cuda.set_device(self.rank) + + # Test a simple linear as well as a ResNet model. + models_to_test = [ + nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda() + ] + if HAS_TORCHVISION: + models_to_test.append(torchvision.models.resnet50().cuda()) + + for j, model in enumerate(models_to_test): + model_optim_in_bwd = copy.deepcopy(model) + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + optim = optim_cls(model.parameters(), **optim_kwargs) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + model_optim_in_bwd = nn.parallel.DistributedDataParallel( + model_optim_in_bwd, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + + for p1, p2 in zip(model.parameters(), model_optim_in_bwd.parameters()): + self.assertEqual(p1, p2, "Parameters not initially equal!") + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + for i in range(8): + inp = ( + torch.randn(1, 3, 1000, 1000, device="cuda") + if j == 1 + else torch.randn(10, 3, device="cuda") + ) + model(inp).sum().backward() + optim.step() + model_optim_in_bwd( + inp + ).sum().backward() # runs optimizer as well + for p1, p2 in zip( + model.parameters(), model_optim_in_bwd.parameters() + ): + self.assertEqual( + p1, p2, f"Params not equal at iteration {i}" + ) + self.assertTrue( + p2.grad is None, + f"Optim in backward grad is not None at {i}", + ) + + # set_to_none for regular optimizer to match in backward + # case. 
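+ # (Assumed behavior of _apply_optimizer_in_backward) the in-backward optimizer frees each .grad after + # stepping, leaving it None; zeroing the baseline optimizer with set_to_none=True keeps the two models' + # gradient state comparable between iterations.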
+ optim.zero_grad(set_to_none=True) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward(self): + for optim_cls, init_before in itertools.product( + [torch.optim.SGD, torch.optim.Adam], [True, False] + ): + with self.subTest(optim_cls=optim_cls): + self._test_ddp_apply_optim_in_backward( + optim_cls=optim_cls, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self): + for init_before in [True, False]: + self._test_ddp_apply_optim_in_backward( + optim_cls=torch.optim.SGD, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + gradient_as_bucket_view=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_ignored_params(self): + torch.cuda.set_device(self.rank) + for init_before in [True, False]: + with self.subTest(init_before=init_before): + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + inp = torch.randn(1, 10) + a, b = net(inp) + (a.transpose(0, 1) @ b).sum().backward() + # a.weight did not go through allreduce, so optimizer acted on local + # gradient, which should be different across ranks. Remaining params + # should be equal. + models = [None for _ in range(dist.get_world_size())] + dist.all_gather_object(models, model) + rank0_model, remainder = models[0], models[1:] + for m in remainder: + self.assertNotEqual(rank0_model.a.weight, m.a.weight) + self.assertEqual( + list(rank0_model.b.parameters()), list(m.b.parameters()) + ) + self.assertEqual(rank0_model.a.bias, m.a.bias) + + def _get_fp16_config(self) -> _MixedPrecision: + return _MixedPrecision( + param_dtype=torch.float16, + reduce_dtype=torch.float16, + buffer_dtype=torch.float16, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_ignored_params(self): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + model = TwoLinLayerNet() + model.register_buffer("buffer", torch.ones(5)) + # Parameters to ignore are in the format {module_name}.{param_name} + to_ignore = ["a.weight", "buffer"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, to_ignore, + ) + mp_config = self._get_fp16_config() + net = torch.nn.parallel.DistributedDataParallel( + model.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=True, + ) + to_ignore = [f"module.{name}" for name in to_ignore] + expected_ignored = len(to_ignore) + n_ignored = 0 + # ignored params should not have _mp_param or _fp_param fields. 
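+ # (Assumption about DDP native mixed precision internals) each managed parameter carries a low-precision + # `_mp_param` used for compute and a full-precision `_fp_param` master copy, while names listed in + # `to_ignore` (prefixed with `module.`) are left untouched; that is what the loop below asserts.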
+ for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()): + if n in to_ignore: + n_ignored += 1 + self.assertFalse(hasattr(p, '_mp_param')) + self.assertFalse(hasattr(p, '_fp_param')) + else: + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + self.assertEqual(expected_ignored, n_ignored) + + def _test_ddp_native_mixed_precision( + self, gradient_as_bucket_view, set_grad_to_none + ): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + inp = torch.randn(10, 1) + mp_config = self._get_fp16_config() + + class MyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.m = torch.nn.Linear(1, 5) + self.register_buffer('buffer', torch.randn(1, 2)) + self.p = torch.nn.Parameter( + torch.randn(10, 5), requires_grad=False + ) + + def forward(self_, x): # noqa: B902 + params = self_.m.parameters() + for p in params: + self.assertEqual(mp_config.param_dtype, p.dtype) + + self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype) + + self.assertEqual(mp_config.param_dtype, x.dtype) + return self_.m(x) + self_.p + + m = MyModel() + + net = torch.nn.parallel.DistributedDataParallel( + m.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + # Buffers are casted in constructor. + self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype) + # Each param should have an mp_param in the lower precision, and + # an fp_param in the higher precision. + for p in net.parameters(): + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + for i in range(6): + loss = net(inp).sum() + loss.backward() + # Verify gradient synchronization and params and grads are fp32. 
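+ # Although forward/backward run in the reduced param_dtype, the gradients exposed on the fp32 + # parameters should come back as float32 and be identical across ranks after DDP's allreduce, + # which is what the all_gather comparison below verifies.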
+ for n, param in net.named_parameters(): + self.assertEqual(param.dtype, torch.float32) + if param.grad is None: + assert n == 'module.p' # Only param that doesn't require grad + else: + self.assertEqual(param.grad.dtype, torch.float32) + tensor_list = [ + torch.zeros_like(param.grad) + for _ in range(dist.get_world_size(net.process_group)) + ] + dist.all_gather(tensor_list, param.grad) + g, rest = tensor_list[0], tensor_list[1:] + self.assertEqual(g.dtype, torch.float32) + for g_ in rest: + self.assertEqual(g_.dtype, torch.float32) + self.assertEqual(g, g_) + net.zero_grad(set_to_none=set_grad_to_none) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=False, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=False, set_grad_to_none=True + ) + + def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100): + rank = self.rank + m = torch.nn.Linear(1, 5) + try: + process_group = state.process_group + except AttributeError: + process_group = state + + net_with_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + net_with_hook.register_comm_hook(state=state, hook=hook) + net_without_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + for i in range(100): + # Clear gradients manually. + for g in [ + net_without_hook.module.weight.grad, + net_with_hook.module.weight.grad, + ]: + if g is not None: + g.requires_grad_(False) + g.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net_without_hook(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net_without_hook.module.weight.grad + avg = grad.clone() + expected_grad = ( + sum(i for i in range(dist.get_world_size())) / dist.get_world_size() + ) + loss_hook = net_with_hook(batch).sum() + loss_hook.backward() + grad_hook = net_with_hook.module.weight.grad + avg_hook = grad_hook.clone() + + if i < num_validated_iters: + # Verify hook grad with expected. 
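+ # Each rank feeds batch == its own rank, so the averaged weight gradient is + # sum(range(world_size)) / world_size, e.g. (0 + 1) / 2 = 0.5 when world_size == 2.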
+ self.assertEqual( + avg_hook[0, 0].item(), + expected_grad, + msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}", + ) + # Verify hook grad with vanilla allreduce + self.assertEqual( + avg_hook[0, 0], + avg[0, 0], + msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce(self): + self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce_process_group(self): + # process_group is passed in to both DDP and comm. hook + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)] + process_group = torch.distributed.new_group(gpus) + self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_powerSGD(self): + for warm_start in [True, False]: + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=2, + warm_start=warm_start, + ) + self._test_ddp_hook_parity( + state=powersgd_state, hook=powerSGD.powerSGD_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_but_pass_in_sandcastle_if( + NO_MULTIPROCESSING_SPAWN, + "Disabled for environments that \ + don't support multiprocessing with spawn start method", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_post_localSGD(self): + # Although we start run local SGD at iteration 10, since we still use the global process group to run it, + # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + # Only validate the warmup iterations before local SGD is applied, + # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all. + # Note that in practice a model averager has to be applied to run model averaging, + # so local gradient averaging is not necessary. + start_localSGD_iter = 10 + state = post_localSGD.PostLocalSGDState( + process_group=None, + subgroup=dist.group.WORLD, + start_localSGD_iter=start_localSGD_iter, + post_local_gradient_allreduce=False, + ) + self._test_ddp_hook_parity( + state=state, + hook=post_localSGD.post_localSGD_hook, + num_validated_iters=start_localSGD_iter, + ) + + # When `subgroup` is None, it is equivalent to the subgroup on the each node. 
+ # For this single-node test environment, the intra-node process group is equivalent to + # the global process group. + if self.world_size == dist.get_world_size(): + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + # Since we start local SGD later than the total number of 100 iterations, + # no local SGD actually is executed, and we don't even need to provide a subgroup for this case. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=1000 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + def _prepare_single_device_module( + self, + rank, + process_group, + devices, + device_ids, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + device = devices[0] if devices else torch.device("cuda:%d" % rank) + ddp_model = DistributedDataParallel( + copy.deepcopy(model).to(device), + device_ids=device_ids, + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + + model.to(device) + + input = torch.randn(global_batch_size, 2).to(device) + target = torch.randn(global_batch_size, 4).to(device) + + return model, ddp_model, input, target + + def _prepare_cpu_module( + self, + process_group, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + ddp_model = DistributedDataParallel( + copy.deepcopy(model), + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + input = torch.randn(global_batch_size, 2) + target = torch.randn(global_batch_size, 4) + return model, ddp_model, input, target + + def _test_accumulate_gradients_no_sync( + self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False + ): + """ + This is the recommended way to implement accumulate grads. + If ``ddp_comm_hook`` input was specified, it will also register that hook + to the ``ddp_model``. The hook fed into this function should not change + the resulting gradients. + """ + group, group_id, rank = self._init_global_test() + world_size = get_world_size() + + # FIXME: Add testing for gloo/CUDA + if BACKEND == "mpi" or BACKEND == "gloo": + global_batch_size = world_size + local_batch_size = 1 + model, ddp_model, input, target = self._prepare_cpu_module( + group_id, global_batch_size, gradient_as_bucket_view + ) + + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + int_devices = rank_to_GPU[rank][:1] + devices = [torch.device("cuda:" + str(i)) for i in int_devices] + global_batch_size = world_size + local_batch_size = len(devices) + model, ddp_model, input, target = self._prepare_single_device_module( + rank, + group_id, + devices, + devices, + global_batch_size, + gradient_as_bucket_view, + ) + + if ddp_comm_hook is not None: + ddp_model.register_comm_hook(group_id, ddp_comm_hook) + + def step_model(model, input, target): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + loss.backward() + + # ensure accumulate grads works with no_grad => no grads are accumulated. 
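+ # The forward below runs under both torch.no_grad() and ddp_model.no_sync(), so it neither builds + # autograd state nor triggers gradient communication, and parameter .grad fields stay untouched.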
+ with torch.no_grad(): + with ddp_model.no_sync(): + ddp_model.train() + ddp_model(input) + + # check two model parameters over num_iters iterations + for iteration in range(num_iters): + step_model(model, input, target) + + ddp_input = input[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + ddp_target = target[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + + if iteration % 2 == 0: + # accumulate grads locally + with ddp_model.no_sync(): + step_model(ddp_model, ddp_input, ddp_target) + else: + # sync grads + step_model(ddp_model, ddp_input, ddp_target) + + for i, j in zip(model.parameters(), ddp_model.parameters()): + if not i.requires_grad: + continue + if iteration % 2 == 0: + self.assertNotEqual(i.grad, j.grad) + else: + self.assertEqual(i.grad, j.grad) + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + iteration) + input = input[torch.randperm(global_batch_size)] + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_grad_is_view(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync + using allreduce hook and validates whether future result was properly + passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + tensors = [bucket.buffer() / world_size] + return ( + group_id.allreduce(tensors) + .get_future() + .then(lambda fut: fut.value()[0]) + ) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce + hook that also uses then callbacks. In first then callback result is multiplied + by 2, and the second callback divides the result by 2 * world_size. It validates + whether final result was properly passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_with_then_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + fut = group_id.allreduce([bucket.buffer()]).get_future() + + def mult(fut): + # Multiply the result by 2. + return 2 * fut.wait()[0] + + def div(fut): + # Divide the result by 2 * world_size. 
+ return fut.wait() / (2 * world_size) + + return fut.then(mult).then(div) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_with_then_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_get_future(self): + def mult(fut): + return [t * 3 for t in fut.wait()] + + def add(fut): + return [t + 1 for t in fut.wait()] + + group, group_id, rank = self._init_global_test() + input = _build_tensor(3, 2) + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + input = input.to(device_id) + fut = group_id.allreduce([input]).get_future() + res = fut.then(mult).then(add).wait() + expected = _build_tensor(3, 2 * len(group) * 3 + 1) + + self.assertEqual(res[0], expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + gpus = list(rank_to_GPU[rank]) + + for use_bucket_view, static_graph in itertools.product( + (False, True), (False, True) + ): + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test set static graph twice + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + set_static_graph_twice=True, + ) + + # test output_device + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test device_ids + gpus_list = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel( + gpu_subset=gpus_list, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + def _test_DistributedDataParallel_with_amp(self, grad_is_view=False): + torch.manual_seed(31415) + # Creates model and optimizer in default precision + model = copy.deepcopy(DDP_NET).cuda() + optimizer = torch.optim.SGD(model.parameters(), lr=0.03) + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + # verify grads are none before training + for p in ddp_model.parameters(): + self.assertTrue(p is not None) + self.assertTrue(p.grad is None) + + for idx in range(20): + optimizer.zero_grad() + # Runs the forward pass with autocasting. + with autocast(): + output = ddp_model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + # Backward passes under autocast are not recommended. + # Backward ops run in the same dtype autocast chose for corresponding forward ops. 
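# --- Illustrative aside (hedged sketch, not part of this test file): the AMP test above
# --- follows the standard autocast/GradScaler step shown below. `model`, `optimizer`,
# --- `loss_fn`, `inp`, and `target` are assumed placeholders already on the current CUDA
# --- device.
from torch.cuda.amp import GradScaler, autocast


def amp_train_step(model, optimizer, loss_fn, inp, target, scaler: GradScaler):
    optimizer.zero_grad(set_to_none=True)
    with autocast():                      # forward runs in mixed precision
        loss = loss_fn(model(inp), target)
    scaler.scale(loss).backward()         # backward on the scaled loss
    scaler.step(optimizer)                # unscales, then steps only if grads are finite
    scaler.update()                       # adjusts the scale for the next iteration
    return loss.detach()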
+ scaler.scale(loss).backward() + + # verify grads are not none and are valid during training + for p in ddp_model.parameters(): + if p.requires_grad: + self.assertTrue(p.grad is not None) + self.assertFalse(p.grad.isnan().any()) + self.assertFalse(p.grad.isinf().any()) + + # scaler.step() first unscales the gradients of the optimizer's assigned params. + # If these gradients do not contain infs or NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + idx) + input = input[torch.randperm(dist.get_world_size() * 2)] + + return ddp_model + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_with_amp_and_grad_is_view(self): + torch.cuda.set_device(self.rank) + ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=False + ) + ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=True + ) + for i, j in zip( + ddp_model_grad_not_view.parameters(), + ddp_model_grad_is_view.parameters(), + ): + self.assertEqual(i, j) + + def _test_DistributedDataParallel_SyncBatchNorm( + self, + gpu_subset, + rank, + local_bs, + global_bs, + offset, + output_device=None, + affine=True, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = BN_NET if affine else BN_NET_NO_AFFINE + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, device_ids=gpu_subset + ) + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + model_DDP = torch.load(tmp) + else: + torch.save(model_DDP, tmp.name) + model_DDP = torch.load(tmp.name) + + # data initialization + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + offset, + dist.get_world_size(), + 5 if affine else 2, + ) + self._barrier() + + def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view): + learning_rate = 0.03 + + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + averager = create_averager() + opt = torch.optim.SGD(net.parameters(), lr=learning_rate) + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + # Process group cannot be pickled in some environments, + # so cannot deep copy an averager. 
See: + # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496 + averager2 = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step(opt, net, loss_fn, input, target) + averager.average_parameters(net.parameters()) + + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + for p1, p2 in zip( + net.parameters(), net_using_post_localSGD_opt.parameters() + ): + self.assertEqual(p1.data, p2.data) + + # Also check if the built-in step counters are the same to prevent a bug like #74737. + self.assertEqual(averager.step, averager2.step) + + def _create_periodic_model_averager(self): + return averagers.PeriodicModelAverager(period=4, warmup_steps=10) + + def _create_post_localSGD_optimizer(self, net, learning_rate, averager): + return post_localSGD_optimizer.PostLocalSGDOptimizer( + optim=torch.optim.SGD(net.parameters(), lr=learning_rate), + averager=averager, + ) + + def _perform_a_train_step(self, optimizer, net, loss_fn, input, target): + optimizer.zero_grad() + output = net(input) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + + def _test_post_localSGD_optimizer_step_reload( + self, create_averager, chkpt_file + ): + learning_rate = 0.03 + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank] + ) + + averager = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager + ) + + averager2 = create_averager() + dummy_post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + + if self.rank == 0: + torch.save( + {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} + checkpoint = torch.load(chkpt_file, map_location=map_location) + dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"]) + + # Check that we didn't hit the trivial case + self.assertNotEqual(averager2.step, 0) + # Check if dummy averager was initialized to a correct value + self.assertEqual(averager.step, averager2.step) + + # Remove 'step' entry from a checkpoint. + # And make sure it is not in the state dictionary + del checkpoint["optimizer_state_dict"]["step"] + self.assertNotIn("step", checkpoint["optimizer_state_dict"]) + + # Check if checkpoint without a 'step' entry invokes a warning + with self.assertWarnsRegex( + expected_warning=UserWarning, + expected_regex="Loaded state dict does not contain a step counter for an averager. 
" + "Setting step counter to 0.", + ): + dummy_post_localSGD_opt.load_state_dict( + checkpoint["optimizer_state_dict"] + ) + + self.assertEqual(averager2.step, 0) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_grad_is_view(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=True, + ) + + def _create_hierarchical_model_averager(self): + period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())]) + return hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=4 + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view( + self, + ): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=True, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_step_reload(self): + torch.cuda.set_device(self.rank) + with _rank_temp_file() as tmp_file: + self._test_post_localSGD_optimizer_step_reload( + self._create_periodic_model_averager, tmp_file + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self): + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last + ) + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last_3d + ) + + def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + self, memory_format + ): + group, group_id, rank = self._init_global_test() + num_processes = dist.get_world_size() + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(num_processes * 2) + + model = ONLY_SBN_NET + model_gpu = copy.deepcopy(model).cuda(rank) + model_DDP = nn.parallel.DistributedDataParallel( + model_gpu, device_ids=[rank] + ) + + shapes = [global_bs, 2, 4, 4] + ( + [] if memory_format is torch.channels_last else [4] + ) + + input_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + 
.to(memory_format=memory_format) + ) + target_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + .to(memory_format=memory_format) + ) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_gpu, + target_gpu, + loss, + local_bs, + rank, + global_bs, + True, + bs_offset, + dist.get_world_size(), + memory_format=memory_format, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + # test output_device + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + # test device_ids + gpus = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + affine=False, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = len(gpus) * 2 + global_bs = dist.get_world_size() * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. 
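# --- Illustrative aside (hedged sketch, not part of this test file): the SyncBatchNorm tests
# --- above all follow the same recipe of converting BatchNorm layers and wrapping the result
# --- in DDP. `local_rank` is an assumed placeholder; a process group is assumed to be
# --- initialized already.
import torch.nn as nn


def build_syncbn_ddp(model: nn.Module, local_rank: int) -> nn.parallel.DistributedDataParallel:
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)   # swap BatchNorm -> SyncBatchNorm
    model = model.cuda(local_rank)
    return nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])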
+ with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + @require_world_size(2) + def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = 1 + global_bs = dist.get_world_size() + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. + with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value( + self, + ): + group, group_id, rank = self._init_global_test() + model = nn.parallel.DistributedDataParallel( + ONLY_SBN_NET.cuda(rank), device_ids=[rank] + ) + + input_var = [] + for i in range(dist.get_world_size()): + input_var_rank = torch.cat( + [ + torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)), + torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)), + ], + dim=1, + ) + input_var.append(input_var_rank) + + all_input_var = torch.cat( + [ + x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) + for x in input_var + ], + dim=1, + ).cuda(rank) + + for i in range(100): + y = model(input_var[rank].cuda(rank)) + y.mean().backward() + + running_mean, running_var = ( + model.module.running_mean, + model.module.running_var, + ) + torch.testing.assert_close(running_mean, all_input_var.mean(1)) + torch.testing.assert_close(running_var, all_input_var.var(1)) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self): + group, group_id, rank = self._init_global_test() + # only do single GPU per process + gpus = [rank] + + # cpu training setup + model = BN_NET + + num_processes = dist.get_world_size() + local_bs = rank + 2 + bs_offset = int((rank + 3) * rank / 2) + global_bs = int((num_processes + 3) * num_processes / 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND 
not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_half(self): + group, group_id, rank = self._init_global_test() + + model = copy.deepcopy(BN_NET) + model = model.half() + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = nn.parallel.DistributedDataParallel(model.cuda(rank), device_ids=[rank]) + inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank)) + # Check that forward/backward do not error with dtype mismatch + out = model(inp) + self.assertEqual(out.dtype, torch.float16) + out.sum().backward() + for param in model.parameters(): + self.assertEqual(param.grad.dtype, torch.float16) + + def _test_ddp_logging_data(self, is_gpu): + rank = dist.get_rank() + model_DDP = copy.deepcopy(DDP_NET) + if is_gpu: + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP.cuda(rank), device_ids=[rank] + ) + else: + model_DDP = nn.parallel.DistributedDataParallel(model_DDP) + + # dummy data initialization + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + if is_gpu: + input = input.cuda(rank) + target = target.cuda(rank) + + model_DDP._set_ddp_runtime_logging_sample_rate(2) + + for idx in range(20): + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + + self._model_step_with_zero_grad(model_DDP) + + # Verify DDP logging data is sampled as expected + # If it has ran more than 10 iterations and this is + # the sampled iteration for measuring run time stats, + # the run time stats for this idx-th iteration will not + # be zeros. + ddp_logging_data = model_DDP._get_ddp_logging_data() + if idx > 0 and (idx < 10 or idx % 2 == 0): + self.assertGreaterEqual( + ddp_logging_data.get("forward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertEqual(ddp_logging_data.get("iteration"), idx) + elif idx > 0: + # if the idx-th iteration is not sampled to set runtime stats, + # ddp_logging_data.iteration will not be updated to current + # iteration. 
+ self.assertNotEqual(ddp_logging_data.get("iteration"), idx) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_ddp_logging_data_cpu(self): + def parse_env(var): + return os.environ[var] if var in os.environ else "N/A" + + dist.set_debug_level(dist.DebugLevel.INFO) + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=False) + + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size()) + self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank()) + self.assertEqual(ddp_logging_data.get("module_name"), "Net") + self.assertEqual(ddp_logging_data.get("device_ids"), "") + # output_device is -1 in default if it is not set, e.g. + # output_device of CPU training is -1. + self.assertEqual(ddp_logging_data.get("output_device"), -1) + self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1) + self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024) + self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0) + self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0) + self.assertEqual( + ddp_logging_data.get("backend_name"), dist.get_backend(group_id) + ) + self.assertEqual(ddp_logging_data.get("iteration"), 18) + params = list(model_DDP.parameters()) + num_params = 0 + param_size = 0 + params = list(filter(lambda parameter: parameter.requires_grad, params)) + for p in params: + num_params += 1 + param_size += p.numel() * p.element_size() + self.assertEqual(ddp_logging_data.get("dtypes"), "float") + self.assertEqual( + ddp_logging_data.get("total_parameter_size_bytes"), param_size + ) + self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params) + self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size)) + self.assertEqual( + ddp_logging_data.get("master_port"), parse_env("MASTER_PORT") + ) + self.assertEqual( + ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR") + ) + self.assertEqual( + ddp_logging_data.get("torch_distributed_debug"), + parse_env("TORCH_DISTRIBUTED_DEBUG"), + ) + self.assertEqual( + ddp_logging_data.get("cuda_visible_devices"), + parse_env("CUDA_VISIBLE_DEVICES"), + ) + if ddp_logging_data.get("backend_name") == "gloo": + self.assertEqual( + ddp_logging_data.get("gloo_socket_ifname"), + parse_env("GLOO_SOCKET_IFNAME"), + ) + self.assertEqual( + ddp_logging_data.get("gloo_device_transport"), + parse_env("GLOO_DEVICE_TRANSPORT"), + ) + default_gloo_threads = 2 + self.assertEqual( + ddp_logging_data.get("gloo_num_threads"), + default_gloo_threads, + ) + + self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None) + self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None) + self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None) + self.assertEqual(ddp_logging_data.get("nccl_debug"), None) + self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None) + self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None) + # test runtime logging fields + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. 
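# --- Illustrative aside (hedged sketch, not part of this test file): the assertions above
# --- read DDP's runtime stats through `_get_ddp_logging_data()`, a private API used
# --- throughout this test, after raising the distributed debug level. `ddp_model` is an
# --- assumed, already-constructed DDP module; field availability may vary by build.
import torch.distributed as dist


def dump_ddp_stats(ddp_model):
    dist.set_debug_level(dist.DebugLevel.INFO)
    stats = ddp_model._get_ddp_logging_data()
    # Field names below are the ones checked by the assertions above.
    for key in ("world_size", "rank", "iteration", "bucket_sizes"):
        print(key, "=", stats.get(key))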
+ self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0) + self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1) + self.assertEqual( + ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size) + ) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), 1 + ) + self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + # test larger net with mixed data types, verify multiple bucket sizes + model = LargeNet() + model.float() + model.fc1.double() + model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5) + ddp_logging_data = model_DDP._get_ddp_logging_data() + params = list(model_DDP.parameters()) + self.assertEqual( + ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024) + ) + bucket_sizes = [ + params[1].numel() * params[1].element_size(), + params[0].numel() * params[0].element_size(), + ] + self.assertEqual( + ddp_logging_data.get("bucket_sizes"), + ", ".join(str(x) for x in bucket_sizes), + ) + self.assertEqual(ddp_logging_data.get("dtypes"), "double, float") + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_ddp_logging_data_gpu(self): + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=True) + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("device_ids"), str(rank)) + self.assertEqual(ddp_logging_data.get("output_device"), rank) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = 
ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # test runtime logging fields + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_static_graph_api_cpu(self): + model_DDP = nn.parallel.DistributedDataParallel(DDP_NET) + expected_err = "should be called before training loop starts" + with self.assertRaisesRegex(RuntimeError, expected_err): + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + offset = dist.get_rank() * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + model_DDP._set_static_graph() + + # Verify error was logged in ddp_logging_data. + verify_ddp_error_logged(model_DDP, expected_err) + + @skipIfNoTorchVision + def test_SyncBatchNorm_process_group(self): + # When adopting `convert_sync_batchnorm` to convert a `nn.modules`, + # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm` + # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models). + + process_ids = 0 + process_group = torch.distributed.new_group([process_ids]) + res50_model = torchvision.models.resnet50() + res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(res50_model), process_group + ) + process_group_sync = res50_model_sync.layer1[0].bn1.process_group + self.assertEqual(process_group_sync, process_group) + + def _run_reduction_test( + self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None + ): + if reduction_fn != dist.all_reduce and dst is None: + raise ValueError(f"Reduction fn {reduction_fn} must specify dst!") + if dst is not None: + reduction_fn(tensor, dst, op) + # Only destination rank tensor is expected to have final result. 
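# --- Illustrative aside (hedged sketch, not part of this test file): `_run_reduction_test`
# --- below drives two reduction flavors. `all_reduce` leaves the result on every rank,
# --- whereas `reduce` only guarantees the result on the destination rank. Assumes an
# --- initialized process group and a tensor placed appropriately for the backend.
import torch
import torch.distributed as dist


def reduction_examples(tensor: torch.Tensor) -> None:
    summed = tensor.clone()
    dist.all_reduce(summed, op=dist.ReduceOp.SUM)      # every rank ends up with the sum
    maxed = tensor.clone()
    dist.reduce(maxed, dst=0, op=dist.ReduceOp.MAX)    # only rank 0 is guaranteed the max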
+ if dist.get_rank() == dst: + self.assertEqual(tensor, expected_tensor) + else: + reduction_fn(tensor, op) + self.assertEqual(tensor, expected_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allreduce(self): + torch.cuda.set_device(self.rank) + # Run all_reduce with PRODUCT + element = self.rank % 2 == 0 + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([False, False]).to(self.rank), op + ) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. + input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test(input_tensor, expected_tensor, op) + + # Run all_reduce with SUM + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([True, True]).to(self.rank), op + ) + # TODO: NCCL backend does not work correctly for bitwise reduction ops + # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for + # these once it is supported. + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allgather(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, True]} + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + # Preserve a copy of the tensor to compare against after allgather. + input_tensor_copy = input_tensor.clone() + tensor_list = [ + torch.tensor([False, False]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, input_tensor) + + self.assertEqual(len(tensor_list), dist.get_world_size()) + for i, t in enumerate(tensor_list): + expected = torch.tensor(inp[i % 2]).to(self.rank) + self.assertEqual(t, expected) + # Ensure that the input tensor is not modified, since this collective + # does not modify its input. + self.assertEqual(input_tensor_copy, input_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_nccl_backend_bool_reduce(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, False]} + # Run reduce() with product op + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = torch.tensor([False, False]).to(self.rank) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. 
+ input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test( + input_tensor, expected_tensor, op, dist.reduce, dst=0 + ) + + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = ( + torch.tensor([True, True]).to(self.rank) + if self.rank == 0 + else input_tensor.clone() + ) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_broadcast(self): + tensor_size = 10 + bcast_tensor = torch.tensor( + [ + (random.random() < 0.5 if self.rank == 0 else False) + for _ in range(tensor_size) + ] + ).to(self.rank) + dist.broadcast(bcast_tensor, src=0) + # Now allgather and ensure the tensors are equal. + tensor_list = [ + torch.tensor([False for _ in range(tensor_size)]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, bcast_tensor) + expected = tensor_list[0] + for tensor in tensor_list[1:]: + self.assertEqual(tensor, expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedSampler_padding(self): + # Tests padding of distributed sampler. + world_size = dist.get_world_size() + + # Simulates the 'casual' dataset size + dataset_size = 100 + world_size + 1 + dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)] + + # Simulates the 'tiny' dataset size + dataset_tiny_size = max(world_size // 2 - 1, 1) + dataset_tiny = [ + torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size) + ] + + # Specifying drop_last=True will cause the tail of the data to be dropped. + dist_sampler = DistributedSampler(dataset=dataset, drop_last=True) + local_num_samples, local_dataset_size = ( + dist_sampler.num_samples, + dist_sampler.total_size, + ) + # The effective dataset size should be the greatest integer that is <= + # dataset_size that is divisible by the world_size. This is to ensure each + # rank processes the same number of samples. + effective_dataset_size = ( + math.ceil((dataset_size - world_size) / world_size) + if dataset_size % world_size != 0 + else dataset_size / world_size + ) + self.assertEqual(local_num_samples, effective_dataset_size) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler)) + self.assertEqual(len(indices_list), local_num_samples) + + def validate_global_samples(local_num_samples): + # Ensure that each rank processes the same number of samples. + world_samples = [ + torch.LongTensor([0]).to(self.rank) for _ in range(world_size) + ] + dist.all_gather( + world_samples, torch.tensor([local_num_samples]).to(self.rank) + ) + world_samples = [sample.item() for sample in world_samples] + self.assertEqual(len(set(world_samples)), 1) + + validate_global_samples(local_num_samples) + + # drop_last=False is the default and will add additional indices to be sampled, + # increasing the effective dataset size. + dist_sampler_added_samples = DistributedSampler(dataset=dataset) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples.num_samples, + dist_sampler_added_samples.total_size, + ) + # The effective dataset size is the smallest integer that is >= dataset_size + # and divisible by the world size. 
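# --- Illustrative aside (hedged sketch, not part of this test file): the padding test above
# --- checks that with drop_last=False the sampler pads indices so each rank draws
# --- ceil(N / world_size) samples, while drop_last=True truncates to a multiple of
# --- world_size. `dataset` and `epoch` are assumed placeholders; a process group is assumed
# --- to be initialized.
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler


def make_loader(dataset, epoch: int, drop_last: bool = False) -> DataLoader:
    sampler = DistributedSampler(dataset, drop_last=drop_last)
    sampler.set_epoch(epoch)   # deterministic, epoch-dependent reshuffle shared by all ranks
    return DataLoader(dataset, batch_size=2, sampler=sampler)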
+ self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size)) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples)) + self.assertEqual(len(indices_list), local_num_samples) + + # Ensure that each rank processes the same number of samples. + validate_global_samples(local_num_samples) + + # Ensure additional samples are padded even when + # the extremely small dataset is given. + dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples_tiny.num_samples, + dist_sampler_added_samples_tiny.total_size, + ) + self.assertEqual( + local_num_samples, math.ceil(dataset_tiny_size / world_size) + ) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples_tiny)) + self.assertEqual(len(indices_list), local_num_samples) + validate_global_samples(local_num_samples) + + def _test_allgather_object(self, subgroup=None): + # Only set device for NCCL backend since it must use GPUs. + + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + output_gathered = [None for _ in range(dist.get_world_size())] + dist.all_gather_object( + output_gathered, + gather_objects[self.rank % len(gather_objects)], + group=subgroup, + ) + + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + def test_all_gather_object_default_pg(self): + return self._test_allgather_object() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_all_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_allgather_object(subgroup=subgroup) + + def _test_gather_object(self, pg=None): + # Ensure stateful objects can be gathered + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + my_rank = dist.get_rank(pg) + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. 
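# --- Illustrative aside (hedged sketch, not part of this test file): `_test_allgather_object`
# --- above exercises `all_gather_object` with richer objects; any picklable Python object
# --- works, and for NCCL the current CUDA device must be set beforehand. Assumes an
# --- initialized process group.
import torch.distributed as dist


def gather_ranks_as_objects():
    payload = {"rank": dist.get_rank(), "note": "anything picklable"}
    gathered = [None for _ in range(dist.get_world_size())]
    dist.all_gather_object(gathered, payload)
    return gathered   # index i holds the object contributed by rank i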
+ next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=my_rank))) + + output_gathered = [None for _ in range(dist.get_world_size(pg))] + gather_on_rank = 0 + dist.gather_object( + gather_objects[self.rank % len(gather_objects)], + object_gather_list=output_gathered + if my_rank == gather_on_rank + else None, + dst=gather_on_rank, + group=pg, + ) + if my_rank != gather_on_rank: + self.assertEqual( + output_gathered, [None for _ in range(dist.get_world_size())] + ) + else: + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + # Validate errors when objects can't be pickled. + class Bar: + pass + + b = Bar() + gather_objects = [b for _ in range(dist.get_world_size())] + with self.assertRaisesRegex(AttributeError, "Can't pickle local object"): + dist.all_gather_object( + [None for _ in range(dist.get_world_size())], + gather_objects[self.rank], + group=pg, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object(self): + return self._test_gather_object() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_gather_object(subgroup) + + def validate_net_equivalence(self, net): + # Helper to validate synchronization of nets across ranks. + net_module_states = list(net.module.state_dict().values()) + # Check that all tensors in module's state_dict() are equal. + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for tensor in tensor_list: + self.assertEqual(tensor, t) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sync_module_states(self): + # Test that after calling _sync_module_states, models across ranks + # are the same and are equal to the model on the input rank. + dim = 2 + rank = self.rank + rank_to_broadcast = 1 + # Seed to ensure that ranks are initialized with different initial models. + torch.manual_seed(rank) + model = nn.Linear(dim, dim, bias=False) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + new_model = nn.Linear(dim, dim, bias=False).cuda(rank) + net.module = copy.deepcopy(new_model) + # Assert params are different + net_module_states = list(net.module.state_dict().values()) + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for i, tensor in enumerate(tensor_list): + if i == rank: + self.assertEqual(t, tensor) + else: + # tensor from another rank should be different. 
+ self.assertNotEqual(t, tensor) + + _sync_module_states( + module=net.module, + process_group=net.process_group, + broadcast_bucket_size=net.broadcast_bucket_size, + src=rank_to_broadcast, + params_and_buffers_to_ignore=net.parameters_to_ignore, + ) + # Now all model params should be the same. + self.validate_net_equivalence(net) + # Since the network params were broadcast from rank_to_broadcast, validate that + # they are the same as new_model on rank_to_broadcast. + if rank == rank_to_broadcast: + expected_states = new_model.state_dict().values() + for t, expected in zip(net_module_states, expected_states): + self.assertEqual(t, expected) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_grad_div_uneven_inputs(self): + # Test gradient division during training with join() API. If + # divide_by_initial_world_size=False, we scale by the effective world + # size when allreducing grads. + dim = 5 + batch = 1 + grad_scale = 50 + rank = self.rank + model = nn.Linear(dim, dim, bias=False) + inp = torch.ones(batch, dim, device=self.rank) * grad_scale + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + n_iters = 3 + if self.rank > 0: + n_iters += 2 + + with net.join(divide_by_initial_world_size=False): + for _ in range(n_iters): + loss = net(inp).sum() + loss.backward() + # The grad is always expected_grad, since we divide by the number + # of currently active processes and inactive processes contribute + # zero gradient. If we kept dividing by static initial world + # size as processes leave, the grad would be smaller. + expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grads so that it's the same every iteration + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + # If divide_by_initial_world_size=True (default), we always scale grads + # by the initial world_size. + with net.join(divide_by_initial_world_size=True): + for i in range(n_iters): + loss = net(inp).sum() + loss.backward() + effective_ws = dist.get_world_size() + if i >= 3: + effective_ws -= 1 + expected_grad = ( + torch.ones(dim, dim, device=self.rank) + * grad_scale + * effective_ws + ) / dist.get_world_size() + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grad so that it's the same every iteration. 
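# --- Illustrative aside (hedged sketch, not part of this test file): the uneven-input tests
# --- above rely on the `join()` context manager, which lets ranks that run out of data
# --- shadow the collectives of ranks that keep training. `ddp_model` and `local_batches` are
# --- assumed placeholders; the number of batches may legitimately differ across ranks.
def train_uneven(ddp_model, local_batches):
    with ddp_model.join(divide_by_initial_world_size=True):
        for inp in local_batches:
            loss = ddp_model(inp).sum()
            loss.backward()
            # An optimizer step would normally go here.
            ddp_model.zero_grad(set_to_none=True)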
+ net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + def _test_ddp_profiling(self, profiler_ctx): + batch = 3 + dim = 10 + num_iters = 6 + torch.cuda.set_device(self.rank) + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + profiler_ctx_copy = copy.deepcopy(profiler_ctx) + + with profiler_ctx as prof: + for i in range(num_iters): + loss = net(inp).sum() + loss.backward() + + all_reduce_event_name = f"{dist.get_backend()}:all_reduce" + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in events) + self.assertEqual(event_count, num_iters) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, all_reduce_event_name) + + broadcast_event_name = f"{dist.get_backend()}:broadcast" + broadcast_events = get_profiling_event(broadcast_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in broadcast_events) + # Broadcast is called during rebuild_buckets + self.assertGreaterEqual(event_count, 1) + for event in broadcast_events: + self.assertEqual(event.name, broadcast_event_name) + + # Run DDP with profiling for a few iterations, then enable profiling + # for a single pass, and ensure it is recorded. This tests that the + # thread local state is correctly updated. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + for i in range(3): + loss = net(inp).sum() + loss.backward() + # Now enable the profiler. + with profiler_ctx_copy as prof: + loss = net(inp).sum() + loss.backward() + + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + self.assertGreaterEqual(len(events), 1) + self.assertGreaterEqual(events[0].count, 1) + self.assertEqual(events[0].name, all_reduce_event_name) + for event in events: + self.assertTrue(event.is_async) + # Ensure searching unused parameters was profiled + events = get_profiling_event("search_unused_parameters", prof) + self.assertEqual(len(events), 1) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle("Currently failing in NVIDIA internal CI") + def test_ddp_profiling_autograd_profiler(self): + autograd_profiler_ctx = torch.autograd.profiler.profile() + return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_ddp_profiling_torch_profiler(self): + cpu_act = torch.profiler.ProfilerActivity.CPU + cuda_act = torch.profiler.ProfilerActivity.CUDA + torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act]) + self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_join_model_equivalence(self): + # Verifies equivalence with model training locally and with DDP under + # the join context manager. 
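# --- Illustrative aside (hedged sketch, not part of this test file): `_test_ddp_profiling`
# --- above injects a profiler context around a few DDP iterations; the equivalent with
# --- torch.profiler directly is sketched below. `ddp_model` and `inp` are assumed
# --- placeholders on the current CUDA device.
from torch.profiler import ProfilerActivity, profile


def profile_ddp_steps(ddp_model, inp, num_iters: int = 3):
    with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
        for _ in range(num_iters):
            loss = ddp_model(inp).sum()
            loss.backward()
            ddp_model.zero_grad(set_to_none=True)
    # Collective kernels (e.g. the backend's all_reduce) appear among the recorded events.
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))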
+ batch = 3 + dim = 10 + learning_rate = 0.03 + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + local_model = copy.deepcopy(model) + local_model = local_model.cuda(self.rank) + rank_to_iter_mapping = { + rank: 2 * (rank + 1) for rank in range(dist.get_world_size()) + } + # run local model + local_iters = sum(rank_to_iter_mapping.values()) + local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate) + for _ in range(local_iters): + local_optim.zero_grad() + out = local_model(inp) + loss = out.sum() + loss.backward() + local_optim.step() + + # run DDP model with join API + num_iters = rank_to_iter_mapping[self.rank] + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + ddp_optim = torch.optim.SGD( + model.parameters(), lr=learning_rate * dist.get_world_size() + ) + with net.join(): + for i in range(num_iters): + ddp_optim.zero_grad() + out = net(inp) + loss = out.sum() + loss.backward() + torch.cuda.synchronize(device=self.rank) + ddp_optim.step() + + # Validate model state dicts are equal + for (_, local_tensor), (_, dist_tensor) in zip( + local_model.state_dict().items(), net.module.state_dict().items() + ): + self.assertEqual(local_tensor, dist_tensor) + + def _run_uneven_inputs_test( + self, + test_case, + iteration_mapping, + find_unused_params, + ): + model = test_case.model + inp = test_case.inp + rank = self.rank + sync_interval = test_case.sync_interval + torch.cuda.set_device(rank) + # Ensure all outstanding GPU work is completed so this test runs independently. + dist.barrier() + # Bucket_cap_mb is intentionally low to test allreduce scheduling when + # there are many buckets. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), + device_ids=[rank], + bucket_cap_mb=1, + find_unused_parameters=find_unused_params, + ) + # Register hook if specified + if test_case.hook is not None: + net.register_comm_hook(test_case.state, test_case.hook) + print(f"registered hook {test_case.hook}") + + # Determine num iters for this rank via the passed in mapping. + num_iters = iteration_mapping[rank] + # If we throw when earliest rank terminates, we should ensure + # that we iterate for that minimum number of times. + num_iters_tensor = torch.tensor( + [num_iters], device=torch.cuda.current_device() + ) + dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN) + min_num_iters = num_iters_tensor.item() + total_iters = 0 + if test_case.throw_on_early_termination: + if min_num_iters == num_iters: + # Early termination rank(s) + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + else: + exception_ctx = nullcontext() + with exception_ctx: + with net.join( + throw_on_early_termination=test_case.throw_on_early_termination + ): + for i in range(num_iters): + # Use model.no_sync() to disable grad synchronization every + # sync_interval. + if i % sync_interval != 0: + context = net.no_sync() + else: + context = nullcontext() + with context: + if isinstance(inp, tuple): + loss = net(*inp).sum() + else: + loss = net(inp).sum() + loss.backward() + self._model_step(net) + # Ensure completion of GPU kernels (including allreduce). If the + # join API is not properly implemented, then this should hang + # since the allreduce will hang. 
+ torch.cuda.synchronize(device=rank) + total_iters += 1 + if test_case.throw_on_early_termination: + # Ensure we iterated min_num_iters times. + self.assertEqual(total_iters, min_num_iters) + else: + # Ensure we iterated at least min_num_iters times. + self.assertGreaterEqual(total_iters, min_num_iters) + + # Ensure completion of all GPU kernels. + torch.cuda.synchronize(device=rank) + # When throwing on early rank termination, we do not + # broadcast model state from an authoritative rank. All models + # should already be in sync. + if not test_case.throw_on_early_termination: + self.assertTrue(net._authoritative_rank) + # All ranks should have agreed on the same authoritative_rank! + final_rank_tensor = torch.tensor( + [net._authoritative_rank], device=self.rank + ) + tensor_list = [ + torch.zeros_like(final_rank_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, final_rank_tensor) + max_rank = dist.get_world_size() - 1 + self.assertSetEqual( + {max_rank}, {tensor.item() for tensor in tensor_list} + ) + # Ensure that all models are the same across ranks after all have joined. + self.validate_net_equivalence(net) + # Ensure that running with DDP uneven inputs was logged. + ddp_logging_data = net._get_ddp_logging_data() + self.assertTrue(ddp_logging_data.get("join_uneven_inputs")) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs_stop_iteration_sync_bn(self): + # Tests that uneven inputs join handler correctly throws StopIteration + # for models with SyncBN or general collective comm when + # throw_on_early_termination=True. + class ModelWithComm(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(2, 40, bias=False) + + def forward(self, x): + x = self.lin(x) + dist.all_reduce(x) + return x + + torch.cuda.set_device(self.rank) + model_bn = BN_NET + model_bn = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(model_bn) + ).cuda(self.rank) + comm_model = ModelWithComm().cuda(self.rank) + model_input = torch.randn(10, 2).cuda(torch.cuda.current_device()) + + for model in [model_bn, comm_model]: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + min_num_iters = 5 + if self.rank != 0: + # Early termination rank(s) + num_iters = min_num_iters + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + num_iters = min_num_iters * 2 + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + n = 0 + with exception_ctx: + with model.join(throw_on_early_termination=True): + for i in range(num_iters): + loss = model(model_input).sum() + loss.backward() + self._model_step(model) + n += 1 + + self.assertEqual(n, min_num_iters) + # Verify model equivalence + self.validate_net_equivalence(model) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs(self): + dim = 1000 + batch = 1 + # Create a variety of models to run uneven input tests on. 
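# For reference, a brief standalone sketch of the SyncBatchNorm conversion used
# by the test above (assumes an initialized process group and that `rank` is a
# local GPU): convert_sync_batchnorm swaps every BatchNorm*D layer for a
# SyncBatchNorm so normalization statistics are reduced across ranks.
def _sketch_syncbn_model(rank):
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(2, 4, 3), nn.BatchNorm2d(4), nn.ReLU())
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model).cuda(rank)
    return torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])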
+ large_model = nn.Sequential( + nn.Conv2d(1, 20, 5), + nn.ReLU(), + nn.Conv2d(20, 32, 5), + nn.ReLU(), + nn.Conv2d(32, 256, 5), + nn.ReLU(), + ) + small_model = nn.Linear(dim, dim, bias=False) + bn_net = BatchNormNet() + + class UnusedParamModule(nn.Module): + def __init__(self, unused_params_rank): + super().__init__() + self.t0 = Task() + self.t1 = Task() + self.unused_params_rank = unused_params_rank + + def task_parameters(self): + return (self.t0.p, self.t1.p) + + def forward(self, x, rank): + return ( + self.t1(self.t0(x)) + if rank != self.unused_params_rank + else self.t1(x) + ) + + unjoined_rank_with_unused_params_model = UnusedParamModule(1) + joined_rank_with_unused_params_model = UnusedParamModule(0) + + rank = self.rank + models_to_test = [ + # Network with batchnorm + DDPUnevenTestInput( + name="batch_norm_net", + model=bn_net, + inp=torch.ones(batch, 2, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="large_conv_model", + model=large_model, + inp=torch.ones(batch, batch, dim, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model", + model=small_model, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + # Unused parameter test where rank that does not join early has unused params + DDPUnevenTestInput( + name="unjoined_rank_with_unused_params_model", + model=unjoined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + # Unused parameter test where rank that does join early has unused params + DDPUnevenTestInput( + name="joined_rank_with_unused_params_model", + model=joined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + ] + + # Test models that have hook installed. + models_with_hook = [ + DDPUnevenTestInput( + name="small_model_allreduce_hook", + model=small_model, + hook=default.allreduce_hook, + state=None, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model_power_sgd_hook", + model=small_model, + hook=powerSGD.powerSGD_hook, + state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + # Config so that powerSGD runs immediately instead of + # allreduce. + start_powerSGD_iter=1, + warm_start=False, + use_error_feedback=False, + ), + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + ] + models_to_test.extend(models_with_hook) + + # Add resnet model if we have torchvision installed. + if HAS_TORCHVISION: + resnet_model = torchvision.models.resnet50() + models_to_test.append( + DDPUnevenTestInput( + name="resnet_model", + model=resnet_model, + inp=torch.ones(1, 3, 1000, 1000), + sync_interval=1, + ) + ) + + # Test with no_sync every 2, 3, 4, ... iterations. + models_with_sync = [] + for i, test_input in enumerate(models_to_test): + models_with_sync.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=i + 2, + ) + ) + + throw_on_early_term_tests = [] + for test_input in models_to_test: + throw_on_early_term_tests.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=test_input.sync_interval, + throw_on_early_termination=True, + ) + ) + + models_to_test.extend(models_with_sync) + models_to_test.extend(throw_on_early_term_tests) + + # 0 iteration tests for when one process does not train model at all, so + # we must shadow the broadcast calls made when rebuilding buckets. 
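# The hook-equipped inputs above exercise DDP communication hooks; a compact
# sketch of that registration outside the test harness, using the same
# ddp_comm_hooks modules this file imports as `default` and `powerSGD`
# (illustrative only; a DDP instance accepts at most one comm hook).
def _sketch_register_comm_hook(ddp_model):
    import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default_hooks
    import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD_hooks

    # Plain allreduce hook; state may be a process group or None (world group).
    ddp_model.register_comm_hook(None, default_hooks.allreduce_hook)
    # Alternatively, PowerSGD gradient compression could be registered instead:
    #   state = powerSGD_hooks.PowerSGDState(process_group=None,
    #                                        matrix_approximation_rank=1,
    #                                        start_powerSGD_iter=2)
    #   ddp_model.register_comm_hook(state, powerSGD_hooks.powerSGD_hook)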
+ baseline_num_iters = [0, 5] + iteration_offsets = [2, 3, 10] + num_uneven_ranks = [1] + if dist.get_world_size() > 2: + num_uneven_ranks.append(2) + iteration_mappings = [] + # Generate rank : num_iters mappings for various uneven input scenarios. + # This includes cases where rank 0 joins early and all other ranks join + # later, and scenarios where multiple ranks join early, but at different + # iterations, and later ranks join later. + for num_early_join_ranks in num_uneven_ranks: + for baseline_iter in baseline_num_iters: + for offset in iteration_offsets: + mapping = dict.fromkeys(range(0, num_early_join_ranks), baseline_iter) + # if num_early_join_ranks > 1, ranks > 0 that will join early + # iterate offset//2 more times than rank 0, to test nodes + # depleting inputs at different times. + if num_early_join_ranks > 1: + for rank in mapping.keys(): + if rank > 0: + mapping[rank] += offset // 2 + mapping.update( + dict.fromkeys(range(num_early_join_ranks, dist.get_world_size()), baseline_iter + offset) + ) + iteration_mappings.append(mapping) + + for (test_case, iteration_mapping) in itertools.product( + models_to_test, iteration_mappings + ): + if self.rank == 0: + print( + f"""Running test: {test_case.name} sync interval + {test_case.sync_interval} with iteration mapping + {iteration_mapping}""" + ) + self._run_uneven_inputs_test( + test_case, + iteration_mapping, + find_unused_params=("unused_params_model" in test_case.name), + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_join_disable(self): + # tests that if net.join() with enable=False is specified, DDP works as + # expected with even inputs. + torch.manual_seed(self.rank) + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) * self.rank + n_iters = 5 + world_size = dist.get_world_size() + with net.join(enable=False): + for _ in range(n_iters): + # Clear grads + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + out = net(inp) + loss = out.sum() + loss.backward() + # Validate gradients to ensure that we divide by the correct + # world_size when join mode is disabled. + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual(net.module.weight.grad.item(), expected_grad) + + join_config = net._join_config + self.assertFalse(join_config.enable) + self.validate_net_equivalence(net) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_exception(self): + # Tests that exceptions during training are correctly propagated by the + # context manager. 
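# A brief illustrative sketch (not part of the test) of the property checked
# below: an exception raised inside forward() propagates straight through the
# join() context manager rather than being swallowed. Assumes `ddp_model`
# wraps a module whose forward may raise ValueError.
def _sketch_join_propagates_exceptions(ddp_model, inp):
    try:
        with ddp_model.join():
            ddp_model(inp).sum().backward()
    except ValueError:
        # The original exception from forward() arrives unchanged.
        raise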
+ error_str = "Intentional error" + + class ExceptionModule(nn.Module): + def __init__(self): + super().__init__() + self.param = nn.Parameter(torch.ones(1, requires_grad=True)) + + def forward(self, _): + raise ValueError(error_str) + + exception_module = ExceptionModule() + net = torch.nn.parallel.DistributedDataParallel( + exception_module.cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) + with self.assertRaisesRegex(ValueError, error_str): + with net.join(): + out = net(inp) + loss = out.sum() + loss.backward() + + def _test_broadcast_object_list(self, group=None): + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + # Only set device for NCCL backend since it must use GPUs. + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + backend = os.environ["BACKEND"] + if backend == "nccl": + torch.cuda.set_device(next_rank) + + src_rank = 0 + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + if IS_FBCODE: + # Create Tensor with > 2^31 Bytes storage requirements + # Only on FBCODE as testing OOMs in OSS + gather_objects.append(Foo(torch.randn(3, 178956971))) + objects = ( + gather_objects + if self.rank == src_rank + else [None for _ in gather_objects] + ) + + # Single object test with device specified. Backend="gloo", device=cpu + if backend != "nccl": + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device("cpu") + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. Backend="gloo", device=current_device+1 + # The test is gated by the fact GPU count is the same as world size to avoid the case + # when backend is gloo but there is no multiple GPU devices. + if backend != "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. 
Backend="nccl", device=current_device+1 + if backend == "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test: backward compatibility with device unspecified + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list(single_obj_list, src=0, group=group) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Multiple input objects test + if self.rank != src_rank: + self.assertNotEqual(objects, gather_objects) + dist.broadcast_object_list(objects, src=0, group=group) + self.assertEqual(objects, gather_objects) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_broadcast_object_list(self): + return self._test_broadcast_object_list() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + def _test_broadcast_object_list_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_broadcast_object_list(subgroup) + + def _test_ddp_ignore_params_arg(self, static_graph=False): + class TestModel(nn.Module): + def __init__(self, rank): + self.rank = rank + super().__init__() + self.fc1 = nn.Linear(1, 1, bias=False) + # Proxy that will be materialized to another architecture later. + # (after wrapping model with DDP) + if self.rank == 0: + self.fc2 = nn.Linear(1, 10, bias=False) + else: + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + device_id = self.rank + # Ensure the test works for both find_unused_parameter and broadcast_buffer settings. + for (find_unused, broadcast_buffers) in itertools.product( + [False, True], [False, True] + ): + model = TestModel(self.rank).float().to(device_id) + # Note that the model can have different shape buffers if we pass + # them in to be ignored as well. + model.fc2.register_buffer( + "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank) + ) + proxy_params = list(model.fc2.parameters()) + proxy_buffers = list(model.fc2.buffers()) + model_fc2_name = next( + module_name + for module_name, module in model.named_modules() + if module is model.fc2 + ) + proxy_param_names = [ + f"{model_fc2_name}.{param_name}" + for param_name, _ in model.fc2.named_parameters() + ] + proxy_buffer_names = [ + f"{model_fc2_name}.{buf_name}" + for buf_name, _ in model.fc2.named_buffers() + ] + # Specify that we should ignore proxy_params since it will be + # materialized later. 
+ torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, proxy_param_names + proxy_buffer_names + ) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[device_id], + find_unused_parameters=find_unused, + broadcast_buffers=broadcast_buffers, + static_graph=static_graph, + ) + # Materialize new params. These are not registered in DDP and thus + # don't have autograd hooks installed on them. + ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id) + + # local model with the new materialized parameters. + local_model = copy.deepcopy(ddp.module).cuda(self.rank) + + inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1) + for i in range(6): + ddp(inp).sum().backward() + + local_model(inp).sum().backward() + # materialized param grad is not touched by DDP, so its grad should + # be the same as if running locally. + for materialized_param, local_param in zip( + ddp.module.fc2.parameters(), local_model.fc2.parameters() + ): + self.assertEqual(materialized_param.grad, local_param.grad) + + # fc1 parameter grad should still be different, due to allreduce. + for synced_param, local_param in zip( + ddp.module.fc1.parameters(), local_model.fc1.parameters() + ): + self.assertFalse(synced_param.grad == local_param.grad) + + # Proxy module grad should not be touched + for proxy_param in proxy_params: + self.assertTrue(proxy_param.grad is None) + + # Synchronize since we run multiple iterations of this test, to + # isolate failure hangs. + torch.cuda.synchronize(device=self.rank) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_ignore_params_arg(self): + self._test_ddp_ignore_params_arg(static_graph=False) + self._test_ddp_ignore_params_arg(static_graph=True) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_unused_params_rebuild_buckets_exception(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.net1(x) + + ddp = torch.nn.parallel.DistributedDataParallel( + ToyModel().cuda(self.rank), device_ids=[self.rank] + ) + for i in range(2): + inp = torch.rand(1, 10) + if i > 0: + # On 2nd iteration, this will fail during rebuild_buckets, + # but we should report an error regarding unused parameters + # since that is the underlying root cause. + try: + ddp(inp).sum().backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(ddp, msg) + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["net2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse( + True, "DDP unused parameters error not raised." 
+ ) + else: + ddp(inp).sum().backward() + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_shared_grad_acc_unused_params(self): + # When find_unused_parameters=True, ensure we mark unused parameters + # even if they share gradient accumulators. + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + # net1, bias, and net1.bias are all unused params. + self.net1 = nn.Linear(10, 5, bias=False) + self.bias = nn.Parameter(torch.zeros(5)) + # net1.bias and self.bias are names for the same underlying + # parameter, so they share the same grad acc. This caused + # the bug reported in https://github.com/pytorch/pytorch/issues/41324. + self.net1.bias = self.bias + self.net2 = nn.Linear(10, 5) + + def forward(self, x): + return self.net2(x).sum() + + torch.cuda.set_device(self.rank) + model = ToyModel().to(torch.cuda.current_device()) + for static in [True, False]: + ddp_model = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + find_unused_parameters=True, + static_graph=static, + ) + inp = torch.randn(20, 10, device=self.rank) + for i in range(6): + loss = ddp_model(inp) + # To test https://github.com/pytorch/pytorch/issues/61982 + loss /= 10 + loss.backward() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device(self): + m = nn.Linear(10, 10).to(self.rank) + expected_len = 2 + + class TensorWrapper: + __slots__ = ["t", "moved_to_gpu"] + + def __init__(self, t): + self.t = t + self.moved_to_gpu = False + + # Handlers for specific types of validation we want to do based on + # the input type. + + def tuple_and_list_validator(x): + self.assertTrue(len(x), expected_len) + self.assertEqual(1, len({t.device for t in x})) + self.assertEqual(x[0].device.index, self.rank) + return x[0] + x[1] + + def namedtuple_validator(x): + self.assertEqual(x._fields, EXPECTED_FIELDS) + self.assertEqual(x.a.device.index, x.b.device.index) + self.assertEqual(x.a.device.index, self.rank) + return x.a + x.b + + def custom_type_validator(x): + self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu")) + x.t = x.t.to(self.rank) + x.moved_to_gpu = True + return x.t + + def dict_validator(x): + self.assertTrue(EXPECTED_FIELDS[0] in x.keys()) + self.assertTrue(EXPECTED_FIELDS[1] in x.keys()) + self.assertEqual(1, len({t.device for t in x.values()})) + self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank) + return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]] + + validators = { + TensorWrapper: custom_type_validator, + tuple: tuple_and_list_validator, + list: tuple_and_list_validator, + TestNamedTupleInput_0: namedtuple_validator, + TestNamedTupleInput_1: namedtuple_validator, + dict: dict_validator, + } + + class ToyModel(torch.nn.Module): + def __init__(_self): # noqa: B902 + super().__init__() + _self.lin = nn.Linear(10, 10, bias=False) + + def forward(_self, x, expected_type): # noqa: B902 + # Similar to scatter, the recursive to in the single-device + # case does not move tensors if they are in a custom type. 
+ self.assertTrue(isinstance(x, expected_type)) + fwd_tensor = validators[expected_type](x) + return _self.lin(fwd_tensor) + + model = torch.nn.parallel.DistributedDataParallel( + ToyModel().to(self.rank), device_ids=[self.rank] + ) + + def train_iter(inp, input_type): + for _ in range(4): + out = model(inp, input_type) + out.sum().backward() + + # CPU tuple input, should be moved to the proper device before call + # to forward. + inp = tuple(torch.randn(10, 10) for _ in range(expected_len)) + train_iter(inp, tuple) + + # List CPU input, should be moved to proper device before call to + # forward. + inp = [torch.randn(10, 10) for _ in range(expected_len)] + train_iter(inp, list) + # Custom type containing tensor. The type is maintained, but the + # device is not propagated (which is what happens with scatter too) + inp = TensorWrapper(torch.randn(10, 10)) + train_iter(inp, TensorWrapper) + # NamedTuple input. The type should be maintained and tensor inputs + # should be moved to the correct device as in scatter. + batch = 5 + dim = 10 + a = torch.rand(batch, dim) + b = torch.rand(batch, dim) + + inp = TestNamedTupleInput_0(a, b) + train_iter(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + train_iter(inp, type(inp)) + + # dictionary input. + inp = { + EXPECTED_FIELDS[0]: a, + EXPECTED_FIELDS[1]: b, + } + train_iter(inp, type(inp)) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_namedtuple(self): + batch = 5 + dim = 10 + + a = torch.rand(batch, dim, device=self.rank) + b = torch.rand(batch, dim, device=self.rank) + + class NamedTupleModule(torch.nn.Module): + def __init__(_self): # noqa: B902 + super().__init__() + _self.lin = nn.Linear(10, 1) + + def forward(_self, input, expected_type): # noqa: B902 + # Without NamedTuple support, this would be of type tuple. + self.assertTrue( + isinstance(input, expected_type), + f"Expected type {expected_type} but got {type(input)}", + ) + self.assertEqual(input._fields, EXPECTED_FIELDS) + self.assertEqual(a, input.a) + self.assertEqual(b, input.b) + return _self.lin(torch.mul(input.a, input.b)) + + model = torch.nn.parallel.DistributedDataParallel( + NamedTupleModule().cuda(self.rank), device_ids=[self.rank] + ) + inp = TestNamedTupleInput_0(a, b) + # The following would fail if DDP does not propagate NamedTuples correctly. + model(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + model(inp, type(inp)) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_same_across_ranks(self): + # Control flow that is the same across ranks. + batch = 20 + dim = 10 + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used. 
+ local_used_map = model.reducer._get_local_used_map() + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, world_size], device=self.rank, dtype=torch.int32 + ) + + # Validate parameter usage. + variable_usage_tensor = local_used_map + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + # 2nd linear layer is unused + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_invalid_static_graph(self): + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + static_graph=True, + ) + random_input = torch.randn(20, 10, device=self.rank) + ones_input = torch.ones(20, 10, device=self.rank) + # unused parameter in the first iteration got used + # in second iteration. + expected_err = "Your training graph has changed in this iteration" + with self.assertRaisesRegex(RuntimeError, expected_err): + for i in range(2): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, expected_err) + + # used parameter in the first iteration got unused + # in second iteration. + with self.assertRaisesRegex( + RuntimeError, + "Expected to have finished reduction in the prior iteration " + "before starting a new one. This error indicates that your " + "training graph has changed in this iteration, " + "e.g., one parameter is used in first iteration, " + "but then got unused in the second iteration. 
" + "this is not compatible with static_graph set to True.\n" + "Parameter indices which did not receive grad for", + ): + for i in range(2): + if i % 2 != 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, "Expected to have finished reduction") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_different_across_ranks(self): + # Control flow that is different across ranks. + batch = 20 + dim = 10 + + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + # Control-flow that is rank and input dependent for the + # model. + use_second_layer = ( + torch.equal(x, torch.ones(batch, dim, device=x.device)) + and self.rank == 1 + ) + + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used only on rank 1. + local_used_map = model.reducer._get_local_used_map() + + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, 1], device=self.rank, dtype=torch.int32 + ) + + variable_usage_tensor = local_used_map + # Validate parameter usage. On odd iterations, 2nd param is only + # used on rank 1. + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. 
+ if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available({"gloo"}) + def test_scatter_object_list(self): + src_rank = 0 + scatter_list = ( + COLLECTIVES_OBJECT_TEST_LIST + if self.rank == src_rank + else [None for _ in COLLECTIVES_OBJECT_TEST_LIST] + ) + world_size = dist.get_world_size() + scatter_list = scatter_list[:world_size] + i = 0 + while len(scatter_list) < world_size: + scatter_list.append(scatter_list[i]) + i += 1 + + output_obj_list = [None] + dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank) + self.assertEqual( + output_obj_list[0], + COLLECTIVES_OBJECT_TEST_LIST[ + self.rank % len(COLLECTIVES_OBJECT_TEST_LIST) + ], + ) + # Ensure errors are raised upon incorrect arguments. + with self.assertRaisesRegex( + ValueError, + "Expected argument scatter_object_output_list to be a list of size at least 1.", + ): + dist.scatter_object_list([], scatter_list, src=src_rank) + + def _generate_sparse_tensors_for_bucket_assignment_test(self): + tensors = [ + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + ] + + tensors_sparse = [t.to_sparse() for t in tensors] + return tensors_sparse + + def _test_compute_bucket_assignment_by_size(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + + # Create a valid model. The constructor initializes the logger that we use later. + # We never actually use the rest of the model - we only need its logger. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # if we don't pass a logger then we can only check that an exception was thrown. + expected_err = "No support for sparse tensors." + with self.assertRaisesRegex(RuntimeError, expected_err): + tensors_sparse = ( + self._generate_sparse_tensors_for_bucket_assignment_test() + ) + if use_logger: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400], logger=net.logger + ) + else: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400] + ) + if use_logger: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. 
+ dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=False) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=True) + + def _determine_expected_error_verify_model_across_rank( + self, group_to_use, diff_num_params=False + ): + # When running with NCCL backend, we don't expect an error on rank 0, + # rather, it will be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING. When + # running with Gloo or with debug mode wrapper, we expect the error + # to be caught inline. + # All ranks report same error when there is a # of parameter + # mismatch since we use allgather in the impl. + if diff_num_params: + expected_err = "DDP expects same model across all ranks" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + is_detail_dbg_mode = dist.get_debug_level() == dist.DebugLevel.DETAIL + if self.rank == 0: + if ( + dist.get_backend(group_to_use) == dist.Backend.NCCL + and not is_detail_dbg_mode + ): + expected_err = "caught collective operation timeout" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + else: + expected_err = None + ctx = self.assertRaises(RuntimeError) + else: + expected_err = "appears not to match" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + def _test_verify_model_across_rank(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + + # Create a valid model. The constructor initializes the logger that we use later. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # Modify the model so that the number of parameters are different for each rank. + # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes, + # so we can check if the correct error is thrown and is logged. + # We can't do this in the constructor above otherwise the logger will + # not be properly initialized. + net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1) + + # if we pass a logger we can verify that it was logged + with ctx: + if use_logger: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()), net.logger + ) + else: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()) + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(group_to_use) + + # We don't check when self.rank != 0 because the logger doesn't log + # the error "Caught collective operation" as that is not thrown in the reducer. 
+ if use_logger and self.rank != 0: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_with_logger(self): + self._test_verify_model_across_rank(use_logger=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_without_logger(self): + self._test_verify_model_across_rank(use_logger=False) + + def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo): + with ctx: + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), device_ids=[self.rank], process_group=ddp_group + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(ddp_group) + + # can't use verify_ddp_error_logged here because net was never properly constructed + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_shape_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + # Creates network with different sized embedding table on different + # ranks. This should throw an error during DDP init. + net = EmbeddingNetDifferentParams(self.rank) + self._run_test_ddp_model_with_diff_params( + ctx, net, group_to_use, group_gloo + ) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_num_params_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use, diff_num_params=True + ) + + # Creates network with diff # of param across ranks, reducer should + # recognize this and throw appropriate error. 
+ net = EmbeddingNetDifferentParams( + self.rank, diff_num_params=(self.rank == 1) + ) + + self._run_test_ddp_model_with_diff_params( + ctx, + net, + group_to_use, + group_gloo, + ) + + def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view): + model = module_cls() + local_net = copy.deepcopy(model) + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + + # Tests that certain parameters not getting gradient since the + # output is unused in loss computation is supported. Specifically, + # checks that the grads remain unchanged and are the same as local + # training. + inp = torch.randn(10, 10) + + # Ensure that if a param is not used in loss computation, its + # gradient is untouched, i.e. if it is None before it is None after, + # not zero. + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + + loss_dist = b_dist.sum() + loss_dist.backward() + + # Ensure that gradient corresponding to parameter "a" was not + # touched, i.e. it is None and matches the local grad. + if module_cls == DictOutputModule: + self.assertTrue(net.module.module.a.weight.grad is None) + self.assertEqual( + net.module.module.a.weight.grad, local_net.module.a.weight.grad + ) + else: + self.assertTrue(net.module.a.weight.grad is None) + self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad) + + saved_a_local_grad = None + saved_a_dist_grad = None + net.zero_grad() + local_net.zero_grad() + for i in range(6): + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + if i < 2: + # Use both params in loss computation. Later, "a" will go + # unused and we check to ensure DDP supports this and + # gradients remain the same as local training. + t = a @ b + t_dist = a_dist @ b_dist + loss = t.sum() + loss_dist = t_dist.sum() + else: + # Model output "a" unused in loss. + loss = b.sum() + loss_dist = b_dist.sum() + loss.backward() + loss_dist.backward() + if i == 1: + # Save grads to compare with them in next iterations. 
+ if module_cls == DictOutputModule: + saved_a_local_grad = local_net.module.a.weight.grad + saved_a_dist_grad = net.module.module.a.weight.grad + else: + saved_a_local_grad = local_net.a.weight.grad + saved_a_dist_grad = net.module.a.weight.grad + self.assertEqual(saved_a_local_grad, saved_a_dist_grad) + elif i >= 2: + # parameter "a" of both models should be the same and not change + if module_cls == DictOutputModule: + self.assertEqual( + net.module.module.a.weight.grad, saved_a_dist_grad + ) + self.assertEqual( + local_net.module.a.weight.grad, saved_a_local_grad + ) + else: + self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad) + self.assertEqual(local_net.a.weight.grad, saved_a_local_grad) + + # Verify grads are the same + for (local_param, dist_param) in zip( + local_net.parameters(), net.parameters() + ): + local_grad = local_param.grad + dist_grad = dist_param.grad + self.assertEqual(local_grad, dist_grad) + + dist.barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_tuple_module(self): + module_cls = UnusedParamTwoLinLayerNet + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_dict_module(self): + module_cls = DictOutputModule + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_undefined_grad_parity_unused_parameters(self): + # TODO: enable this for general training use cases: + # https://github.com/pytorch/pytorch/issues/58511. + x = torch.ones(1, 2).to(self.rank) + net = Net().to(self.rank) + local_net = copy.deepcopy(net) + net = torch.nn.parallel.DistributedDataParallel( + net, + device_ids=[self.rank], + find_unused_parameters=True, + ) + out = net(x).sum() + local_out = local_net(x).sum() + # Simulates undefined gradients. 
+ torch._C._functions.UndefinedGrad()(out).backward() + torch._C._functions.UndefinedGrad()(local_out).backward() + for (dist_param_name, dist_param), (local_param_name, local_param) in zip( + net.named_parameters(), local_net.named_parameters() + ): + dist_grad = dist_param.grad + local_grad = local_param.grad + self.assertEqual( + dist_grad, + local_grad, + f"""DDP param {dist_param_name} with grad {dist_grad} + does not match local param {local_param_name} with grad + {local_grad}""", + ) + + def _test_different_graph_across_ranks( + self, find_unused_parameters=False, static_graph=False + ): + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + if self.rank == 0: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + torch.manual_seed(31415) + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = ToyModel(self.rank).cuda(self.rank) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused_parameters, + gradient_as_bucket_view=True, + static_graph=static_graph, + ) + random_input = torch.randn(20, 10, device=self.rank) + for i in range(10): + out = ddp_model(random_input) + loss = out.sum() + loss.backward() + return ddp_model + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_different_graph_across_ranks(self): + base_model = self._test_different_graph_across_ranks( + find_unused_parameters=True + ) + self.assertFalse( + base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + static_model = self._test_different_graph_across_ranks(static_graph=True) + self.assertTrue( + static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + for i, j in zip(base_model.parameters(), static_model.parameters()): + self.assertEqual(i, j) + + @require_backend_is_available({"gloo"}) + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_gloo(self): + tensors = [torch.ones(10) * self.rank] + # Kick off some allreduce work on all ranks + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + # Run monitored barrier and ensure it passes + timeout = timedelta(seconds=2) + dist.monitored_barrier(timeout=timeout) + # Check monitored_barrier success with wait_all_ranks=True + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + # All ranks besides 1 call into barrier, rank 0 should report failure + # while others report gloo error. + failed_rank = 1 + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank != failed_rank: + # Other ranks should not pass barrier since rank 0 failed. + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + # We need a barrier since otherwise failed_rank exits too early + # and cause a timeout. 
+ self._barrier(timeout=30) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_subgroup(self): + # Tests that monitored_barrier works as expected on non-default + # process groups. + failed_rank = 1 + timeout = 0.1 + subgroup = dist.new_group(ranks=[0, 1]) + + if self.rank == failed_rank: + return + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(subgroup, timeout) + else: + # Other ranks call into monitored_barrier, but this should be a + # noop because they are not part of the subgroup. Verify that + # there are no errors here. + dist.monitored_barrier(subgroup, timeout) + + def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks): + # tests expected behavior when nonzero rank hangs. + nccl_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + # provide sufficient timeout so communicators + # can be initialized in ctor. + timeout=timedelta(seconds=15), + backend=dist.Backend.NCCL, + ) + gloo_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + backend=dist.Backend.GLOO, + ) + tensors = [torch.ones(10, device=self.rank) * self.rank] + # Let all ranks call allreduce first to set up communicators etc. + # Directly simulating error here will run into store issue described + # in https://github.com/pytorch/pytorch/issues/54524. + nccl_pg.allreduce(tensors).wait(timedelta(seconds=5)) + # All ranks besides 0 call into allreduce. This is to simulate a + # desync across the world, where some ranks call into + # monitored_barrier() and others are stuck in collective comm. In + # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this + # test to ensure it exits cleanly. + if self.rank != 0: + # Can get different errors here depending on whether gloo-based + # wrapper PG is enabled or not, since with wrapper pg, it will + # fail in a collective synchronization check and not actually + # call into the nccl pg. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + err_regex = "Timed out waiting" + else: + err_regex = "caught collective operation timeout" + with self.assertRaisesRegex(RuntimeError, err_regex): + nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1)) + else: + # Rank 0 should report first (in order) timed out rank or all ranks + # depending on wait_all_ranks flag passed into monitored_barrier. + if wait_all_ranks: + rank_str = ", ".join( + [str(i) for i in range(1, int(self.world_size))] + ) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + else: + expected_first_fail_rank = 1 + err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier" + monitored_barrier_timeout_seconds = timedelta(seconds=0.1) + with self.assertRaisesRegex(RuntimeError, err_regex): + gloo_pg.monitored_barrier( + monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks + ) + + self._barrier(timeout=30) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang(self): + # tests expected behavior when nonzero rank hangs and we want to + # report first timed out rank. 
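# A minimal sketch (outside the test harness) of the monitored_barrier API the
# tests in this block exercise; it is only supported by gloo-based process
# groups, and rank 0 is the rank that collects responses and reports ranks
# that failed to reach the barrier. Assumes an initialized gloo process group.
def _sketch_monitored_barrier():
    from datetime import timedelta

    import torch.distributed as dist

    try:
        # With wait_all_ranks=True, rank 0 reports every unresponsive rank
        # instead of only the first one that timed out.
        dist.monitored_barrier(timeout=timedelta(seconds=10), wait_all_ranks=True)
    except RuntimeError:
        # e.g. "Ranks 1, 2 failed to pass monitoredBarrier" on rank 0.
        raise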
+ self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang_wait_all_ranks(self): + # tests expected behavior when nonzero rank hangs and we want to + # report all timed out ranks. + self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_rank_0_timeout(self): + # tests error when rank 0 exhausts its given timeout. + process_group = dist.new_group(ranks=list(range(int(self.world_size)))) + timeout = timedelta(seconds=0) + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier" + ): + process_group.monitored_barrier(timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_failure_order(self): + # Ensure that the first (in sorted order) rank is reported when + # multiple ranks fail to pass the monitored_barrier. + # TODO(#54879): Provide ability to wait and report all failed ranks + expected_first_failed_rank = 2 + timeout = timedelta(seconds=2) + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {expected_first_failed_rank}" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank == 1: + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + def test_monitored_barrier_wait_all_ranks(self): + # Tests simple case where > 1 rank does not call into monitored + # barrier and verifies all ranks are reported by rank 0. + if self.rank == 0: + timeout = timedelta(seconds=0.1) + rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))]) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping(self): + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "a.weight", 1: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test when DDP is used with ignored parameters. 
+ model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test errors are raised when DDP and module parameters mismatch. + # This generally indicates a bug with DDP and is not expected to + # happen in user applications. + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + net_params, _ = net._build_params_for_reducer() + if self.rank == 0: + print(type(net_params[0])) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + with self.assertRaisesRegex(ValueError, "Expected param to name mapping"): + net._build_debug_param_to_name_mapping(net_params) + + net_params = net_params[:-3] + with self.assertRaisesRegex(ValueError, "Param with name"): + net._build_debug_param_to_name_mapping(net_params) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping_requires_grad(self): + class Net(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10) + # Is not tracked by DDP and should not show up in param to + # name mapping. + self.lin.bias.requires_grad_(False) + + def forward(self, x): + return self.lin(x) + + model = Net() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + expected_mapping = { + 0: "lin.weight", + } + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertEqual(param_to_name_mapping, expected_mapping) + + def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse): + debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF + + class SubModule(nn.Module): + def __init__(self): + super().__init__() + self.embedding_net = EmbeddingNetDifferentParams(0) + self.lin = TwoLinLayerNet() + self.bn = BatchNormNet() + self.lin_layer = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.bn(x) + x = self.lin_layer(x) + x = self.lin.a(x) # self.lin.b param unused + # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and + # self.embedding_net.lin unused. 
+ return x + + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.sub_module = SubModule() + + def forward(self, x): + return self.sub_module(x) + + model = MyModel() + sparse_embedding_fqns = [] + if ignore_sparse: + for module_name, module in model.named_modules(): + if module == model.sub_module.embedding_net.embedding: + for parameter_name, param in module.named_parameters( + recurse=False + ): + fqn = f"{module_name}.{parameter_name}" + sparse_embedding_fqns.append(fqn) + + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, sparse_embedding_fqns + ) + unused_modules = [ + model.sub_module.embedding_net.lin, + model.sub_module.lin.b, + ] + else: + unused_modules = list(model.sub_module.embedding_net.modules()) + [ + model.sub_module.lin.b, + ] + + expected_unused_param_fqns = [] + used_param_fqns = [] # Validate that these don't mistakenly show up. + fqn_to_param_index = {} + index = 0 + for module_name, module in model.named_modules(): + for parameter_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{parameter_name}" + fqn_to_param_index[fqn] = index + if fqn not in sparse_embedding_fqns: + index += 1 + if module in unused_modules: + expected_unused_param_fqns.append(fqn) + else: + if ( + not ignore_sparse + or module != model.sub_module.embedding_net.embedding + ): + used_param_fqns.append(fqn) + + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + batch, dim = 10, 2 + inp = torch.ones(batch, dim) + for i in range(2): + if i == 0: + out = net(inp) + loss = out.sum() + loss.backward() + else: + try: + out = net(inp) + loss = out.sum() + loss.backward() + except RuntimeError as e: + e = str(e) + + unused_param_substr = e[e.find("did not receive grad") :] + # Validate that each unused param fully qualified name + # shows up in error logs. We do this instead of + # constructing a joined string since order of parameters + # can be different in Reducer. In addition, validate + # param indices show up as well. + for unused_param_fqn in expected_unused_param_fqns: + self.assertTrue( + unused_param_fqn in unused_param_substr + or debug_mode_off + ) + self.assertTrue( + str(fqn_to_param_index[unused_param_fqn]) + in unused_param_substr, + f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}", + ) + + # Validate that used param fqns don't show up in error + # logs. + for used_param_fqn in used_param_fqns: + self.assertFalse(used_param_fqn in unused_param_substr) + # Validate that ignored param fqns don't show up as unused + # (since DDP does not track them) + for sparse_param_fqn in sparse_embedding_fqns: + self.assertFalse(sparse_param_fqn in unused_param_substr) + else: + self.assertTrue(False, "Expected error was not raised!") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_error(self): + self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_err_ignore_params(self): + # Tests unused parameter reporting when DDP is configured to ignore + # certain parameters. 
+ self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_ddp_inference(self): + # tests that DDP module can be run on a single node with no_grad + # or eval setting and there is no hang. + rank = self.rank + torch.cuda.set_device(rank) + model = Net().cuda() + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + syncbn_model = nn.SyncBatchNorm( + 2, momentum=0.99, track_running_stats=False + ).cuda() + local_syncbn_model = copy.deepcopy(syncbn_model) + syncbn_model = torch.nn.parallel.DistributedDataParallel( + syncbn_model, device_ids=[rank] + ) + inp = torch.randn(10, 2, device=rank) + inp_syncbn = torch.randn(10, 2, 4, 4, device=rank) + tests = [ + (model, local_model, inp), + (syncbn_model, local_syncbn_model, inp_syncbn), + ] + for test in tests: + test_model, test_local_model, test_inp = test + if self.rank == 0: + test_model.eval() + test_local_model.eval() + for _ in range(6): + self.assertEqual( + test_model(test_inp), test_local_model(test_inp) + ) + + # Barrier since only rank 0 runs inference. Test should be + # much faster than 30s, but this is to avoid flakiness. + self._barrier(timeout=30) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_ddp_sync_bn_training_vs_eval(self): + rank = self.rank + torch.cuda.set_device(rank) + # Need to set track_running_stats=False, when track_running_stats=True, + # bn_training is False and sync could not occur in eval model. + model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda( + rank + ) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank]) + # Test sync occurs in training mode. + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model(inp) + loss = out.sum() + loss.backward() + + # SyncBN allgathers stats across all ranks, so verify call to + # all_gather in profiler. + if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertNotEqual([], all_gather_calls) + + # Only do inference on one rank. If SyncBN did collective stats sync, + # this would hang/error. + model_inference = model.module + if self.rank == 0: + model_inference.eval() + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model_inference(inp) + loss = out.sum() + loss.backward() + + # Ensure sync does not occur in eval() mode. 
+ if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertEqual([], all_gather_calls) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_python_error_logged(self): + # Most python exceptions in DDP are raised during init before + # reducer is constructed, so we don't have a logger in those cases. + # However, the below is one example where a python error is thrown + # after reducer is constructed. + model = TwoLinLayerNet().cuda(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + expected_err = "must be callable" + with self.assertRaisesRegex(TypeError, expected_err): + model.register_comm_hook({}, {}) + + verify_ddp_error_logged(model, expected_err) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_static_graph_nested_types(self): + # Tests for static graph training when outputs are not just tensors + # but can be (nested) tuple, list, dict, etc. + rank = self.rank + torch.cuda.set_device(rank) + + class NestedOutputModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(100, 1, bias=False) + + def forward(self, inp, output_type): + if output_type == "tuple": + return ( + self.lin(inp), + ( + self.lin(inp), + self.lin(inp), + ), + ) + elif output_type == "list": + return [ + self.lin(inp), + [ + self.lin(inp), + self.lin(inp), + ], + ] + elif output_type == "dict": + return { + "a": self.lin(inp), + "b": { + "c": self.lin(inp), + }, + } + + def get_loss(model_output): + loss = 0.0 + if isinstance(model_output, torch.Tensor): + return model_output.sum() + elif isinstance(model_output, dict): + for value in model_output.values(): + loss += get_loss(value) + elif isinstance(model_output, (tuple, list)): + for x in model_output: + loss += get_loss(x) + else: + raise ValueError(f"Unknown model output type {type(model_output)}") + return loss + + model = NestedOutputModule().cuda(rank) + model_static_graph = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + model_static_graph = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + static_graph=True, + ) + inp = torch.randn(10, 100) + type_mapping = { + "list": list, + "tuple": tuple, + "dict": dict, + } + for output_type in type_mapping.keys(): + for i in range(6): + out = model(inp, output_type=output_type) + loss = get_loss(out) + loss.backward() + self._model_step(model) + out_static = model_static_graph(inp, output_type=output_type) + self.assertTrue(isinstance(out_static, type_mapping[output_type])) + loss_static = get_loss(out_static) + loss_static.backward() + self._model_step(model_static_graph) + for (p, p_static) in zip( + model.parameters(), model_static_graph.parameters() + ): + self.assertEqual(p, p_static) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_returns_tensor_with_no_grad(self): + # Tests case where module returns tensor that does not require grad. 
+ torch.cuda.set_device(self.rank) + + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc2(F.relu(self.fc1(x))) + y = x.clone() + x = x.detach() + assert not x.requires_grad + return (x, y) + + model = MyModel().to(self.rank) + inp = torch.randn(1, 10, device=self.rank) + for (find_unused, static_graph) in itertools.product( + [True, False], [True, False] + ): + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + for i in range(6): + out = ddp(inp) + self.assertFalse(out[0].requires_grad) + o = (out[0] + out[1]).sum() + o.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_detect_ddp_is_actually_static(self): + class ToyModel(nn.Module): + def __init__(self): + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10) + + def forward(self, x, find_unused, dynamic): + if find_unused: + if dynamic: + return self.net2(self.net1(x)) + else: + return self.net2(x) + else: + return self.net2(self.net1(x)) + + # Set of unused parameters don't change across iterations + torch.cuda.set_device(self.rank) + model = ToyModel().cuda() + for find_unused in [True, False]: + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused, + ) + inp = torch.randn(1, 10, device="cuda") + for _ in range(6): + out = ddp(inp, find_unused=find_unused, dynamic=False) + loss = out.sum() + loss.backward() + self.assertTrue(ddp.reducer._ddp_graph_static()) + + # Set of unused parameters dynamically change + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + inp = torch.randn(1, 10, device="cuda") + for i in range(6): + out = ddp(inp, find_unused=True, dynamic=i % 2 == 0) + loss = out.sum() + loss.backward() + self.assertFalse(ddp.reducer._ddp_graph_static()) + + def _test_ddp_new_tensor_in_fwd(self, static_graph): + # Test from https://github.com/pytorch/pytorch/issues/60733 + class MyModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + self.device = self.fc1.weight.device + + def __init_opt(self): + opt = torch.randn(1, 10, device=self.device) + return opt + + def forward(self, x, opt_1, opt_2, opt_nested): + x = F.relu(self.fc1(x)) + x = self.fc2(x) + if opt_1 is None: + opt_1 = self.__init_opt() + if opt_2 is None: + opt_2 = self.__init_opt() + if opt_nested is None or not torch.is_tensor(opt_nested): + opt_nested = self.__init_opt() + # Test multiple tensors as well as newly created tensors + # within a struct. 
+ return x, opt_1, opt_2, {"tensor": opt_nested} + + model = MyModel().to(self.rank) + for find_unused in [True, False]: + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + broadcast_buffers=False, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + + opt = [None for _ in range(3)] + for i in range(2): + ddp.zero_grad() + x = torch.randn(1, 10, device=self.rank) + out, opt[0], opt[1], opt[2] = ddp( + x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2] + ) + for i in range(len(opt)): + if torch.is_tensor(opt[i]): + self.assertEqual(opt[i].grad_fn, None) + else: + self.assertEqual(opt[i]["tensor"].grad_fn, None) + out.mean().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd_static_graph(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=True) + + def _test_ddp_buffer_hook_allreduce(self, return_futures): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + buffers = [buffer for (_, buffer) in named_buffers.items()] + futs = [ + dist.all_reduce( + buffer, group=ddp.process_group, async_op=True + ).get_future() + for buffer in buffers + ] + if return_futures: + return futs + else: + torch.futures.collect_all(futs).wait() + + hook_pre_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD + ) + hook_post_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD + ) + for hook_run_location in [ + hook_pre_fwd, + hook_post_fwd, + ]: + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook( + model_ddp, buffer_comm_hook, hook_run_location + ) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + broadcast_buffers=False, + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + # Since buffer reduction is done pre-forward, simulate it for + # no hook case here. + # Simulate allreduce appropriately depending on hook location. + if hook_run_location == hook_pre_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + + loss_no_hook = model_ddp_no_hook(inp).sum() + if hook_run_location == hook_post_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + torch.cuda.synchronize() + + # if return_futures, they are only awaited on by DDP + # at the end of the backwards pass for maximum overlap. + if not return_futures: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + # Note that when custom hooks return futures, this + # comparison is not expected to work when hook run location + # is pre-forward pass. 
This is because the hook does async + # communication and forward pass modifies the buffer without + # appropriate synchronization. Therefore, if returning + # futures from custom buffer hooks, it is advised to set + # hook run location to post forward. + if return_futures and hook_run_location == hook_post_fwd: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce_return_future(self): + self._test_ddp_buffer_hook_allreduce(return_futures=True) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce(self): + self._test_ddp_buffer_hook_allreduce(return_futures=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer_via_hook(self): + # test that _distributed_broadcast_coalesced via registered hook is + # equivalent to DDP's default broadcast coalesced. + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + # named_buffers is a Dict[str, Tensor] representing a mapping + # from buffer name to buffer. + buffers = [buffer for (_, buffer) in named_buffers.items()] + ddp._default_broadcast_coalesced(buffers) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + loss_no_hook = model_ddp_no_hook(inp).sum() + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_remove_autograd_hooks(self): + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError() + + class MyModel(nn.Module): + def __init__(self, device): + super().__init__() + self.error = True + self.fc1 = nn.Linear(10, 10).cuda(device) + + def forward(self, inp): + if self.error: + return self.fc1(SimulateError.apply(inp)) + else: + return self.fc1(inp) + + + # Run with error to trigger backward pass that marks fc1 as being marked + # ready. If we don't remove autograd hooks before running below it would + # fail on the old autograd hook. + model = MyModel(self.rank) + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + model_ddp1 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + + with self.assertRaises(RuntimeError): + model_ddp1(input).sum().backward() + + # Remove autograd hooks on old instance. + model_ddp1._remove_autograd_hooks() + + # Try another DDP instance without error now. 
+ model.error = False + model_ddp2 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp2(input).sum().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @unittest.skip("Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751") + def test_ddp_has_finalized(self): + + @dataclass + class MyClass: + obj: torch.Tensor + + class MyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.rank = rank + self.fc1 = nn.Linear(1024, 1024).cuda(rank) + self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank) + + def forward(self, inp): + if self.rank == 0: + return self.fc1(inp), MyClass(self.fc2(inp)) + else: + return self.fc1(inp), self.fc2(inp) + + model = MyModel(self.rank) + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=(1024 * 4 / 1024 / 1024), # One bucket per parameter. + ) + + if self.rank == 0: + out1, _ = ddp(input) + out1.sum().backward() + else: + out1, out2 = ddp(input) + (out1.sum() + out2.sum()).backward() + + if self.rank == 0: + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp._check_reducer_finalized() + + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp(input) + else: + ddp._check_reducer_finalized() + ddp(input) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "TORCH_NCCL_USE_COMM_NONBLOCKING only applies to NCCL" + ) + def test_nccl_init_abort(self): + """ + Tests that we can abort a NCCL communicator during initialization and + recover appropriately. + """ + # Reinitialize global process group with TORCH_NCCL_USE_COMM_NONBLOCKING=1 + os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1" + dist.destroy_process_group() + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + + # Abort pg in background thread. + running = True + + def abort(device): + pg = _get_default_group() + while running: + pg._get_backend(torch.device(device))._shutdown() + time.sleep(1) + + if self.rank != 1: + import threading + t = threading.Thread(target=abort, args=(self.rank,)) + t.start() + with self.assertRaises(RuntimeError): + # First collective triggers initialization via ncclCommInitRank. + torch.distributed.barrier() + running = False + t.join() + + def _run_ddp_update_process_group(self, new_pg): + def get_num_torch_recompiles(): + guard_failures = torch._dynamo.utils.guard_failures + num_recompiles = [len(guard_failures[code]) for code in guard_failures] + return 0 if len(num_recompiles) == 0 else max(num_recompiles) + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError() + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + # 4MB for multiple buckets. 
+ self.fc1 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc2 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc3 = torch.nn.Linear(1024, 1024).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc3(self.fc2(self.fc1(inp))) + + + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + model = torch.compile(ddp) + + def run_iteration(): + # Run regular iteration. + out = model(input, error=False) + out.sum().backward() + torch.cuda.synchronize() + + # Run with error. + with self.assertRaises(RuntimeError): + out = model(input, error=True) + out.sum().backward() + torch.cuda.synchronize() + + run_iteration() + assert 0 == get_num_torch_recompiles() + + if new_pg: + # Now reduce world_size and run iteration. + group_size_2 = dist.new_group(ranks=[0, 1]) + ddp._update_process_group(group_size_2) + if self.rank in [0, 1]: + run_iteration() + + # Increase the world size and run iteration. + group_size_3 = dist.new_group(ranks=[1, 2, 3]) + ddp._update_process_group(group_size_3) + if self.rank in [1, 2, 3]: + run_iteration() + + # Back to default size. + ddp._update_process_group(_get_default_group()) + run_iteration() + else: + # Create default pg of smaller size. + dist.destroy_process_group() + + if self.rank in [1, 2, 3]: + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=3, + rank=self.rank - 1, + timeout=timedelta(seconds=default_pg_timeout), + ) + ddp._update_process_group(_get_default_group()) + run_iteration() + dist.destroy_process_group() + + # Need a barrier here to ensure ranks 1, 2 and 3 are done. + self._barrier(wait_for=4) + + # Need to init pg again for "_barrier" to succeed. + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=4, + rank=self.rank, + timeout=timedelta(seconds=default_pg_timeout), + ) + + # Validate no more recompiles. 
+ assert 0 == get_num_torch_recompiles() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_new_group(self): + self._run_ddp_update_process_group(new_pg=True) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_default_group(self): + self._run_ddp_update_process_group(new_pg=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer(self): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + class NetWithBuffers(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + return self.b(self.a(x)) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + if rank == 0: + model_ddp.module.buffer = model_ddp.module.buffer + 1 + loss = model_ddp(inp).sum() + loss.backward() + # Ensure all buffers are synchronized. + bufs = [ + torch.empty_like(model_ddp.module.buffer) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(bufs, model_ddp.module.buffer) + rank_0_buf = bufs[0] + for buf in bufs[1:]: + self.assertEqual(rank_0_buf, buf) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_static_graph_multi_forward(self): + class Net(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10) + self.relu = nn.ReLU() + + def forward(self, x): + return self.relu(self.lin(x)) + + torch.cuda.set_device(self.rank) + torch.manual_seed(42 << 1337 % (self.rank + 1)) + model = Net().cuda(self.rank) + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], static_graph=True + ) + inp = torch.ones(2, 10, device="cuda") + for _ in range(3): + model.zero_grad() + local_model.zero_grad() + a = model(inp) + b = model(inp) + loss = a.sum() + b.sum() + loss.backward() + # Grads should be equal to a local model that ran through inp twice and averaged grads + if self.rank == 0: + inp_clone = inp.clone() + for _ in range(2): + a = local_model(inp_clone) + b = local_model(inp_clone) + loss = a.sum() + b.sum() + loss.backward() + + ws = dist.get_world_size() + for p in local_model.parameters(): + p.grad.data = p.grad / dist.get_world_size() + + for p_ddp, p_local in zip( + model.parameters(), + local_model.parameters() + ): + self.assertTrue( + torch.allclose( + p_ddp.grad, p_local.grad + ), + f"{p_ddp.grad} vs {p_local.grad}" + ) + + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_sync_bn_logged(self): + model = BN_NET + rank = self.rank + # single gpu 
training setup + model_gpu = model.cuda(rank) + no_sync_bn = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model_gpu), + device_ids=[self.rank], + ) + ddp_logging_data = no_sync_bn._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", True) + self.assertFalse(sync_bn_logged) + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu) + model_DDP = torch.nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=[self.rank], + ) + ddp_logging_data = model_DDP._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", False) + self.assertTrue(sync_bn_logged) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_stateless_api_with_ddp(self): + class MockModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(1, 1) + buffer = torch.ones(1) + self.register_buffer("buffer", buffer) + + def forward(self, x): + return self.l1(x) + self.buffer + + device = self.rank + module = MockModule().to(device) + module = torch.nn.parallel.DistributedDataParallel( + module, device_ids=[device] + ) + x = torch.rand((1, 1)).to(device) + weight = torch.tensor([[1.0]], device=device, requires_grad=True) + bias = torch.tensor([0.0], device=device, requires_grad=True) + buffer = torch.tensor([0.0], device=device) + parameters = { + "module.l1.weight": weight, + "module.l1.bias": bias, + "module.buffer": buffer, + } + prev_weight = module.module.l1.weight.clone() + prev_buffer = module.module.buffer.clone() + + res = torch.func.functional_call(module, parameters, x) + self.assertEqual(x, res) + # check that the weight remain unmodified + cur_weight = module.module.l1.weight + cur_buffer = module.module.buffer + self.assertEqual(cur_weight, prev_weight) + self.assertEqual(cur_buffer, prev_buffer) + # run a backward pass and check the gradients + res.backward() + self.assertIsNotNone(weight.grad) + self.assertIsNotNone(bias.grad) + # Gradient was not calculated for the module stated and buffers + self.assertIsNone(buffer.grad) + self.assertIsNone(module.module.l1.weight.grad) + self.assertIsNone(module.module.l1.bias.grad) + self.assertIsNone(module.module.buffer.grad) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_forward_backward_hook(self): + class DummyTestModel(nn.Module): + def __init__(self): + super().__init__() + torch.manual_seed(0) + self.fc = nn.Linear(2, 2) + + def forward(self, x): + return self.fc(x) + + def relu_hook(module, input): + return nn.functional.relu(input[0]) + + def gelu_hook(module, _input, output): + return nn.functional.gelu(output) + + def celu_hook(module, _input, output): + return (nn.functional.celu(output[0]),) + + local_model = DummyTestModel() + ddp_model = DummyTestModel() + local_model.fc.register_forward_pre_hook(relu_hook) + local_model.fc.register_forward_hook(gelu_hook) + ddp_model.fc.register_forward_pre_hook(relu_hook) + ddp_model.fc.register_forward_hook(gelu_hook) + local_model.fc.register_backward_hook(celu_hook) + ddp_model.fc.register_backward_hook(celu_hook) + ddp_model = DistributedDataParallel( + ddp_model.to(self.rank), device_ids=[self.rank] + ) + input_data = torch.rand(5, 2) + output_local = local_model(input_data) + output_ddp = ddp_model(input_data.to(self.rank)) + self.assertEqual(output_local, output_ddp) + output_local.sum().backward() + 
output_ddp.sum().backward() + ddp_grads = [p.grad for p in ddp_model.parameters()] + self.assertEqual(ddp_grads[0], local_model.fc.weight.grad) + self.assertEqual(ddp_grads[1], local_model.fc.bias.grad) + + def _test_hook_pickling(self, hook, hook_state): + torch.manual_seed(0) + learning_rate = 0.01 + chkpt_file = tempfile.gettempdir() + "/checkpoint.pt" + rank = self.rank + + input = torch.randn(7, 1, device=rank) + target = torch.randn(7, 5, device=rank) + net = torch.nn.Linear(1, 5).to(rank) + ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank]) + dummy_ddp_model = DistributedDataParallel( + copy.deepcopy(net), device_ids=[rank] + ) + optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate) + ddp_model.register_comm_hook(hook_state, hook) + ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + out = ddp_model(input) + loss = F.mse_loss(out, target) + loss.backward() + optimizer.step() + + state = { + "state_dict": ddp_model.state_dict(), + "comm_hook": hook, + "comm_hook_state": hook_state, + } + + if rank == 0: + with self.assertLogs("torch.distributed") as captured: + torch.save(state, chkpt_file) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group is not serializable and excluded from a saved state.", + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % rank} + with self.assertLogs("torch.distributed") as captured: + checkpoint = torch.load(chkpt_file, map_location=map_location) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group will be set to a default group (i.e. the world size).\ + If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.", + ) + + dummy_ddp_model.load_state_dict(checkpoint["state_dict"]) + dummy_hook = checkpoint["comm_hook"] + dummy_hook_state = checkpoint["comm_hook_state"] + dummy_optimizer = torch.optim.SGD( + dummy_ddp_model.parameters(), lr=learning_rate + ) + + # Check that loaded function is correct + self.assertEqual(dummy_hook.__qualname__, hook.__qualname__) + + # Check that all slots' keys were restored correctly + self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__) + + # Check that all slots' attributes are restored correctly + # Excluding ``process_group`` and ``rng``. 
+ for entry in dummy_hook_state.__slots__: + if entry != "process_group" and entry != "rng": + self.assertEqual( + getattr(dummy_hook_state, entry), getattr(hook_state, entry) + ) + + # Check that ``process_group`` was set to default + self.assertEqual(dummy_hook_state.process_group, _get_default_group()) + + # Check that a random state was restored properly: + # ``np.random.RandomState.get_state`` returns a tuple with entries: + # ``bit_generator`` - str, + # ``state.key`` - ndarray dtype[uint32], + # ``state.pos`` - int, + # ``has_gauss`` - int, + # ``gauss`` - float + # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi) + # To make sure random state was restored properly, all entries should equal the original + for entry1, entry2 in zip( + hook_state.rng.get_state(), dummy_hook_state.rng.get_state() + ): + np.testing.assert_array_equal(entry1, entry2) + + dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook) + dummy_ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + dummy_optimizer.zero_grad() + out_origin = ddp_model(input) + out_dummy = dummy_ddp_model(input) + loss_origin = F.mse_loss(out_origin, target) + loss_dummy = F.mse_loss(out_dummy, target) + loss_origin.backward() + loss_dummy.backward() + optimizer.step() + dummy_optimizer.step() + + # Check that gradients after 10 epochs are the same + for orig_param, dummy_param in zip( + ddp_model.parameters(), dummy_ddp_model.parameters() + ): + self.assertEqual(orig_param.grad, dummy_param.grad) + + dist.barrier() + if rank == 0: + os.remove(chkpt_file) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + @skip_but_pass_in_sandcastle_if( + True, "Skipped due to flakiness" + ) + def test_ddp_hook_pickling_powerSGD(self): + + hook = powerSGD.powerSGD_hook + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=4, + ) + self._test_hook_pickling(hook, powersgd_state) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device_mesh_initialization(self): + """ + Test DDP with device_mesh initialization. + """ + world_size = int(os.environ["WORLD_SIZE"]) + + from torch.distributed.device_mesh import init_device_mesh + device_mesh = init_device_mesh("cuda", (world_size,)) + + pg = _get_default_group() + + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_mesh=device_mesh) + self.assertEqual(ddp_model.device_mesh, device_mesh) + self.assertEqual(ddp_model.device_mesh.get_group(mesh_dim=0), pg) + + with self.assertRaisesRegex( + RuntimeError, "Cannot specify both process_group and device_mesh arguments." 
+ ): + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, process_group=pg, device_mesh=device_mesh + ) + + with self.assertRaisesRegex( + RuntimeError, "Only 1D device mesh is supported," + ): + device_mesh = init_device_mesh("cuda", (2, world_size // 2)) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_mesh=device_mesh + ) + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_compile_static_graph(self): + "Tests that DDP works with torch compile when static_graph=True" + model = torch.nn.Linear(10, 10).cuda(self.rank) + model_clone = copy.deepcopy(model) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + ddp_static = torch.nn.parallel.DistributedDataParallel( + model_clone, + device_ids=[self.rank], + static_graph=True + ) + ddp = torch.compile(ddp) + ddp_static = torch.compile(ddp_static) + input = torch.rand(10, 10).cuda(self.rank) + # verify output and gradient parity + for _ in range(6): + out_ddp = ddp(input).sum() + out_ddp_static = ddp_static(input).sum() + self.assertEqual(out_ddp, out_ddp_static) + out_ddp.backward() + out_ddp_static.backward() + for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()): + self.assertEqual(p1.grad, p2.grad) + + +instantiate_parametrized_tests(DistributedTest._DistTestBase) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8e78f5f977e52df3b26a76ff3587812e4208df86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py @@ -0,0 +1,66 @@ +# mypy: ignore-errors + +from contextlib import contextmanager +from datetime import timedelta +from functools import ( + partial, + wraps, +) + +import torch.distributed as dist +import torch.distributed.distributed_c10d as c10d + +class MockProcessGroup(dist.ProcessGroup): + + def __init__(self, rank, world): + super().__init__(rank, world) + + def getBackendName(self): + return "mock_process_group" + +def create_mock_pg(prefix_store, rank, world_size, timeout): + return MockProcessGroup(rank, world_size) + +dist.Backend.register_backend('mock_process_group', create_mock_pg) + +def mock_init_dist(rank, world_size): + # !!! WARNING !!! + # Kids don't try this at home, this is a cute pile of hacks that + # depends on a small mountain of c10d internals + assert not dist.is_initialized() + store = dist.HashStore() + # Trick _store_based_barrier into believing everyone else already checked-in + # Zero is the group index + store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1) + dist.init_process_group( + backend="mock_process_group", + rank=rank, + world_size=world_size, + store=store, + group_name="fake", + timeout=timedelta(seconds=1)) + +@contextmanager +def with_dist(rank=0, world_size=2): + """ + Context manager that initializer c10d with a fake process group. + """ + mock_init_dist(rank=rank, world_size=world_size) + try: + yield + finally: + dist.destroy_process_group() + +def with_fake_comms(func=None, rank=0, world_size=2): + """ + Function wrapper that inits a fake process group designed for testing. 
+ Right now only querying for world size is available + """ + if func is None: + return partial(with_fake_comms, rank=rank, world_size=world_size) + + @wraps(func) + def wrapper(self, *args, **kwargs): + with with_dist(rank, world_size): + func(self, *args, **kwargs) + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..fe858fff042f9c77a5e0f37861777a9d3a51bf5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py @@ -0,0 +1,32 @@ +# mypy: ignore-errors + +import torch.distributed as dist + +from torch._C._distributed_c10d import ( + FakeProcessGroup, +) + + +class FakeStore(dist.Store): + """ + A fake store is a fake Key-Value store simply for initialization usage + the of fake process group, one can either use FakeStore or HashStore. + """ + pass + + +def _create_fake_pg(prefix_store, rank, world_size, timeout): + """ + A fake process group (not related to FakeTensor) is a process group which + doesn't actually do any communication, it just hallucinates some + communication. You can run a single rank with a fake process group + without needing multiple processes (simulates per-rank behavior) + + NOTE: This is not a real process group, and it would produce wrong results + for every collective. It should be used as a convinient tool when playing + with distributed but don't care about the actual data. + """ + return FakeProcessGroup(rank, world_size) + + +dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda']) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..d6741582328d94a77108f3e6b5553012234a37f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py @@ -0,0 +1,494 @@ +# mypy: ignore-errors + +import sys +import threading +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union +from functools import partial, reduce + +import torch +import torch.distributed as dist +import weakref +from torch._C._distributed_c10d import ( + _create_work_from_future, + AllgatherOptions, + AllreduceOptions, + AllToAllOptions, + BarrierOptions, + BroadcastOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + ReduceOp, +) +from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp +from torch.futures import Future +from torch.utils import _pytree as pytree + +""" +TODO: +Lots of missing collectives. +Collectives validation. +Make timeout robust by making collectives respect the test deadline. +Make tests robust by making collectives interruptible. +We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures. 
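+
+At a high level (summarizing the code below): each rank is a thread of the
+current process. A collective is modeled by a per-group Collective object:
+every rank deposits its tensors via join(), rank 0 waits until all ranks have
+joined and performs the copy/reduction in place, and the remaining ranks are
+then signaled that the result is ready.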
+ +""" + + +def flatten_list(lst): + return pytree.tree_leaves(lst) + + +def ret_work(ret): + fut = Future() + fut.set_result(ret) + return _create_work_from_future(fut) + +def binop_reduce(tensors, op): + res = op(torch.stack(tensors), dim=0) + if isinstance(res, torch.Tensor): + return res + # min/max return a namedtuple + return res.values + +def bitwise_reduce(tensors, op): + return reduce(op, tensors) + +_reduce_ops = { + ReduceOp.SUM: partial(binop_reduce, op=torch.sum), + ReduceOp.AVG: partial(binop_reduce, op=torch.mean), + ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod), + ReduceOp.MIN: partial(binop_reduce, op=torch.min), + ReduceOp.MAX: partial(binop_reduce, op=torch.max), + ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and), + ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or), + ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor), +} + +class AllToAll: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_tensor_list, _ = data[dest_rank] + for src_rank in range(world_size): + _, input_tensor_list = data[src_rank] + output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank]) + +class AllReduce: + def __init__(self, op): + if op.op not in _reduce_ops: + raise NotImplementedError( + f"AllReduce op {op.op} not supported on multithreaded pg for now." + ) + self.op = op.op + + @torch.no_grad() + def work(self, data): + for i in range(len(data[0])): + tensors = [] + # use rank0 as the device for sum + rank_0_device = data[0][i].device + # collect all data to the list and make them + # all on rank 0 device + for src_rank in range(0, len(data)): + tensors.append(data[src_rank][i].to(rank_0_device)) + + # now mimic reduce across all ranks + res = _reduce_ops[self.op](tensors) + + # copy all the reduced value to each rank + for src_rank in range(len(data)): + data[src_rank][i].copy_(res.to(data[src_rank][i].device)) + + +class AllGather: + @torch.no_grad() + def work(self, data): + for src_rank in range(len(data)): + in_tensor_list = data[src_rank][1] + # Can't handle all_gather with multiple tensors + assert len(in_tensor_list) == 1 + src_tensor = in_tensor_list[0] + + for dest in data: + dest_tensor = dest[0][0][src_rank] + dest_tensor.copy_(src_tensor) + + +class Scatter: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + src_in_tensor_list = data[self.src][1] + # Can't handle scatter with multiple input tensor list + assert len(src_in_tensor_list) == 1 + src_in_tensors = src_in_tensor_list[0] + + for rank, each_rank_data in enumerate(data): + out_tensor_list = each_rank_data[0] + # Can't handle scatter with multiple output tensor + assert len(out_tensor_list) == 1 + dest_tensor = out_tensor_list[0] + dest_tensor.copy_(src_in_tensors[rank]) + + +class Gather: + def __init__(self, dst): + self.dst = dst + + @torch.no_grad() + def work(self, data): + # Can't handle gather with multiple tensor lists + assert len(data[self.dst][0]) == 1 + out_tensor_list = data[self.dst][0][0] + for rank, each_rank_data in enumerate(data): + src_in_tensor_list = each_rank_data[1] + # Can't handle gather with multiple tensor lists + assert len(src_in_tensor_list) == 1 + dest_tensor = out_tensor_list[rank] + dest_tensor.copy_(src_in_tensor_list[0]) + +class ReduceScatter: + def __init__(self, op): + if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG: + raise NotImplementedError(f"ReduceScatter does not support {op}") + self.op = op + + @torch.no_grad() + def 
work(self, data): + start_reduction = [False for _ in range(len(data))] + for each_rank_data in data: + # Can't handle reduce_scatter with multiple scatter list + assert len(each_rank_data[1]) == 1 + to_scatter = each_rank_data[1][0] + for i in range(len(to_scatter)): + dest_tensor_on_rank_i = data[i][0] + # Can't handle reduce_scatter with multiple output tensor + assert len(dest_tensor_on_rank_i) == 1 + dst_tensor_device = dest_tensor_on_rank_i[0].device + if not start_reduction[i]: + dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device)) + start_reduction[i] = True + else: + dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device)) + if self.op == dist.ReduceOp.AVG: + num_ranks = len(data) + for each_rank_data in data: + each_rank_data[0][0] /= num_ranks + + +class Broadcast: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + in_tensor_list = flatten_list(data[self.src]) + for i in range(len(data)): + out_tensor_list = flatten_list(data[i]) + for j in range(len(in_tensor_list)): + out_tensor_list[j].copy_(in_tensor_list[j]) + + +class Collective: + def __init__(self, world_size, collective, pg): + self._world_size = world_size + self._collective = collective + + self._start_cond = threading.Condition() + self._done_cond = threading.Condition() + + self._data = [None] * world_size + self._count = 0 + self._done = False + + self._pg = pg + + def join(self, rank, data): + with self._start_cond: + self._data[rank] = data + self._count += 1 + + # notify rank 0 + if self._count == self._world_size: + if rank > 0: + self._start_cond.notify() + + if rank == 0: + self._start_cond.wait_for( + lambda: self._count == self._world_size or self._pg._terminate.is_set() + ) + # SystemExit is not a subclass of Exception but BaseException + # and can be distinguished from normal exception raised from program errors + # so that we can hide it from the exception queue + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + + with self._done_cond: + # wait for rank 0 to finish + if rank > 0: + self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set()) + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + else: + # copy data around + self._collective.work(self._data) + self._done = True + self._done_cond.notify_all() + return ret_work(data) + + +class ProcessLocalGroup(dist.ProcessGroup): + _coll_lock = threading.Lock() + _cur_coll_on_pgs = {} + + _terminate = threading.Event() + + @classmethod + def _start_coll(cls, collective, pg): + with cls._coll_lock: + # pg_name is unique, we use that to record the mapping between pg and collective + if pg.pg_name not in cls._cur_coll_on_pgs: + cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls) + return cls._cur_coll_on_pgs[pg.pg_name] + + @classmethod + def _end_coll(cls, collective, pg): + # This is racily called by all ranks, so only one will work + with cls._coll_lock: + if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective: + cls._cur_coll_on_pgs.pop(pg.pg_name) + + @classmethod + def exception_handle(cls, exc): + cls._terminate.set() + for coll in cls._cur_coll_on_pgs.values(): + with coll._start_cond: + coll._start_cond.notify() + with coll._done_cond: + coll._done_cond.notify_all() + + @classmethod + def reset(cls): + with cls._coll_lock: + cls._cur_coll_on_pgs = {} + cls._terminate.clear() + + def alltoall(self, output_tensor_list, input_tensor_list, 
opts=AllToAllOptions()): + coll = ProcessLocalGroup._start_coll(AllToAll(), self) + res = coll.join(self._rank, (output_tensor_list, input_tensor_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def barrier(self, opts=BarrierOptions()): + return self.allreduce(tensor_list=[torch.ones(1)]) + + def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()): + coll = ProcessLocalGroup._start_coll(AllGather(), self) + res = coll.join(self._rank, (output_tensors, input_tensor)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()): + tensor_list = list(torch.chunk(output_tensor, self._world_size)) + return self.allgather([tensor_list], [input_tensor], opts) + + def broadcast(self, tensor_list, opts=BroadcastOptions()): + coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def gather(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()): + coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self) + res = coll.join(self._rank, (output_tensor, scatter_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()): + tensor_list = list(torch.chunk(input_tensor, self._world_size)) + return self.reduce_scatter([output_tensor], [tensor_list], opts) + + def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()): + works = [ + self._reduce_scatter_base(output_tensor, input_tensor, opts) + for output_tensor, input_tensor + in zip(output_tensors, input_tensors) + ] + for work in works[:-1]: + work.wait() + return works[-1] + + def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()): + res = None + for o_t, i_t in zip(output_tensor_list, input_tensor_list): + res = self._allgather_base(o_t, i_t) + return res + + def __init__(self, rank, world_size): + super().__init__(rank, world_size) + self._rank = rank + self._world_size = world_size + world = dist.distributed_c10d._world + if isinstance(world, ThreadLocalWorld): + world = world._get_world() + self._world = weakref.ref(world) + self._ctx = torch.autograd.set_multithreading_enabled(False) + + def size(self): + return self._world_size + + @property + def pg_name(self): + """ + return the global registered name of the current pg in 
the world + """ + return self._world().pg_names[self] + + @property + def group_name(self): + return self.pg_name + + def getBackendName(self): + return "threaded" + + def __repr__(self): + return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}" + + +def _create_threaded_pg(prefix_store, rank, world_size, timeout): + pg = ProcessLocalGroup(rank, world_size) + # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional + # When device mesh involves sub groups while store based barrier is not enabled in c10d, + # even though threaded pg actual collectives are assumed to be single threaded, + # different threads may be initializing different groups, + # leading to race conditions. + # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups + # (dim 0 and 1) would be initialized in different threads independently. + # In this case we can no longer rely on class or global variables + # but have to rely on store based barrier to make sure each group + # is ready separately before we can invoke collectives in any of the groups. + + # the prefix store is already per group so we pass an empty name here + _store_based_barrier(rank, prefix_store, "", world_size, timeout) + return pg + + +dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"]) + + +@dataclass +class WorldData: + default_pg: dist.ProcessGroup + pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]] + pg_names: Dict[dist.ProcessGroup, str] + pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]] + pg_backend_config: Dict[dist.ProcessGroup, str] + group_count: int + tags_to_pg: Dict[str, List[dist.ProcessGroup]] + pg_to_tag: Dict[dist.ProcessGroup, str] + pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]] + pg_default_device: Dict[dist.ProcessGroup, torch.device] + + +class ThreadLocalWorld: + _world = threading.local() + + def _get_world(self) -> WorldData: + if not hasattr(ThreadLocalWorld._world, "world"): + ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {}) + return ThreadLocalWorld._world.world + + @property + def default_pg(self): + return self._get_world().default_pg + + @default_pg.setter + def default_pg(self, value): + self._get_world().default_pg = value + + @property + def pg_map(self): + return self._get_world().pg_map + + @property + def pg_names(self): + return self._get_world().pg_names + + @property + def pg_group_ranks(self): + return self._get_world().pg_group_ranks + + @property + def pg_backend_config(self): + return self._get_world().pg_backend_config + + @property + def group_count(self) -> int: + return self._get_world().group_count + + @group_count.setter + def group_count(self, value): + self._get_world().group_count = value + + @property + def tags_to_pg(self): + return self._get_world().tags_to_pg + + @property + def pg_to_tag(self): + return self._get_world().pg_to_tag + + @property + def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]: + return self._get_world().pg_coalesce_state + + @property + def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]: + return self._get_world().pg_default_device + + +_old_pg_world = None +_ctx_manager = None + + +def _install_threaded_pg(): + global _old_pg_world + global _ctx_manager + _old_pg_world = dist.distributed_c10d._world + dist.distributed_c10d._world = ThreadLocalWorld() + _ctx_manager = torch.autograd.set_multithreading_enabled(False) + + return dist.distributed_c10d._world + 
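+# Typical usage (a sketch; the exact wiring lives in the test utilities that
+# consume this module): install the thread-local world once in the main
+# thread, have each per-rank thread initialize the "threaded" backend against
+# a single shared store, and uninstall after all threads have joined, e.g.
+#
+#     world = _install_threaded_pg()
+#     store = dist.HashStore()
+#     # in every per-rank thread:
+#     #   dist.init_process_group(
+#     #       backend="threaded", rank=rank, world_size=world_size, store=store
+#     #   )
+#     #   ... run collectives, then dist.destroy_process_group() ...
+#     _uninstall_threaded_pg()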
+ +def _uninstall_threaded_pg(): + dist.distributed_c10d._world = _old_pg_world diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed9f3cc96dfc1acf0b8753c3d35a88c82a0069d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py @@ -0,0 +1,149 @@ +# mypy: ignore-errors + +import torch +import torch.distributed as dist + +from torch import nn +from torch.nn.parallel import DistributedDataParallel +from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import ( + requires_gloo, + requires_nccl, + skip_if_lt_x_gpu, + skip_if_rocm, +) +from torch.distributed.pipeline.sync import Pipe + +class PipeWithDDPTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return 2 + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_never(self): + self._run_basic_test("nccl", "never") + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_never_find_unused(self): + self._run_basic_test("nccl", "never", find_unused_parameters=True) + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_always(self): + self._run_basic_test("nccl", "always", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_nccl() + @dist_init + @skip_if_rocm + def test_basic_nccl_ckpt_except_last(self): + self._run_basic_test("nccl", "except_last", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_never(self): + self._run_basic_test("gloo", "never") + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_never_find_unused(self): + self._run_basic_test("gloo", "never", find_unused_parameters=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_always(self): + self._run_basic_test("gloo", "always", static_graph=True) + + @skip_if_lt_x_gpu(4) + @requires_gloo() + @dist_init + @skip_if_rocm + def test_basic_gloo_ckpt_except_last(self): + self._run_basic_test("gloo", "except_last", static_graph=True) + + def _run_basic_test(self, backend, checkpoint, find_unused_parameters=False, static_graph=False): + dist.init_process_group( + backend=backend, + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + # Use 4 GPUs, two replicas of a pipe across GPU 0 and 1 and another + # pipe between GPU 2 and 3. Both replicas are replicated via DDP. 
+ fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank) + + class MyModule(nn.Module): + def __init__(self, device): + super().__init__() + self.fc2 = nn.Linear(8, 4, bias=False).cuda(device) + self.fc3 = nn.Linear(4, 2, bias=False).cuda(device) + + def forward(self, inp): + if find_unused_parameters: + return self.fc2(inp) + else: + return self.fc3(self.fc2(inp)) + + layer2 = MyModule(2 * self.rank + 1) + model = nn.Sequential( + fc1, + layer2 + ) + model = Pipe(model, chunks=2, checkpoint=checkpoint) + model = DistributedDataParallel( + model, + find_unused_parameters=find_unused_parameters, + static_graph=static_graph, + ) + + # Ensure inputs are different across ranks to verify that gradient + # sync indeed occurs. + model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + out = model(model_input).local_value() + out.sum().backward() + + # Run forward again for find_unused_parameters to trigger any potential errors. + if find_unused_parameters: + # Ensure inputs are different across ranks to verify that gradient + # sync indeed occurs. + unused_param_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + model(unused_param_input).local_value().sum().backward() + + # Run a few more iterations of fwd + bwd to ensure gradient synchronization + # occurs properly across iterations via delay_all_reduce/bucketized allreduce. + for _ in range(3): + model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1) + out = model(model_input).local_value() + out.sum().backward() + + # Check grads + output = [torch.empty_like(fc1.weight.grad), torch.empty_like(fc1.weight.grad)] + dist.all_gather(output, fc1.weight.grad) + self.assertEqual(output[0], output[1]) + + output = [torch.empty_like(layer2.fc2.weight.grad), torch.empty_like(layer2.fc2.weight.grad)] + dist.all_gather(output, layer2.fc2.weight.grad) + self.assertEqual(output[0], output[1]) + + if not find_unused_parameters: + output = [torch.empty_like(layer2.fc3.weight.grad), torch.empty_like(layer2.fc3.weight.grad)] + dist.all_gather(output, layer2.fc3.weight.grad) + self.assertEqual(output[0], output[1]) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cdbbdcfd06814e484bb6aea9220b6dbd1d1c911b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py @@ -0,0 +1,185 @@ +# mypy: ignore-errors + +import os +import sys +import unittest +from typing import Dict, List, Type + +from torch.testing._internal.common_distributed import MultiProcessTestCase +from torch.testing._internal.common_utils import ( + TEST_WITH_DEV_DBG_ASAN, + find_free_port, + IS_SANDCASTLE, +) +from torch.testing._internal.distributed.ddp_under_dist_autograd_test import ( + CudaDdpComparisonTest, + DdpComparisonTest, + DdpUnderDistAutogradTest, +) +from torch.testing._internal.distributed.pipe_with_ddp_test import ( + PipeWithDDPTest, +) +from torch.testing._internal.distributed.nn.api.remote_module_test import ( + CudaRemoteModuleTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, +) +from torch.testing._internal.distributed.rpc.dist_autograd_test import ( + DistAutogradTest, + CudaDistAutogradTest, + FaultyAgentDistAutogradTest, + TensorPipeAgentDistAutogradTest, + TensorPipeCudaDistAutogradTest +) +from torch.testing._internal.distributed.rpc.dist_optimizer_test import ( + 
DistOptimizerTest, +) +from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import ( + JitDistAutogradTest, +) +from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest +from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import ( + JitFaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import ( + FaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_test import ( + CudaRpcTest, + RpcTest, + TensorPipeAgentRpcTest, + TensorPipeAgentCudaRpcTest, +) +from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest +from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import ( + ReinforcementLearningRpcTest, +) + + +def _check_and_set_tcp_init(): + # if we are running with TCP init, set main address and port + # before spawning subprocesses, since different processes could find + # different ports. + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + os.environ["MASTER_ADDR"] = '127.0.0.1' + os.environ["MASTER_PORT"] = str(find_free_port()) + +def _check_and_unset_tcp_init(): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + del os.environ["MASTER_ADDR"] + del os.environ["MASTER_PORT"] + +# The tests for the RPC module need to cover multiple possible combinations: +# - different aspects of the API, each one having its own suite of tests; +# - different agents (ProcessGroup, TensorPipe, ...); +# To avoid a combinatorial explosion in code size, and to prevent forgetting to +# add a combination, these are generated automatically by the code in this file. +# Here, we collect all the test suites that we need to cover. +# We then have one separate file for each agent, from which +# we call the generate_tests function of this file, passing to it a fixture for +# the agent, which then gets mixed-in with each test suite. + +@unittest.skipIf( + TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues" +) +class SpawnHelper(MultiProcessTestCase): + def setUp(self): + super().setUp() + _check_and_set_tcp_init() + self._spawn_processes() + + def tearDown(self): + _check_and_unset_tcp_init() + super().tearDown() + + +# This list contains test suites that are agent-agnostic and that only verify +# compliance with the generic RPC interface specification. These tests should +# *not* make use of implementation details of a specific agent (options, +# attributes, ...). These test suites will be instantiated multiple times, once +# for each agent (except the faulty agent, which is special). +GENERIC_TESTS = [ + RpcTest, + ParameterServerTest, + DistAutogradTest, + DistOptimizerTest, + JitRpcTest, + JitDistAutogradTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, + DdpUnderDistAutogradTest, + DdpComparisonTest, + ReinforcementLearningRpcTest, +] +GENERIC_CUDA_TESTS = [ + CudaRpcTest, + CudaDistAutogradTest, + CudaRemoteModuleTest, + CudaDdpComparisonTest, + PipeWithDDPTest, +] + + +# This list contains test suites that will only be run on the TensorPipeAgent. +# These suites should be standalone, and separate from the ones in the generic +# list (not subclasses of those!). 
+TENSORPIPE_TESTS = [ + TensorPipeAgentRpcTest, + TensorPipeAgentDistAutogradTest, +] +TENSORPIPE_CUDA_TESTS = [ + TensorPipeAgentCudaRpcTest, + TensorPipeCudaDistAutogradTest, +] + + +# This list contains test suites that will only be run on the faulty RPC agent. +# That agent is special as it's only used to perform fault injection in order to +# verify the error handling behavior. Thus the faulty agent will only run the +# suites in this list, which were designed to test such behaviors, and not the +# ones in the generic list. +FAULTY_AGENT_TESTS = [ + FaultyAgentRpcTest, + FaultyAgentDistAutogradTest, + JitFaultyAgentRpcTest, +] + + +def generate_tests( + prefix: str, + mixin: Type[RpcAgentTestFixture], + tests: List[Type[RpcAgentTestFixture]], + module_name: str, +) -> Dict[str, Type[RpcAgentTestFixture]]: + """Mix in the classes needed to autogenerate the tests based on the params. + + Takes a series of test suites, each written against a "generic" agent (i.e., + derived from the abstract RpcAgentTestFixture class), as the `tests` args. + Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a + certain agent, as the `mixin` arg. Produces all combinations of them. + Returns a dictionary of class names to class type + objects which can be inserted into the global namespace of the calling + module. The name of each test will be a concatenation of the `prefix` arg + and the original name of the test suite. + The `module_name` should be the name of the calling module so + that the classes can be fixed to make it look like they belong to it, which + is necessary for pickling to work on them. + """ + ret: Dict[str, Type[RpcAgentTestFixture]] = {} + for test_class in tests: + if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN: + print( + f'Skipping test {test_class} on sandcastle for the following reason: ' + 'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr) + continue + + name = f"{prefix}{test_class.__name__}" + class_ = type(name, (test_class, mixin, SpawnHelper), {}) + class_.__module__ = module_name + ret[name] = class_ + return ret diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d64a9436a6f2df2ce1b3bdd2bc7f9e998fa0270f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py @@ -0,0 +1,87 @@ +# mypy: ignore-errors + +import torch +import re +import unittest +import functools +from subprocess import CalledProcessError + +from torch._inductor.codecache import CppCodeCache +from torch.utils._triton import has_triton +from torch.testing._internal.common_utils import ( + LazyVal, + IS_FBCODE, +) +from torch._dynamo.backends.registry import register_backend +from torch._inductor.compile_fx import compile_fx, count_bytes_inner +from torch.testing._internal.common_utils import TestCase + +def test_cpu(): + try: + CppCodeCache.load("") + return not IS_FBCODE + except ( + CalledProcessError, + OSError, + torch._inductor.exc.InvalidCxxCompiler, + torch._inductor.exc.CppCompileError, + ): + return False + +HAS_CPU = LazyVal(test_cpu) + +HAS_CUDA = torch.cuda.is_available() and has_triton() + +HAS_GPU = HAS_CUDA + +GPUS = ["cuda"] + +HAS_MULTIGPU = any( + getattr(torch, gpu).is_available() and getattr(torch, gpu).device_count() >= 2 + for gpu in GPUS +) + +tmp_gpus = [x for x in GPUS if getattr(torch, x).is_available()] +assert 
len(tmp_gpus) <= 1 +GPU_TYPE = "cuda" if len(tmp_gpus) == 0 else tmp_gpus.pop() +del tmp_gpus + +@register_backend +def count_bytes_inductor(gm, example_inputs): + return compile_fx(gm, example_inputs, inner_compile=count_bytes_inner) + +def _check_has_dynamic_shape( + self: TestCase, + code, +): + for_loop_found = False + has_dynamic = False + lines = code.split("\n") + for line in lines: + if "for(" in line: + for_loop_found = True + if re.search(r";.*ks.*;", line) is not None: + has_dynamic = True + break + self.assertTrue( + has_dynamic, msg=f"Failed to find dynamic for loop variable\n{code}" + ) + self.assertTrue(for_loop_found, f"Failed to find for loop\n{code}") + + +def skipDeviceIf(cond, msg, *, device): + if cond: + def decorate_fn(fn): + def inner(self, *args, **kwargs): + if self.device == device: + raise unittest.SkipTest(msg) + return fn(self, *args, **kwargs) + return inner + else: + def decorate_fn(fn): + return fn + + return decorate_fn + +skipCUDAIf = functools.partial(skipDeviceIf, device="cuda") +skipCPUIf = functools.partial(skipDeviceIf, device="cpu") diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8171a95918939f2e34f352fe93ca5f0ab7e4bfa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py @@ -0,0 +1,722 @@ +# mypy: ignore-errors + +# Torch +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 +import torch.nn.functional as F +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +from torch.testing._internal.common_nn import module_tests, new_module_tests +from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like + +import collections +from copy import deepcopy +from typing import Any, Dict, List, Union +import math # noqa: F401 + +# Testing utils +from torch import inf + +assert torch.get_default_dtype() == torch.float32 + +L = 20 +M = 10 +S = 5 + + +def unpack_variables(args): + if isinstance(args, tuple): + return tuple(unpack_variables(elem) for elem in args) + else: + return args + +class dont_convert(tuple): + pass + +non_differentiable = collections.namedtuple('non_differentiable', ['tensor']) + +def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None): + if not isinstance(call_args, tuple): + call_args = (call_args,) + + def map_arg(arg): + def maybe_non_contig(tensor): + if not non_contiguous or tensor.numel() < 2: + return tensor.clone() + + return noncontiguous_like(tensor) + + def conjugate(tensor): + return tensor.conj() + + if isinstance(arg, (torch.Size, dont_convert)): + return arg + elif isinstance(arg, tuple) and len(arg) == 0: + var = conjugate(torch.randn((), dtype=dtype, device=device)) + var.requires_grad = requires_grad + return var + elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor): + return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad) + # double check casting + elif isinstance(arg, non_differentiable): + if isinstance(arg.tensor, torch.Tensor): + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + elif isinstance(arg, torch.Tensor): + if 
arg.is_complex() != dtype.is_complex: + raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ", + "which is not supported for now") + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards + v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone() + v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex()) + return v + elif callable(arg): + return map_arg(arg(dtype=dtype, device=device)) + else: + return arg + args_out = tuple(map_arg(arg) for arg in call_args) + kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {} + return args_out, kwargs_out + +# NB: JIT script tests for all nn functional interfaces, script mode does +# not support in_place operations yet, so no inplace operation tests added. +# removed all the deprecated functions +# +# ( +# method name, +# input size/constructing fn, +# args (tuple represents shape of a tensor arg), +# test variant name(will be used at test name suffix, +# 'inplace' skips grad tests), // optional +# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional +# fn to determine if test should be skipped, // optional +# fn mapping output to part that should be gradcheck'ed, // optional +# kwargs for function, // optional +# ) +nn_functional_tests = [ + ('conv1d', (S, S, S), ((S, S, S),)), + ('conv2d', (S, S, S, S), ((S, S, S, S),)), + ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_transpose1d', (S, S, S), ((S, S, S),)), + ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)), + ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)), + ('avg_pool1d', (S, S, S), (3,)), + ('avg_pool2d', (S, S, S, S), (3,), '', (True,)), + ('avg_pool3d', (S, S, S, S, S), (3,)), + ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)), + ('max_pool1d', (S, S, S), (2, 1)), + ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'), + ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')), + ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')), + ('max_pool3d', (S, S, S, S, S), (2, 1)), + ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)), + ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)), + ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)), + ('lp_pool1d', (S, S, S), (2., 3, 2,)), + ('lp_pool2d', (S, S, S, S), (2., 3, 2,)), + ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)), + ('adaptive_max_pool1d', (S, S, S), (5,)), + ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)), + ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)), + ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)), + ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)), + ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)), + ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')), + ('alpha_dropout', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S, S), (0.5,), 'batched'), + ('dropout3d', (S, S, S, S), (0.5,)), + ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'), + ('feature_alpha_dropout', (S, S, S), (0.5,)), + ('threshold', (S, S, S), (0.1, 2.), '', (True,)), + ('threshold', (S, S, S), (0.1, 2., True), 'inplace'), + ('relu', (S, S, S), (), '', (True,)), + ('relu', (S, S, S), (), 'inplace'), + ('glu', (S - 1, S - 1, S - 1), 
(),), + ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)), + ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'), + ('relu6', (S, S, S), (), '', (True,)), + ('relu6', (S, S, S), (True), 'inplace'), + ('elu', (S, S, S), (0.9,),), + ('elu', (S, S, S), (0.9, True), 'inplace'), + ('selu', (S, S, S), (),), + ('selu', (S, S, S), (True), 'inplace'), + ('celu', (S, S, S), (0.9,),), + ('celu', (S, S, S), (0.9, True), 'inplace'), + ('leaky_relu', (S, S, S), (0.02,), '', (True,)), + ('leaky_relu', (S, S, S), (0.02,), 'inplace'), + ('rrelu', (S, S), (0.1, 0.3, False),), + ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'), + ('hardshrink', (S, S, S), (0.4,), '', (True,)), + ('tanhshrink', (S, S, S), (),), + ('softsign', (S, S, S), (),), + ('softplus', (S, S, S), (), '', (True,)), + ('softmin', (S, S, S), (0,),), + ('softmax', (S, S, S), (0,), '', (True,)), + ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)), + ('tanh', (S, S, S), (), '', (True,)), + ('sigmoid', (S, S, S), (), '', (True,)), + ('silu', (S, S, S), (), '', (True,)), + ('log_softmax', (S, S, S), (0,), '', (True,)), + ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])), + ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])), + ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),), + ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)), + ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ), + 'training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), True, ), + 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, True, ), + 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, None, False, ), + 'inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ), + 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), False, ), + 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), 
(non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, False, ), + 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')), + ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),), + ('layer_norm', (S, S, S, S), ([5],), '', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)), + non_differentiable(torch.rand(S))), 'with_weight_and_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])), + ('group_norm', (S, S, S), (1, torch.rand(5),),), + ('local_response_norm', (S, S, S), (2, ),), + ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'), + ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),), + ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),), + ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),), + ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('margin_ranking_loss', (S,), ((S,), (S,)),), + ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),), + ('pixel_shuffle', (1, 9, 4, 4), (3,),), + ('pixel_unshuffle', (1, 1, 12, 12), (3,),), + ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),), + ('pad', (3, 3, 4, 2), ([1, 1],),), + ('pairwise_distance', (S, S), ((S, S),),), + ('pdist', (S, S), (),), + ('cosine_similarity', (S, S), ((S, S),),), + ('triplet_margin_loss', (S, S), ((S, S), (S, S)),), + ('normalize', (S, S, S), (),), + ('unfold', (S, S, S, S), ([2, 3]),), + ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),), + ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),), + ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),), + ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)), + 1, 1., non_differentiable(torch.randn(S))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)), + 
non_differentiable(torch.randn(3, 2))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), + (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'), + ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(), + (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long), + torch.randint(1, S, (S,), dtype=torch.long))), + ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'), + ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False), + 'nearest_4d_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False), + 'nearest_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False), + 'bilinear_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False), + 'bilinear_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False), + 'bicubic_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False), + 
'bicubic_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False), + 'nearest_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False), + 'nearest_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False), + 'linear_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False), + 'linear_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False), + 'nearest_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False), + 'nearest_5d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False), + 'trilinear_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False), + 'trilinear_5d_with_size_not_recompute_scale_factor'), +] + +script_template = ''' +def the_method({}): + return {} +''' + +def value_to_literal(value): + if isinstance(value, str): + # Quotes string and escapes special characters + return ascii(value) + if isinstance(value, torch.Tensor): + return 'torch.' + str(value) + else: + return str(value) + +def get_call(method_name, func_type, args, kwargs): + kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()]) + self_arg = args[0] + if func_type == 'method': + args = args[1:] + + argument_str = ', '.join(args) + argument_str += ', ' if len(args) and len(kwargs) else '' + argument_str += kwargs_str + + if func_type == 'functional' or func_type == 'function': + call = f'torch.{method_name}({argument_str})' + elif func_type == 'method': + call = f'{self_arg}.{method_name}({argument_str})' + elif func_type == 'nn_functional': + call = f'torch.nn.functional.{method_name}({argument_str})' + else: + raise TypeError('Unsupported function type') + + return call + +def get_constant(x): + if x == inf: + return 'math.inf' + if x == -inf: + return '-math.inf' + return x + +def get_script_args(args): + formals: List[str] = [] + tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + actuals: List[str] = [] + for arg in args: + if isinstance(arg, torch.Tensor): + name = f'i{len(formals)}' + formals.append(name) + actuals.append(name) + tensors.append(arg) + elif is_iterable_of_tensors(arg): + name = f'i{len(formals)}' + formals.append(name + ': List[torch.Tensor]') + actuals.append(name) + tensors.append(list(arg)) + elif isinstance(arg, str): + actuals.append(f"'{arg}'") + else: + actuals.append(str(get_constant(arg))) + return (formals, tensors, actuals) + +# create a script function from (name, func_type, output_process_fn), +# and returns the compiled function and example inputs +def gen_script_fn_and_args(method_name, func_type, *args, **kwargs): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + return CU.the_method, tensors + +# create a script function from (name, func_type), +# returns a function takes in (args, kwargs) and runs the compiled function +def create_script_fn(self, method_name, func_type): + # function returns tuple containing original output and + # filtered output to be used in checking gradients 
+ def script_fn(*args, **kwargs): + fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs) + self.assertExportImport(fn.graph, tensors) + output = fn(*tensors) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined] + return output + return script_fn + +class SplitInputs: + all_tensors: List[Any] + tensor_args: List[Any] + nontensor_args: List[Any] + arg_types: List[str] + tensor_kwargs: Dict[str, Any] + kwarg_order: List[str] + nontensor_kwargs: Dict[str, Any] + kwarg_types: Dict[str, Any] + + @staticmethod + def _is_tensor_input(arg): + return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg) + + def __init__(self, args, kwargs): + self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args] + self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()} + self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)] + self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)] + self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)} + self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)} + self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]] + self.kwarg_order = [k for k, v in kwargs.items()] + + def nontensors_match(self, other: 'SplitInputs'): + if self.arg_types != other.arg_types: + return False + if self.kwarg_types != other.kwarg_types: + return False + if self.kwarg_order != other.kwarg_order: + return False + if self.nontensor_args != other.nontensor_args: + return False + if self.nontensor_kwargs != other.nontensor_kwargs: + return False + return True + +# make a new function where all non-tensor arguments in 'args' have been partially +# applied, and all tensor arguments remain. 
+# used to trace functions when some arguments are not tensors +def partial_apply_nontensors(fn, args, kwargs): + inputs = SplitInputs(args, kwargs) + + def new_fn(*tensors_): + tensors = iter(tensors_) + full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)] + full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()} + return fn(*full_args, **full_kwargs) + + return new_fn, inputs + +# create a trace function from input fn +def create_traced_fn(self, fn, cache_traced_fn=False): + def traced_fn(*inputs, **kwargs): + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `check_against_reference` already does all the checks + # against python function + fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs) + if not cache_traced_fn or not hasattr(traced_fn, 'traced'): + traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False) + self.assertExportImport(traced.graph, split_inputs.all_tensors) + output = traced(*split_inputs.all_tensors) + if cache_traced_fn: + traced_fn.traced = traced + traced_fn.split_inputs = split_inputs + else: + # Guard to check that nontensor inputs are the same as during tracing + self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs)) + output = traced_fn.traced(*split_inputs.all_tensors) + traced = traced_fn.traced + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined] + traced_fn.graph = traced.graph # type: ignore[attr-defined] + return output + return traced_fn + +# known to be failing in script +EXCLUDE_SCRIPT = { + 'test_norm_fro_default', + 'test_norm_fro_cpu', + 'test_norm_nuc', + 'test_norm_fro', + 'test_norm_nuc_batched', + + # aten op has additional cudnn argument + 'test_nn_unfold', + + # flaky test - TODO fix + 'test_nn_ctc_loss', + + # unknown builtin op + 'test_nn_fold', + + # jit doesn't support sparse tensors. + 'test_to_sparse', + 'test_to_sparse_dim', +} + +# generates a script function and set of example inputs +# from a specified test in the format of nn_functional_tests +def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args): + test_name = 'test_nn_' + name + + if variant_name != '': + test_name = test_name + '_' + variant_name + + no_grad = variant_name == 'inplace' + + self_variable = create_input((self_size,))[0][0] + kwargs = None + + # need to record this because methods can change the size (e.g. 
unsqueeze) + args_variable, kwargs_variable = create_input(args) + + self_tensor = deepcopy(self_variable.data) + args_tensor = deepcopy(unpack_variables(args_variable)) + + f_args_variable = (self_variable,) + args_variable + f_args_tensor = (self_tensor,) + args_tensor + with torch._jit_internal._disable_emit_hooks(): + script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable) + return script_fn, inputs + + +# additional modules test +# TODO: delete this list once we make all nn_tests work +additional_module_tests = [ + { + 'module_name': 'Bilinear', + 'constructor_args': (S, S, M), + 'input_size': (S, S), + 'extra_args': ((S, S),) + }, + { + 'module_name': 'RNNCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'LSTMCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'GRUCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'MultiheadAttention', + 'constructor_args': (128, 8), + 'input_size': (10, 8, 128), + 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)), + 'slowTest': True + }, + { + 'module_name': 'Transformer', + 'constructor_args': (1, 1, 1, 1, 2), + 'input_size': (3, 1, 1), + 'extra_args': (torch.randn(1, 1, 1),), + 'slowTest': True + } +] + +EXCLUDE_SCRIPT_MODULES = { + 'test_nn_AdaptiveAvgPool2d_tuple_none', + 'test_nn_AdaptiveAvgPool3d_tuple_none', + 'test_nn_AdaptiveMaxPool2d_tuple_none', + 'test_nn_AdaptiveMaxPool3d_tuple_none', + + # Doesn't use future division, so this is not supported + 'test_nn_CrossMapLRN2d', + # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented + 'test_nn_TransformerDecoderLayer_gelu_activation', + 'test_nn_TransformerDecoderLayer_relu_activation', + 'test_nn_TransformerEncoderLayer_gelu_activation', + 'test_nn_TransformerEncoderLayer_relu_activation', + 'test_nn_Transformer_multilayer_coder', +} + +script_method_template = ''' +def forward({}): + return {} +''' + +def create_script_module(self, nn_module, constructor_args, *args, **kwargs): + def script_module(*args, **kwargs): + formals, tensors, actuals = get_script_args(args) + + method_args = ', '.join(['self'] + actuals) + call_args_str = ', '.join(actuals) + call = f"self.submodule({call_args_str})" + script = script_method_template.format(method_args, call) + + submodule_constants = [] + if kwargs.get('is_constant'): + submodule_constants = ['submodule'] + + # Create module to use the script method + class TheModule(torch.jit.ScriptModule): + __constants__ = submodule_constants + + def __init__(self): + super().__init__() + self.submodule = nn_module(*constructor_args) + + def make_module(script): + module = TheModule() + # check __repr__ + str(module) + module.define(script) + return module + + module = make_module(script) + if self: + self.assertExportImportModule(module, tensors) + module(*args) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + create_script_module.last_graph = module.graph # type: ignore[attr-defined] + return module + return script_module + +def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + # to clean up IR + torch._C._jit_pass_inline(CU.the_method.graph) + 
torch._C._jit_pass_constant_propagation(CU.the_method.graph) + torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name) + +def get_nn_module_name_from_kwargs(**kwargs): + if 'module_name' in kwargs: + return kwargs['module_name'] + elif 'fullname' in kwargs: + return kwargs['fullname'] + elif 'constructor' in kwargs: + return kwargs['constructor'].__name__ + +def get_nn_mod_test_name(**kwargs): + if 'fullname' in kwargs: + test_name = kwargs['fullname'] + else: + test_name = get_nn_module_name_from_kwargs(**kwargs) + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + return f'test_nn_{test_name}' + +def get_nn_module_class_from_kwargs(**kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + index = name.find("_") + if index == -1: + return name + else: + return name[0:name.find("_")] + +def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + + if 'desc' in kwargs and 'eval' in kwargs['desc']: + # eval() is not supported, so skip these tests + return + + test_name = name + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + test_name = get_nn_mod_test_name(**kwargs) + + if test_name in EXCLUDE_SCRIPT_MODULES: + return + if 'constructor' in kwargs: + nn_module = kwargs['constructor'] + else: + nn_module = getattr(torch.nn, name) + + if "FunctionalModule" in str(nn_module): + return + + if 'constructor_args_fn' in kwargs: + constructor_args = kwargs['constructor_args_fn']() + else: + constructor_args = kwargs.get('constructor_args', ()) + + # Set up inputs from tuple of sizes or constructor fn + input_dtype = torch.double + if 'input_fn' in kwargs: + input = kwargs['input_fn']() + if isinstance(input, torch.Tensor): + input = (input,) + + if all(tensor.is_complex() for tensor in input): + input_dtype = torch.cdouble + else: + input = (kwargs['input_size'],) + + # Extra parameters to forward() + if 'extra_args' in kwargs: + input = input + kwargs['extra_args'] + + if 'target_size' in kwargs: + input = input + (kwargs['target_size'],) + elif 'target_fn' in kwargs: + if torch.is_tensor(input): + input = (input,) + input = input + (kwargs['target_fn'](),) + + args_variable, kwargs_variable = create_input(input, dtype=input_dtype) + f_args_variable = deepcopy(unpack_variables(args_variable)) + out_var = deepcopy(f_args_variable) + + args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable) + + return mod, out_var + + +def get_all_nn_module_tests(): + return module_tests + new_module_tests + additional_module_tests diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c0109ecacf7f7d3fd6a1849cdf127b7f41a49578 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py @@ -0,0 +1,893 @@ +# mypy: ignore-errors + +# Torch +from torch.autograd import Variable +from torch.autograd.function import _nested_map +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 + +from torch.onnx import OperatorExportTypes +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized +import zipfile +import functools + +# Testing utils +from torch.testing import FileCheck +from torch.testing._internal.common_utils import IS_WINDOWS, 
\ + freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \ + is_iterable_of_tensors +from torch.testing._internal.common_jit import JitCommonTestCase +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from contextlib import contextmanager +from functools import reduce +from io import StringIO +from collections import defaultdict + +import importlib.util +import inspect +import io +import math +import os +import pickle +import sys +import tempfile +import textwrap +from importlib.abc import Loader +from typing import Any, Dict, List, Tuple, Union + +RUN_CUDA = torch.cuda.is_available() +RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1 +RUN_CUDA_HALF = RUN_CUDA +# HIP supports half, no version check necessary +if torch.cuda.is_available() and not torch.version.hip: + CUDA_VERSION = torch._C._cuda_getCompiledVersion() + for d in range(torch.cuda.device_count()): + major = torch.cuda.get_device_capability(d)[0] + if (major < 6): + RUN_CUDA_HALF = False + +def execWrapper(code, glob, loc): + exec(code, glob, loc) + +def do_input_map(fn, input): + return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input) + +def clear_class_registry(): + torch._C._jit_clear_class_registry() + torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() + torch.jit._state._clear_class_state() + +def get_execution_plan(graph_executor_state): + execution_plans = list(graph_executor_state.execution_plans.values()) + num_plans = len(execution_plans) + if num_plans != 1: + raise RuntimeError('This test assumes this GraphExecutor should ' + f'only have one execution plan, got: {num_plans}') + return execution_plans[0] + +class _AssertRaisesRegexWithHighlightContext: + """ + A context manager that is useful for checking that error messages highlight + the correct part of the source code. 
+ """ + + def __init__(self, test_case, exception, regex, highlight): + self.test_case = test_case + self.exception_type = exception + self.regex = regex + self.highlight = highlight + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + with self.test_case.assertRaisesRegex(self.exception_type, self.regex): + if type: + raise value + + if self.highlight: + FileCheck().check_source_highlighted(self.highlight).run(str(value)) + + return True + +FUSION_GROUP = "prim::TensorExprGroup" + +class JitTestCase(JitCommonTestCase): + _do_cuda_memory_leak_check = True + _restored_warnings = False + + class capture_stdout(list): + """ + Replace sys.stdout with a temporary StringIO + """ + def __enter__(self): + self.sys_stdout = sys.stdout + self.stringio = StringIO() + sys.stdout = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stdout = self.sys_stdout + + class capture_stderr(list): + """ + Replace sys.stderr with a temporary StringIO + """ + def __enter__(self): + self.sys_stderr = sys.stderr + self.stringio = StringIO() + sys.stderr = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stderr = self.sys_stderr + + def setHooks(self): + torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook) + + def clearHooks(self): + torch._C._jit_set_emit_hooks(None, None) + + def setUp(self): + super().setUp() + # unittest overrides all warning filters and forces all of them to show up + # after we install our own to silence those coming from inside PyTorch. + # This will ensure that our filter still takes precedence. + if not JitTestCase._restored_warnings: + torch.jit.TracerWarning.ignore_lib_warnings() + JitTestCase._restored_warnings = True + self.setHooks() + + def tearDown(self): + super().tearDown() + # needs to be cleared because python might be unloaded before + # the callback gets destructed + self.clearHooks() + clear_class_registry() + + def assertAllFused(self, graph, except_for=()): + + # note this helper collects nodes on 'fast path' only + # i.e. 
the true blocks of specialized checks + def get_nodes_and_parents_recursively(block, kind, acc): + for node in block.nodes(): + if node.kind() == kind: + acc[block].append(node) + elif node.kind() == 'prim::DifferentiableGraph': + get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc) + elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or + node.inputs().__next__().node().kind() == 'prim::TypeCheck' or + node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'): + get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc) + else: + for inner_block in node.blocks(): + get_nodes_and_parents_recursively(inner_block, kind, acc) + + allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate', + 'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for) + + fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list) + get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups) + self.assertTrue(len(fusion_groups) == 1, f'got {graph}') + (graph, fusion_nodes) = next(iter(fusion_groups.items())) + # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes` + self.assertTrue(len(fusion_nodes) == 1, f'got {graph}') + self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()), + f'got {graph}') + + def _isHookExceptionOk(self, e): + se = str(e) + allowed = ("Could not export Python function", + "closures are not exportable") + for a in allowed: + if a in se: + return True + return False + + def _compared_saved_loaded(self, m): + def extract_files(buffer): + # crack open the zip format to get at the main module code + archive = zipfile.ZipFile(buffer) + # check that we have no duplicate names + self.assertEqual(len(set(archive.namelist())), len(archive.namelist())) + files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist())) + # unwrap all the code files into strings + code_files_str = filter(lambda x: x.endswith('.py'), files) + code_files_stream = (archive.open(f) for f in code_files_str) + code_files = ("".join([line.decode() for line in file]) for file in code_files_stream) + + # unpickled all the debug files + debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files) + debug_files_stream = (archive.open(f) for f in debug_files_str) + debug_files = (pickle.load(f) for f in debug_files_stream) + return code_files, debug_files + + # disable the hook while we parse code, otherwise we will re-enter the hook + with torch._jit_internal._disable_emit_hooks(): + try: + # short-circuit if this is an empty function or module + if len(m.code) == 0: + return + if isinstance(m, torch._C.ScriptModule): + if len(m._method_names()) == 0: + return + + # save the module to a buffer + buffer = io.BytesIO() + torch.jit.save(m, buffer) + # copy the data in the buffer so we can restore it later. This + # is because py2 and py3 have different semantics with zipfile + # and it's easier to just work with a fresh copy each time. 
+ buffer_copy = buffer.getvalue() + + code_files, debug_files = extract_files(buffer) + + except RuntimeError as e: + if not self._isHookExceptionOk(e): + raise + else: + return + + # import the model again (from a the copy we made of the original) + buffer2 = io.BytesIO(buffer_copy) + imported = torch.jit.load(buffer2) + + # save it again + saved_module_buffer_2 = io.BytesIO() + torch.jit.save(imported, saved_module_buffer_2) + + saved_module_buffer_2.seek(0) + code_files_2, debug_files_2 = extract_files(saved_module_buffer_2) + + for a, b in zip(code_files, code_files_2): + self.assertMultiLineEqual(a, b) + + if isinstance(m, torch._C.ScriptModule): + self.assertTrue(torch._C._ivalue_tags_match(m, imported._c)) + + + def emitFunctionHook(self, func): + # func has invalid names for export, skip the jitter check + if func.name == "" or "aten::" in func.name: + return + self._compared_saved_loaded(func) + + def emitModuleHook(self, module): + self._compared_saved_loaded(module) + + + def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None) + torch.jit.save(m, buffer) + m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + + if not also_test_file: + return imported + + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. To support Windows, + # close the file after creation and try to remove it manually + f = tempfile.NamedTemporaryFile(delete=False) + try: + f.close() + imported.save(f.name) + result = torch.jit.load(f.name, map_location=map_location) + finally: + os.unlink(f.name) + + result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + return result + + def assertGraphContains(self, graph, kind, consider_subgraphs=False): + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + self.assertTrue(count > 0) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + self.assertTrue(len(out_nodes) > 0) + + def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False): + def perform_assert(graph, kind, actual, expected, consider_subgraphs): + if actual == expected: + return + subgraph = 'including' if consider_subgraphs else 'excluding' + raise AssertionError( + f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}') + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + perform_assert(graph, kind, count, num_kind_nodes, + consider_subgraphs) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + perform_assert(graph, kind, len(out_nodes), num_kind_nodes, + consider_subgraphs) + + def assertExpectedONNXGraph(self, g, *args, **kwargs): + g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX) + self.assertExpectedGraph(g, *args, **kwargs) + + 
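+    # Editorial sketch (not part of the original class): typical use of the
+    # graph-assertion helpers defined above, where `scripted` is a
+    # hypothetical torch.jit.script'ed function under test and the expected
+    # node count is purely illustrative.
+    #
+    #     self.assertGraphContains(scripted.graph, "aten::add")
+    #     self.assertGraphContainsExactly(scripted.graph, "prim::Constant", 2,
+    #                                     consider_subgraphs=True)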
def assertExpectedGraph(self, trace, *args, **kwargs): + if isinstance(trace, torch._C.Graph): + graph = trace + else: + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_dce(graph) + torch._C._jit_pass_lint(graph) + graph = torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + self.assertExpected(str(graph), *args, **kwargs) + + def run_pass(self, name, trace): + if isinstance(trace, torch._C.Graph): + graph = trace + set_graph = False + else: + set_graph = True + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + result = getattr(torch._C, '_jit_pass_' + name)(graph) + if result is not None and not isinstance(result, bool): + graph = result + torch._C._jit_pass_lint(graph) + + if set_graph: + trace.set_graph(graph) + return graph + + def get_frame_vars(self, frames_up): + frame = inspect.currentframe() + if not frame: + raise RuntimeError("failed to inspect frame") + i = 0 + while i < frames_up + 1: + frame = frame.f_back + if not frame: + raise RuntimeError("failed to get frame") + i += 1 + defined_vars: Dict[str, Any] = {} + defined_vars.update(frame.f_locals) + defined_vars.update(frame.f_globals) + return defined_vars + + def assertRaisesRegexWithHighlight(self, exception, regex, highlight): + return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight) + + def checkScriptRaisesRegex(self, script, inputs, exception, regex, + name=None, outputs=None, capture_output=False, + frames_up=1, profiling=ProfilingMode.PROFILING): + """ + Checks that a given function will throw the correct exception, + when executed with normal python, the string frontend, and the + AST frontend. Logic taken from `checkScript` (see comments there + for details) + """ + with enable_profiling_mode_for_profiling_tests(): + # Normal Python + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + else: + python_fn = script + + python_fn(*inputs) + + # String frontend + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + string_frontend = getattr(cu, name) + else: + source = textwrap.dedent(inspect.getsource(script)) + cu = torch.jit.CompilationUnit(source, _frames_up=frames_up) + string_frontend = getattr(cu, script.__name__) + + string_frontend(*inputs) + + # Python AST frontend + if not isinstance(script, str): + with self.assertRaisesRegex(exception, regex): + ge = torch.jit.script(python_fn) + ge(*inputs) + + def checkBailouts(self, model, inputs, expected): + state = model.get_debug_state() + plan = get_execution_plan(state) + num_bailouts = plan.code.num_bailouts() + for i in range(0, num_bailouts): + plan.code.request_bailout(i) + bailout_outputs = model(*inputs) + self.assertEqual(bailout_outputs, expected) + + def checkScript(self, + script, + inputs, + name='func', + optimize=True, + inputs_requires_grad=False, + capture_output=False, + frames_up=1, + profiling=ProfilingMode.PROFILING, + atol=None, + rtol=None): + """ + Checks that a given script generates the same output as the Python + version using the given inputs. 
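+
+        When ``script`` is a string it is compiled via torch.jit.CompilationUnit and
+        the reference Python function is recovered by exec-ing the source; when it is
+        a callable, both the string frontend (via its extracted source) and
+        torch.jit.script are exercised before outputs are compared.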
+ """ + with torch.jit.optimized_execution(optimize): + with enable_profiling_mode_for_profiling_tests(): + extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs) + if isinstance(script, str): + # Compile the string to a Script function + # with enable_profiling_mode(): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + + # Execute the Python function so we can run it later and get its + # outputs + + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + scripted_fn = getattr(cu, name) + else: + + # Check the string frontend first + source = textwrap.dedent(inspect.getsource(script)) + self.checkScript( + source, + inputs, + script.__name__, + optimize=optimize, + inputs_requires_grad=inputs_requires_grad, + capture_output=capture_output, + profiling=profiling, + frames_up=2) + + # Continue checking the Python frontend + scripted_fn = torch.jit.script(script, _frames_up=1) + python_fn = script + + if inputs_requires_grad: + recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs) + else: + recording_inputs = inputs + + if capture_output: + with self.capture_stdout() as script_stdout: + script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as opt_script_stdout: + opt_script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as _python_stdout: + python_outputs = python_fn(*inputs) + if not IS_WINDOWS: + self.assertExpected(script_stdout[0], subname='stdout') + self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol) + else: + # profiling run + script_outputs = scripted_fn(*recording_inputs) + if inputs_requires_grad or extra_profile_runs: + opt_script_outputs = scripted_fn(*recording_inputs) + # optimized run + opt_script_outputs = scripted_fn(*recording_inputs) + if TEST_BAILOUTS: + self.checkBailouts(scripted_fn, inputs, opt_script_outputs) + python_outputs = python_fn(*inputs) + self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol) + self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol) + return scripted_fn + + def checkTrace(self, func, reference_tensors, input_tensors=None, + drop=None, allow_unused=False, verbose=False, + inputs_require_grads=True, check_tolerance=1e-5, export_import=True, + _force_outplace=False, grad_atol=None, grad_rtol=None): + + # TODO: check gradients for parameters, not just inputs + def allSum(vs): + # drop allows us to remove some values from ever being used + # to test unused outputs + if drop is not None: + vs = vs[:-drop] + # we don't want all the grad for all the outputs to be the same + # so we multiply each by a constant + return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None) + if input_tensors is None: + input_tensors = reference_tensors + + def flatten_inputs(inputs): + def input_reduce(input, fn, acc): + if isinstance(input, torch.Tensor): + fn(input, acc) + elif isinstance(input, dict): + reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc) + else: + reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc) + return acc + return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), [])) + + nograd_inputs = reference_tensors + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors) + flattened_recording_inputs = 
flatten_inputs(recording_inputs) + else: + recording_inputs = reference_tensors + + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `checkTrace` already does all the checks + # against python function + ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance, + _force_outplace=_force_outplace, check_trace=False) + + if export_import: + ge = self.getExportImportCopy(ge) + + if verbose: + print(ge.graph) + + # test no gradients case + outputs = func(*nograd_inputs) + outputs_ge = ge(*nograd_inputs) + self.assertEqual(outputs, outputs_ge) + + # test gradients case + outputs = func(*recording_inputs) + if inputs_require_grads: + grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs, + allow_unused=allow_unused) + + outputs_ge = ge(*recording_inputs) + if inputs_require_grads: + grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + + # test the grad grad case + outputs = func(*recording_inputs) + l1 = allSum(outputs) + if inputs_require_grads: + grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True, + allow_unused=allow_unused) + if inputs_require_grads: + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused) + + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + + outputs_ge = ge(*recording_inputs) + l1_ge = allSum(outputs_ge) + if inputs_require_grads: + grads_ge = torch.autograd.grad( + l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused) + + if inputs_require_grads: + l2_ge = (allSum(grads_ge) * l1_ge) + grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + for g2, g2_ge in zip(grads2, grads2_ge): + if g2 is None and g2_ge is None: + continue + self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4) + + return ge + + def checkModule(self, nn_module, args): + """ + Check that a nn.Module's results in Script mode match eager and that it + can be exported + """ + sm = torch.jit.script(nn_module) + + with freeze_rng_state(): + eager_out = nn_module(*args) + + with freeze_rng_state(): + script_out = sm(*args) + + self.assertEqual(eager_out, script_out) + self.assertExportImportModule(sm, args) + + return sm + +class NoTracerWarnContextManager: + def __enter__(self): + self.prev = torch._C._jit_get_tracer_state_warn() + torch._C._jit_set_tracer_state_warn(False) + + def __exit__(self, *args): + torch._C._jit_set_tracer_state_warn(self.prev) + +@contextmanager +def inline_everything_mode(should_inline): + old = torch._C._jit_get_inline_everything_mode() + torch._C._jit_set_inline_everything_mode(should_inline) + try: + yield + finally: + torch._C._jit_set_inline_everything_mode(old) + +@contextmanager +def set_fusion_group_inlining(inlining): + old = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(inlining) + try: + yield + finally: + torch._C._debug_set_fusion_group_inlining(old) + +# note: not re-entrant, use unnested only +@contextmanager +def 
disable_autodiff_subgraph_inlining(enabled=True): + torch._C._debug_set_autodiff_subgraph_inlining(not enabled) + try: + yield + finally: + torch._C._debug_set_autodiff_subgraph_inlining(True) + +def _inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(True): + fn(*args, **kwargs) + return wrapper + +# this exists for forward compatibility reasons temporarily. +# TODO(suo) remove +def _tmp_donotuse_dont_inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(False): + fn(*args, **kwargs) + return wrapper + +# make it easy to quicky define/trace a function for these tests +def _trace(*args, **kwargs): + def wrapper(func): + return torch.jit.trace(func, args, **kwargs) + return wrapper + + +def enable_cpu_fuser(fn): + def wrapper(*args, **kwargs): + torch._C._jit_override_can_fuse_on_cpu_legacy(True) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_set_te_must_use_llvm_cpu(False) + try: + fn(*args, **kwargs) + finally: + torch._C._jit_override_can_fuse_on_cpu_legacy(False) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_set_te_must_use_llvm_cpu(True) + return wrapper + + +def enable_cpu_fuser_if(cond): + if cond: + return enable_cpu_fuser + else: + def noop_fuser(fn): + def wrapper(*args, **kwargs): + return fn(*args, **kwargs) + return wrapper + return noop_fuser + +def get_forward(c): + return c._get_method('forward') + +def get_forward_graph(c): + return c._get_method('forward').graph + +def get_module_method(m, module, method): + return m._c.getattr(module)._get_method(method) + +def attrs_with_prefix(module, prefix): + return [x for x, _ in module._modules._c.items() + if x.startswith(prefix)] + +def warmup_backward(f, *args): + profiling_count = 3 + results = [] + for i in range(profiling_count): + if len(args) > 0: + r = torch.autograd.grad(f, *args) + results.append(r) + else: + f.backward(retain_graph=True) + + return results + +# TODO: Remove me once https://bugs.python.org/issue42666 is resolved +def make_global(*args): + for arg in args: + setattr(sys.modules[arg.__module__], arg.__name__, arg) + +# Helper function to eval Python3 code without causing a syntax error for +# this file under py2 +def _get_py3_code(code, fn_name): + with tempfile.TemporaryDirectory() as tmp_dir: + script_path = os.path.join(tmp_dir, 'script.py') + with open(script_path, 'w') as f: + f.write(code) + spec = importlib.util.spec_from_file_location(fn_name, script_path) + module = importlib.util.module_from_spec(spec) + loader = spec.loader + assert isinstance(loader, Loader) # Assert type to meet MyPy requirement + loader.exec_module(module) + fn = getattr(module, fn_name) + return fn + +class TensorExprTestOptions: + def __init__(self): + self.old_profiling_executor = torch._C._jit_set_profiling_executor(True) + self.old_profiling_mode = torch._C._get_graph_executor_optimize(True) + + self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu() + self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu() + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled() + torch._C._jit_set_texpr_fuser_enabled(True) + self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(False) + self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu() + torch._C._jit_set_te_must_use_llvm_cpu(False) + + def restore(self): + 
torch._C._jit_set_profiling_executor(self.old_profiling_executor) + torch._C._get_graph_executor_optimize(self.old_profiling_mode) + + torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state) + torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state) + torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state) + torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining) + torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu) + +def clone_inputs(args): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(arg.detach().clone()) + elif is_iterable_of_tensors(arg): + inputs.append([t.detach().clone() for t in arg]) + else: + inputs.append(arg) + + return inputs + +def get_traced_sample_variant_pairs(device, dtype, op): + # tuples of (variant, sample) + outputs: List[Tuple[Any, Any]] = [] + + samples = op.sample_inputs(device, dtype) + + # Acquires variants to test + func = op.get_op() + method = op.get_method() + variants = { + # TODO: inplace tests currently fail, fix and add inplace variant + 'function': func, 'method': method, + } + + # TODO: find better way to standardize on op registration itself.. + has_fake_function = op.name in ["resize_", 'resize_as_'] + + if has_fake_function: + variants = {'method': getattr(torch.Tensor, op.name)} + + # In eager mode, these ops can take (Tensor, bool) args; but in + # JIT they can only take (Tensor, Scalar), and bool is not a + # scalar in the JIT type system. So to test these in JIT, the bool + # is converted to an int for the test. + ops_with_unsupported_bool_args = [ + { + "name": "div_floor_rounding", + "arg_idx": [0], + }, + { + "name": "div_no_rounding_mode", + "arg_idx": [0], + }, + { + "name": "div_trunc_rounding", + "arg_idx": [0], + }, + { + "name": "index_fill", + "arg_idx": [2], + }, + { + "name": "full_like", + "arg_idx": [0], + }, + { + "name": "mul", + "arg_idx": [0], + }, + { + "name": "new_full", + "arg_idx": [1], + }, + ] + + # doesn't support tracing + if has_fake_function: + return outputs + + for sample in samples: + for variant in variants.values(): + if variant is None: + continue + + if is_lambda(variant): + continue + + matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args) + for op_data in matching_ops: + for idx in op_data["arg_idx"]: + args = list(sample.args) + if len(sample.args) > idx and isinstance(sample.args[idx], bool): + args[idx] = int(args[idx]) + sample.args = tuple(args) + + outputs.append((variant, sample)) + + return outputs + +# types.LambdaType gave false positives +def is_lambda(lamb): + LAMBDA = lambda: 0 # noqa: E731 + return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e3efd9c7376eb8ac7434ea4e05d03b0ec9a9768c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py @@ -0,0 +1,208 @@ +# mypy: ignore-errors + +import torch._dynamo.test_case +import unittest.mock +import os +import contextlib +import torch._logging +import torch._logging._internal +from torch._dynamo.utils import LazyString +import logging +import io + +@contextlib.contextmanager +def preserve_log_state(): + prev_state = torch._logging._internal._get_log_state() + 
torch._logging._internal._set_log_state(torch._logging._internal.LogState()) + try: + yield + finally: + torch._logging._internal._set_log_state(prev_state) + torch._logging._internal._init_logs() + +def log_settings(settings): + exit_stack = contextlib.ExitStack() + settings_patch = unittest.mock.patch.dict(os.environ, {"TORCH_LOGS": settings}) + exit_stack.enter_context(preserve_log_state()) + exit_stack.enter_context(settings_patch) + torch._logging._internal._init_logs() + return exit_stack + +def log_api(**kwargs): + exit_stack = contextlib.ExitStack() + exit_stack.enter_context(preserve_log_state()) + torch._logging.set_logs(**kwargs) + return exit_stack + + +def kwargs_to_settings(**kwargs): + INT_TO_VERBOSITY = {10: "+", 20: "", 40: "-"} + + settings = [] + + def append_setting(name, level): + if isinstance(name, str) and isinstance(level, int) and level in INT_TO_VERBOSITY: + settings.append(INT_TO_VERBOSITY[level] + name) + return + else: + raise ValueError("Invalid value for setting") + + for name, val in kwargs.items(): + if isinstance(val, bool): + settings.append(name) + elif isinstance(val, int): + append_setting(name, val) + elif isinstance(val, dict) and name == "modules": + for module_qname, level in val.items(): + append_setting(module_qname, level) + else: + raise ValueError("Invalid value for setting") + + return ",".join(settings) + + +# Note on testing strategy: +# This class does two things: +# 1. Runs two versions of a test: +# 1a. patches the env var log settings to some specific value +# 1b. calls torch._logging.set_logs(..) +# 2. patches the emit method of each setup handler to gather records +# that are emitted to each console stream +# 3. passes a ref to the gathered records to each test case for checking +# +# The goal of this testing in general is to ensure that given some settings env var +# that the logs are setup correctly and capturing the correct records. 
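+#
+# Illustrative sketch (not part of the original file): on a LoggingTestCase
+# subclass, a test wrapped by make_logging_test receives the gathered records
+# and can assert on them, roughly:
+#
+#     @make_logging_test(dynamo=logging.DEBUG)
+#     def test_emits_debug_records(self, records):
+#         torch.compile(lambda x: x + 1)(torch.ones(2))
+#         self.assertGreater(len(records), 0)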
+def make_logging_test(**kwargs): + def wrapper(fn): + def test_fn(self): + + torch._dynamo.reset() + records = [] + # run with env var + if len(kwargs) == 0: + with self._handler_watcher(records): + fn(self, records) + else: + with log_settings(kwargs_to_settings(**kwargs)), self._handler_watcher(records): + fn(self, records) + + # run with API + torch._dynamo.reset() + records.clear() + with log_api(**kwargs), self._handler_watcher(records): + fn(self, records) + + + return test_fn + + return wrapper + +def make_settings_test(settings): + def wrapper(fn): + def test_fn(self): + torch._dynamo.reset() + records = [] + # run with env var + with log_settings(settings), self._handler_watcher(records): + fn(self, records) + + return test_fn + + return wrapper + +class LoggingTestCase(torch._dynamo.test_case.TestCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._exit_stack.enter_context( + unittest.mock.patch.dict(os.environ, {"___LOG_TESTING": ""}) + ) + cls._exit_stack.enter_context( + unittest.mock.patch("torch._dynamo.config.suppress_errors", True) + ) + cls._exit_stack.enter_context( + unittest.mock.patch("torch._dynamo.config.verbose", False) + ) + + @classmethod + def tearDownClass(cls): + cls._exit_stack.close() + torch._logging._internal.log_state.clear() + torch._logging._init_logs() + + def getRecord(self, records, m): + record = None + for r in records: + # NB: not r.msg because it looks like 3.11 changed how they + # structure log records + if m in r.getMessage(): + self.assertIsNone( + record, + msg=LazyString( + lambda: f"multiple matching records: {record} and {r} among {records}" + ), + ) + record = r + if record is None: + self.fail(f"did not find record with {m} among {records}") + return record + + # This patches the emit method of each handler to gather records + # as they are emitted + def _handler_watcher(self, record_list): + exit_stack = contextlib.ExitStack() + + def emit_post_hook(record): + nonlocal record_list + record_list.append(record) + + # registered logs are the only ones with handlers, so patch those + for log_qname in torch._logging._internal.log_registry.get_log_qnames(): + logger = logging.getLogger(log_qname) + num_handlers = len(logger.handlers) + self.assertLessEqual( + num_handlers, + 2, + "All pt2 loggers should only have at most two handlers (debug artifacts and messages above debug level).", + ) + + self.assertGreater(num_handlers, 0, "All pt2 loggers should have more than zero handlers") + + for handler in logger.handlers: + old_emit = handler.emit + + def new_emit(record): + old_emit(record) + emit_post_hook(record) + + exit_stack.enter_context( + unittest.mock.patch.object(handler, "emit", new_emit) + ) + + return exit_stack + + +def logs_to_string(module, log_option): + """Example: + logs_to_string("torch._inductor.compile_fx", "post_grad_graphs") + returns the output of TORCH_LOGS="post_grad_graphs" from the + torch._inductor.compile_fx module. 
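+
+    A rough usage sketch (illustrative, reusing the example arguments above):
+
+        log_stream, ctx = logs_to_string("torch._inductor.compile_fx", "post_grad_graphs")
+        with ctx():
+            ...  # run the compilation being inspected
+        printed_graphs = log_stream.getvalue()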
+ """ + log_stream = io.StringIO() + handler = logging.StreamHandler(stream=log_stream) + + @contextlib.contextmanager + def tmp_redirect_logs(): + try: + logger = torch._logging.getArtifactLogger(module, log_option) + logger.addHandler(handler) + yield + finally: + logger.removeHandler(handler) + + def ctx_manager(): + exit_stack = log_settings(log_option) + exit_stack.enter_context(tmp_redirect_logs()) + return exit_stack + + return log_stream, ctx_manager diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97c38f3560625213fbd59d09a9cfd22bad26ba04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py @@ -0,0 +1,4 @@ +# mypy: ignore-errors + +import torch.testing._internal.opinfo.core +import torch.testing._internal.opinfo.definitions diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbb8107067a9134e0ab5102859faec976dc7a713 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58dc9bcaebf5591d81abac07cdb22144e6cda31e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6182088c512bc1ccf2aa3e74b86a7db7c53dbe88 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ead53efc7e9506ec37efc712482887c3aace20c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d1d992952698229ba73d922df15bb9f356ce70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py @@ -0,0 +1,2864 @@ +# mypy: ignore-errors + +import collections +import collections.abc +import math +import operator +import unittest +from dataclasses import asdict, dataclass +from enum import Enum +from functools import partial +from itertools import product +from typing import Any, Callable, Iterable, List, Optional, Tuple + +from torchgen.utils import dataclass_repr + 
+import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + skipCPUIfNoFFT, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, +) +from torch.testing._internal.common_utils import ( + is_iterable_of_tensors, + noncontiguous_like, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, + TrackedInputIter, +) +from torch.testing._internal.opinfo import utils + +# Reasonable testing sizes for dimensions +L = 20 +M = 10 +S = 5 +XS = 3 + +# Unique value to distinguish default from anything else +_NOTHING = object() + + +# Extension of getattr to support qualified names +# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm +def _getattr_qual(obj, name, default=_NOTHING): + try: + for path in name.split("."): + obj = getattr(obj, path) + return obj + except AttributeError: + if default is not _NOTHING: + return default + else: + raise + + +class DecorateInfo: + """Describes which test, or type of tests, should be wrapped in the given + decorators when testing an operator. Any test that matches all provided + arguments will be decorated. The decorators will only be applied if the + active_if argument is True.""" + + __slots__ = [ + "decorators", + "cls_name", + "test_name", + "device_type", + "dtypes", + "active_if", + ] + + def __init__( + self, + decorators, + cls_name=None, + test_name=None, + *, + device_type=None, + dtypes=None, + active_if=True, + ): + self.decorators = ( + list(decorators) + if isinstance(decorators, collections.abc.Sequence) + else [decorators] + ) + self.cls_name = cls_name + self.test_name = test_name + self.device_type = device_type + self.dtypes = dtypes + self.active_if = active_if + + # Validate dtypes + if self.dtypes is not None: + for dtype in self.dtypes: + assert isinstance(dtype, torch.dtype) + + def is_active(self, cls_name, test_name, device_type, dtype, param_kwargs): + return ( + self.active_if + and (self.cls_name is None or self.cls_name == cls_name) + and (self.test_name is None or self.test_name == test_name) + and (self.device_type is None or self.device_type == device_type) + and (self.dtypes is None or dtype in self.dtypes) + # Support callables over kwargs to determine if the decorator is active. + and ( + self.active_if(param_kwargs) + if isinstance(self.active_if, Callable) + else self.active_if + ) + ) + + +# FIXME +# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying +# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor +# or TensorList, however. +class SampleInput: + """Represents sample inputs to a function.""" + + __slots__ = [ + "input", + "args", + "kwargs", + "output_process_fn_grad", + "broadcasts_input", + "name", + ] + + def __init__( + self, + input, + *var_args, + args=None, + kwargs=None, + output_process_fn_grad=None, + broadcasts_input=None, + name=None, + **var_kwargs, + ): + # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]). + # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...). 
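+        # Illustrative example (not from the original comment): a sample exercising
+        # torch.add(t, 2) could be constructed as SampleInput(t, args=(2,)).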
+ self.input = input + + # Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as + # SampleInput(input, *args, **kwargs) but not to mix the two forms + if args is not None or kwargs is not None: + assert ( + not var_args and not var_kwargs + ), """ +A SampleInput can be constructed "naturally" with *args and **kwargs or by +explicitly setting the "args" and "kwargs" parameters, but the two +methods of construction cannot be mixed!""" + elif len(var_args) or len(var_kwargs): + assert ( + output_process_fn_grad is None + and broadcasts_input is None + and name is None + ), """ +A SampleInput constructed "naturally" with *args and **kwargs +cannot specify additional metadata in keyword arguments""" + + self.args = args if args is not None else var_args + assert isinstance(self.args, tuple) + self.kwargs = kwargs if kwargs is not None else var_kwargs + assert isinstance(self.kwargs, dict) + + self.output_process_fn_grad = ( + output_process_fn_grad + if output_process_fn_grad is not None + else lambda x: x + ) + self.name = name if name is not None else "" + + # Specifies if `self.input` is broadcasted or not, + # given that the operator supports broadcasting. + # This field is used to verify the behavior for inplace variant. + # + # If a SampleInput is marked with `broadcasts_input=True`, + # it is verified that we get a `RuntimeError` with this sample, + # and inplace variant. Also inplace grad{grad} tests are skipped, + # for such inputs (as they will error out otherwise). + self.broadcasts_input = ( + broadcasts_input if broadcasts_input is not None else False + ) + + def with_metadata( + self, *, output_process_fn_grad=None, broadcasts_input=None, name=None + ): + if output_process_fn_grad is not None: + self.output_process_fn_grad = output_process_fn_grad + if broadcasts_input is not None: + self.broadcasts_input = broadcasts_input + if name is not None: + self.name = name + return self + + def _repr_helper(self, formatter): + # Helper function to return the details of the SampleInput as `str` + # It consolidates all the fields of SampleInput and allows, + # formatting the fields like `input`, `args`, etc with `formatter` + # callable to customize the representation. + # Look at `summary` method for example. + arguments = [ + f"input={formatter(self.input)}", + f"args={formatter(self.args)}", + f"kwargs={formatter(self.kwargs)}", + f"broadcasts_input={self.broadcasts_input}", + f"name={repr(self.name)}", + ] + + return f'SampleInput({", ".join(a for a in arguments if a is not None)})' + + def __repr__(self): + return self._repr_helper(lambda x: x) + + def summary(self): + # Returns the SampleInput details in a more + # friendly format. + # It formats `Tensor` and `TensorList` + # in a more condensed representation. + def formatter(arg): + # Format any instance of `Tensor` (standalone, in list, or in dict) + # by Tensor[TensorShape] + # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4] + if isinstance(arg, torch.Tensor): + shape = str(tuple(arg.shape)) + dtype = str(arg.dtype) + device = str(arg.device) + contiguity_suffix = "" + # NB: sparse CSR tensors annoyingly return is_sparse=False + is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr + if not is_sparse and not arg.is_contiguous(): + contiguity_suffix = ", contiguous=False" + return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]' + elif isinstance(arg, dict): + return {k: formatter(v) for k, v in arg.items()} + elif is_iterable_of_tensors(arg): + return "TensorList[" + ", ".join(map(formatter, arg)) + "]" + elif isinstance(arg, (list, tuple)): # Handle list, tuple + return "(" + ",".join(map(formatter, arg)) + ")" + + return repr(arg) + + return self._repr_helper(formatter) + + # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput + def transform(self, f): + def tt(t): + def _tt(t): + with torch.no_grad(): + return f(t) + + if isinstance(t, torch.Tensor): + return _tt(t) + elif isinstance(t, torch.dtype): + return _tt(t) + elif isinstance(t, list): + return list(map(tt, t)) + elif isinstance(t, tuple): + return tuple(map(tt, t)) + elif isinstance(t, dict): + return {k: tt(v) for k, v in t.items()} + else: + return t + + sample_tt_input, tt_args, tt_kwargs = ( + tt(self.input), + tt(self.args), + tt(self.kwargs), + ) + + # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! + return SampleInput( + sample_tt_input, + args=tt_args, + kwargs=tt_kwargs, + output_process_fn_grad=self.output_process_fn_grad, + broadcasts_input=self.broadcasts_input, + name=self.name + "_transformed", + ) + + # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) + # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them + # Converts dtypes by remapping them using torch_to_numpy_dtype_dict + def numpy(self): + def to_numpy(t): + if isinstance(t, torch.Tensor): + if t.dtype is torch.bfloat16: + return t.detach().cpu().to(torch.float32).numpy() + if t.dtype is torch.chalf: + return t.detach().cpu().to(torch.cfloat).numpy() + return t.detach().cpu().numpy() + elif isinstance(t, torch.dtype): + return torch_to_numpy_dtype_dict[t] + + return t + + return self.transform(to_numpy) + + def noncontiguous(self): + def to_noncontiguous(t): + if isinstance(t, torch.Tensor): + return noncontiguous_like(t) + elif isinstance(t, torch.dtype): + return t + + return t + + return self.transform(to_noncontiguous) + + +NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"]) + + +class ErrorInput: + """ + A SampleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["sample_input", "error_type", "error_regex"] + + def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): + self.sample_input = sample_input + self.error_type = error_type + self.error_regex = error_regex + + +class AliasInfo: + """Class holds alias information. 
For example, torch.abs -> + torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_ + """ + + def __init__(self, alias_name): + self.name = alias_name + self.op = _getattr_qual(torch, alias_name) + self.method_variant = getattr(torch.Tensor, alias_name, None) + self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None) + + def __call__(self, *args, **kwargs): + return self.op(*args, **kwargs) + + +# Note [OpInfos] +# ~~~~~~~~~~~~~~ +# +# The majority of this note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261) +# See also: "Writing Test Templates" in common_device_type.py to learn how to +# parametrize a test template using OpInfos. +# See also: PyTorch's GitHub wiki on running and writing tests +# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests +# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py +# +# An OpInfo is a collection of metadata related to a PyTorch operator. This +# metadata is used to generate tests that validate properties of the operator, +# like if it implements the correct gradient formula. +# +# WHY OPINFOS? +# ~~~~~~~~~~~~ +# +# OpInfos are principally intended to do three things: +# +# 1) to allow systematic testing over all PyTorch's operators +# 2) to simplify operating testing by autogenerating many tests +# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test +# against every PyTorch operator +# +# All these goals are still a work in progress. Not every operator has an +# OpInfo, and some operator tests that could be automatically generated +# still have to be written manually. +# +# It's helpful to understand that OpInfos are both about test simplification and +# modularity. PyTorch is a complicated framework with many interrelated systems, +# too many for any one person to keep track of. An OpInfo can be thought of as the +# interface between an operator implementer and those other systems. Instead of +# requiring the implementer of torch.foo understand how to test its forward +# mode AD or NNC support that's typically handled automatically just by +# defining an OpInfo. +# +# It's often surprising to OpInfo writers that just implementing an OpInfo +# typically can't verify an operator is actually implemented correctly: +# +# "If an OpInfo doesn't validate my op works as expected, what's the point +# of it?" +# +# But the point of is the above. OpInfos are intended to let you focus on testing +# the operator logic you're familiar with instead of having to write tests for +# how the operator interacts with each of PyTorch's many systems. +# +# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES +# validate your op works as expected, but that's only in special +# cases. See below for details. +# +# WHAT'S AN OPINFO? +# ~~~~~~~~~~~~~~~~~ +# +# So what is an OpInfo? It's a Python class that describes an operator's properties, +# like which dtypes it supports on the CPU and whether it has any aliases. +# These properties can be divided into three categories: +# +# 1) Metadata describing the operator, like the operator's name and if it +# "supports" the out kwarg. +# 2) Test directives, like "skips" that tell the test suite to skip some +# tests. +# 3) A "sample inputs" function that generates valid inputs for the operator. +# +# OpInfo attributes are described in more detail below. 
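+#
+# As a rough, illustrative sketch (not an actual entry from this file), an OpInfo
+# in the operator database looks something like:
+#
+#     OpInfo('floor',
+#            ref=np.floor,
+#            dtypes=floating_types(),
+#            sample_inputs_func=sample_inputs_elementwise_unary)
+#
+# where `sample_inputs_elementwise_unary` stands in for a sample inputs function
+# with the signature described in the next section.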
+# +# THE SAMPLE INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The "sample inputs" function merits special elaboration. This function is +# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator +# as a black box. There's no structure for the test to understand or exploit. +# Without "sample inputs" it wouldn't even know how to call the OpInfo's +# operator. The sample input function saves the day by providing different +# "SampleInputs" that can be used to call the operator. A sample input +# function should have the following signature: +# +# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs): +# +# And should return an iterable of SampleInputs (see the class description +# above). Each SampleInput defines an "input", "args", "kwargs", an +# "output_process_fn_grad" function, the "broadcasts_input" bool and a +# "name". +# +# All the "sample_inputs" functions are invoked within a `torch.no_grad()` +# environment for efficiency and correctness. As such remember to set the +# "requires_grad" flag on the inputs **after** performing any transformations +# on them. +# +# The "input" is the first argument to the operator, or the tensor that +# the method or inplace variants of the operator should be called on, and +# should be on the requested device, of the requested dtype, and its +# requires_grad attribute should be set to the requires_grad argument. +# +# "args" should contain positional arguments, and "kwargs" keyword arguments. +# +# "output_process_fn_grad" has an interesting name. It's a function that maps +# the operator's output (when given the input, args, and kwargs) to the +# portion of the output to gradcheck. For example, consider an operator +# like torch.linalg.slogdet +# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html). +# This operator returns a tuple of two tensors, but the first tensor +# cannot be backwarded through. Its "output_process_fn_grad" filters +# this output tuple to just the second argument, which we can call backward +# on. Functions that produce a single tensor can ignore this argument. +# +# "broadcasts_input" is a bool indicated if the SampleInput causes the operator +# to broadcast the "input" argument. This is important for tests to understand +# because inplace variants of operations throw a runtime error if they +# would broadcast their input arguments, so tests that work with inplace +# variants filter SampleInputs that broadcast their input. +# +# "name" is a string that's just used for debugging. It appears when printing +# the SampleInput. +# +# Sample inputs are designed to be used with many tests, some +# that are very time consuming, so they should be a small +# set with small tensors. An elaborated set of sample inputs +# can be specified using the "reference_inputs_func" attribute. +# The "reference inputs" for an operation are an extended +# set of sample inputs that can more exhausively test an +# operator. They are used by only a few tests that are careful +# not to take too long to run. Adding reference inputs +# is highly encouraged! +# +# THE (OPTIONAL) ERROR INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# OpInfos may optionally specify "error inputs" through an error function. If +# specified test_errors in test_ops.py will call the op with these inputs +# and validate that the desired error is thrown. +# +# Error inputs automate a common testing pattern where multiple inputs are +# passed to an operation and the errors they thrown are reviewed. 
Tests +# written in this style should be ported to the new OpInfo pattern. +# +# Error inputs are specified using the ErrorInputs class, which contains +# a SampleInput (see above) and data about the expected error. +# +# OPINFO FILE ORGANIZATION +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# +# All OpInfos are currently defined in this file. Most OpInfo tests are defined +# in test_ops.py, but some system-specific tests are defined in those +# systems' test files, and subclass-specific tests are defined in the test +# file that corresponds to that subclass (see the below). +# Expect a reorganization in the future. +# +# WHAT'S TESTED? +# ~~~~~~~~~~~~~~ +# +# Every OpInfo in the op_db sequence has the following properties validated in +# test_ops.py: +# +# - that its supported dtypes are specified correctly +# - that the operation produces the same results when called with noncontiguous inputs +# - that it supports the out= argument properly (if it allows out=), +# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch +# - that it works with the conjugate view bit properly +# - that its function, method, and inplace variants perform the same operation +# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all +# do the same thing). +# - that its inplace variant preserves the input's storage +# - that its gradient formula is implemented correctly, and that it supports +# gradgrad and complex grad and gradgrad and forward mode AD properly for +# the op's function and inplace variants (method variants are skipped +# to reduce test time). +# - that the operation performs the same operation when traced or scripted +# using the jit +# - that the operation is autodifferentiated by the jit as expected +# - that the operator's aliases, if any, perform the same operation and that +# the jit understands the alias +# - that the operator throws the correct errors (if error_inputs is defined) +# - that the operator produces the same results as a NumPy reference (if ref is defined) +# - that the operator produces the same results as a NumPy reference on an extended +# set of "reference inputs" (if both ref and reference_inputs_func are defined) +# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only +# ref is defined, because they effectively autogenerate reference inputs) +# - that the operator works on different CUDA devices +# +# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py, +# and test_fx.py. These tests validate that operators work with NNC and FX +# as expected. +# +# For performance, some of the above tests may only run on the first +# SampleInput returned by an OpInfo's sample input function. +# +# In addition to these tests, some subclasses (discussed in the next section) +# define additional tests. +# +# Critically, as mentioned above, what's not necessarily tested is that the operator +# works as expected. When implementing an OpInfo an engineer must still +# typically write one or more tests validating the operator's behavior. +# The exception to this is if reference testing is sufficient, or if +# the operation belongs to an OpInfo subclass that has more exhaustive +# operator testing. Elementwise unary and elementwise binary operators, +# in particular, usually don't require additional testing beyond +# writing an Opinfo. +# +# +# OPINFO (SUB)CLASSES +# ~~~~~~~~~~~~~~~~~~~ +# +# In addition to the OpInfo base class there are several specialized OpInfo +# subclasses. 
For example, the UnaryUfuncInfo subclass is used for +# unary elementwise operations. These operations have a common structure +# that test_unary_ufuncs.py exploits with additional automated testing. +# The automated testing in test_unary_ufuncs.py is so thorough, comparing +# the operator to a NumPy reference function on a plethora of values, that +# just implementing an OpInfo for a unary elementwise operation is often +# sufficient testing. +# +# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a +# very unique class of operations. These OpInfos aren't included in the +# op_db sequence and have their own tests. +# +# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience +# when writing OpInfos. +# +# TESTING A NEW OPERATOR +# ~~~~~~~~~~~~~~~~~~~~~~ +# +# If you're adding a new operator to any of the following namespaces: +# - torch +# - torch.fft +# - torch.linalg, +# - torch.special +# - torch.nn.functional +# then you should typically add an OpInfo for it. +# +# As mentioned a couple times above, implementing an OpInfo is not +# usually sufficient testing (unless the operator is a unary or binary elementwise +# operator). The OpInfo will only test the properties described in the +# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is +# implemented correctly. +# +# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to +# be consumed by a variety of systems it can be hard to understand how to +# deal with test failures or how to set the OpInfo metadata properly. +# +# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs +# function must be defined, and the operator's dtypes must be specified. +# Once that's done you should run the operator's tests in test_ops.py +# (these can be filtered using the "-k" argument in pytest). Tests that +# fail should provide an error message that describes what to change about +# your OpInfo. You don't need to worry about changing an OpInfo's default +# values unless a test yells at you. +# +# Similarly, if you're writing a test that consumes OpInfos then it's critical +# your test provides a clear error message describing what to do when it +# fails. You should not assume the OpInfo implementer is familiar with your +# system. +# +# If you see a confusing error message while developing an OpInfo then please +# file an issue describing what happened. +# +# This trial-and-error approach to writing an OpInfo can be frustrating, +# but it's probably necessary as long as OpInfos don't require +# learning about all the systems that consume them. One thing that can help +# is the get_supported_dtypes() function defined in utils.py. This +# function can be used to programmatically specify the dtypes an operator +# supports, and is especially useful if writing an OpInfo on a machine +# without a CUDA device. See its documentation for more details. +# +# THE FUTURE OF OPINFOS AND OPINFO TESTING +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# In the future we expect OpInfo coverage to improve and cover +# the great majority of PyTorch's (public) operators. +# + + +# Classes and methods for the operator database +@dataclass +class OpInfo: + """Operator information and helper functions for acquiring it.""" + + # the string name of the function + name: str + + # An optional reference function that accepts ndarrays (AKA "NumPy arrays"). 
+ # If given, the op will be compared with its reference on each of its sample inputs. + ref: Optional[Callable] = None + + # the following metadata describes the operator, its variants, and its aliases, if any + + # iterable of aliases, e.g. ("absolute",) for torch.abs + aliases: Iterable = None + + # additional string to include in the test name + # this is useful when an op needs multiple OpInfos, + # like divide does, often because it's really several + # different ops behind the scenes + variant_test_name: str = "" + + # the function variant of the operation, populated as torch. if None + op: Callable = None + + # allows the method variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated method + # - if a Callable, then that callable should be the method associated with this operation + method_variant: Callable = _NOTHING + + # allows the inplace variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated inplace variant + # - if a Callable, then that callable should be the inplace variant associated with this operation + inplace_variant: Callable = _NOTHING + + # allows the operator variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated operator + # - if a Callable, then that callable should be the operator associated with this operation + operator_variant: Callable = _NOTHING + + # allows the inplace operator variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated inplace operator + # - if a Callable, then that callable should be the inplace operator associated with this operation + inplace_operator_variant: Callable = _NOTHING + + # the following metadata are test directives for skipping or modifying tests + + # information about which tests to skip + skips: Tuple = tuple() + + # decorators to apply to generated tests + decorators: Tuple = tuple() + + # the following are pointers to functions to generate certain classes of inputs + + # function to generate sample inputs with strided layouts + sample_inputs_func: Callable = None + + # function to generate a more thorough set of samples inputs with strided layouts + reference_inputs_func: Callable = None + + # function to generate inputs that will throw errors + error_inputs_func: Callable = None + + # function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors + error_inputs_sparse_func: Callable = None + + # function to generate sample inputs with sparse coo layouts + sample_inputs_sparse_coo_func: Callable = None + + # function to generate sample inputs with sparse csr layouts + sample_inputs_sparse_csr_func: Callable = None + + # function to generate sample inputs with sparse csc layouts + sample_inputs_sparse_csc_func: Callable = None + + # function to generate sample inputs with sparse bsr layouts + sample_inputs_sparse_bsr_func: Callable = None + + # function to generate sample inputs with sparse bsc layouts + sample_inputs_sparse_bsc_func: Callable = None + + # 
the following metadata relates to dtype support and is tested for correctness in test_ops.py + + # dtypes this function works with on the CPU, + # inherited by other device types that don't specify their own dtypes + dtypes: _dispatch_dtypes = None + + # the following dtypesIf... options override the dtypes value on their respective device types + + # dtypes this function is expected to work with on CUDA + dtypesIfCUDA: _dispatch_dtypes = None + + # dtypes this function is expected to work with on ROCM + dtypesIfROCM: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with + backward_dtypes: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with on CUDA + backward_dtypesIfCUDA: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with on ROCM + backward_dtypesIfROCM: _dispatch_dtypes = None + + # the following metadata describes the operators out= support + + # whether the op supports the out kwarg + # defaults to True, if the op does not allow the out kwarg or + # supports it incorrectly then test_out in test_ops.py should fail + supports_out: bool = True + + # the following metadata relates to autograd support + # whether the operation supports backward mode AD + # if true, gradient correctness is tested in test_ops.py + # using the op's sample inputs + supports_autograd: bool = True + + # whether the op supports second order gradients + # if true, gradgrad correctness is tested in test_ops.py + # defaults to support_autograd's value + # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below + supports_gradgrad: bool = None + + # whether the ops supports second order gradients via + # forward-over-reverse. If True, forward-over-reverse gradgrad correctness + # is tested. If False, test that forward grad is not implemented. + # Defaults to False. + supports_fwgrad_bwgrad: bool = False + + # whether the operation supports inplace autograd + # if true, tested in test_ops.py + # defaults to supports_autograd's value + supports_inplace_autograd: bool = None + + # Whether the operation support forward mode AD + # If the value is True, we check that the gradients are correct + # If the value is False, we test that forward grad is not implemented + supports_forward_ad: bool = False + + # Whether the operation has a varargs variant + # (e.g. functions like ones, zeros, methods like view, permute) + supports_varargs: bool = False + + # Whether the operation avoids materializing COW tensor inputs + supports_cow_input_no_materialize: bool = True + + # wrapper function for gradcheck + gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs) + + # whether to check batched grad when doing gradcheck + # defaults to support_autograd's value + check_batched_grad: bool = None + + # whether to check batched grad grad when doing gradgradcheck + # default's to support_gradgrad's value + check_batched_gradgrad: bool = None + + # whether to check batched forward grad when doing gradcheck + # defaults to the value of `supports_forward_ad` + check_batched_forward_grad: bool = None + + # whether to check batched forward grad when doing gradcheck + # defaults to the value of `check_batched_forward_grad` + check_inplace_batched_forward_grad: bool = None + + # tolerance for nondeterminism while performing gradcheck + gradcheck_nondet_tol: float = 0.0 + + # Whether to use the fast implmentation for gradcheck/gradgradcheck. 
+ # When set to None, defers to the default value provided by the wrapper + # function around gradcheck (testing._internal.common_utils.gradcheck) + gradcheck_fast_mode: bool = None + + # the following metadata relates to JIT support and is tested for correctness in test_ops.py + + # name of the corresponding aten:: operator + aten_name: str = None + + # if this is a composite implicit autograd op, the decomposed op + decomp_aten_name: Optional[str] = None + + # name of the corresponding aten:: operator for backwards + aten_backward_name: Optional[str] = None + + # if a op's aten::node is expected to be symbolically autodiffed + assert_autodiffed: bool = False + + # a list of strings with node names that are expected to be in a + # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'], + # default is populated to be ['aten::(name of Python operator)'] + autodiff_nonfusible_nodes: List[str] = None + + # a list of strings with node names that are expected to be in FusionGroups + # inside of DifferentiableGraphs when this operation is autodiffed. + # Ex: ['aten::add', 'aten::mm'], defaults to an empty list + # Note: currently no ops use fusible nodes + autodiff_fusible_nodes: List[str] = None + + # the following metadata relates to sparse support and is used in test_sparse.py + + # whether the op supports sparse coo inputs, defaults to False + # TODO: rename supports_sparse to supports_sparse_coo + supports_sparse: bool = None + + # only run tracing tests + supports_scripting: bool = True + + # if the operator can be traced + supports_tracing: bool = True + + # the following metadata relates to sparse compressed support and + # is used in test_sparse_csr.py and test_sparse.py + + # whether the op supports sparse csr inputs, defaults to False + supports_sparse_csr: bool = None + # whether the op supports sparse csc inputs, defaults to False + supports_sparse_csc: bool = None + # whether the op supports sparse bsr inputs, defaults to False + supports_sparse_bsr: bool = None + # whether the op supports sparse bsc inputs, defaults to False + supports_sparse_bsc: bool = None + + # whether the op promotes integer inputs to float + promotes_int_to_float: bool = False + + # the following metadata relates to complex support and is checked in test_ops.py + + test_conjugated_samples: bool = True + + test_neg_view: bool = True + + # assert that jit shape analysis fully propagates shape + assert_jit_shape_analysis: bool = False + + # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py + + supports_expanded_weight: bool = False + + is_factory_function: bool = False + + def __post_init__(self): + self._original_opinfo_args = asdict(self).copy() + + assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!" + + dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM) + + # Validates the dtypes are generated from the dispatch-related functions + for dtype_list in dtypes_args: + assert isinstance(dtype_list, (_dispatch_dtypes, type(None))) + + if self.aten_name is None: + self.aten_name = self.name + + # Attribute to verify dynamic_dtypes are used. + self.dynamic_dtypes = any( + isinstance(dtypes, utils._dynamic_dispatch_dtypes) for dtypes in dtypes_args + ) + + if self.dynamic_dtypes: + # Make sure `dtyesIfCUDA` is dynamic, if dynamic dispatch is used for CPU + # This is because, below we set dtypesIfCUDA to dtypes if they are None. 
+ assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), ( + f"To use dynamic dypes for operator {self.name}, " + "acquire the dtypes dynamically for argument `dtypesIfCUDA`." + "This is to ensure that CUDA dtypes are acquired correctly as they" + "differ from CPU dtypes occasionally" + ) + + self.dtypes = set(self.dtypes) + + # NOTE: backward dtypes must be acquired before forward dtypes + # since they fallback to explicit (not implicit!) specifications of + # forward dtypes + self.backward_dtypesIfROCM = ( + set(self.backward_dtypesIfROCM) + if self.backward_dtypesIfROCM is not None + else ( + self.backward_dtypesIfCUDA + if self.backward_dtypesIfCUDA is not None + else self.backward_dtypes + if self.backward_dtypes is not None + else self.dtypesIfROCM + if self.dtypesIfROCM is not None + else self.dtypesIfCUDA + if self.dtypesIfCUDA is not None + else self.dtypes + ) + ) + self.backward_dtypesIfCUDA = ( + set(self.backward_dtypesIfCUDA) + if self.backward_dtypesIfCUDA is not None + else ( + self.backward_dtypes + if self.backward_dtypes is not None + else self.dtypesIfCUDA + if self.dtypesIfCUDA is not None + else self.dtypes + ) + ) + self.backward_dtypes = ( + set(self.backward_dtypes) + if self.backward_dtypes is not None + else self.dtypes + ) + + self.dtypesIfCUDA = ( + set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes + ) + self.dtypesIfROCM = ( + set(self.dtypesIfROCM) + if self.dtypesIfROCM is not None + else self.dtypesIfCUDA + ) + + # NOTE: if the op is unspecified it is assumed to be under the torch namespace + if not self.op: + self.op = _getattr_qual(torch, self.name) + + if self.method_variant is _NOTHING: + self.method_variant = getattr(torch.Tensor, self.name, None) + + # attributes like real, imag are not callable + if not callable(self.method_variant): + self.method_variant = None + + if self.inplace_variant is _NOTHING: + inplace_name = self.name + "_" + self.inplace_variant = getattr(torch.Tensor, inplace_name, None) + + if self.operator_variant is _NOTHING: + self.operator_variant = getattr(operator, self.name, None) + + if self.inplace_operator_variant is _NOTHING: + # Note: operator.i will use operator. and assign the result to the lhs when no + # __i__ method is found. This results in the appearance of an inplace operator variant which + # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace + # operator with a check that an inplace variant exists. 
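# For example, when a type defines no `__iadd__`, `operator.iadd(x, y)` simply
# evaluates `x + y` and rebinds the result, so it would masquerade here as an
# in-place variant without actually mutating `x`; hence the guard below.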
+ if self.inplace_variant is not None: + inplace_operator_name = "i" + self.name + self.inplace_operator_variant = getattr( + operator, inplace_operator_name, None + ) + else: + self.inplace_operator_variant = None + + self.decorators = (*self.decorators, *self.skips) + + # Specifying sample inputs function without specifying the + # corresponding layout support implies the layout support: + if self.supports_sparse is None: + self.supports_sparse = self.sample_inputs_sparse_coo_func is not None + if self.sample_inputs_sparse_coo_func is None: + self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified + + if self.supports_sparse_csr is None: + self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None + if self.sample_inputs_sparse_csr_func is None: + self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified + + if self.supports_sparse_csc is None: + self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None + if self.sample_inputs_sparse_csc_func is None: + self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified + + if self.supports_sparse_bsr is None: + self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None + if self.sample_inputs_sparse_bsr_func is None: + self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified + + if self.supports_sparse_bsc is None: + self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None + if self.sample_inputs_sparse_bsc_func is None: + self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified + + # We run the sampling functions without tracking the gradiends of the creation of inputs + self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func) + self.sample_inputs_sparse_coo_func = torch.no_grad()( + self.sample_inputs_sparse_coo_func + ) + self.sample_inputs_sparse_csr_func = torch.no_grad()( + self.sample_inputs_sparse_csr_func + ) + self.sample_inputs_sparse_csc_func = torch.no_grad()( + self.sample_inputs_sparse_csc_func + ) + self.sample_inputs_sparse_bsr_func = torch.no_grad()( + self.sample_inputs_sparse_bsr_func + ) + self.sample_inputs_sparse_bsc_func = torch.no_grad()( + self.sample_inputs_sparse_bsc_func + ) + if self.reference_inputs_func is not None: + self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func) + + if not self.autodiff_fusible_nodes: + self.autodiff_fusible_nodes = [] + + if self.autodiff_nonfusible_nodes is None: + self.autodiff_nonfusible_nodes = ["aten::" + self.name] + + # Autograd support + + # Autograd flags that depend on backward AD only + # - If setting has been explicitly set, raise error if inconsistent + if self.supports_gradgrad is None: + self.supports_gradgrad = self.supports_autograd + else: + assert not (self.supports_gradgrad and not self.supports_autograd), ( + "supports_gradgrad refines the part of autograd is supported, so it should " + "not be set if supports_autograd is False" + ) + if self.check_batched_grad is None: + self.check_batched_grad = self.supports_autograd or self.supports_forward_ad + else: + assert not ( + self.check_batched_grad + and not (self.supports_autograd or self.supports_forward_ad) + ), ( + "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so " + "it should not be set if supports_autograd is False" + ) + if self.check_batched_gradgrad is None: + self.check_batched_gradgrad = self.supports_gradgrad + else: + assert not (self.check_batched_gradgrad and not self.supports_gradgrad), ( + "check_batched_gradgrad 
refines the part of autograd that will be checked (by " + "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd " + "is False." + ) + if self.check_batched_forward_grad is None: + self.check_batched_forward_grad = self.supports_forward_ad + else: + assert not ( + self.check_batched_forward_grad and not self.supports_forward_ad + ), ( + "check_batched_forward_grad should only be used when supports_forward_ad " + "is True. It is used to disable the test in the specific cases " + "where the op supports forward ad but fails to compute " + "batched forward grad." + ) + + if self.check_inplace_batched_forward_grad is None: + self.check_inplace_batched_forward_grad = self.check_batched_forward_grad + else: + assert not ( + self.check_inplace_batched_forward_grad + and not self.check_batched_forward_grad + ), ( + "check_batched_forward_grad should only be used when check_batched_forward_grad " + "is True. It is used to disable the test in the specific cases " + "where the op supports batched forward grad but fails to compute batched forward " + "grad for the inplace variant of the op." + ) + + assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), ( + "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be " + "True if backward ad is also checked, i.e., supports_forward_ad should be True.", + self.name, + ) + + # Autograd flags that depend on both forward AD and backward AD + if self.supports_inplace_autograd is None: + self.supports_inplace_autograd = ( + self.supports_autograd or self.supports_forward_ad + ) + else: + assert not ( + self.supports_inplace_autograd + and not self.supports_autograd + and not self.supports_forward_ad + ), ( + "supports_inplace_autograd refines the part of autograd that is supported, so " + "it should not be set if both supports_autograd and supports_forward_ad are False" + ) + + if self.aliases is not None: + self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment] + else: + self.aliases = () + + def __call__(self, *args, **kwargs): + """Calls the function variant of the operator.""" + return self.op(*args, **kwargs) + + def __str__(self): + return dataclass_repr(self) + + def get_op(self): + """Returns the function variant of the operator, torch..""" + return self.op + + def get_method(self): + """Returns the method variant of the operator, torch.Tensor.. + Returns None if the operator has no method variant. + """ + return self.method_variant + + def get_inplace(self): + """Returns the inplace variant of the operator, torch.Tensor._. + Returns None if the operator has no inplace variant. + """ + return self.inplace_variant + + def get_operator(self): + """Returns operator variant of the operator, e.g. operator.neg + Returns None if the operator has no operator variant. + """ + return self.operator_variant + + def get_inplace_operator(self): + """Returns the inplace operator variant of the operator, e.g operator.iadd + Returns None if the operator has no inplace operator variant""" + return self.inplace_operator_variant + + def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs but with the tensor input or first + tensor in a sequence input conjugated. 
+ """ + + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + conj_samples = list(samples) + + def conjugate(tensor): + _requires_grad = tensor.requires_grad + tensor = tensor.conj() + return tensor.requires_grad_(_requires_grad) + + for i, sample in enumerate(samples): + sample = conj_samples[i] + # Note: it is assumed that the input here is either a tensor or tensorlist + if isinstance(sample.input, torch.Tensor): + sample.input = conjugate(sample.input) + else: + sample.input[0] = conjugate(sample.input[0]) + + return TrackedInputIter(iter(conj_samples), "conjugate sample input") + + def sample_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + These samples should be sufficient to test the function works correctly + with autograd, TorchScript, etc. + """ + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + + if kwargs.get("include_conjugated_inputs", False): + conj_samples = self.conjugate_sample_inputs( + device, dtype, requires_grad, **kwargs + ) + samples_list = list(samples) + samples_list.extend(conj_samples) + samples = tuple(samples_list) + + return TrackedInputIter(iter(samples), "sample input") + + def reference_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + Distinct from sample_inputs() above because this returns an expanded set + of inputs when reference_inputs_func is defined. If undefined this returns + the sample inputs. + """ + if self.reference_inputs_func is None: + samples = self.sample_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter(iter(samples), "sample input") + + if kwargs.get("include_conjugated_inputs", False): + raise NotImplementedError + + references = self.reference_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter(iter(references), "reference input") + + def error_inputs(self, device, **kwargs): + """ + Returns an iterable of ErrorInputs. + """ + errs = self.error_inputs_func(self, device, **kwargs) + return TrackedInputIter( + iter(errs), "error input", callback=lambda e: e.sample_input + ) + + def error_inputs_sparse(self, device, layout, **kwargs): + """ + Returns an iterable of ErrorInputs that contain sparse sample + inputs with a specified layout. + """ + if not self.supports_sparse_layout(layout): + raise unittest.SkipTest("unsupported sparse layout") + return self.error_inputs_sparse_func(self, device, layout, **kwargs) + + def supports_sparse_layout(self, layout): + """Return True if OpInfo supports the specified sparse layout.""" + layout_name = str(layout).split(".")[-1] + # map torch.sparse_coo to OpInfo.supports_sparse: + layout_name = layout_name.replace("_coo", "") + return getattr(self, f"supports_{layout_name}") + + def sample_inputs_sparse( + self, layout, device, dtype, requires_grad=False, **kwargs + ): + """Returns an iterable of SampleInputs that contain inputs with a + specified sparse layout. 
+ """ + layout_name = str(layout).split(".")[-1] + sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name) + + def non_empty_sampler(op, generator): + found_sample = False + for sample in generator: + found_sample = True + yield sample + if not found_sample: + raise unittest.SkipTest("NO SAMPLES!") + + return non_empty_sampler( + self, + sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs), + ) + + def _sample_inputs_unspecified(self, *args, **kwargs): + """Raises an NotImplemented exception in a OpInfo instance creation + that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True + without specifying the corresponding sample function as + sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func. + + To avoid this, either define the corresponding sample function, + or re-map unsupported samples to error inputs in an appropiate + + opinfo/definitions/sparse.py:_validate_sample_input_sparse_ + + function. + """ + raise NotImplementedError("no sample function specified") + + def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + coo layout. + """ + return self.sample_inputs_sparse_coo_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csr layout. + """ + return self.sample_inputs_sparse_csr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csc layout. + """ + return self.sample_inputs_sparse_csc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsr layout. + """ + return self.sample_inputs_sparse_bsr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsc layout. 
+ """ + return self.sample_inputs_sparse_bsc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + """Returns the decorators targeting the given test.""" + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + if device_type == "cuda": + return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA + return self.dtypes + + def supported_backward_dtypes(self, device_type): + if not self.supports_autograd: + return set() + + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + backward_dtypes = None + if device_type == "cuda": + backward_dtypes = ( + self.backward_dtypesIfROCM + if TEST_WITH_ROCM + else self.backward_dtypesIfCUDA + ) + else: + backward_dtypes = self.backward_dtypes + + allowed_backward_dtypes = floating_and_complex_types_and( + torch.bfloat16, torch.float16, torch.complex32 + ) + return set(allowed_backward_dtypes).intersection(backward_dtypes) + + def supports_dtype(self, dtype, device_type) -> bool: + return dtype in self.supported_dtypes(device_type) + + @property + def formatted_name(self): + """Returns a formatted full name for this OpInfo that can be used in test names.""" + variant = ( + "_" + self.variant_test_name.replace(".", "_") + if self.variant_test_name + else "" + ) + return f"{self.name.replace('.', '_')}{variant}" + + +def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs): + """Generates input tensors for testing reduction operators""" + yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor( + [3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad + ) + + +def _generate_reduction_kwargs(ndim, supports_multiple_dims=True): + """Generates a subset of all valid dim and keepdim kwargs given ndim that + is appropriate for testing reduction operators. + """ + + # Test default dim and keepdim + yield {} + + # Test reducing inner and outer most dimensions + yield {"dim": 0, "keepdim": True} + yield {"dim": -1, "keepdim": False} + + # Test reducing middle dimension + if ndim > 2: + yield {"dim": ndim // 2, "keepdim": True} + + if supports_multiple_dims: + # Test reducing all dimensions + yield {"dim": tuple(range(ndim)), "keepdim": False} + + # Test reducing both first and last dimensions + if ndim > 1: + yield {"dim": (0, -1), "keepdim": True} + + # Test reducing every other dimension starting with the second + if ndim > 3: + yield {"dim": tuple(range(1, ndim, 2)), "keepdim": False} + + +def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for reduction operators.""" + + # TODO(@heitorschueroff) Once all reduction operators are using + # ReductionOpInfo use op_info.supports_multiple_dims directly. 
+ supports_multiple_dims: bool = kwargs.get("supports_multiple_dims", True) + + # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo + # use op_info.generate_args_kwargs directly. + generate_args_kwargs = kwargs.get( + "generate_args_kwargs", lambda *args, **kwargs: (yield tuple(), {}) + ) + + for t in _generate_reduction_inputs(device, dtype, requires_grad): + for reduction_kwargs in _generate_reduction_kwargs( + t.ndim, supports_multiple_dims + ): + for args, kwargs in generate_args_kwargs(t, **reduction_kwargs): + kwargs.update(reduction_kwargs) + yield SampleInput( + t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs + ) + + +# NOTE [Reductions]: +# +# For testing purposes, we relax the definition of a reduction operator +# as defined in the docstring below. We do this to capture operators with +# a similar API so they can be tested automatically. However... +# +# Strictly speaking a reduction operator is an operator that can reduce an +# array to a single scalar value and that can be computed from the partial +# result of reducing subarrays. This usually means that the reduction operation +# should be commutative and associative. This definition is important when it +# comes to implementation as it determines how a reduction can be parallelized. +# +# For example, many summary statistics such as median, mode and quantile cannot +# be computed from partial results because these are sorting and counting based +# algorithms that need information that would be lost in the reduced value. +class ReductionOpInfo(OpInfo): + """Reduction operator information. + + An operator is a reduction operator if it reduces one or more dimensions of + the input tensor to a single value. Reduction operators must implement the + following signature: + + - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor` + + ReductionOpInfo tests that reduction operators implement a consistent API. + Optional features such as reducing over multiple dimensions are captured in + the optional keyword parameters of the ReductionOpInfo constructor. + + If a reduction operator does not yet implement the full required API of + reduction operators, this should be documented by xfailing the failing + tests rather than adding optional parameters to ReductionOpInfo. + + NOTE + The API for reduction operators has not yet been finalized and some + requirements may change. + + See tests in test/test_reductions.py + """ + + def __init__( + self, + name, + *, + # The identity value for the operator if it has one. + identity: Optional[Any] = None, + # The nan policy for the operator if it implements one. + # - propagate: NaN values are propagated to the output + # - omit: NaN values are discarded during the reduction + nan_policy: Optional[str] = None, + # Whether the operator supports reducing multiple dimensions. + supports_multiple_dims: bool = True, + # Whether the operator promotes integral to floating point dtypes. + promotes_int_to_float: bool = False, + # Whether the operator promotes all integral dtypes to int64. + promotes_int_to_int64: bool = False, + # If a specific dtype is given, then the operator always returns that + # dtype irrespective of the input dtype. If None, the operator returns + # the dtype according to the type promotion rules above. + result_dtype: Optional[torch.dtype] = None, + # Casts complex results to real (e.g. 
linalg.norm or torch.var) + complex_to_real: bool = False, + # ReductionOpInfo tests generate their own input, dim and keepdim + # arguments and call this function to generate tuples of extra args and + # kwargs to use when calling the op. This is required for operators that + # have other required parameters besides the input tensor. + generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: ( + yield tuple(), + {}, + ), + # Options from the OpInfo base class + **kwargs, + ): + self._original_reduction_args = locals().copy() + assert nan_policy in (None, "propagate", "omit") + + # These are mutually exclusive options + assert not (result_dtype and promotes_int_to_float) + assert not (result_dtype and promotes_int_to_int64) + assert not (result_dtype and complex_to_real) + assert not (promotes_int_to_float and promotes_int_to_int64) + + # Default sample_inputs_func for ReductionOpInfo which augments sample + # inputs from sample_inputs_reduction with the args and kwargs from + # generate_args_kwargs. This is only used if sample_inputs_func is None. + def sample_inputs_func(*args, **kwargs): + kwargs["supports_multiple_dims"] = supports_multiple_dims + kwargs["generate_args_kwargs"] = generate_args_kwargs + yield from sample_inputs_reduction(*args, **kwargs) + + # Override OpInfo defaults and call base class __init__ + kwargs.setdefault("inplace_variant", None) + kwargs.setdefault("sample_inputs_func", sample_inputs_func) + super().__init__(name, promotes_int_to_float=promotes_int_to_float, **kwargs) + + self.identity = identity + self.nan_policy = nan_policy + self.supports_multiple_dims = supports_multiple_dims + self.promotes_int_to_int64 = promotes_int_to_int64 + self.complex_to_real = complex_to_real + self.result_dtype = result_dtype + self.generate_args_kwargs = generate_args_kwargs + + +# The base reference input generation for elementwise binary operations +def _reference_inputs_elementwise_binary( + op, device, dtype, requires_grad, exclude_zero, **kwargs +): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + yield from generate_elementwise_binary_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + if dtype is not torch.bool: + yield from generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + if dtype not in (torch.bool, torch.uint8, torch.int8): + yield from generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + yield from generate_elementwise_binary_broadcasting_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield from generate_elementwise_binary_with_scalar_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + if dtype.is_floating_point or dtype.is_complex: + yield from generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + +# Note that these references inputs use scalars for the SampleInput.input value, +# and many tests require SampleInput.input be a tensor or a list of tensors +def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = 
op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + gen = partial( + _reference_inputs_elementwise_binary, + op, + device, + dtype, + requires_grad, + exclude_zero, + **kwargs, + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_binary_noncontiguous_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + yield from generate_elementwise_binary_arbitrarily_strided_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + +# A functional that extends an elementwise binary operator's bespoke error inputs +# with generic error inputs for the class of elementwise binary operations +def make_error_inputs_elementwise_binary(error_inputs_func): + def error_inputs_func_wrapper(op, device, **kwargs): + if error_inputs_func is not None: + yield from error_inputs_func(op, device, **kwargs) + + if not op.supports_rhs_python_scalar: + si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if not op.supports_one_python_scalar: + si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if ( + not kwargs.get("skip_two_python_scalars", False) + and not op.supports_two_python_scalars + ): + si = SampleInput(2, args=(3,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + return error_inputs_func_wrapper + + +# The following functions and classes are for testing elementwise binary operators. + + +# Returns a generator of pairs of contiguous tensors on the requested device +# and with the requested dtype. +# +# This function is intended to test the non-vectorized and vectorized code +# paths of elementwise binary functions, as well as their handling of odd tensor +# sizes (like zero-dim tensors and tensors with zero elements). +# +# Each iterable will include an a tensor with no elements, +# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and +# a large 2D tensor. 
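# A minimal illustrative sketch (assumption: `op` is a BinaryUfuncInfo-like entry
# providing lhs_make_tensor_kwargs/rhs_make_tensor_kwargs and a callable function
# variant) of how the pairs produced by the generator defined below are typically
# consumed; the helper name is hypothetical and not part of this file.
def _example_consume_binary_pairs(op, device="cpu", dtype=torch.float32):
    results = []
    for sample in generate_elementwise_binary_tensors(
        op, device=device, dtype=dtype, requires_grad=False
    ):
        # Each SampleInput carries the lhs tensor as `input` and the rhs in `args`.
        results.append(op(sample.input, *sample.args, **sample.kwargs))
    return results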
+def generate_elementwise_binary_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + # tensors with no elements + (0,), + (1, 0, 3), + # zero dim (scalar) tensor + (), + # small 1D tensor + (20,), + # medium 1D tensor + (812,), + # large 2D tensor + (1029, 917), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + b = make_arg(shape) + yield SampleInput(a, args=(b,)) + + +# Returns a generator of pairs of contiguous tensors on the requested device and with +# the requested dtype. +# +# Unlike the previous function, the values in these tensors are specified manually. +def generate_elementwise_binary_small_value_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=None +): + if exclude_zero is None: + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + # defines interesting values + _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) + _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) + _float_vals = ( + 0.0, + -0.0, + -0.001, + 0.001, + -0.25, + 0.25, + -1.0, + 1.0, + -math.pi / 2, + math.pi / 2, + -math.pi + 0.00001, + math.pi - 0.00001, + -math.pi, + math.pi, + -math.pi - 0.00001, + math.pi + 0.00001, + ) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_vals, _float_vals) + elif dtype.is_complex: + complex_vals = product(_float_vals, _float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): + prod = product(_int_vals, _int_vals) + elif dtype is torch.uint8: + prod = product(_unsigned_int_vals, _unsigned_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + if r == 0 and exclude_zero: + r_vals.append(1) + else: + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + _large_int_vals = (-1113, 1113, -10701, 10701) + _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) + _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) + + l_vals = [] + r_vals = [] + + if dtype == torch.float16: + prod = product(_large_float16_vals, _large_float16_vals) + elif 
dtype.is_floating_point: + prod = product(_large_float_vals, _large_float_vals) + elif dtype.is_complex: + complex_vals = product(_large_float_vals, _large_float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int16, torch.int32, torch.int64): + prod = product(_large_int_vals, _large_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + _float_extremals = (float("inf"), float("-inf"), float("nan")) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_extremals, _float_extremals) + elif dtype.is_complex: + complex_vals = product(_float_extremals, _float_extremals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + # Test case for NaN propagation + nan = ( + float("nan") if dtype.is_floating_point else complex(float("nan"), float("nan")) + ) + lhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + lhs.view(-1)[::3] = nan + rhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + rhs.view(-1)[::3] = nan + + yield SampleInput(lhs, args=(rhs,)) + + +# Returns a generator of pairs of contiguous and noncontiguous tensors that +# require broadcasting +def generate_elementwise_binary_broadcasting_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + ((1,), ()), + ((2,), ()), + ((1,), (2,)), + ((2, 1), (2,)), + ((1, 2), (2,)), + ((3, 2), (2,)), + ((1, 3, 2), (2,)), + ((1, 3, 2), (3, 2)), + ((3, 1, 2), (3, 2)), + ((2, 3, 2), ()), + ((3, 1, 2), (1, 3, 2)), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, noncontiguous in product(shapes, [True, False]): + shape_lhs, shape_rhs = shape + lhs = make_arg( + shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs + ) + rhs = make_arg( + shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs + ) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=True) + + +# Returns a generator of pairs of contiguous tensors and scalars +def generate_elementwise_binary_with_scalar_samples( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5)) + if op.supports_rhs_python_scalar: + for shape in shapes: + lhs = make_arg(shape, 
**op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs, args=(rhs_scalar,)) + + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(lhs_scalar, args=(rhs,)) + + if op.supports_two_python_scalars: + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs_scalar, args=(rhs_scalar,)) + + +# Returns a generator of pairs of contiguous tensors and 0d tensors and scalars and type promotion +def generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, *, device, dtype, requires_grad=False +): + # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars + if op.name in ( + "eq", + "ne", + "gt", + "ge", + "lt", + "le", + "logical_and", + "logical_or", + "logical_xor", + ): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + shape = ( + 23, + ) # this shape is big enough to trigger vectorization, and has non-vectorized tail + values = (float("nan"), float("inf"), -float("inf")) + scalar_tensors = tuple(torch.tensor(val) for val in values) + if op.supports_rhs_python_scalar: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + for scalar in values + scalar_tensors: + yield SampleInput(lhs, args=(scalar,)) + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(scalar, args=(rhs,)) + + +# Returns a generator of pairs of noncontiguous tensors +def generate_elementwise_binary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + # Generic noncontiguity + lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs) + rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.clone(), args=(rhs.clone(),)) + yield SampleInput(lhs.contiguous(), args=(rhs,)) + + # Transposed + lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs) + rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.T, args=(rhs.T,)) + + # More noncontiguity + shapes = ((5, 7), (1024,)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + lhs_non_contig.copy_(lhs) + + rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + rhs_non_contig.copy_(rhs) + + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Noncontiguous indices + shape = (2, 2, 1, 2) + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs[:, 1, ...] + rhs_non_contig = rhs[:, 1, ...] 
+ + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs.expand(3, -1, -1) + rhs_non_contig = rhs.expand(3, -1, -1) + + yield SampleInput(lhs_non_contig, args=(rhs_non_contig,)) + + +# Sample inputs for elementwise binary operators, like add +def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + _M = S if kwargs.get("small_inputs_only", False) else M + _S = XS if kwargs.get("small_inputs_only", False) else S + + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + shapes = ( + ((), ()), + ((_S,), ()), + ((_S, 1), (_S,)), + ((_M, _S), ()), + ((_S, _M, _S), (_M, _S)), + ((_S, _M, _S), (_S, _M, _S)), + ((_M, 1, _S), (_M, _S)), + ((_M, 1, _S), (1, _M, _S)), + ((0, 1, XS), (0, _M, XS)), + ) + + sample_kwargs = kwargs.get("sample_kwargs", {}) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs) + broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) + + yield SampleInput( + lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input + ) + + +# Metadata class for binary "universal functions (ufuncs)" that accept two +# tensor and have common properties +class BinaryUfuncInfo(OpInfo): + """Operator information for 'universal binary functions (binary ufuncs).' + These are functions of two tensors with common properties like: + - they are elementwise functions + - the output shape is determined by the input shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/stable/reference/ufuncs.html) for more details + about the concept of ufuncs. + """ + + def __init__( + self, + name, + *, + sample_inputs_func=sample_inputs_elementwise_binary, + reference_inputs_func=reference_inputs_elementwise_binary, + error_inputs_func=None, + lhs_make_tensor_kwargs=None, + rhs_make_tensor_kwargs=None, + always_returns_bool=False, # Set to true if the op always returns bool tensors + supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs + supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs + supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs + **kwargs, + ): + self._original_binary_ufunc_args = locals().copy() + + # Elementwise binary operations perform the equivalent of test_numpy_refs + # in test_binary_ufuncs, but with additional test granularity. So the + # generic test_ops.py test is skipped because it's redundant. 
+ common_skips = ( + DecorateInfo( + unittest.skip("Skipping redundant test."), + "TestCommon", + "test_numpy_refs", + ), + ) + kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips + super().__init__( + name, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), + **kwargs, + ) + + # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. + if lhs_make_tensor_kwargs is None: + lhs_make_tensor_kwargs = {} + self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs + + if rhs_make_tensor_kwargs is None: + rhs_make_tensor_kwargs = {} + self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs + + self.always_returns_bool = always_returns_bool + self.supports_rhs_python_scalar = supports_rhs_python_scalar + self.supports_one_python_scalar = supports_one_python_scalar + self.supports_two_python_scalars = supports_two_python_scalars + + if self.supports_two_python_scalars: + self.supports_one_python_scalar = True + + if self.supports_one_python_scalar: + assert ( + supports_rhs_python_scalar + ), "Can't support lhs and rhs Python scalars but not rhs scalars!" + + +# The following functions and classes are for testing elementwise unary operators. +def sample_inputs_elementwise_unary( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + _L = S if kwargs.get("small_inputs_only", False) else L + + low, high = op_info.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op_info._domain_eps + high = high if high is None or not is_floating else high - op_info._domain_eps + if ( + op_info.supports_sparse_csr + or op_info.supports_sparse_csc + or op_info.supports_sparse_bsr + or op_info.supports_sparse_bsc + ): + # Tensors with dim=2 for sparse compressed testing + yield SampleInput( + make_tensor( + (_L, _L), + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + else: + # Creates a 1D, empty, and scalar tensor + for shape in ((_L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + + +# Replace values satisfying condition with a safe value. 
This is used to block +# out values the could cause singularity like tan(pi/2) +def _replace_values_in_tensor(tensor, condition, safe_value): + mask = condition(tensor) + tensor.masked_fill_(mask, safe_value) + + +# Helper to create a unary elementwise tensor with valid inputs +def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs): + low, high = op.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs) + + if op.reference_numerics_filter is not None and dtype is not torch.bool: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + return a + + +# Restricts the values in the tensor to the domain of the +# given elementwise unary operator +def _filter_unary_elementwise_tensor(a, *, op): + # short-circuits for boolean tensors + if a.dtype is torch.bool: + return a + + low, high = op.domain + is_floating = a.dtype.is_floating_point or a.dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + if a.dtype is torch.uint8 and low is not None: + low = max(low, 0) + + if not a.dtype.is_floating_point and not a.dtype.is_complex: + low = math.ceil(low) if low is not None else None + high = math.floor(high) if high is not None else None + + if op.reference_numerics_filter is not None: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + if low is not None or high is not None: + if a.dtype.is_complex: + a.real.clamp_(low, high) + a.imag.clamp_(low, high) + else: + a.clamp_(min=low, max=high) + + return a + + +def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs): + # Special-cases bool + if dtype is torch.bool: + tensors = ( + torch.empty(0, device=device, dtype=torch.bool), + torch.tensor(True, device=device), + torch.tensor(False, device=device), + torch.tensor((True, False), device=device), + make_tensor((812,), device=device, dtype=dtype), + make_tensor((1029, 917), device=device, dtype=dtype), + ) + for a in tensors: + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + shapes = ( + (1029, 917), + (812,), + # Empty sizes + (0,), + (0, 3, 3), + (1, 0, 5), + (6, 0, 0, 0), + (3, 0, 1, 0), + ) + + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + for shape in shapes: + a = make_arg(shape) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_small_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def 
generate_elementwise_unary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + yield SampleInput( + sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] + ) + + +def generate_elementwise_unary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + + # Generic noncontiguity + t = make_arg((1026,), noncontiguous=True) + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Transposed + t = make_arg((1024, 1024)).T + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + t = make_arg(shape) + t_non_contig = t.expand(3, -1, -1) + yield SampleInput( + t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] + ) + + +def generate_elementwise_unary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +# Reuses the elementwise binary generators for consistency +# TODO: in the future generalize the reference generators to handle n-ary elementwise operations +def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + yield from generate_elementwise_unary_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype is not torch.bool: + yield from generate_elementwise_unary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + if dtype not in (torch.bool, torch.uint8, torch.int8) and ( + op.handles_large_floats + or (not dtype.is_floating_point and not dtype.is_complex) + ): + yield from generate_elementwise_unary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype.is_floating_point or ( + op.handles_complex_extremal_values and dtype.is_complex + ): + yield from generate_elementwise_unary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + gen = partial( + _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_unary_noncontiguous_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield from generate_elementwise_unary_arbitrarily_strided_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +# Metadata class for unary "universal functions (ufuncs)" that accept a single +# tensor and 
have common properties like: +class UnaryUfuncInfo(OpInfo): + """Operator information for 'universal unary functions (unary ufuncs).' + These are functions of a single tensor with common properties like: + - they are elementwise functions + - the input shape is the output shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details + about the concept of ufuncs. + """ + + def __init__( + self, + name, # the string name of the function + *, + dtypes=floating_types(), + domain=(None, None), # the [low, high) domain of the function + handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) + handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) + supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle + sample_inputs_func=sample_inputs_elementwise_unary, + reference_inputs_func=reference_inputs_elementwise_unary, + sample_kwargs=lambda device, dtype, input: ({}, {}), + reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested + **kwargs, + ): + self._original_unary_ufunc_args = locals().copy() + + super().__init__( + name, + dtypes=dtypes, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + **kwargs, + ) + self.domain = domain + self.handles_complex_extremal_values = handles_complex_extremal_values + self.handles_large_floats = handles_large_floats + self.supports_complex_to_float = supports_complex_to_float + self.reference_numerics_filter = reference_numerics_filter + + # test_unary_ufuncs.py generates its own inputs to test the consistency + # of the operator on sliced tensors, non-contig tensors, etc. + # `sample_kwargs` is a utility function to provide kwargs + # along with those inputs if required (eg. clamp). + # It should return two dictionaries, first holding kwarg for + # torch operator and second one for reference NumPy operator. + self.sample_kwargs = sample_kwargs + + # Epsilon to ensure grad and gradgrad checks don't test values + # outside a function's domain. 
+ self._domain_eps = 1e-5 + + +def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs): + is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half + if not is_fp16_or_chalf: + nd_tensor = partial( + make_tensor, + (S, S + 1, S + 2), + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, (31,), device=device, dtype=dtype, requires_grad=requires_grad + ) + else: + # cuFFT supports powers of 2 for half and complex half precision + # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args + # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two + low = None + high = None + if self.name in ["fft.hfft", "fft.irfft", "_refs.fft.hfft", "_refs.fft.irfft"]: + shapes = ((2, 9, 9), (33,)) + elif self.name in [ + "fft.hfft2", + "fft.irfft2", + "_refs.fft.hfft2", + "_refs.fft.irfft2", + ]: + shapes = ((2, 8, 9), (33,)) + elif self.name in [ + "fft.hfftn", + "fft.irfftn", + "_refs.fft.hfftn", + "_refs.fft.irfftn", + ]: + shapes = ((2, 2, 33), (33,)) + # Adjusting the limits because the test would be flaky due to over-saturation of float16 + # See: https://github.com/pytorch/pytorch/pull/81416 + low = -1.0 + high = 1.0 + else: + shapes = ((2, 8, 16), (32,)) + nd_tensor = partial( + make_tensor, + shapes[0], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, + shapes[1], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + + if self.ndimensional == SpectralFuncType.ND: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(8,)) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3, (0, -1)]) + elif self.ndimensional == SpectralFuncType.TwoD: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(6, 8) if not is_fp16_or_chalf else (4, 8)) + yield SampleInput(nd_tensor(), dim=0) + yield SampleInput(nd_tensor(), dim=(0, -1)) + yield SampleInput(nd_tensor(), dim=(-3, -2, -1)) + else: + yield SampleInput( + nd_tensor(), + n=10 if not is_fp16_or_chalf else 8, + dim=1, + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), n=7 if not is_fp16_or_chalf else 8) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3]) + + +SpectralFuncType = Enum("SpectralFuncType", ("OneD", "TwoD", "ND")) + + +# Metadata class for Fast Fourier Transforms in torch.fft. 
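# A minimal illustrative sketch of constructing an entry with the class defined
# below. The "fft.fft" entry and its metadata are an assumption made for the sake
# of the example; the real entries live in opinfo/definitions/fft.py and carry
# additional dtype and decorator metadata.
def _example_spectral_func_info():
    import numpy as np  # assumed available; used only as the reference implementation
    return SpectralFuncInfo(
        "fft.fft",
        ref=np.fft.fft,
        ndimensional=SpectralFuncType.OneD,
    )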
+class SpectralFuncInfo(OpInfo): + """Operator information for torch.fft transforms.""" + + def __init__( + self, + name, # the string name of the function + *, + ref=None, # Reference implementation (probably in np.fft namespace) + dtypes=floating_and_complex_types(), + ndimensional: SpectralFuncType, + sample_inputs_func=sample_inputs_spectral_ops, + decorators=None, + **kwargs, + ): + self._original_spectral_func_args = dict(locals()).copy() + self._original_spectral_func_args.update(kwargs) + + decorators = list(decorators) if decorators is not None else [] + decorators += [ + skipCPUIfNoFFT, + DecorateInfo( + toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}), + "TestCommon", + "test_complex_half_reference_testing", + ), + ] + + super().__init__( + name=name, + dtypes=dtypes, + decorators=decorators, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + self.ndimensional = ndimensional + + +class ShapeFuncInfo(OpInfo): + """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll""" + + def __init__( + self, + name, # the string name of the function + *, + ref, # a reference function + dtypes=floating_types(), + dtypesIfCUDA=None, + dtypesIfROCM=None, + sample_inputs_func=None, + **kwargs, + ): + super().__init__( + name, + dtypes=dtypes, + dtypesIfCUDA=dtypesIfCUDA, + dtypesIfROCM=dtypesIfROCM, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + + +def sample_inputs_foreach( + self, + device, + dtype, + N, + *, + noncontiguous=False, + same_size=False, + low=None, + high=None, + zero_size: bool, + requires_grad: bool, + # mutually exclusive from same_size and zero_size, which are all or nothing + intersperse_empty_tensors: bool = False, +): + if zero_size: + return [torch.empty(0, dtype=dtype, device=device) for _ in range(N)] + if same_size: + return [ + make_tensor( + (N, N), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for _ in range(N) + ] + else: + # interweave some empty tensors + have the last 2 tensors be empty (see #100701) + return [ + torch.empty(0, dtype=dtype, device=device, requires_grad=requires_grad) + if (i % 3 == 0 or i >= N - 2) and intersperse_empty_tensors + else make_tensor( + (N - i, N - i), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for i in range(N) + ] + + +def get_foreach_method_names(name): + # get torch inplace reference function + op_name = "_foreach_" + name + inplace_op_name = op_name + "_" + + op = getattr(torch, op_name, None) + inplace_op = getattr(torch, inplace_op_name, None) + + ref = getattr(torch, name, None) + ref_inplace = getattr(torch.Tensor, name + "_", None) + return op, inplace_op, ref, ref_inplace + + +class ForeachFuncInfo(OpInfo): + """Early version of a specialized OpInfo for foreach functions""" + + def __init__( + self, + name, + sample_inputs_func, + *, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + dtypesIfROCM=None, + supports_alpha_param=False, + supports_autograd=True, + supports_inplace_autograd=True, + supports_scalar_self_arg=False, + supports_forward_ad=True, + backward_requires_result=False, + supports_out=True, + **kwargs, + ): + ( + foreach_method, + foreach_method_inplace, + torch_ref_method, + torch_ref_inplace, + ) = get_foreach_method_names(name) + if not supports_out: + # note(crcrpar): `foreach_method` for `"zero"` is `None` 
but `None` would call + # `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero` + # is not defined at the moment. Thus to skip the qualification, set a similar torch + # function. + assert foreach_method is None + assert torch_ref_method is None + foreach_method = foreach_method_inplace + torch_ref_method = torch_ref_inplace + super().__init__( + name="_foreach_" + name, + op=foreach_method, + ref=torch_ref_method, + method_variant=foreach_method, + inplace_variant=foreach_method_inplace, + dtypes=dtypes, + dtypesIfCUDA=dtypesIfCUDA, + dtypesIfROCM=dtypesIfROCM, + sample_inputs_func=sample_inputs_func, + supports_autograd=supports_autograd, + supports_forward_ad=supports_forward_ad, + supports_out=supports_out, + **kwargs, + ) + self.supports_scalar_self_arg = supports_scalar_self_arg + + self.ref_inplace = torch_ref_inplace + self.supports_alpha_param = supports_alpha_param + self.backward_requires_result = backward_requires_result + self.has_no_in_place = self.inplace_variant is None + self.supports_inplace_autograd = supports_inplace_autograd + + if name == "norm": + self.ref = torch.linalg.vector_norm + elif name == "minimum": + # because minimum ref does not support inplace or scalar + self.ref = torch.clamp_max + self.ref_inplace = torch.Tensor.clamp_max_ + elif name == "maximum": + # because maximum ref does not support inplace or scalar + self.ref = torch.clamp_min + self.ref_inplace = torch.Tensor.clamp_min_ + + def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs): + if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"): + return [] + return self.sample_inputs_func.sample_zero_size_tensor_inputs( + self, device, dtype, requires_grad, **kwargs + ) + + +def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs): + """Gradcheck wrapper for functions that take Hermitian matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the Hermitian property of the input. + """ + return op(input + input.mH, *args, **kwargs) + + +def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs): + """Gradcheck wrapper for functions that take lower or upper triangular matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the triangular property of the input. + `idx` is used to specific which `args[idx]` is to be triangularized. + """ + triangular_arg = args[idx].triu() if upper else args[idx].tril() + return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs) + + +def gradcheck_wrapper_triangular_input_real_positive_diagonal( + op, *args, upper=False, idx=0, **kwargs +): + """Gradcheck wrapper for functions that take lower/upper triangular matrices + with real and positive diagonals, for example, cholesky-like operations. + """ + arg = args[idx] + arg_diag = arg.diagonal(0, -2, -1) + arg_diag_embed = torch.diag_embed(arg_diag) + id_diag_tensor = torch.ones_like(arg_diag) + id_tensor = torch.diag_embed(id_diag_tensor) + # new_arg = arg - diag(arg) + I + new_arg = arg - arg_diag_embed + id_tensor + return gradcheck_wrapper_triangular_input( + op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs + ) + + +def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked operations. + + When mask is specified, replaces masked-out elements with zeros. 
+ + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. + """ + output = op(input, *args, **kwargs) + mask = kwargs.get("mask") + if mask is not None: + output_mask = torch.masked._output_mask(op, input, *args, **kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked pointwise operations. Assumes that the result + will be masked iff both tensors are masked at a specific index + + When mask is specified, replaces masked-out elements with zeros. + + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. + """ + output = op(input, *args, **kwargs) + input_mask = kwargs.get("input_mask") + other_mask = kwargs.get("other_mask") + if input_mask is not None and other_mask is not None: + combined_mask = torch.logical_and(input_mask, other_mask) + new_kwargs = dict(mask=combined_mask, **kwargs) + output_mask = torch.masked._input_mask(input, *args, **new_kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def clone_sample(sample, **kwargs): + """ + Given a SampleInput, this function analyzes its input, args and kwargs, + and produces a copy with each non-Tensor entry being copied by reference, + and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)` + """ + + def clone_tensor(t): + if isinstance(t, torch.Tensor): + return t.detach().clone().requires_grad_(t.requires_grad) + else: + return t + + sample_kwargs = kwargs if kwargs else sample.kwargs + + return SampleInput( + clone_tensor(sample.input), + args=tuple(map(clone_tensor, sample.args)), + kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()}, + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2ae805370ccd6792a0c756224ff89ad756aa10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py @@ -0,0 +1,27 @@ +# mypy: ignore-errors + +from typing import List + +from torch.testing._internal.opinfo.core import OpInfo +from torch.testing._internal.opinfo.definitions import ( + _masked, + fft, + linalg, + signal, + special, +) + +# Operator database +op_db: List[OpInfo] = [ + *fft.op_db, + *linalg.op_db, + *signal.op_db, + *special.op_db, + *_masked.op_db, +] + +python_ref_db: List[OpInfo] = [ + *fft.python_ref_db, + *linalg.python_ref_db, + *special.python_ref_db, +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ff2b39f3f5d9f99017d1a9dd54c4b605e6c3ed2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e1c04d52c69a9b24d02997d6380ba013b32b730f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d62081e572471c01072e7964b77761475abb1a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f8558e6b762c2047b9d467da4cf9a30da8319ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70b357f6d2edf31e4d4110e9ee70c7bd48e01de4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f27e4928e9681c3cd49dcaa8fd3bd0922baa29f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe52cb023f8e551e2fd76dbb9eb5e5b8705406e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py new file mode 100644 index 0000000000000000000000000000000000000000..2baa5f80840940de01c93aa9cf94d43b78238d15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py @@ -0,0 +1,1184 @@ +# mypy: ignore-errors + +import unittest +from collections.abc import Sequence +from functools import partial +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import tol, toleranceOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, + complex_types, + 
floating_and_complex_types_and, + floating_types_and, + integral_types, +) +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + M, + OpInfo, + ReductionOpInfo, + S, + sample_inputs_reduction, + SampleInput, +) +from torch.testing._internal.opinfo.utils import prod_numpy, reference_reduction_numpy + + +# Used for log_softmax, softmax, softmin +def sample_inputs_softmax_variant( + op_info, + device, + dtype, + requires_grad, + with_dtype=False, + use_zero_dimensions=True, + **kwargs, +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + cases = [ + ((S,), (0,)), + ((S, S), (0,)), + ((S, S), (1,)), + ((S, S), (-1,)), + ((S, M, S), (2,)), + *([((S, 0, 0), (-1,))] if use_zero_dimensions else []), + ] + kwargs = dict(dtype=torch.float64) if with_dtype else None + + # PyTorch on XLA throws an error when passed with dim argument for 0d tensor. + # See https://github.com/pytorch/xla/issues/3061 for more details. + if torch.device(device).type != "xla": + cases.append(((), (0,))) + + return ( + SampleInput(make_arg(shape), args=dim, kwargs=kwargs) for shape, dim in cases + ) + + +def _generate_masked_op_mask(input_shape, device, **kwargs): + make_arg = partial( + make_tensor, dtype=torch.bool, device=device, requires_grad=False + ) + yield None + yield make_arg(input_shape) + if len(input_shape) > 2: + # broadcast last mask dimension: + yield make_arg(input_shape[:-1] + (1,)) + # broadcast middle mask dimension: + yield make_arg(input_shape[:1] + (1,) + input_shape[2:]) + # broadcast first mask dimension: + yield make_arg((1,) + input_shape[1:]) + # mask.ndim < input.ndim + yield make_arg(input_shape[1:]) + # mask.ndim == 1 + yield make_arg(input_shape[-1:]) + # masks that require broadcasting of inputs (mask.ndim > + # input.ndim) will not be supported, however, we may + # reconsider this if there will be demand on this kind of + # degenerate cases. + + +def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked reduction operators. + + Masked reduction operator is a reduction operator with trailing + mask optional argument. A mask is a bool tensor with the same + shape as input or a shape that is broadcastable to input shape. + """ + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_sparse_coo_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse coo layouts. 
+ """ + if op_info.supports_sparse: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse()) + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in {"prod", "amax", "amin"}: + # FIXME: for now reductions with non-zero reduction identity and + # unspecified mask are not supported for sparse COO + # tensors, see torch.masked.prod implementation + # for details. + continue + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def sample_inputs_sparse_csr_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse csr layouts. + """ + if op_info.supports_sparse_csr: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if not ( + sample_input.input.ndim == 2 and sample_input.kwargs.get("keepdim") + ): + # - sparse CSR tensors are always 2-D tensors + # - masked reduction on CSR tensors are defined only if keepdim is True. + continue + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse_csr()) + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in ["prod", "amax", "amin", "mean"]: + # reductions with non-zero reduction identity and + # unspecified mask is not supported for sparse CSR + # tensors, see torch.masked.prod implementation + # for details. + continue + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + yield new_sample + if sample_input.kwargs["dim"] == 0: + # Reductions of CSR tensors use different implementations for + # inner and/or outer dimensions. So, as a minimum of testing CSR + # implementations the following kwargs must be generated: + # dict(dim=0, keepdim=True) + # dict(dim=1, keepdim=True) + # dict(dim=(0, 1), keepdim=True) + # Here we generate the dim=1 case from the dim=0 case. 
+ sample_input_kwargs = new_sample.kwargs.copy() + sample_input_kwargs.update(dim=1) + yield SampleInput( + new_sample.input.clone(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked norm.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + sample_input_args, sample_input_kwargs = ( + ord, + ) + sample_input.args, sample_input.kwargs.copy() + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def reference_masked_std_var( + numpy_fn, +): + ref = reference_reduction_numpy(numpy_fn) + + # Translate unbiased or correction arguments into ddof + def func( + input, + dim=None, + unbiased=None, + *, + correction=None, + **kwargs, + ): + ddof = 1 + if unbiased is not None: + ddof = 1 if unbiased else 0 + if correction is not None: + ddof = correction + + if isinstance(dim, Sequence): + dim = tuple(dim) + + return ref(input, dim, ddof=ddof, **kwargs) + + return func + + +def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked std/var.""" + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + from torch.testing._internal.common_methods_invocations import sample_inputs_std_var + + def masked_samples(): + for sample_input in sample_inputs_std_var( + op_info, device, dtype, requires_grad, **kwargs + ): + if len(sample_input.args) and isinstance(sample_input.args[0], bool): + continue # masked.{std, var} doesn't support `.var(unbiased)` + + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + for sample_input in masked_samples(): + correction = sample_input.kwargs.get("correction") + if correction is None: + correction = int(sample_input.kwargs.get("unbiased", True)) + + dim = sample_input.kwargs.get("dim", None) + + if sample_input.kwargs.get("mask") is None: + orig_count = torch.masked.sum( + torch.ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + ) + else: + inmask = torch.masked._input_mask( + sample_input.input, *sample_input.args, **sample_input.kwargs + ) + orig_count = torch.masked.sum( + inmask.new_ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + if orig_count.min() <= correction + 1: + # Skip samples that lead to nans in var computation + continue + + yield sample_input + + +def sample_inputs_masked_softmax( + op_info, device, dtype, requires_grad, with_dtype=False, **kwargs +): + """Sample inputs for masked softmax, log_softmax, and softmin. + + Masked normalization operator is a reduction operator with + trailing mask optional argument. 
A mask is a bool tensor with the + same shape as input or a shape that is broadcastable to input + shape. + """ + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input.args, + mask=mask, + **sample_input.kwargs, + ) + + +def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked cumsum and cumprod.""" + inputs: List[SampleInput] = [] + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + if type(mask) != torch.Tensor: + continue + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + if "keepdim" in sample_input_kwargs: + sample_input_kwargs.pop("keepdim") + # dimension is required + if sample_input_args: + dim = sample_input.args[0] + else: + if "dim" not in sample_input_kwargs: + continue + dim = sample_input_kwargs.pop("dim") + sample_input_args = (dim,) + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input_args, + **sample_input_kwargs, + ) + + +def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked logaddexp.""" + shapes = [(S,), (S, S), (S, M, S)] + input_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + other_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for shape, input_masks, other_masks in zip( + shapes, input_mask_lists, other_mask_lists + ): + for input_mask, other_mask in zip(input_masks, other_masks): + yield SampleInput( + make_arg(shape), + make_arg(shape), + input_mask=input_mask, + other_mask=other_mask, + ) + + +def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked normalize.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, use_zero_dimensions=False, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + ord, + *sample_input.args, + **sample_input.kwargs, + ) + + +op_db: List[OpInfo] = [ + ReductionOpInfo( + "masked.sum", + ref=reference_reduction_numpy(np.sum), + method_variant=None, + identity=0, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), 
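# Each of these skip entries is a DecorateInfo: it attaches a decorator
# (unittest.skip, unittest.expectedFailure, or a toleranceOverride) to a
# particular test class and test name, optionally narrowed by dtypes,
# device_type, or an active_if condition.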
+ # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=5e-2), + torch.float16: tol(atol=1e-03, rtol=5e-3), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=0.1, rtol=0.1), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestMasked", + "test_mask_layout", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + ReductionOpInfo( + "masked.prod", + ref=prod_numpy, + method_variant=None, + identity=1, + nan_policy="propagate", + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + "TestReductions", + "test_ref_small_input", + dtypes=(torch.int8, torch.int16, torch.int32), + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + device_type="cuda", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_duplicate_values", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1.5e-03)}), + "TestMasked", + "test_mask_layout", + device_type="cpu", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + OpInfo( + "masked.cumsum", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.cumprod", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCompositeCompliance", + "test_backward", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=2e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amax", + nan_policy="propagate", + supports_out=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amax), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amin", + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amin), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmax", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmax + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmin", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmin + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.mean", + ref=reference_reduction_numpy(np.mean) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_sparse_csr=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_duplicate_values", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_small_input", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=0.05), + torch.float16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=2e-03)}), + "TestSparseCompressed", + "test_consistency", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.median", + dtypes=floating_types_and(torch.bfloat16, torch.float16), + method_variant=None, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=partial( + sample_inputs_masked_softmax, use_zero_dimensions=False + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.norm", + identity=0, + method_variant=None, + nan_policy="propagate", + supports_out=False, + promotes_int_to_float=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # torch.jit.frontend.NotSupportedError: Compiled functions + # can't take variable number of arguments or use + # keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_norm, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.var", + ref=reference_masked_std_var(np.var) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + ReductionOpInfo( + "masked.std", + ref=reference_masked_std_var(np.std) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + 
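# (The gradcheck_fast_mode=True entry that follows opts this op into the
# fast gradcheck algorithm instead of shrinking the sample inputs.)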
gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-02, rtol=1e-02), + torch.float16: tol(atol=1e-02, rtol=1e-02), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=5e-03, rtol=5e-04), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + OpInfo( + "masked.softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.log_softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + ], + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.softmin", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.normalize", + method_variant=None, + dtypes=floating_and_complex_types_and(torch.half, 
torch.bfloat16), + sample_inputs_func=sample_inputs_masked_normalize, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.logaddexp", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestFwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestBwdGradients", "test_fn_gradgrad" + ), + ), + sample_inputs_func=sample_inputs_masked_logaddexp, + gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation, + ), + ReductionOpInfo( + "masked.logsumexp", + dtypes=all_types_and(torch.half, torch.bfloat16), + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.skip("Skipped!"), "TestReductions", "test_dim_empty_keepdim" + ), + # Identity can't be -torch.inf without overflow + DecorateInfo( + unittest.skip("Skipped!"), + "TestReductions", + "test_empty_tensor_empty_slice", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # all the values are the same except for -inf vs nan + DecorateInfo(unittest.skip("Skipped!"), "TestDecomp", "test_comprehensive"), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1d43ee9f89cd6d70da12ab349a9455c7b504e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py @@ -0,0 +1,784 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from typing import List + +import numpy as np + +import torch + +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import SM53OrLater +from torch.testing._internal.common_device_type import precisionOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, +) +from torch.testing._internal.common_utils import TEST_SCIPY, TEST_WITH_ROCM +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + sample_inputs_spectral_ops, + SampleInput, + SpectralFuncInfo, + SpectralFuncType, +) +from torch.testing._internal.opinfo.refs import ( + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, +) + +has_scipy_fft = False +if TEST_SCIPY: + try: + import scipy.fft + + has_scipy_fft = True + except ModuleNotFoundError: + pass + + +class SpectralFuncPythonRefInfo(SpectralFuncInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant="", + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant, op_db=op_db + ) + assert isinstance(self.torch_opinfo, SpectralFuncInfo) + + inherited = self.torch_opinfo._original_spectral_func_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +def error_inputs_fft(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Zero-dimensional tensor has no dimension to take FFT of + yield ErrorInput( + SampleInput(make_arg()), + error_type=IndexError, + error_regex="Dimension specified as -1 but tensor has no dimensions", + ) + + +def error_inputs_fftn(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Specifying a dimension on a zero-dimensional tensor + yield ErrorInput( + SampleInput(make_arg(), dim=(0,)), + error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions", + ) + + +def sample_inputs_fft_with_min( + op_info, device, dtype, requires_grad=False, *, min_size, **kwargs +): + yield from sample_inputs_spectral_ops( + op_info, device, dtype, requires_grad, **kwargs + ) + if TEST_WITH_ROCM: + # FIXME: Causes floating point exception on ROCm + return + + # Check the "Invalid number of data points" error isn't too strict + # https://github.com/pytorch/pytorch/pull/109083 + a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(a) + + +def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor( + shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield SampleInput(mt((9, 10))) + yield SampleInput(mt((50,)), kwargs=dict(dim=0)) + yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,))) + yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1))) + yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2))) + + +# Operator database +op_db: List[OpInfo] = [ + SpectralFuncInfo( + "fft.fft", + aten_name="fft_fft", + decomp_aten_name="_fft_c2c", + ref=np.fft.fft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + SpectralFuncInfo( + "fft.fft2", + aten_name="fft_fft2", + ref=np.fft.fft2, + decomp_aten_name="_fft_c2c", + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # 
https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_complex_half_reference_testing", + device_type="cuda", + dtypes=[torch.complex32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + SpectralFuncInfo( + "fft.fftn", + aten_name="fft_fftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.fftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + ), + SpectralFuncInfo( + "fft.hfft", + aten_name="fft_hfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.hfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + check_batched_gradgrad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + ), + ), + SpectralFuncInfo( + "fft.hfft2", + aten_name="fft_hfft2", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfft2 if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + 
"fft.hfftn", + aten_name="fft_hfftn", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + "fft.rfft", + aten_name="fft_rfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + skips=(), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.rfft2", + aten_name="fft_rfft2", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft2, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.rfftn", + aten_name="fft_rfftn", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.ifft", + aten_name="fft_ifft", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # 
https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + ), + SpectralFuncInfo( + "fft.ifft2", + aten_name="fft_ifft2", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ifftn", + aten_name="fft_ifftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ihfft", + aten_name="fft_ihfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.ihfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fft, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + skips=(), + check_batched_grad=False, + ), + SpectralFuncInfo( + "fft.ihfft2", + aten_name="fft_ihfft2", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + 
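+ # Illustrative note on the conditional-splat pattern used in these dtype lists: when
+ # SM53OrLater is False the call below reduces to all_types_and(torch.bool), and when
+ # it is True it is equivalent to all_types_and(torch.bool, torch.half), i.e. the
+ # half-precision dtype is only advertised on SM53+ devices.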
dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=( + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + # Mismatched elements! + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + ), + ), + SpectralFuncInfo( + "fft.ihfftn", + aten_name="fft_ihfftn", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + # Mismatched elements! + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + ], + ), + SpectralFuncInfo( + "fft.irfft", + aten_name="fft_irfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.irfft2", + aten_name="fft_irfft2", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.irfftn", + 
aten_name="fft_irfftn", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + OpInfo( + "fft.fftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "fft.ifftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), +] + +python_ref_db: List[OpInfo] = [ + SpectralFuncPythonRefInfo( + "_refs.fft.fft", + torch_opinfo_name="fft.fft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft", + torch_opinfo_name="fft.ifft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft", + torch_opinfo_name="fft.rfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft", + torch_opinfo_name="fft.irfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft", + torch_opinfo_name="fft.hfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft", + torch_opinfo_name="fft.ihfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fftn", + torch_opinfo_name="fft.fftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifftn", + torch_opinfo_name="fft.ifftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfftn", + torch_opinfo_name="fft.rfftn", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfftn", + torch_opinfo_name="fft.irfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfftn", + torch_opinfo_name="fft.hfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfftn", + torch_opinfo_name="fft.ihfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fft2", + torch_opinfo_name="fft.fft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft2", + torch_opinfo_name="fft.ifft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft2", + torch_opinfo_name="fft.rfft2", + ), + 
SpectralFuncPythonRefInfo( + "_refs.fft.irfft2", + torch_opinfo_name="fft.irfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft2", + torch_opinfo_name="fft.hfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft2", + torch_opinfo_name="fft.ihfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + PythonRefInfo( + "_refs.fft.fftshift", + op_db=op_db, + torch_opinfo_name="fft.fftshift", + skips=( + # TODO Move fftshift to decomps + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), + ), + ), + PythonRefInfo( + "_refs.fft.ifftshift", + op_db=op_db, + torch_opinfo_name="fft.ifftshift", + skips=( + # TODO Move ifftshift to decomps + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), + ), + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..a1b6531b158061dc2226783edbdacd5647a149bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py @@ -0,0 +1,2454 @@ +# mypy: ignore-errors + +import itertools +import random +import unittest +from functools import partial +from itertools import chain, product +from typing import Iterable, List, Tuple + +import numpy as np +from numpy import inf + +import torch + +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import ( + _get_magma_version, + _get_torch_cuda_version, + with_tf32_off, +) +from torch.testing._internal.common_device_type import ( + has_cusolver, + skipCPUIfNoLapack, + skipCUDAIf, + skipCUDAIfNoCusolver, + skipCUDAIfNoMagma, + skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoMagmaAndNoLinalgsolver, + skipCUDAIfRocm, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + all_types_and_complex, + all_types_and_complex_and, + floating_and_complex_types, + floating_and_complex_types_and, + get_all_complex_dtypes, +) +from torch.testing._internal.common_utils import ( + GRADCHECK_NONDET_TOL, + IS_MACOS, + make_fullrank_matrices_with_distinct_singular_values, + skipIfSlowGradcheckEnv, + slowTest, + TEST_WITH_ROCM, +) +from torch.testing._internal.opinfo.core import ( + clone_sample, + DecorateInfo, + ErrorInput, + gradcheck_wrapper_hermitian_input, + L, + M, + OpInfo, + ReductionOpInfo, + S, + SampleInput, +) +from torch.testing._internal.opinfo.refs import PythonRefInfo, ReductionPythonRefInfo + + +def sample_kwargs_vector_norm(t, **kwargs): + # orders with / without identity + def ords(): + has_id = (6, 4, 2, 1, 0, 0.9) + no_id = (inf, -2.1, -inf) + if t.numel() == 0: + dim = kwargs.get("dim") + if dim is None: + return has_id + if not isinstance(dim, Iterable): + dim = (dim,) + for d in dim: + if t.size(d) == 0: + return has_id + return has_id + no_id + + return (((), dict(ord=o)) for o in ords()) + + +def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, 
requires_grad=requires_grad + ) + + is_linalg_svd = "linalg.svd" in op_info.name + batches = [(), (0,), (3,)] + ns = [0, 3, 5] + + def uniformize(usv): + S = usv[1] + k = S.shape[-1] + U = usv[0][..., :k] + Vh = usv[2] if is_linalg_svd else usv[2].mH + Vh = Vh[..., :k, :] + return U, S, Vh + + def fn_U(usv): + U, _, _ = uniformize(usv) + return U.abs() + + def fn_S(usv): + return uniformize(usv)[1] + + def fn_Vh(usv): + # We also return S to test + _, S, Vh = uniformize(usv) + return S, Vh.abs() + + def fn_UVh(usv): + U, S, Vh = uniformize(usv) + return U @ Vh, S + + fns = (fn_U, fn_S, fn_Vh, fn_UVh) + + fullmat = "full_matrices" if is_linalg_svd else "some" + + for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns): + shape = batch + (n, k) + yield SampleInput( + make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn + ) + + +def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + yield SampleInput(make_arg((S, 3)), args=(make_arg((S, 3)),)) + yield SampleInput( + make_arg((S, 3, S)), args=(make_arg((S, 3, S)),), kwargs=dict(dim=1) + ) + yield SampleInput(make_arg((1, 3)), args=(make_arg((S, 3)),), kwargs=dict(dim=-1)) + + +def error_inputs_cross(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + sample = SampleInput(input=make_arg((S, 3)), args=(make_arg((S, 1)),)) + err = "inputs dimension -1 must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((5, S, 3)), args=(make_arg((S, 3)),)) + err = "inputs must have the same number of dimensions" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((S, 2)), args=(make_arg((S, 2)),)) + err = "must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput( + input=make_arg((S, 2)), args=(make_arg((S, 2)),), kwargs=dict(dim=2) + ) + err = "Dimension out of range" + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + + +def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs): + """ + This function generates input for torch.linalg.householder_product (torch.orgqr). + The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors. + Empty, square, rectangular, batched square and batched rectangular input is generated. + """ + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + low=-2, + high=2, + ) + # Each column of the matrix is getting multiplied many times leading to very large values for + # the Jacobian matrix entries and making the finite-difference result of grad check less accurate. + # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here. 
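+ # A minimal sketch of how gradcheck ends up consuming one of the samples below
+ # (illustrative only; the OpInfo test machinery does the equivalent internally):
+ #     A, tau = make_arg((S, S)), make_arg((S,))
+ #     torch.autograd.gradcheck(torch.linalg.householder_product, (A, tau))
+ # The tighter [-2, 2] value range keeps the finite-difference Jacobian well behaved.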
+ yield SampleInput(make_arg((S, S)), make_arg((S,))) + yield SampleInput(make_arg((S + 1, S)), make_arg((S,))) + yield SampleInput(make_arg((2, 1, S, S)), make_arg((2, 1, S))) + yield SampleInput(make_arg((2, 1, S + 1, S)), make_arg((2, 1, S))) + yield SampleInput( + make_arg((0, 0), low=None, high=None), + make_arg((0,), low=None, high=None), + ) + yield SampleInput(make_arg((S, S)), make_arg((0,), low=None, high=None)) + # m = n = S, k = S - 2 + yield SampleInput(make_arg((S, S)), make_arg((S - 2,), low=None, high=None)) + # m = S, n = S -1, k = S - 2 + yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None)) + + +def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype) + + def make_singular_matrix_batch_base(size, rank): + assert size[-1] == size[-2] + assert rank > 0 and rank < size[-1] + + n = size[-1] + a = make_arg(size[:-2] + (n, rank)) / 10 + b = make_arg(size[:-2] + (rank, n)) / 10 + x = a @ b + lu, pivs, _ = torch.linalg.lu_factor_ex(x) + p, l, u = torch.lu_unpack(lu, pivs) + u_diag_abs = u.diagonal(0, -2, -1).abs() + u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values + u_diag_abs_smallest_idxs = torch.topk( + u_diag_abs, k=(n - rank), largest=False + ).indices + u.diagonal(0, -2, -1).div_(u_diag_abs_largest) + u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps + matrix = p @ l @ u + + matrix.requires_grad_(requires_grad) + return matrix + + for batch, size in product(((), (2,), (2, 2)), range(6)): + shape = batch + (size, size) + for rank in range(1, size): + yield SampleInput(make_singular_matrix_batch_base(shape, rank)) + + +def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_arg_fullrank = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + # (<matrix_size>, (<batch_sizes, ...>)) + test_sizes = [ + (1, ()), + (2, (0,)), + (2, (2,)), + ] + + for matrix_size, batch_sizes in test_sizes: + size = batch_sizes + (matrix_size, matrix_size) + for n in (0, 3, 5): + yield SampleInput(make_arg(size), args=(n,)) + for n in [-4, -2, -1]: + yield SampleInput(make_arg_fullrank(*size), args=(n,)) + + +def sample_inputs_linalg_det_logdet_slogdet( + op_info, device, dtype, requires_grad, **kwargs +): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + batches = [(), (0,), (3,)] + ns = [0, 1, 5] + + is_logdet = op_info.name == "logdet" + + for ( + batch, + n, + ) in product(batches, ns): + shape = batch + (n, n) + A = make_arg(*shape) + # Need to make the matrices in A have positive determinant for autograd + # To do so, we multiply A by its determinant to flip the sign of its determinant + if is_logdet and not A.is_complex() and A.numel() > 0: + s = torch.linalg.slogdet(A).sign + A = A * s.unsqueeze(-1).unsqueeze(-1) + A.requires_grad_(requires_grad) + yield SampleInput(A) + + +def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """Samples the inputs for both linalg.lu_solve and lu_solve""" + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_a = partial(make_fn, dtype=dtype, device=device) + make_b = partial(make_tensor, dtype=dtype, device=device) + + 
def clone(X, requires_grad): + Y = X.clone() + Y.requires_grad_(requires_grad) + return Y + + is_linalg_lu_solve = op_info.name == "linalg.lu_solve" + + batches = ((), (0,), (2,)) + ns = (3, 1, 0) + nrhs = (4, 1, 0) + + for n, batch, rhs in product(ns, batches, nrhs): + A = make_a(*(batch + (n, n))) + LU, pivots = torch.linalg.lu_factor(A) + + B = make_b(batch + (n, rhs)) + + grads = (False,) if not requires_grad else (True, False) + # we try all possible combinations of requires_grad for each input + for LU_grad, B_grad in product(grads, grads): + # when requires_grad == True, at least one input has to have requires_grad enabled + if requires_grad and not LU_grad and not B_grad: + continue + + if is_linalg_lu_solve: + for adjoint, left in product((True, False), repeat=2): + yield SampleInput( + clone(LU, LU_grad), + args=(pivots, clone(B if left else B.mT, B_grad)), + kwargs=dict(adjoint=adjoint, left=left), + ) + else: + yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots)) + + +def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): + # Each test case consists of the sizes in the chain of multiplications + # e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) + test_cases = [ + [1, 2, 1], + [2, 0, 2], + [0, 2, 2], + [2, 2, 2, 2], + [2, 3, 4, 5], + [5, 4, 0, 2], + [2, 4, 3, 5, 3, 2], + ] + + for sizes in test_cases: + tensors = [] + for size in zip(sizes[:-1], sizes[1:]): + t = make_tensor( + size, dtype=dtype, device=device, requires_grad=requires_grad + ) + tensors.append(t) + yield SampleInput(tensors) + + +def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): + low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + sizes = ((2, 2), (2, 3, 2)) + if dtype in low_precision_dtypes: + # svdvals not supported for low precision dtypes + ords = ("fro", inf, -inf, 1, -1) + else: + ords = ("fro", "nuc", inf, -inf, 1, -1, 2, -2) + dims = ((-2, -1), (-1, 0)) + + for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): + yield SampleInput(make_arg(size), args=(ord, dim, keepdim)) + + +def sample_inputs_linalg_norm( + op_info, device, dtype, requires_grad, *, variant=None, **kwargs +): + if variant is not None and variant not in ("subgradient_at_zero",): + raise ValueError( + f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}" + ) + + test_sizes = [ + (S,), + (0,), + (S, S), + (0, 0), + (S, 0), + (0, S), + (S, S, S), + (0, S, S), + (S, 0, S), + (0, 0, 0), + ] + + vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) + if dtype in {torch.float16, torch.bfloat16, torch.complex32}: + # svdvals not supported for low precision dtypes + matrix_ords = ("fro", inf, -inf, 1, -1) + else: + matrix_ords = (None, "fro", "nuc", inf, -inf, 1, -1, 2, -2) + + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + low=None, + high=None, + ) + + for test_size in test_sizes: + is_vector_norm = len(test_size) == 1 + is_matrix_norm = len(test_size) == 2 + + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. 
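+ # Rough illustration of the error quoted above (a sketch, not exercised verbatim):
+ #     torch.linalg.norm(torch.zeros(0, 3), ord=2)
+ # takes the largest singular value with amax() and hits this IndexError, which is why
+ # sizes with an empty matrix dimension are filtered out of the p=2 samples below.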
+ is_valid_for_p2 = is_vector_norm or (test_size[-1] != 0 and test_size[-2] != 0) + + for keepdim in [False, True]: + if variant != "subgradient_at_zero" and is_valid_for_p2: + yield SampleInput(make_arg(test_size), keepdim=keepdim) + + if not (is_vector_norm or is_matrix_norm): + continue + + ords = vector_ords if is_vector_norm else matrix_ords + + for ord in ords: + if is_vector_norm and test_size[-1] == 0: + if ord == np.inf or (ord is not None and ord < 0): + # RuntimeError: linalg.vector_norm cannot compute the + # {ord} norm on an empty tensor because the operation + # does not have an identity + continue + elif is_matrix_norm: + dims_to_check = { + None: (0,), + np.inf: (0,), + 2: (0, 1), + 1: (1,), + -1: (1,), + -2: (0, 1), + -np.inf: (0,), + }.get(ord, ()) + + if any(test_size[d] == 0 for d in dims_to_check): + # IndexError: amax(): Expected reduction dim {dim} to + # have non-zero size. + continue + + if variant == "subgradient_at_zero": + yield SampleInput( + torch.zeros( + test_size, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ), + ord, + keepdim=keepdim, + ) + else: + yield SampleInput(make_arg(test_size), ord, keepdim=keepdim) + + if ord in ["nuc", "fro"]: + yield SampleInput( + make_arg(test_size), ord=ord, keepdim=keepdim, dim=(0, 1) + ) + + +def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + batches = ((), (0,), (1,), (5,)) + ns = (0, 1, 3, 5) + for b, n in product(batches, ns): + shape = b + (n,) + yield SampleInput(make_arg(shape), args=(make_arg(shape),)) + for i in range(len(shape)): + yield SampleInput( + make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i) + ) + + +def sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates invertible inputs for linear algebra ops + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 
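+ For example (illustrative), the product below yields matrices of shapes
+ (5, 5), (0, 0), (0, 5, 5), (0, 0, 0), (2, 5, 5), (2, 0, 0), (1, 1, 5, 5) and (1, 1, 0, 0).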
+ """ + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + + for batch, n in product(batches, ns): + yield SampleInput(make_arg(*batch, n, n)) + + +def sample_inputs_matrix_rank(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function produces inputs for matrix rank that test + all possible combinations for atol and rtol + """ + + def make_tol_arg(kwarg_type, inp): + if kwarg_type == "none": + return None + if kwarg_type == "float": + return 1.0 + assert kwarg_type == "tensor" + return torch.ones(inp.shape[:-2], device=device) + + for tol_type in ["float", "tensor"]: + for atol_type, rtol_type in product(["none", tol_type], repeat=2): + if ( + not atol_type and not rtol_type + ): # default behavior, so skipped here so it's not tested 2 extra times + continue + for sample in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad + ): + assert sample.kwargs == {} + sample.kwargs = { + "atol": make_tol_arg(atol_type, sample.input), + "rtol": make_tol_arg(rtol_type, sample.input), + } + yield sample + + # default kwargs + yield from sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + + +def sample_inputs_linalg_pinv_singular( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to + test the backward method of `linalg_pinv`. That way we always preserve the rank of the + input no matter the perturbations applied to it by the gradcheck. + Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood. + """ + batches = [(), (0,), (2,), (1, 1)] + # the size of at least 30 is required to cause failures for the previous implicit implementation + # of the pinv's backward method, albeit it is slow. 
+ size = [0, 3, 50] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + # Note that by making the columns of `a` and `b` orthonormal we make sure that + # the product matrix `a @ b.t()` has condition number 1 when restricted to its image + a = ( + torch.rand(*batch, m, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + b = ( + torch.rand(*batch, n, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + yield SampleInput(a, args=(b,)) + + +def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # autograd is not supported for inputs with zero number of elements + shapes = ( + (S, S), + (2, S, S), + (2, 1, S, S), + ) + + for shape in shapes: + yield SampleInput(make_arg(shape)) + + +def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + shapes = ( + (), + (1,), + (S,), + (2, S), + ) + + for shape in shapes: + if len(shape) > 0 and shape[-1] > 1: + yield SampleInput(make_arg(shape)) + n = shape[-1] if len(shape) > 0 else 1 + for i in range(3): + # n-1, n, n+1 + N = n + i - 1 + if N < 2: + continue + yield SampleInput(make_arg(shape), kwargs=dict(N=N)) + + +def np_vander_batched(x, N=None): + # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests) + if x.ndim == 0: + x = x[np.newaxis] + if x.ndim == 1: + y = np.vander(x, N=N, increasing=True) + return y + else: + if N is None: + N = x.shape[-1] + y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N)) + return y + + +def sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + # Cholesky factorization is for positive-definite matrices + single_well_conditioned_matrix = random_well_conditioned_matrix( + S, S, dtype=dtype, device=device + ) + batch_well_conditioned_matrices = random_well_conditioned_matrix( + 2, S, S, dtype=dtype, device=device + ) + single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH + batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH + + inputs = ( + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + single_pd, + batch_pd, + ) + test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs) + for l in test_cases: + # generated lower-triangular samples + l.requires_grad = requires_grad + yield SampleInput(l) # upper=False by default + yield SampleInput( + l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False) + ) + + # generate upper-triangular inputs + u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad) + yield SampleInput(u, kwargs=dict(upper=True)) + + +def sample_inputs_linalg_ldl_factor( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + + # Symmetric inputs + yield SampleInput( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=False), + ) # single matrix + yield SampleInput( + random_symmetric_pd_matrix(S, 2, dtype=dtype, 
device=device), + kwargs=dict(hermitian=False), + ) # batch of matrices + yield SampleInput( + torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # 0x0 matrix + yield SampleInput( + torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # zero batch of matrices + + # Hermitian inputs + # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+ + magma_254_available = device.type == "cuda" and _get_magma_version() >= (2, 5, 4) + if dtype.is_complex and (device.type == "cpu" or magma_254_available): + yield SampleInput( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # single matrix + yield SampleInput( + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # batch of matrices + + +def sample_inputs_linalg_ldl_solve( + op_info, device, dtype, requires_grad=False, **kwargs +): + # Generate LDL factors of symmetric (and Hermitian on CPU) matrices + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + symmetric_inputs = ( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix + random_symmetric_pd_matrix( + S, 2, dtype=dtype, device=device + ), # batch of matrices + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + ) + hermitian_inputs = ( + ( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + ) + if device.type == "cpu" and dtype.is_complex + else () + ) + test_cases1 = ( + torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs + ) + test_cases2 = ( + torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs + ) + + # Symmetric case + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for test_case in test_cases1: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=False) + ) + + # Hermitian case + for test_case in test_cases2: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=True) + ) + + +def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + device = torch.device(device) + + drivers: Tuple[str, ...] + if device.type == "cuda": + drivers = ("gels",) + else: + drivers = ("gels", "gelsy", "gelss", "gelsd") + + # we generate matrices of shape (..., n + delta, n) + deltas: Tuple[int, ...] 
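+ # Concretely (with the fixed n = 3 used below): delta = -1 gives under-determined
+ # (2, 3) systems, delta = 0 square (3, 3) systems, and delta = +1 over-determined
+ # (4, 3) systems.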
+ if device.type == "cpu" or has_cusolver(): + deltas = (-1, 0, +1) + # only square systems if Cusolver is not available + # because we solve a lstsq problem with a transposed matrix in the backward + else: + deltas = (0,) + + for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas): + shape = batch + (3 + delta, 3) + a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device) + a.requires_grad_(requires_grad) + b = make_tensor( + shape, + dtype=dtype, + device=device, + low=None, + high=None, + requires_grad=requires_grad, + ) + yield SampleInput(a, b, driver=driver) + + +def error_inputs_lstsq(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d,)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d, None)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # Shapes for 2D Tensors + shapes_2d = ((S, S), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((S, S, S),) + + kwargs_2d = (dict(), dict(offset=2), dict(offset=-2), dict(offset=1)) + kwargs_3d = ( + dict(offset=1, dim1=1, dim2=2), + dict(offset=2, dim1=0, dim2=1), + dict(offset=-2, dim1=0, dim2=1), + ) + + for shape, kwarg in chain( + product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d) + ): + yield SampleInput(make_arg(shape), kwargs=kwarg) + + +def error_inputs_diagonal_diag_embed(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + shapes1d = (0, 1, (0,), (1,)) + shapes2d = ((M, L),) + shapes3d = ((M, S, L),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 == dim2 is not allowed + dict(dim1=1, dim2=1), + # out of bounds dims are not allowed + dict(dim1=10000), + dict(dim2=10000), + ) + + kwargs3d = kwargs2d + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + arg = make_arg(shape) + sample = SampleInput(input=arg, kwargs=kwargs) + + dim1 = kwargs.get("dim1") + dim2 = kwargs.get("dim2") + + if "diagonal" in op_info.name: + num_dim = arg.dim() + elif op_info.name in ("diag_embed", "_refs.diag_embed"): + # these are valid inputs for diag_embed + if shape in ((0,), (1,)): + continue + num_dim = arg.dim() + 1 + else: + raise RuntimeError("should be unreachable") + + bound1 = -num_dim + bound2 = num_dim - 1 + dim_range = range(bound1, bound2 + 1) + dim1_cond = dim1 and dim1 not in dim_range + dim2_cond = dim2 and dim2 not in dim_range + + if dim1 == dim2: + err = f"diagonal dimensions cannot be identical {dim1}, {dim2}" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + elif dim1_cond or dim2_cond: + err_dim = dim1 if dim1_cond else dim2 + err = ( + r"Dimension out of range \(expected to be in range of " + rf"\[{bound1}, {bound2}\], but got {err_dim}\)" + ) + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + else: + raise RuntimeError("should be unreachable") + + +def sample_inputs_linalg_cholesky( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function always generates 
positive-definite input for torch.linalg.cholesky using + random_hermitian_pd_matrix. + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + """ + from torch.testing._internal.common_utils import random_hermitian_pd_matrix + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + for batch, n, upper in product(batches, ns, [True, False]): + a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device) + a.requires_grad = requires_grad + yield SampleInput(a, upper=upper) + + +def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eig + """ + + def out_fn(output): + return output[0], abs(output[1]) + + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument. + """ + + def out_fn(output): + if isinstance(output, tuple): + # eigh function + return output[0], abs(output[1]) + else: + # eigvalsh function + return output + + # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + # Note: we cannot use np.random.choice here as TorchDynamo + # does not support tensors of strings. + sample.kwargs = {"UPLO": random.choice(["L", "U"])} + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.pinv with hermitian=False keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + real_dtype = o.input.real.dtype if dtype.is_complex else dtype + # requires_grad path for rtol tensor is not implemented + for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)): + o = clone_sample(o) + o.kwargs = {"rtol": rtol} + yield o + + +def sample_inputs_linalg_pinv_hermitian( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates input for torch.linalg.pinv with hermitian=True keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + o.kwargs = {"hermitian": True} + yield o + + +def sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs +): + """ + This function generates always solvable input for torch.linalg.solve + We sample a fullrank square matrix (i.e. invertible) A + The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'. + The second input is generated as the product of 'batches', 'ns' and 'nrhs'. + In total this function generates 18 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices. + 'ns' gives 0x0 and 5x5 matrices. 
+ and 'nrhs' controls the number of vectors to solve for: + () - using 1 as the number of vectors implicitly + (1,) - same as () but explicit + (3,) - solve for 3 vectors. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs. + torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow + 1D tensors (vectors) as the right-hand-side. + Once torch.solve / triangular_solve / cholesky_solve and its testing are removed, + 'vector_rhs_allowed' may be removed here as well. + """ + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_a = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_b = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,)] + ns = [5, 0] + if vector_rhs_allowed: + nrhs = [(), (1,), (3,)] + else: + nrhs = [(1,), (3,)] + + for n, batch, rhs in product(ns, batches, nrhs): + yield SampleInput(make_a(*batch, n, n), args=(make_b(batch + (n,) + rhs),)) + + +def sample_inputs_linalg_solve_triangular( + op_info, device, dtype, requires_grad=False, **kwargs +): + make_arg = partial(make_tensor, dtype=dtype, device=device) + bs = (1, 2, 0) + ns = (3, 0) + ks = (1, 3, 0) + + for b, n, k, (left, upper, uni) in product( + bs, ns, ks, product((True, False), repeat=3) + ): + if b == 1: + A = make_arg((n, n)) if left else make_arg((k, k)) + B = make_arg((n, k)) + else: + A = make_arg((b, n, n)) if left else make_arg((b, k, k)) + B = make_arg((b, n, k)) + if uni: + # Not really necessary, but writing it for consistency + A.diagonal(0, -2, -1).fill_(1.0) + else: + d = A.diagonal(0, -2, -1) + d[d.abs() < 1e-6] = 1.0 + if upper: + A.triu_() + else: + A.tril_() + kwargs = {"upper": upper, "left": left, "unitriangular": uni} + if requires_grad: + for grad_A, grad_B in product((True, False), repeat=2): + # Either A or B needs to have a gradient + if not grad_A and not grad_B: + continue + yield SampleInput( + A.clone().requires_grad_(grad_A), + args=(B.clone().requires_grad_(grad_B),), + kwargs=kwargs, + ) + else: + yield SampleInput(A, args=(B,), kwargs=kwargs) + + +def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates always solvable input for legacy solve functions + (the ones that are not in torch.linalg module). + The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation + should have b.ndim >= 2, vectors are not allowed. + Also the arguments order is swapped. 
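+ For example (illustrative), a sample that sample_inputs_linalg_solve yields as
+ SampleInput(A, args=(b,)) is re-emitted here as SampleInput(b, args=(A,)).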
+ """ + out = sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False + ) + + def out_fn(output): + return output[0] + + # Reverses tensor order + for sample in out: + sample.input, sample.args = sample.args[0], (sample.input,) + if op_info.name == "solve": + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs): + full_rank = op_info.name == "linalg.lu_factor" + make_fn = ( + make_tensor + if not full_rank + else make_fullrank_matrices_with_distinct_singular_values + ) + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + def out_fn(output): + if op_info.name == "linalg.lu": + return output[1], output[2] + else: + return output + + batch_shapes = ((), (3,), (3, 3)) + # pivot=False only supported in CUDA + pivots = (True, False) if torch.device(device).type == "cuda" else (True,) + deltas = (-2, -1, 0, +1, +2) + for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas): + shape = batch_shape + (S + delta, S) + # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple! + A = make_arg(shape) if not full_rank else make_arg(*shape) + yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn) + + +def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, m, n in product(batches, ns, ns): + yield SampleInput(make_arg(batch + (m, n))) + + +def sample_inputs_linalg_qr_geqrf( + op_info, device, dtype, requires_grad=False, **kwargs +): + # QR is just well defined when the matrix is full rank + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, (m, n) in product(batches, product(ns, ns)): + shape = batch + (m, n) + yield SampleInput(make_arg(*shape)) + + +def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): + a_shapes = [(2, 3, 6), (3, 4, 4, 3)] + # Zero-dim tensors are not supported in NumPy, so we skip them for now. + # NumPy is used in reference check tests. + # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
+ # a_shapes += [(0, 0, 1, 2, 3, 0)] + dimss = [None, (0, 2)] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for a_shape, dims in itertools.product(a_shapes, dimss): + a = make_arg(a_shape) + b = make_arg(a_shape[:2]) + yield SampleInput(a, b, dims=dims) + + +def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = make_fullrank_matrices_with_distinct_singular_values + + def make_input(): + return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) + + # lhs / rhs shape can have any number of dimensions as long as their product equals 12 + shapes = [ + ((2, 2, 3), (12, 1)), + ((4, 3), (6, 1, 2)), + ] + + for shape_lhs, shape_rhs in shapes: + inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() + inp.requires_grad_(requires_grad) + yield SampleInput(inp, ind=len(shape_lhs)) + + +op_db: List[OpInfo] = [ + OpInfo( + "linalg.cross", + ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim), + op=torch.linalg.cross, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + aten_name="linalg_cross", + sample_inputs_func=sample_inputs_cross, + error_inputs_func=error_inputs_cross, + supports_out=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + check_batched_gradgrad=False, + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + variant_test_name="singular", + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_det_singular, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("The backward may give different results"), + "TestCommon", + "test_noncontiguous_samples", + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestBwdGradients", + "test_fn_grad", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + # Both Hessians are incorrect on complex inputs?? 
+ DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93044" + ), + "TestBwdGradients", + "test_fn_grad", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93045" + ), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.diagonal", + aten_name="linalg_diagonal", + aten_backward_name="diagonal_backward", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.float16, torch.chalf + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed, + ), + OpInfo( + "linalg.cholesky", + aten_name="linalg_cholesky", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.cholesky_ex", + aten_name="linalg_cholesky_ex", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vecdot", + aten_name="linalg_vecdot", + ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim), + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_linalg_vecdot, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.cond", + aten_name="linalg_cond", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cond, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + 
dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.eig", + aten_name="linalg_eig", + op=torch.linalg.eig, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eig, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_out", device_type="cpu" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + ), + OpInfo( + "linalg.eigvals", + aten_name="linalg_eigvals", + op=torch.linalg.eigvals, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigh", + aten_name="linalg_eigh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigvalsh", + aten_name="linalg_eigvalsh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + 
"TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.householder_product", + aten_name="linalg_householder_product", + op=torch.linalg.householder_product, + aliases=("orgqr",), + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + # TODO: backward uses in-place operations that vmap doesn't like + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_householder_product, + decorators=[ + skipCUDAIfNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}) + ), + DecorateInfo( + unittest.skip("Skipped! Flaky"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cpu", + dtypes=(torch.complex128,), + ), + ], + ), + OpInfo( + "linalg.ldl_factor", + aten_name="linalg_ldl_factor", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_factor_ex", + aten_name="linalg_ldl_factor_ex", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_solve", + aten_name="linalg_ldl_solve", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_solve, + decorators=[ + skipCUDAIf( + _get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1" + ), + skipCUDAIfNoCusolver, + skipCUDAIfRocm, + skipCPUIfNoLapack, + ], + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + dtypes=floating_and_complex_types(), + supports_out=True, + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # we skip gradient checks for this suite as they are tested in + # variant_test_name='grad_oriented' + DecorateInfo(unittest.skip("Skipped!"), "TestFwdGradients"), + DecorateInfo(unittest.skip("Skipped!"), "TestBwdGradients"), + # The values for attribute 'shape' do not match + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + variant_test_name="grad_oriented", + # gradchecks for forward AD fails with multi-Tensor outputs + op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], + supports_out=False, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq_grad_oriented, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + 
supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # tests do not work with passing lambda for op + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + ), + ), + OpInfo( + "linalg.matrix_power", + aliases=("matrix_power",), + aten_name="linalg_matrix_power", + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_power, + ), + OpInfo( + "linalg.multi_dot", + # Need this lambda because gradcheck does not work with TensorList inputs + aten_name="linalg_multi_dot", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_inplace_autograd=False, + # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_multi_dot, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo( + unittest.skip("67470!"), "TestCommon", "test_noncontiguous_samples" + ), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestOpInfo", + device_type="xla", + dtypes=(torch.long,), + ), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + device_type="cpu", + dtypes=(torch.long,), + ), + ), + ), + # NB: linalg.norm has two variants so that different skips can be used for different sample inputs + OpInfo( + "linalg.norm", + aten_name="linalg_norm", + op=torch.linalg.norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_norm, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.norm", + op=torch.linalg.norm, + variant_test_name="subgradients_at_zero", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=partial( + sample_inputs_linalg_norm, variant="subgradient_at_zero" + ), + aten_name="linalg_norm", + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # [NEW] Skips specifically for sample inputs at zero + # norm's vjp/jvp are not well-conditioned near zero + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_fn_fwgrad_bwgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_forward_mode_AD" + ), + DecorateInfo(unittest.expectedFailure, "TestBwdGradients", "test_fn_grad"), + ), + ), + OpInfo( + "linalg.matrix_norm", + aten_name="linalg_matrix_norm", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_norm, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.qr", + aten_name="linalg_qr", + op=torch.linalg.qr, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.slogdet", + aten_name="linalg_slogdet", + op=torch.linalg.slogdet, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vander", + aten_name="linalg_vander", + ref=np_vander_batched, + op=torch.linalg.vander, + dtypes=all_types_and_complex(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_vander, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + ReductionOpInfo( + "linalg.vector_norm", + op=torch.linalg.vector_norm, + identity=0, + nan_policy="propagate", + supports_multiple_dims=True, + complex_to_real=True, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + generate_args_kwargs=sample_kwargs_vector_norm, + aten_name="linalg_vector_norm", + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + OpInfo( + "linalg.lu_factor", + aten_name="linalg_lu_factor", + op=torch.linalg.lu_factor, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_factor_ex", + aten_name="linalg_lu_factor_ex", + op=torch.linalg.lu_factor_ex, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu", + aten_name="linalg_lu", + op=torch.linalg.lu, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_solve", + op=torch.linalg.lu_solve, + aten_name="linalg_lu_solve", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo( + unittest.skip("Tests different backward paths"), + "TestCommon", + "test_floating_inputs_are_differentiable", + ), + ), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + ), + OpInfo( + "linalg.inv", + aten_name="linalg_inv", + op=torch.linalg.inv, + aliases=("inverse",), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + 
"test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.inv_ex", + aten_name="linalg_inv_ex", + op=torch.linalg.inv_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve", + aten_name="linalg_solve", + op=torch.linalg.solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_ex", + aten_name="linalg_solve_ex", + op=torch.linalg.solve_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_triangular", + aten_name="linalg_solve_triangular", + op=torch.linalg.solve_triangular, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve_triangular, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result); + supports_forward_ad=True, + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_matrix_rank, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + # jit doesn't accept tensor inputs for matrix rank + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=[torch.complex64, 
torch.float32], + ), + ), + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + op=torch.linalg.pinv, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # errors with "leaked XXXX bytes CUDA memory on device 0" + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="singular", + # pinv is Frechet-differentiable in a rank-preserving neighborhood, + # so we feed inputs that are the products of two full-rank factors, + # to avoid any rank changes caused by the perturbations in the gradcheck + op=lambda a, b: torch.linalg.pinv(a @ b.mT), + dtypes=floating_and_complex_types(), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv_singular, + # Only large tensors show issues with implicit backward used prior to + # explicit backward implementation. + decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # CUDA runs out of memory + DecorateInfo( + unittest.skip("Skipped!"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + # This test takes almost 2 hours to run! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + # This test is flaky under slow gradcheck, likely due to rounding issues + DecorateInfo( + skipIfSlowGradcheckEnv, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.svd", + op=torch.linalg.svd, + aten_name="linalg_svd", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_svd, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.svdvals", + op=torch.linalg.svdvals, + aten_name="linalg_svdvals", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # We're using at::allclose, which does not have a batching rule + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_svdvals, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + 
unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.tensorinv", + ref=np.linalg.tensorinv, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorinv, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.tensorsolve", + ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorsolve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), +] + +python_ref_db: List[OpInfo] = [ + # + # torch.linalg + # + PythonRefInfo( + "_refs.linalg.cross", + torch_opinfo_name="linalg.cross", + supports_out=True, + op_db=op_db, + skips=( + # no _refs support for Tensor.__getitem__ + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), + # TODO: is this really needed? + DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_python_ref_errors" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.diagonal", + torch_opinfo_name="linalg.diagonal", + supports_out=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.vecdot", + torch_opinfo_name="linalg.vecdot", + op_db=op_db, + ), + ReductionPythonRefInfo( + "_refs.linalg.vector_norm", + torch_opinfo_name="linalg.vector_norm", + supports_out=True, + op_db=op_db, + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.matrix_norm", + torch_opinfo_name="linalg.matrix_norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.norm", + torch_opinfo_name="linalg.norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svd", + torch_opinfo_name="linalg.svd", + supports_out=True, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svdvals", + torch_opinfo_name="linalg.svdvals", + supports_out=True, + op_db=op_db, + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..6f51e388966a7f3849a3fdd373d04a697a2158e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py @@ -0,0 +1,458 @@ +# mypy: ignore-errors + +import unittest 
+from functools import partial + +from itertools import product +from typing import Callable, List, Tuple + +import numpy + +import torch +from torch.testing._internal.common_dtype import floating_types +from torch.testing._internal.common_utils import TEST_SCIPY +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + SampleInput, +) + +if TEST_SCIPY: + import scipy.signal + + +def sample_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs): + r"""Base function used to create sample inputs for windows. + + For additional required args you should use *args, as well as **kwargs for + additional keyword arguments. + """ + + # Tests window sizes up to 5 samples. + for size, sym in product(range(6), (True, False)): + yield SampleInput( + size, + *args, + sym=sym, + device=device, + dtype=dtype, + requires_grad=requires_grad, + **kwargs, + ) + + +def reference_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs): + r"""Reference inputs function to use for windows which have a common signature, i.e., + window size and sym only. + + Implement other special functions for windows that have a specific signature. + See exponential and gaussian windows for instance. + """ + yield from sample_inputs_window( + op_info, device, dtype, requires_grad, *args, **kwargs + ) + + cases = (8, 16, 32, 64, 128, 256) + + for size in cases: + yield SampleInput(size, sym=False) + yield SampleInput(size, sym=True) + + +def reference_inputs_exponential_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"center": 4, "tau": 0.5}), + (16, {"center": 8, "tau": 2.5}), + (32, {"center": 16, "tau": 43.5}), + (64, {"center": 20, "tau": 3.7}), + (128, {"center": 62, "tau": 99}), + (256, {"tau": 10}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + kw["center"] = None + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_gaussian_window(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"std": 0.1}), + (16, {"std": 1.2}), + (32, {"std": 2.1}), + (64, {"std": 3.9}), + (128, {"std": 4.5}), + (256, {"std": 10}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_kaiser_window(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"beta": 2}), + (16, {"beta": 12}), + (32, {"beta": 30}), + (64, {"beta": 35}), + (128, {"beta": 41.2}), + (256, {"beta": 100}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_general_cosine_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"a": [0.5, 0.5]}), + (16, {"a": [0.46, 0.54]}), + (32, {"a": [0.46, 0.23, 0.31]}), + (64, {"a": [0.5]}), + (128, {"a": [0.1, 0.8, 0.05, 0.05]}), + (256, {"a": [0.2, 0.2, 0.2, 0.2, 0.2]}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_general_hamming_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, 
dtype, requires_grad, **kwargs) + + cases = ( + (8, {"alpha": 0.54}), + (16, {"alpha": 0.5}), + (32, {"alpha": 0.23}), + (64, {"alpha": 0.8}), + (128, {"alpha": 0.9}), + (256, {"alpha": 0.05}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def error_inputs_window(op_info, device, *args, **kwargs): + # Tests for windows that have a negative size + yield ErrorInput( + SampleInput(-1, *args, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="requires non-negative window length, got M=-1", + ) + + # Tests for window tensors that are not torch.strided, for instance, torch.sparse_coo. + yield ErrorInput( + SampleInput( + 3, + *args, + layout=torch.sparse_coo, + device=device, + dtype=torch.float32, + **kwargs, + ), + error_type=ValueError, + error_regex="is implemented for strided tensors only, got: torch.sparse_coo", + ) + + # Tests for window tensors that are not floating point dtypes, for instance, torch.long. + yield ErrorInput( + SampleInput(3, *args, dtype=torch.long, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.int64", + ) + + # Tests for window tensors that are bfloat16 + yield ErrorInput( + SampleInput(3, *args, dtype=torch.bfloat16, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.bfloat16", + ) + + # Tests for window tensors that are float16 + yield ErrorInput( + SampleInput(3, *args, dtype=torch.float16, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.float16", + ) + + +def error_inputs_exponential_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, **kwargs) + + # Tests for negative decay values. + yield ErrorInput( + SampleInput(3, tau=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Tau must be positive, got: -1 instead.", + ) + + # Tests for symmetric windows and a given center value. 
+ yield ErrorInput( + SampleInput(3, center=1, sym=True, dtype=torch.float32, device=device), + error_type=ValueError, + error_regex="Center must be None for symmetric windows", + ) + + +def error_inputs_gaussian_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, std=0.5, **kwargs) + + # Tests for negative standard deviations + yield ErrorInput( + SampleInput(3, std=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Standard deviation must be positive, got: -1 instead.", + ) + + +def error_inputs_kaiser_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, beta=12, **kwargs) + + # Tests for negative beta + yield ErrorInput( + SampleInput(3, beta=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="beta must be non-negative, got: -1 instead.", + ) + + +def error_inputs_general_cosine_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, a=[0.54, 0.46], **kwargs) + + # Tests for negative beta + yield ErrorInput( + SampleInput(3, a=None, dtype=torch.float32, device=device, **kwargs), + error_type=TypeError, + error_regex="Coefficients must be a list/tuple", + ) + + yield ErrorInput( + SampleInput(3, a=[], dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Coefficients cannot be empty", + ) + + +def reference_signal_window(fn: Callable): + r"""Wrapper for scipy signal window references. + + Discards keyword arguments for window reference functions that don't have a matching signature with + torch, e.g., gaussian window. + """ + + def _fn( + *args, + dtype=numpy.float64, + device=None, + layout=torch.strided, + requires_grad=False, + **kwargs, + ): + r"""The unused arguments are defined to disregard those values""" + return fn(*args, **kwargs).astype(dtype) + + return _fn + + +def make_signal_windows_opinfo( + name: str, + ref: Callable, + sample_inputs_func: Callable, + reference_inputs_func: Callable, + error_inputs_func: Callable, + *, + skips: Tuple[DecorateInfo, ...] = (), +): + r"""Helper function to create OpInfo objects related to different windows.""" + return OpInfo( + name=name, + ref=ref if TEST_SCIPY else None, + dtypes=floating_types(), + dtypesIfCUDA=floating_types(), + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=error_inputs_func, + supports_out=False, + supports_autograd=False, + skips=( + # TODO: same as this? 
+ # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # skip these tests since we have non tensor input + DecorateInfo( + unittest.skip("Skipped!"), "TestCommon", "test_noncontiguous_samples" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_conj_view"), + DecorateInfo( + unittest.skip("Skipped!"), "TestMathBits", "test_neg_conj_view" + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_neg_view"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_vmap_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_op_has_batch_rule", + ), + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + *skips, + ), + ) + + +op_db: List[OpInfo] = [ + make_signal_windows_opinfo( + name="signal.windows.hamming", + ref=reference_signal_window(scipy.signal.windows.hamming) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.hann", + ref=reference_signal_window(scipy.signal.windows.hann) if TEST_SCIPY else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.bartlett", + ref=reference_signal_window(scipy.signal.windows.bartlett) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.blackman", + ref=reference_signal_window(scipy.signal.windows.blackman) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.cosine", + ref=reference_signal_window(scipy.signal.windows.cosine) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.exponential", + ref=reference_signal_window(scipy.signal.windows.exponential) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, tau=2.78), + reference_inputs_func=partial(reference_inputs_exponential_window, tau=2.78), + error_inputs_func=error_inputs_exponential_window, + ), + make_signal_windows_opinfo( + name="signal.windows.gaussian", + ref=reference_signal_window(scipy.signal.windows.gaussian) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, std=1.92), + reference_inputs_func=partial(reference_inputs_gaussian_window, std=1.92), + error_inputs_func=error_inputs_gaussian_window, + skips=( + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to 
float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + make_signal_windows_opinfo( + name="signal.windows.kaiser", + ref=reference_signal_window(scipy.signal.windows.kaiser) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, beta=12.0), + reference_inputs_func=partial(reference_inputs_kaiser_window, beta=12.0), + error_inputs_func=error_inputs_kaiser_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_cosine", + ref=reference_signal_window(scipy.signal.windows.general_cosine) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, a=[0.54, 0.46]), + reference_inputs_func=partial( + reference_inputs_general_cosine_window, a=[0.54, 0.46] + ), + error_inputs_func=error_inputs_general_cosine_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_hamming", + ref=reference_signal_window(scipy.signal.windows.general_hamming) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, alpha=0.54), + reference_inputs_func=partial( + reference_inputs_general_hamming_window, alpha=0.54 + ), + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.nuttall", + ref=reference_signal_window(scipy.signal.windows.nuttall) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..ce80886afdeeb262aad47b74b27e345f3e3838fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py @@ -0,0 +1,933 @@ +# mypy: ignore-errors + +import os + +import torch +from torch.testing import make_tensor # noqa: F401 +from torch.testing._internal.opinfo.core import ( # noqa: F401 + BinaryUfuncInfo, + ErrorInput, + generate_elementwise_binary_tensors, + ReductionOpInfo, + sample_inputs_reduction, + SampleInput, +) + + +def _check_validate(op_info, sample): + def _check_fail(sample): + try: + op_info( + sample.sample_input.input, + *sample.sample_input.args, + **sample.sample_input.kwargs, + ) + except sample.error_type: + pass + except Exception as msg: + raise AssertionError( # noqa: TRY200 + f"{op_info.name} on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}" + ) + else: + raise AssertionError( + f"{op_info.name} on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got none." 
+ ) + + def _check_success(sample): + try: + op_info(sample.input, *sample.args, **sample.kwargs) + except Exception as msg: + raise AssertionError( # noqa: TRY200 + f"{op_info.name} on {sample=} expected to succeed " + f", got {type(msg).__name__}: {msg}" + ) + + if isinstance(sample, ErrorInput): + _check_fail(sample) + else: + _check_success(sample) + + +def _sample_inputs_sparse( + sample_inputs, + maybe_failing_sample_inputs, + validate_sample_input, + op_info, + *args, + **kwargs, +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + # Error inputs are handled in error_inputs_sparse + + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + + +def _error_inputs_sparse( + maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, ErrorInput): + yield sample + # Sample inputs are handled in sample_inputs_sparse + + +def _apply_requires_grad_to_samples(sample_inputs): + """Decorator to _maybe_failing_sample_inputs_... generator functions + that clones and sets requires_grad argument to tensors in sample + input arguments. This is needed when the generated samples share + tensor instances. + """ + + def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs): + def apply_requires_grad(x): + if ( + not isinstance(x, torch.Tensor) + or x.requires_grad + or not requires_grad + or not (x.is_floating_point() or x.is_complex()) + ): + return x + return x.detach().clone().requires_grad_(requires_grad) + + if requires_grad: + for sample_input in sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ): + yield sample_input.transform(apply_requires_grad) + else: + yield from sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ) + + return wrapper + + +def sample_inputs_sparse_reduction( + op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs +): + """Sample inputs for reduction operations on sparse tensors.""" + layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0] + op_supports_layout = getattr(op_info, "supports_" + layout_name) + if not op_supports_layout: + return + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if sample_input.input.ndim == 0: + # scalar sparse tensors are not supported + continue + + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if sample_input.input.ndim < 2: + # conversion to sparse compressed tensors requires at + # least 2 dimensional tensors + continue + if sample_input.input.ndim > 2 and (sample_input.input == 0).any(): + # Skip batched sparse compressed samples that contain + # explicit zeros because to_sparse(layout=..) will + # fail, see gh-98495. + # TODO: remove this if-block after gh-98495 is fixed. 
+ continue + + if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None: + blocksize = (1, 1) + + yield SampleInput( + sample_input.input.detach() + .to_sparse(layout=layout, blocksize=blocksize) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex): + # uncoalesced samples + inp = sample_input.input.detach().to_sparse(layout=layout) + inp = torch.sparse_coo_tensor( + inp.indices().repeat(1, 2), + inp.values().repeat(2), + inp.shape, + dtype=inp.dtype, + device=inp.device, + ) + assert not inp.is_coalesced() + yield SampleInput( + inp.requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if sample_input.input.ndim > 2: + # hybrid samples + yield SampleInput( + sample_input.input.detach() + .to_sparse( + layout=layout, + blocksize=blocksize, + dense_dim=sample_input.input.ndim - 2, + ) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False): + """Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. + """ + UNSPECIFIED = object() + if op_info.name == "sum": + sample = _validate_sample_input_sparse_reduction_sum(sample) + + if op_info.name in {"masked.sum"}: + mask = sample.kwargs.get("mask", UNSPECIFIED) + if ( + mask not in {None, UNSPECIFIED} + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. + sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif mask is UNSPECIFIED: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + + if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}: + t_inp = sample.input + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + mask = sample.kwargs.get("mask") + if ( + mask is not None + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. 
+ sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif mask is None: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif ( + mask.layout is sample.input.layout + and mask.ndim > 2 + and op_info.name == "masked.mean" + ): + sample = ErrorInput( + sample, + error_type=TypeError, + error_regex=( + "where[(][)] received an invalid combination of arguments" + " - got [(]Tensor, Tensor, NoneType[)]" + ), + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif ( + sample.input.ndim > 2 + and (sample.kwargs.get("dim") not in {0, 1}) + and mask.ndim > 2 + and mask.layout is not torch.strided + ): + if sample.kwargs.get("dim") == (0, -1): + sample = ErrorInput( + sample, + error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities", + ) + elif op_info.name == "masked.prod": + sample = ErrorInput( + sample, + error_regex="input_dim == 2 INTERNAL ASSERT FAILED at", + ) + else: + sample = ErrorInput( + sample, + error_type=AssertionError, + error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + elif ( + mask.layout is t_inp.layout + and mask._nnz() != t_inp._nnz() + and t_inp.dense_dim() > 0 + ): + sample = ErrorInput( + sample, + error_regex="Index tensor must have the same number of dimensions as src tensor", + ) + + if check_validate: + _check_validate(op_info, sample) + + return sample + + +def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + dim = t_kwargs.get("dim") + keepdim = t_kwargs.get("keepdim") + layout = t_inp.layout + if isinstance(dim, (int, list, tuple)): + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}: + return ErrorInput( + sample, + error_regex=( + "Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout" + ), + ) + if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim: + return ErrorInput( + sample, + error_regex=( + "reduction operations on CSR tensors with keepdim=False is unsupported" + ), + ) + if t_inp.dim() != 2: + return ErrorInput( + sample, + error_regex=("input_dim == 2 INTERNAL ASSERT"), + ) + if layout == torch.sparse_csr: + if t_inp.dtype == torch.bool: + return ErrorInput( + sample, + error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"), + ) + if t_inp.dtype == torch.complex32: + return ErrorInput( + sample, + error_regex=( + "_sparse_csr_sum_cuda not implemented for 'ComplexHalf'" + ), + ) + return sample + + +def _maybe_failing_sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. 
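    # (Editorial sketch, hedged) How the validate/route split above plays out for
    # one of the CSR samples yielded below; the tensor values are illustrative only:
    #
    #     t = torch.tensor([[0.0, 1.0], [2.0, 3.0]]).to_sparse(layout=torch.sparse_csr)
    #     s = SampleInput(t, kwargs=dict(dim=0))  # keepdim left at its default (False)
    #     s = _validate_sample_input_sparse_reduction_sum(s)
    #     # -> returned as an ErrorInput ("reduction operations on CSR tensors with
    #     #    keepdim=False is unsupported"), so `_sample_inputs_sparse` drops it
    #     #    while `_error_inputs_sparse` yields it as an expected-failure case.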
+ if layout in [ + torch.sparse_csr, + torch.sparse_csc, + ]: + # NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend. + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + if layout in [ + torch.sparse_bsr, + torch.sparse_bsc, + ]: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(2, 2)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + +def sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for sum on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_reduction, + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs): + """Error inputs for sum on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def sample_inputs_sparse_elementwise_binary_operation( + op_info, device, dtype, 
requires_grad, layout, **kwargs +): + """Sample inputs for elementwise binary operations on sparse tensors. + + The samples include regular, zero-sized, batched, and hybrid + sparse tensors as well as rhs scalars. All tensors are full tensors. + """ + + def _to_sparse(tensor, **kwargs): + return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad) + + for sample_input in generate_elementwise_binary_tensors( + op_info, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=True, + **kwargs, + ): + lhs, rhs = sample_input.input, sample_input.args[0] + min_dense_dim = 0 + max_dense_dim = lhs.ndim - 1 + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if lhs.ndim < 2: + # sparse compressed tensors sparse_dim must be 2 + continue + max_dense_dim = lhs.ndim - 2 + + for dense_dim in range(min_dense_dim, max_dense_dim + 1): + if layout in {torch.sparse_bsr, torch.sparse_bsc}: + blocksizes = [(1, 1)] + if lhs.numel() > 0: + blocksizes.append( + ( + lhs.shape[lhs.ndim - 2 - dense_dim], + lhs.shape[lhs.ndim - 1 - dense_dim], + ) + ) + else: + blocksizes = [None] + for blocksize in blocksizes: + to_sparse_kwargs = dict( + layout=layout, dense_dim=dense_dim, blocksize=blocksize + ) + lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs) + rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs) + # op(sparse, sparse) + yield SampleInput( + lhs_sparse, + args=(rhs_sparse, *sample_input.args[1:]), + kwargs=sample_input.kwargs, + ) + # op(sparse, scalar) + yield SampleInput( + lhs_sparse, + args=( + make_tensor( + (), dtype=dtype, device=device, requires_grad=requires_grad + ), + *sample_input.args[1:], + ), + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_elementwise_binary_sparse_mul(sample): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + layout = t_inp.layout + dtype = t_inp.dtype + if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex=( + "coo_to_sparse_csr: conversion from Sparse to SparseCsr for input" + " tensors with sparse_dim[(][)]!=2 is not supported" + ), + ) + elif layout is torch.sparse_csc and t_args[0].ndim > 0: + return ErrorInput( + sample, error_regex="Expected result Tensor to be of format CSR" + ) + elif layout is torch.sparse_bsr and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr", + ) + elif layout is torch.sparse_bsc and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc", + ) + elif ( + layout is torch.sparse_coo + and dtype is torch.bool + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + and t_inp.dense_dim() > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'" + ) + elif ( + layout in {torch.sparse_coo, torch.sparse_csr} + and dtype is torch.bool + and t_inp._nnz() > 0 + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + ): + return ErrorInput( + sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'" + ) + elif ( + layout is torch.sparse_csr + and t_args[0].layout is torch.strided + and 0 < t_args[0].ndim + and t_args[0].ndim < 
t_inp.ndim + ): + return ErrorInput( + sample, error_regex="sparse_mask_sparse_csr expects self to be 2D" + ) + elif layout is torch.sparse_csr and ( + (t_args[0].layout is torch.strided and 0 < t_args[0].ndim) + or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape) + ): + return ErrorInput( + sample, + error_regex=( + "expects sparse inputs with equal dimensionality, number of sparse dimensions," + " and shape of sparse dimensions" + ), + ) + elif ( + layout is torch.sparse_csr + and t_inp.dense_dim() > 0 + and t_inp._nnz() > 0 + and t_inp.is_cpu + and dtype is torch.float16 + and t_args[0].ndim > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Half'" + ) + return sample + + +@_apply_requires_grad_to_samples +def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse( + layout=layout, dense_dim=0, blocksize=blocksize + ) + batch = torch.tensor( + [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize) + hybrid = torch.tensor( + [[[1], [2]], [[3], [4]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize) + + if layout is torch.sparse_csr: + # RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor + yield SampleInput(batch, args=(batch,)) + # RuntimeError: Only tensors with two sparse dimensions can be + # converted to the SparseCsr layout, got self with 3 sparse + # dimensions. 
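+        # Hedged repro sketch of the hybrid case below (`h` roughly mirrors the
+        # `hybrid` tensor defined above; exact behaviour may differ across
+        # PyTorch builds):
+        #   >>> h = torch.tensor([[[1.0], [2.0]], [[3.0], [4.0]]])
+        #   >>> h = h.to_sparse(layout=torch.sparse_csr, dense_dim=1)
+        #   >>> torch.mul(torch.zeros_like(h), torch.zeros_like(h))  # may raise RuntimeError as noted above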
+ yield SampleInput( + torch.zeros_like(hybrid).requires_grad_(requires_grad), + args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),), + ) + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_csc: + # RuntimeError: Expected result Tensor to be of format CSR + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsr: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsc: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_coo: + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if dtype in {torch.bool, torch.float16} and regular.is_cpu: + # RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)' + yield SampleInput(hybrid, args=(hybrid,)) + + +def _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=False +): + if op_info.name == "mul": + sample = _validate_sample_input_elementwise_binary_sparse_mul(sample) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs): + """Sample inputs for mul operation on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_elementwise_binary_operation, + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_mul(op_info, device, layout, **kwargs): + """Error inputs for mul operation on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + from torch.testing._internal.common_utils import TestCase + + for tensor in TestCase().generate_simple_inputs( + layout, + device=device, + dtype=dtype, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=False, + enable_non_contiguous_values=False, + ): + yield SampleInput(tensor, args=(), kwargs={}) + yield SampleInput( + tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout) + ) + + if dtype is not torch.float64: + yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64)) + + if torch.cuda.is_available(): + other_device = "cuda" if tensor.device.type == "cpu" else "cpu" + yield SampleInput(tensor, args=(), kwargs=dict(device=other_device)) + + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + 
other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout)) + + if layout is not torch.sparse_coo: + yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo)) + + +def _validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False): + if sample.input.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if sample.kwargs.get("device", sample.input.device) != sample.input.device: + return ErrorInput( + sample, + error_regex=( + "device of (ccol|crow)_indices \\(=(cpu|cuda.*)\\) must" + " match device of values \\(=(cuda.*|cpu)\\)" + ), + ) + if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout: + return ErrorInput( + sample, + error_regex=( + "empty_like with different sparse layout is not supported" + " \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)" + ), + ) + if sample.input.layout is torch.sparse_coo: + return ErrorInput( + sample, + error_regex=( + "Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend." + ), + ) + if check_validate: + _check_validate(op_info, sample) + return sample + + +def _maybe_failing_sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + if torch.cuda.is_available() and layout is not torch.sparse_coo: + other_device = "cuda" if torch.device(device).type == "cpu" else "cpu" + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(device=other_device), + ) + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(layout=other_layout), + ) + + +def sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for like-functions on sparse tensors.""" + yield from _sample_inputs_sparse( + _sample_inputs_sparse_like_fns, + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs): + """Error inputs for like-functions on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _validate_sample_input_sparse_default(op_info, sample, check_validate=False): + if op_info.name == "to_sparse": + if ( + sample.input.layout + in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + and len(sample.args) == 1 + and isinstance(sample.args[0], int) + and sample.args[0] != 2 + ): + sample = ErrorInput( + sample, + error_regex="sparse dim argument 
must be 2 for sparse_compressed_to_sparse", + ) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def validate_sample_input_sparse(op_info, sample, check_validate=False): + """Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. + """ + if isinstance(op_info, ReductionOpInfo): + return _validate_sample_input_sparse_reduction( + op_info, sample, check_validate=check_validate + ) + elif isinstance(op_info, BinaryUfuncInfo): + return _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=check_validate + ) + else: + return _validate_sample_input_sparse_default( + op_info, sample, check_validate=check_validate + ) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py new file mode 100644 index 0000000000000000000000000000000000000000..426eb7e54780d71a8af4b0059e743cb244df2c95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py @@ -0,0 +1,838 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from itertools import product +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + precisionOverride, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import all_types_and, floating_types +from torch.testing._internal.common_utils import TEST_SCIPY, torch_to_numpy_dtype_dict +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + DecorateInfo, + L, + NumericsFilter, + OpInfo, + S, + SampleInput, + UnaryUfuncInfo, +) +from torch.testing._internal.opinfo.refs import ( + ElementwiseBinaryPythonRefInfo, + ElementwiseUnaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, +) + + +if TEST_SCIPY: + import scipy.special + + +# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`, +# supports `exclude` argument. 
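+# (Such an argument might look roughly like make_tensor(shape, ..., exclude=(0,));
+# `exclude` is hypothetical here, and the code below gets the same effect for zero
+# via make_tensor's existing `exclude_zero` flag.)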
+# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617 +def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs): + exclude_zero = requires_grad and op_info.op == torch.special.i0e + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield SampleInput(make_arg((S,))) + yield SampleInput(make_arg(())) + + if requires_grad and not exclude_zero: + # Special Case for gradient + # Sample with `0` in the input + t = make_arg((S,)) + t[0] = 0 + + yield SampleInput(t) + + +def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, + device=device, + # TODO: eliminate low after gh-106692 is fixed: + low=(1 if dtype in {torch.int32, torch.int64} else None), + dtype=dtype, + requires_grad=requires_grad, + ) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + for shape, n in product(tensor_shapes, ns): + yield SampleInput(make_arg(shape), args=(n,)) + + +def reference_polygamma(x, n): + # WEIRD `scipy.special.polygamma` behavior + # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype + # dtype('float64') + # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype + # dtype('float32') + # + # Thus we cast output to the default torch dtype or preserve double + result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + if x.dtype == np.double: + result_dtype = np.double + return scipy.special.polygamma(n, x).astype(result_dtype) + + +def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs): + low, _ = op_info.domain + + if requires_grad: + low = 0 + op_info._domain_eps + + make_arg = partial( + make_tensor, dtype=dtype, device=device, low=low, requires_grad=requires_grad + ) + yield SampleInput(make_arg((L,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_erfcx(op_info, device, dtype, requires_grad, **kwargs): + for shape in ((L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=-5, + requires_grad=requires_grad, + ), + ) + + +op_db: List[OpInfo] = [ + UnaryUfuncInfo( + "special.i0e", + aten_name="special_i0e", + ref=scipy.special.i0e if TEST_SCIPY else None, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.i1", + aten_name="special_i1", + ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) + if TEST_SCIPY + else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + ), + UnaryUfuncInfo( + "special.i1e", + aten_name="special_i1e", + ref=scipy.special.i1e if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + 
"special.ndtr", + aten_name="special_ndtr", + decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),), + ref=scipy.special.ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatch stub: unsupported device typemeta + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="meta", + ), + ), + ), + # A separate OpInfo entry for special.polygamma is needed to reorder the arguments + # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939 + UnaryUfuncInfo( + "special.polygamma", + op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs), + variant_test_name="special_polygamma_n_0", + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # lambda impl + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + sample_kwargs=lambda device, dtype, input: ({"n": 0}, {"n": 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1 + ), + ), + BinaryUfuncInfo( + "special.xlog1py", + aten_name="special_xlog1py", + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test -1 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=-0.99), + ), + BinaryUfuncInfo( + "special.zeta", + aten_name="special_zeta", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + # TODO: FIXME + # OpInfo entry to verify the gradient formula of `other`/`q` + # BinaryUfuncInfo('special.zeta', + # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), + # aten_name='special_zeta', + # variant_test_name='grad', + # dtypes=all_types_and(torch.bool), + # promotes_int_to_float=True, + # supports_autograd=True, + # supports_rhs_python_scalar=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable") + # ], + # skips=( + # # Lambda doesn't work in JIT test + # # AssertionError: JIT Test does not execute any logic + # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), + # )), + UnaryUfuncInfo( + "special.entr", + ref=scipy.special.entr if TEST_SCIPY else None, + aten_name="special_entr", + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + 
"test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_entr, + ), + UnaryUfuncInfo( + "special.ndtri", + ref=scipy.special.ndtri if TEST_SCIPY else None, + domain=(0, 1), + aten_name="special_ndtri", + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.log_ndtr", + aten_name="special_log_ndtr", + ref=scipy.special.log_ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.erfcx", + ref=scipy.special.erfcx if TEST_SCIPY else None, + aten_name="special_erfcx", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_erfcx, + ), + UnaryUfuncInfo( + "special.airy_ai", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.airy(x)[0] if TEST_SCIPY else None, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + ), + ), + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y1 if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, 
#79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_h", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + # Greatest absolute difference: inf + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_he", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.laguerre_polynomial_l", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.legendre_polynomial_p", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + 
dtypes=all_types_and(torch.bool), + ref=scipy.special.k1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0e if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k1", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1e if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.spherical_bessel_j0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.spherical_jn(0, x) if TEST_SCIPY else None, + supports_autograd=False, + ), +] + +python_ref_db: List[OpInfo] = [ + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j0", + 
torch_opinfo_name="special.bessel_j0", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j1", + torch_opinfo_name="special.bessel_j1", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.entr", + torch_opinfo_name="special.entr", + op_db=op_db, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.erfcx", + torch_opinfo_name="special.erfcx", + op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i0e", + torch_opinfo_name="special.i0e", + op_db=op_db, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1", + torch_opinfo_name="special.i1", + op_db=op_db, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1e", + torch_opinfo_name="special.i1e", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.log_ndtr", + torch_opinfo_name="special.log_ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtr", + torch_opinfo_name="special.ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtri", + torch_opinfo_name="special.ndtri", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.spherical_bessel_j0", + torch_opinfo_name="special.spherical_bessel_j0", + op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.zeta", + torch_opinfo_name="special.zeta", + supports_one_python_scalar=True, + op_db=op_db, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), +] diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py new file mode 100644 index 0000000000000000000000000000000000000000..92bbdf8d6b2eb4e6ecacabe7f7b4c667df13d576 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py @@ -0,0 +1,206 @@ +# mypy: ignore-errors + +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + OpInfo, + ReductionOpInfo, + UnaryUfuncInfo, +) + +# NOTE [Python References] +# Python References emulate existing PyTorch operations, but can ultimately +# be expressed in terms of "primitive" operations from torch._prims. +# +# These references are experimental. +# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577 +# for additional context. 
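+# As a rough, hypothetical illustration of the idea, a reference for
+# torch.special.ndtr could be written against decomposed ops as
+#   def _ndtr_ref(a):
+#       return 0.5 * (1.0 + torch.erf(a * 0.7071067811865476))
+# (the actual references live in torch._refs and are built on torch._prims).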
+# +# Python Reference OpInfos should be added to the python_ref_db list below. +# Tests can opt-into running on these references by including +# that list in the Sequence they pass to the @ops decorator. +# +# When a Python Reference OpInfo is constructed a pointer to an +# existing OpInfo must be provided using the torch_opinfo_name kwarg. +# The existing OpInfo with that name and no variant will be found +# to inherit from. +# +# Instead of just inheriting the existing OpInfo's metadata, the +# Python Reference OpInfos inherit the existing OpInfo's +# construction arguments. These arguments can be overridden +# by adding kwargs to the constructor. + + +def _find_referenced_opinfo(referenced_name, variant_name, *, op_db=None): + """ + Finds the OpInfo with the given name that has no variant name. + """ + # NOTE: searching the global op_db doesn't work when OpInfos are split into + # different modules, as otherwise the op_db will not be fully constructed + # yet. So, instead the local op_db must be passed in explicitly. + if op_db is None: + from torch.testing._internal.common_methods_invocations import op_db + + for opinfo in op_db: + if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name: + return opinfo + + +def _inherit_constructor_args(name, op, inherited, overrides): + # inherits metadata + common_kwargs = { + "name": name, + "op": op, + "aliases": None, # TODO add a check for alias coverage + "method_variant": None, + "inplace_variant": None, # TODO: add a check for inplace coverage + "supports_scripting": False, + } + + # Acquires inherited kwargs + kwargs = inherited.copy() + + # Fixes metadata + if "kwargs" in kwargs: + kwargs.update(kwargs["kwargs"]) + del kwargs["kwargs"] + if "self" in kwargs: + del kwargs["self"] + if "__class__" in kwargs: + del kwargs["__class__"] + if "skips" in kwargs: + del kwargs["skips"] + if "decorators" in kwargs: + del kwargs["decorators"] + + # Overrides metadata + kwargs.update(common_kwargs) + kwargs.update(overrides) + + # At the moment no prims support autograd, so we must not run autograd + # tests e.g. when testing dtype support. Once we start writing autograd + # formulas for prims this can be removed. + kwargs["supports_autograd"] = False + kwargs["supports_gradgrad"] = False + kwargs["supports_fwgrad_bwgrad"] = False + kwargs["supports_inplace_autograd"] = False + kwargs["supports_forward_ad"] = False + + return kwargs + + +class PythonRefInfo(OpInfo): + """ + An OpInfo for a Python reference of an OpInfo base class operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + validate_view_consistency=True, + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + self.validate_view_consistency = validate_view_consistency + assert isinstance(self.torch_opinfo, OpInfo) + + inherited = self.torch_opinfo._original_opinfo_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + super().__init__(**ukwargs) + + +class ReductionPythonRefInfo(ReductionOpInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + assert isinstance(self.torch_opinfo, ReductionOpInfo) + + inherited = self.torch_opinfo._original_reduction_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + # See https://github.com/pytorch/pytorch/issues/77216 + self.validate_view_consistency = False + + super().__init__(**ukwargs) + + +class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + validate_view_consistency=True, + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + self.validate_view_consistency = validate_view_consistency + assert isinstance(self.torch_opinfo, UnaryUfuncInfo) + + inherited = self.torch_opinfo._original_unary_ufunc_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo): + """ + An OpInfo for a Python reference of an elementwise binary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + assert isinstance(self.torch_opinfo, BinaryUfuncInfo) + + inherited = self.torch_opinfo._original_binary_ufunc_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34a197ee4bad286d1e6730b3a83c33fa34bb1acb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py @@ -0,0 +1,273 @@ +# mypy: ignore-errors + +import collections +import warnings +from functools import partial, wraps +from typing import Sequence + +import numpy as np + +import torch +from torch.testing._internal.common_cuda import TEST_CUDA +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + all_types, + all_types_and, + all_types_and_complex, + all_types_and_complex_and, + all_types_and_half, + complex_types, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, + floating_types_and, + floating_types_and_half, + integral_types, + integral_types_and, +) +from torch.testing._internal.common_utils import torch_to_numpy_dtype_dict + + +COMPLETE_DTYPES_DISPATCH = ( + all_types, + all_types_and_complex, + all_types_and_half, + floating_types, + floating_and_complex_types, + floating_types_and_half, + integral_types, + complex_types, +) + +EXTENSIBLE_DTYPE_DISPATCH = ( + all_types_and_complex_and, + floating_types_and, + floating_and_complex_types_and, + integral_types_and, + all_types_and, +) + +# Better way to acquire devices? +DEVICES = ["cpu"] + (["cuda"] if TEST_CUDA else []) + + +class _dynamic_dispatch_dtypes(_dispatch_dtypes): + # Class to tag the dynamically generated types. + pass + + +def get_supported_dtypes(op, sample_inputs_fn, device_type): + # Returns the supported dtypes for the given operator and device_type pair. + assert device_type in ["cpu", "cuda"] + if not TEST_CUDA and device_type == "cuda": + warnings.warn( + "WARNING: CUDA is not available, empty_dtypes dispatch will be returned!" + ) + return _dynamic_dispatch_dtypes(()) + + supported_dtypes = set() + for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half): + try: + samples = sample_inputs_fn(op, device_type, dtype, False) + except RuntimeError: + # If `sample_inputs_fn` doesn't support sampling for a given + # `dtype`, we assume that the `dtype` is not supported. + # We raise a warning, so that user knows that this was the case + # and can investigate if there was an issue with the `sample_inputs_fn`. + warnings.warn( + f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}" + ) + continue + + # We assume the dtype is supported + # only if all samples pass for the given dtype. 
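+        # For example (hedged sketch; `my_sample_inputs_fn` is a placeholder):
+        #   get_supported_dtypes(torch.sin, my_sample_inputs_fn, "cpu")
+        # tries torch.sin(sample.input, ...) for every candidate dtype above and
+        # keeps only the dtypes for which no RuntimeError escapes the loop below.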
+ supported = True + for sample in samples: + try: + op(sample.input, *sample.args, **sample.kwargs) + except RuntimeError as re: + # dtype is not supported + supported = False + break + + if supported: + supported_dtypes.add(dtype) + + return _dynamic_dispatch_dtypes(supported_dtypes) + + +def dtypes_dispatch_hint(dtypes): + # Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH) + # and its string representation for the passed `dtypes`. + return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str") + + # CUDA is not available, dtypes will be empty. + if len(dtypes) == 0: + return return_type((), str(tuple())) + + set_dtypes = set(dtypes) + for dispatch in COMPLETE_DTYPES_DISPATCH: + # Short circuit if we get an exact match. + if set(dispatch()) == set_dtypes: + return return_type(dispatch, dispatch.__name__ + "()") + + chosen_dispatch = None + chosen_dispatch_score = 0.0 + for dispatch in EXTENSIBLE_DTYPE_DISPATCH: + dispatch_dtypes = set(dispatch()) + if not dispatch_dtypes.issubset(set_dtypes): + continue + + score = len(dispatch_dtypes) + if score > chosen_dispatch_score: + chosen_dispatch_score = score + chosen_dispatch = dispatch + + # If user passed dtypes which are lower than the lowest + # dispatch type available (not likely but possible in code path). + if chosen_dispatch is None: + return return_type((), str(dtypes)) + + return return_type( + partial(dispatch, *tuple(set(dtypes) - set(dispatch()))), + dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))), + ) + + +def is_dynamic_dtype_set(op): + # Detect if the OpInfo entry acquired dtypes dynamically + # using `get_supported_dtypes`. + return op.dynamic_dtypes + + +def str_format_dynamic_dtype(op): + fmt_str = f""" + OpInfo({op.name}, + dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str}, + dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str}, + ) + """ + + return fmt_str + + +def np_unary_ufunc_integer_promotion_wrapper(fn): + # Wrapper that passes PyTorch's default scalar + # type as an argument to the wrapped NumPy + # unary ufunc when given an integer input. + # This mimicks PyTorch's integer->floating point + # type promotion. + # + # This is necessary when NumPy promotes + # integer types to double, since PyTorch promotes + # integer types to the default scalar type. + + # Helper to determine if promotion is needed + def is_integral(dtype): + return dtype in [ + np.bool_, + bool, + np.uint8, + np.int8, + np.int16, + np.int32, + np.int64, + ] + + @wraps(fn) + def wrapped_fn(x): + # As the default dtype can change, acquire it when function is called. + # NOTE: Promotion in PyTorch is from integer types to the default dtype + np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + + if is_integral(x.dtype): + return fn(x.astype(np_dtype)) + return fn(x) + + return wrapped_fn + + +def reference_reduction_numpy(f, supports_keepdims=True): + """Wraps a NumPy reduction operator. + + The wrapper function will forward dim, keepdim, mask, and identity + kwargs to the wrapped function as the NumPy equivalent axis, + keepdims, where, and initiak kwargs, respectively. + + Args: + f: NumPy reduction operator to wrap + supports_keepdims (bool, optional): Whether the NumPy operator accepts + keepdims parameter. If it does not, the wrapper will manually unsqueeze + the reduced dimensions if it was called with keepdim=True. Defaults to True. 
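+
+    Example (an illustrative sketch; ``np.sum`` stands in for any wrapped reduction)::
+
+        >>> ref_sum = reference_reduction_numpy(np.sum)
+        >>> ref_sum(np.arange(6).reshape(2, 3), dim=1, keepdim=True)
+        array([[ 3],
+               [12]])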
+ + Returns: + Wrapped function + + """ + + @wraps(f) + def wrapper(x: np.ndarray, *args, **kwargs): + # Copy keys into a set + keys = set(kwargs.keys()) + + dim = kwargs.pop("dim", None) + keepdim = kwargs.pop("keepdim", False) + + if "dim" in keys: + dim = tuple(dim) if isinstance(dim, Sequence) else dim + + # NumPy reductions don't accept dim=0 for scalar inputs + # so we convert it to None if and only if dim is equivalent + if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: + kwargs["axis"] = None + else: + kwargs["axis"] = dim + + if "keepdim" in keys and supports_keepdims: + kwargs["keepdims"] = keepdim + + if "mask" in keys: + mask = kwargs.pop("mask") + if mask is not None: + assert mask.layout == torch.strided + kwargs["where"] = mask.cpu().numpy() + + if "identity" in keys: + identity = kwargs.pop("identity") + if identity is not None: + if identity.dtype is torch.bfloat16: + identity = identity.cpu().to(torch.float32) + else: + identity = identity.cpu() + kwargs["initial"] = identity.numpy() + + result = f(x, *args, **kwargs) + + # Unsqueeze reduced dimensions if NumPy does not support keepdims + if keepdim and not supports_keepdims and x.ndim > 0: + dim = list(range(x.ndim)) if dim is None else dim + result = np.expand_dims(result, dim) + + return result + + return wrapper + + +def prod_numpy(a, *args, **kwargs): + """ + The function will call np.prod with type as np.int64 if the input type + is int or uint64 if is uint. This is necessary because windows np.prod uses by default + int32 while on linux it uses int64. + This is for fixing integer overflow https://github.com/pytorch/pytorch/issues/77320 + + Returns: + np.prod of input + """ + if "dtype" not in kwargs: + if np.issubdtype(a.dtype, np.signedinteger): + a = a.astype(np.int64) + elif np.issubdtype(a.dtype, np.unsignedinteger): + a = a.astype(np.uint64) + + fn = reference_reduction_numpy(np.prod) + return fn(a, *args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py new file mode 100644 index 0000000000000000000000000000000000000000..abc4ab6f7e4734361ec7ecea3d4755910f9cf2ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py @@ -0,0 +1,33 @@ +# mypy: ignore-errors + +import math + +import torch +import torch.nn as nn + + +class LinearReluFunctionalChild(nn.Module): + def __init__(self, N): + super().__init__() + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) + + def forward(self, x): + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x + +class LinearReluFunctional(nn.Module): + def __init__(self, N): + super().__init__() + self.child = LinearReluFunctionalChild(N) + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) + + def forward(self, x): + x = self.child(x) + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x diff --git a/venv/lib/python3.10/site-packages/torch/testing/_internal/static_module.py b/venv/lib/python3.10/site-packages/torch/testing/_internal/static_module.py new file mode 100644 index 0000000000000000000000000000000000000000..b39daa380d9d91e6ff3f03197d4d22291e82566d --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/testing/_internal/static_module.py @@ -0,0 +1,26 @@ +# Owner(s): ["module: unknown"] + +import torch + + +class StaticModule: + def __init__(self, scripted): + # this is an nn.Module + if hasattr(scripted, "_c"): + self.static_module = torch._C._jit_to_static_module(scripted._c) + else: + self.static_module = torch._C._jit_to_static_module(scripted.graph) + + def __call__(self, *args, **kwargs): + return self.static_module(*args, **kwargs) + + def benchmark(self, args, kwargs, warmup_runs, main_runs): + self.static_module.benchmark(args, kwargs, warmup_runs, main_runs) + + def runAsync(self, args, kwargs): + return self.static_module.runAsync(args, kwargs) + + def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs): + return self.static_module.benchmark_individual_ops( + args, kwargs, warmup_runs, main_runs + ) diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cca0fb95146039f6dcaedf23485219904b8ee570 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py @@ -0,0 +1,13 @@ +import tensorboard +from torch._vendor.packaging.version import Version + +if not hasattr(tensorboard, "__version__") or Version( + tensorboard.__version__ +) < Version("1.15"): + raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above") + +del Version +del tensorboard + +from .writer import FileWriter, SummaryWriter # noqa: F401 +from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcecb208681ea7154293b02c1e9cb04ed0d550b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23c8addf79d4137b575fe00485ac11b629951677 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a7b60d40c1e89f778356659ff3d061e413d6942 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22ba9a7905e431c62e20f652aa785c00e45d0448 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ade4e322dc36b15c6a99465d1d9c378ad098535 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c939b54f35e3e01077491a03af41b68bda0ec44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..365986e937eb60da6dcc93c21c0fda2a1d57b4a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..749278a28591592c05985eeae36509910d5af7f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2d664429a532af9fe67d126741b9b4071bc6ad0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d05496f8f2b9d26bc493fa106d2f76eff11e35b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..536746026052d92438e62b60a2ffcdbb61c386ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py @@ -0,0 +1,822 @@ +import copy +import logging +import os +import re + +from tensorboard.compat.proto.graph_pb2 import GraphDef +from tensorboard.compat.proto.node_def_pb2 import NodeDef +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto + +from caffe2.proto import caffe2_pb2 +from caffe2.python import core, workspace + +from typing import Set, Dict, Tuple, List + +log = logging.getLogger(__name__) + +def _make_unique_name(seen: Set[str], name: str, min_version: int = 
0): + """ + Make the name unique by appending a unique number to the name. Used for SSA. + + Args: + seen (set): Set of names that have already been used (with respect to + some context). + name (str): The name to make unique + min_version (number): Starting index. Is incremented continually until + it can make the resulting name unique relative to 'seen'. + + Returns: + x (str): A version of name that is not in seen. + """ + assert name is not None + i = min_version + x = "%s_%d" % (name, i) if i else name + while x in seen: + i += 1 + x = "%s_%d" % (name, i) + seen.add(x) + return x + + +def _rename_tensorflow_style(shapes, blob_name_tracker, ops): + """ + Convert some of the common names in Caffe2 to tensorflow. + + NOTE: The common names in both Caffe2 and Tensorflow are currently + hardcoded, if either side changes at some point, then this code should + change as well. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. The _rename_all() call modifies blob_name_tracker and ops in-place. + """ + WEIGHT = re.compile(r"(_w)$") + WEIGHT_ = re.compile(r"(_w_)") + BN = re.compile(r"(_bn)$") + BN_ = re.compile(r"(_bn_)") + BIAS = re.compile(r"(_b)$") + BIAS_ = re.compile(r"(_b_)") + SCALE = re.compile(r"(_s)$") + SCALE_ = re.compile(r"(_s_)") + SUM = re.compile(r"(_sum)$") + SUM_ = re.compile(r"(_sum_)") + BRANCH = re.compile(r"(_branch)") + + def f(name): + inter_name = WEIGHT_.sub("/weight_", WEIGHT.sub("/weight", name)) + inter_name = BN_.sub("/batchnorm_", BN.sub("/batchnorm", inter_name)) + inter_name = BIAS_.sub("/bias_", BIAS.sub("/bias", inter_name)) + inter_name = SCALE_.sub("/scale_", SCALE.sub("/scale", inter_name)) + inter_name = SUM_.sub("/sum_", SUM.sub("/sum", inter_name)) + new_name = BRANCH.sub("/branch", inter_name) + return new_name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _convert_to_ssa(shapes, blob_name_tracker, ops): + """ + Convert an operator graph to SSA (i.e. out-of-place). + + i.e. blobs will be renamed so that each blob is produced only once. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies blob_name_tracker and ops in-place. + """ + ir = core.IR(ops) + seen: Set[str] = set() + versioned: Dict[Tuple[str, int], int] = {} + new_shapes = {} + new_blob_name_tracker = {} + + def ssa_name(name: str, versions: Dict[str, int]) -> int: + assert name in versions + version = versions[name] + if (name, version) in versioned: + return versioned[(name, version)] + # Always setting name2 = `{name}_{version}` would work, but we also try + # to avoid a trailing `_0`, so we have to be careful not to introduce + # name collisions, such as (foo_1, 0) = foo_1 = (foo, 1). + # Note: operator names (if any) will be handled later. + new_name = _make_unique_name(seen, name, min_version=version) + versioned[(name, version)] = new_name + # Transfer shape. 
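+        # e.g. if `shapes` maps "conv1_w" -> [64, 3, 7, 7] and the blob was
+        # renamed to "conv1_w_2", the shape entry is carried over under the new
+        # key (hedged illustration; the blob names here are made up).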
+ if name in shapes: + new_shapes[new_name] = shapes[name] + if blob_name_tracker and name in blob_name_tracker: + new_blob_name_tracker[new_name] = blob_name_tracker[name] + return new_name + + for (op, ssa) in zip(ops, ir.ssa): + assert op is ssa.op + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs) + op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs) + + shapes.clear() + shapes.update(new_shapes) + if blob_name_tracker: + blob_name_tracker.clear() + blob_name_tracker.update(new_blob_name_tracker) + + +def _get_blob_names(ops): + """ + Get all the operator input and output blobs and perform dedup on their names. + + Args: + ops: List of Caffe2 operators to extract inputs and outputs from + + Returns: + set containing distinct inputs and outputs from 'ops' + """ + names = set() + for op in ops: + names.update(op.input) + names.update(op.output) + return {name: name for name in names} + + +def _remap_keys(old_dict, rename_fn): + """ + Rename keys of 'old_dict' according to 'rename_fn'. + + Args: + old_dict: Dictionary (i.e. containing blob_name -> blob_name + relationships.) + rename_fn: Function string -> string for renaming. + + Returns: + None. Modifies old_dict in-place. + """ + new_dict = {rename_fn(key): value for key, value in old_dict.items()} + old_dict.clear() + old_dict.update(new_dict) + + +def _rename_all(shapes, blob_name_tracker, ops, rename_fn): + """ + Rename all the names in the operators. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + rename_fn: Function string -> string that specifies how to rename + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place using the + specified 'rename_fn'. + """ + seen: Set[str] = set() + renamed: Dict[Tuple[str, int], int] = {} + + def g(name): + """Collision-free version of f.""" + if name is None: + return None + if name in renamed: + return renamed[name] + new_name = _make_unique_name(seen, rename_fn(name)) + renamed[name] = new_name + return new_name + + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(g(name) for name in inputs) + op.output.extend(g(name) for name in outputs) + + _remap_keys(shapes, g) + if blob_name_tracker: + _remap_keys(blob_name_tracker, g) + # Rename all operator names (if any) independently so that the + # unique-fication happens only once in _fill_missing_operator_names(). + seen.clear() + renamed.clear() + for op in ops: + op.name = g(op.name) + + +def _add_gradient_scope(shapes, blob_name_tracker, ops): + """ + For all operators or blobs with name containing "_grad", add a "GRADIENTS/" scope. + + Note: breaks graph execution since the blob -> gradient mapping is + hardcoded. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place by renaming. + """ + + def f(name): + if "_grad" in name: + return f"GRADIENTS/{name}" + else: + return name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _replace_colons(shapes, blob_name_tracker, ops, repl): + """ + `:i` has a special meaning in Tensorflow. 
This function replaces all colons with $ to avoid any possible conflicts. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + repl: String representing the text to replace ':' with. Usually this is + '$'. + + Returns: + None. Modifies blob_name_tracker in-place. + + """ + + def f(name): + return name.replace(":", repl) + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _fill_missing_operator_names(ops): + """ + Give missing operators a name. + + We expect C2 operators to be generally unnamed. This gives them a scope + (inferred from their outputs) and a name after their type. Duplicates will + be postfixed by an index. + + Args: + ops: List of Caffe2 operators to assign names to. + + Returns: + None: Modifies 'ops' in-place. + """ + seen = set() + for op in ops: + # Make sure operator names don't collide with blobs. + seen.update(op.input) + seen.update(op.output) + for op in ops: + if op.name: + name = op.name + elif op.output or op.input: + name_list = [os.path.dirname(name) for name in op.output or op.input] + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + else: + name = op.type + assert name + op.name = _make_unique_name(seen, name) + + +def _tf_device(device_option): + """ + Handle the devices. + + Args: + device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf, + associated to an operator, that contains information such as + device_type (optional), cuda_gpu_id (optional), node_name (optional, + tells which node the operator should execute on). See caffe2.proto + in caffe2/proto for the full list. + + Returns: + Formatted string representing device information contained in + device_option. + """ + if not device_option.HasField("device_type"): + return "" + if ( + device_option.device_type == caffe2_pb2.CPU + or device_option.device_type == caffe2_pb2.MKLDNN + ): + return "/cpu:*" + if device_option.device_type == caffe2_pb2.CUDA: + return f"/gpu:{device_option.device_id}" + raise Exception("Unhandled device", device_option) + + +def _add_tf_shape(attr_dict, ints): + """ + Convert a list of ints to a TensorShapeProto representing the dimensions of a blob/object. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + ints: List of integers representing dimensions of some object. + + Returns: + None. Modifies attr_dict in-place. + """ + shape_proto = TensorShapeProto() + for i in ints: + dim = TensorShapeProto.Dim() + dim.size = i + shape_proto.dim.extend([dim]) + attr_dict["_output_shapes"].list.shape.extend([shape_proto]) + + +def _set_tf_attr(attr_dict, arg): + """ + Add attributes to a node. Key is the arg.name, and values can be shape, floats, strings, ints or an empty list. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + arg: Object with name and data fields. + + Returns: + None. Modifies attr_dict in-place. 
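+
+    Illustrative mapping (editorial; the arg values are hypothetical)::
+
+        name="kernel", i=3          ->  attr_dict["kernel"].i = 3
+        name="order", s=b"NCHW"     ->  attr_dict["order"].s = b"NCHW"
+        name="shape", ints=[1, 3]   ->  a TensorShapeProto with dims [1, 3]
+                                        appended under attr_dict["_output_shapes"]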
+ """ + k = arg.name + if k == "shape" and arg.ints: + _add_tf_shape(attr_dict, arg.ints) + return + # Float + if arg.HasField("f"): + attr_dict[k].f = arg.f + return + # Integer + if arg.HasField("i"): + attr_dict[k].i = arg.i + return + # String + if arg.HasField("s"): + attr_dict[k].s = ( + arg.s if isinstance(arg.s, bytes) else str(arg.s).encode("utf-8") + ) + return + if arg.floats: + attr_dict[k].list.f.extend(arg.floats) + return + if arg.ints: + attr_dict[k].list.i.extend(arg.ints) + return + if arg.strings: + attr_dict[k].list.s.extend( + s if isinstance(s, bytes) else str(s).encode("utf-8") for s in arg.strings + ) + return + # The value is an empty list. + attr_dict[k].list.s.extend([]) + + +def _operator_to_node(shapes, op): + """ + Convert an operator to a node in a TF graph. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + op: The Caffe2 operator to convert to a TF graph node. + + Returns: + n: The TF graph node created from op. + """ + assert op.name, op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + if shapes: + # Add shapes in order. + for output in op.output: + if output not in shapes: + break + _add_tf_shape(n.attr, shapes[output]) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + return n + + +def _operator_to_node_simp(op, inter_blobs, seen): + """ + Convert the operators to nodes. + + Args: + op: Caffe2 operator to convert to node + inter_blobs: Set of intermediate blobs + seen: Names that have already been used and are not unique + + Returns: + nodes: Nodes representing 'op' and the outputs of 'op' + """ + assert op + nodes = [] + outputs = [o for o in op.output if o not in inter_blobs] + seen.update(outputs) + len_outputs = len(outputs) + if len_outputs == 1: + n = NodeDef() + n.name = outputs[0] + # Here we are sure the name is unique. + n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + elif len_outputs > 1: + # Create a name that is likely unique + if op.name: + name = op.name + else: + name_list = list(outputs) + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + assert name + op.name = _make_unique_name(seen, name) + device = _tf_device(op.device_option) + + # Create additional output nodes + for output in outputs: + n = NodeDef() + n.name = output + n.input.extend([op.name]) + n.op = "Blob" + n.device = device + nodes.append(n) + + # Node for the current op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = device + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + + return nodes + + +def _blob_to_node(producing_ops, shapes, name): + """ + Convert a blob (operator input or output) to a node in a TF graph. + + Args: + producing_ops: Dictionary of blob name to list of + (producing_op, blob_index within producing_op.output) mapping. + shapes: Dictionary mapping blob names to their shapes/dimensions. + name: String representing the name of this blob. + + Returns: + n: The TF graph node created from this blob. + """ + assert name + n = NodeDef() + n.name = name + # Get all ops that have the blob corresponding to 'name' as one of their + # outputs. See _operators_to_graph_def. + produced_by = producing_ops.get(name, []) + if len(produced_by) > 0: + n.op = "Blob" + else: + # This blob is not produced but is instead a TF Placeholder where a + # value is passed in. 
+ n.op = "Placeholder" + n.input.extend("%s:%d" % (p_op.name, i) for p_op, i in produced_by) + if produced_by: + device = produced_by[0][0].device_option + if all(producer[0].device_option == device for producer in produced_by): + n.device = _tf_device(device) + if shapes and name in shapes: + _add_tf_shape(n.attr, shapes[name]) + return n + + +def _clear_debug_info(ops, perform_clear): + """ + Remove debug information from operators, they are copious. + + Args: + ops: List of Caffe2 operators + perform_clear: Boolean passed from _operators_to_graph_def specifying + whether to remove the debug information. This boolean is passed into + this function to reduce the complexity of _operators_to_graph_def. + + Returns: + None. Modifies the list of Caffe2 operators in-place and removes the + 'debug_info' field. + + """ + if not perform_clear: + return + + for op in ops: + if op.HasField("debug_info"): + op.ClearField("debug_info") + + +def _check_if_forward(blob): + """ + Blobs with names containing '_m' or 'grad' are part of the backward pass. + + This function references facebookresearch/Detectron/detectron/utils/net.py. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is part of the forward pass + """ + # + return blob.find("__m") < 0 or blob.find("grad") < 0 + + +def _check_if_cpu(blob): + """ + Check if the blob's name starts with '_gpu'. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is associated with a gpu + """ + return not blob.startswith("_gpu") + + +def _compute_in_out(ops): + """ + Find the input, intermediate and output nodes of a set of operators. + + Args: + ops: List of Caffe2 operators to look through + + Returns: + input_blobs: The input nodes of the set of operators + inter_blobs: The intermediate nodes of the set of operators + output_blobs: The output nodes of the set of operators + """ + in_blobs = set() + out_blobs = set() + + for op in ops: + for input_blob in op.input: + in_blobs.add(input_blob) + for output_blob in op.output: + out_blobs.add(output_blob) + + input_blobs = list(in_blobs.difference(out_blobs)) + output_blobs = list(out_blobs.difference(in_blobs)) + inter_blobs = {b for b in output_blobs if b.startswith("_")} + output_blobs = [b for b in output_blobs if b not in inter_blobs] + + return input_blobs, inter_blobs, output_blobs + + +def _filter_ops(ops, filter_fn, perform_filter): + """ + Filter unwanted operators based on criteria in 'filter_fn'. + + Args: + ops: List of Caffe2 operators to filter + filter_fn: Criteria function for whether inputs/outputs in an operator + should be filtered. + perform_filter: Boolean passed from _operators_to_graph_def specifying + whether to filter operators + + Returns: + new_ops: Subset of ops containing a subset of their inputs and outputs. + """ + if not perform_filter: + return ops + + new_ops = [] + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + new_inputs = [i for i in inputs if filter_fn(i)] + new_outputs = [o for o in outputs if filter_fn(o)] + + # Only add the op if output is not empty + if new_outputs: + op.input.extend(new_inputs) + op.output.extend(new_outputs) + new_ops.append(op) + + return new_ops + + +def _operators_to_graph_def( + shapes, + ops, + colon_replacement="$", + with_ssa=True, + with_gradient_scope=True, + blob_name_tracker=None, + show_simplified=False, + custom_rename=None, +): + """ + Convert a set of operators to a graph using the main function. 
+ + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + ops: List of Caffe2 operators, representing some computation graph + ### **kwargs (model_to_graph_def, nets_to_graph_def, protos_to_graph_def) ### + colon_replacement: Symbol to replace ':' with. ':i' in TF has a special + meaning, so we need to replace it with a non-conflicting symbol. + with_ssa: Boolean + with_gradient_scope: Boolean + blob_name_tracker: Dictionary tracking names of blobs (inputs/outputs + from operators) + show_simplified: Whether to show a simplified version of the model graph + Sets all of the following values: + clear_debug_info: Boolean representing whether to silence debug + info (which can be very verbose) + show_forward_only: Boolean representing whether to only show + blobs involved in the forward pass + show_cpu_only: Boolean representing whether to only show blobs + that are not associated with a gpu + use_tensorflow_naming: Boolean representing whether to convert + some common Caffe2 naming conventions to their Tensorflow + counterparts + custom_rename: Function string -> string that defines a custom + renaming function to use. + + Returns: + current_graph: GraphDef representing the computation graph formed by the + set of operators. + """ + if blob_name_tracker is not None: + blob_name_tracker.clear() + else: + blob_name_tracker = {} + + blob_name_tracker.update(_get_blob_names(ops)) + + _clear_debug_info(ops, show_simplified) # clear_debug_info + ops = _filter_ops(ops, _check_if_forward, show_simplified) # show_forward_only + ops = _filter_ops(ops, _check_if_cpu, show_simplified) # show_cpu_only + if custom_rename: + _rename_all(shapes, blob_name_tracker, ops, custom_rename) + if colon_replacement: + _replace_colons(shapes, blob_name_tracker, ops, colon_replacement) + if with_ssa: + _convert_to_ssa(shapes, blob_name_tracker, ops) + if with_gradient_scope: + _add_gradient_scope(shapes, blob_name_tracker, ops) + _fill_missing_operator_names(ops) + if show_simplified: # use_tensorflow_naming + _rename_tensorflow_style(shapes, blob_name_tracker, ops) + producing_ops: Dict[caffe2_pb2.OperatorDef, List] = {} + blobs = set() + input_blobs, inter_blobs, _ = _compute_in_out(ops) + current_graph = GraphDef() + seen = set(input_blobs) + for op in ops: + nodes_from_op = ( + _operator_to_node_simp(op, inter_blobs, seen) + if show_simplified + else [_operator_to_node(shapes, op)] + ) # .extend() expects an iterable + current_graph.node.extend(nodes_from_op) + for input_blob in op.input: + blobs.add(input_blob) + for i, output_blob in enumerate(op.output): + blobs.add(output_blob) + producing_ops.setdefault(output_blob, []).append((op, i)) + + if show_simplified: + # Show a cleaner, easier-to-interpret version of the model graph + blobs = input_blobs + + for blob in sorted(blobs): + current_graph.node.extend([_blob_to_node(producing_ops, {}, blob)]) + + return current_graph + + +def _propagate_device_option(net_def): + """ + Propagate the device options from net to operators. + + Args: + net_def: A caffe2_pb2.NetDef representing a computation graph. The graph + consists of Caffe2 operators. + + Returns: + None. Iterates through all ops contained within the net. For each op, + modifies the op device_option in-place to be the net device_option + if the op has no pre-existing device_option, and leaves the op as-is + if it already has a device_option. 
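+
+    Illustrative example (editorial, hypothetical net)::
+
+        net_def.device_option.device_type == caffe2_pb2.CUDA
+        op1 has no device_option   ->  the net's device_option is copied onto op1
+        op2 already sets CPU       ->  op2 is left untouched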
+ """ + if not net_def.HasField("device_option"): + return + for op in net_def.op: + if not op.HasField("device_option"): + op.device_option.CopyFrom(net_def.device_option) + + +def _try_get_shapes(nets): + """ + Get missing shapes for all blobs contained in the nets. + + Args: + nets: List of core.Net to extract blob shape information from. + + Returns: + Dictionary containing blob name to shape/dimensions mapping. The net + is a computation graph that is composed of operators, and the + operators have input and output blobs, each with their own dims. + """ + try: + # Note: this will inspect the workspace for better or worse. + # We don't care about the types, only the shapes + shapes, _ = workspace.InferShapesAndTypes(nets) + return shapes + except Exception as e: + log.warning("Failed to compute shapes: %s", e) + return {} + + +def model_to_graph_def(model, **kwargs): + """ + Convert a Caffe2 model to a Tensorflow graph. + + This function extracts 'param_init_net' and 'net' from the model and passes it to nets_to_graph() + for further processing. + + Args: + model (cnn.CNNModelHelper, model_helper.ModelHelper): The model to + extract the nets (instances of core.Net) from. + + Returns: + Call to nets_to_graph_def() with extracted 'param_init_net', 'net' and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + nets = [model.param_init_net, model.net] + return nets_to_graph_def(nets, **kwargs) + + +def nets_to_graph_def(nets, shapes=None, **kwargs): + """ + Convert a set of Caffe2 nets to a Tensorflow graph. + + Args: + nets: List of core.Nets. core.Net is a wrapper around a NetDef protobuf. + The corresponding protobuf can be extracted using .Proto(). + shapes: Dictionary mapping blob names to their shapes/dimensions. + + Returns: + Call to protos_to_graph_def() with the extracted NetDef protobufs and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + # if shapes is None: + # shapes = _try_get_shapes(nets) + # _try_get_shapes(nets) depends on workspace.InferShapesAndTypes(nets), + # which is currently broken (segfault). We omit the shapes for now. + shapes = {} + nets = [copy.deepcopy(net.Proto()) for net in nets] + shapes = copy.deepcopy(shapes) + return protos_to_graph_def(nets, shapes, **kwargs) + + +def protos_to_graph_def(net_defs, shapes=None, **kwargs): + """ + Convert a set of Caffe2 net definitions to a Tensorflow graph. + + Args: + net_defs: List of caffe2_pb2.NetDef protobufs representing computation + graphs. + shapes: Dictionary mapping blob names to their shapes/dimensions. + + Returns: + Call to _operators_to_graph_def() with the extracted operators from the + NetDefs and **kwargs. See _operators_to_graph_def for detailed + **kwargs. + """ + for net in net_defs: + _propagate_device_option(net) + shapes = copy.deepcopy(shapes or {}) + ops = [op for net_def in net_defs for op in net_def.op] + return _operators_to_graph_def(shapes, ops, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py new file mode 100644 index 0000000000000000000000000000000000000000..3a4eff542e35459d6e4ba5bd27bde94455b62247 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py @@ -0,0 +1,40 @@ +"""This module converts objects into numpy array.""" +import numpy as np +import torch + + +def make_np(x): + """ + Convert an object into numpy array. 
+ + Args: + x: An instance of torch tensor or caffe blob name + + Returns: + numpy.array: Numpy array + """ + if isinstance(x, np.ndarray): + return x + if isinstance(x, str): # Caffe2 will pass name of blob(s) to fetch + return _prepare_caffe2(x) + if np.isscalar(x): + return np.array([x]) + if isinstance(x, torch.Tensor): + return _prepare_pytorch(x) + raise NotImplementedError( + f"Got {type(x)}, but numpy array, torch tensor, or caffe2 blob name are expected." + ) + + +def _prepare_pytorch(x): + if x.dtype == torch.bfloat16: + x = x.to(torch.float16) + x = x.detach().cpu().numpy() + return x + + +def _prepare_caffe2(x): + from caffe2.python import workspace + + x = workspace.FetchBlob(x) + return x diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..afbe68191aa98fcbbbfe1395c7c325e4c6754f9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py @@ -0,0 +1,85 @@ +import math +import numpy as np +from ._convert_np import make_np +from ._utils import make_grid +from tensorboard.compat import tf +from tensorboard.plugins.projector.projector_config_pb2 import EmbeddingInfo + + +_HAS_GFILE_JOIN = hasattr(tf.io.gfile, "join") + + +def _gfile_join(a, b): + # The join API is different between tensorboard's TF stub and TF: + # https://github.com/tensorflow/tensorboard/issues/6080 + # We need to try both because `tf` may point to either the stub or the real TF. + if _HAS_GFILE_JOIN: + return tf.io.gfile.join(a, b) + else: + fs = tf.io.gfile.get_filesystem(a) + return fs.join(a, b) + + +def make_tsv(metadata, save_path, metadata_header=None): + if not metadata_header: + metadata = [str(x) for x in metadata] + else: + assert len(metadata_header) == len( + metadata[0] + ), "len of header must be equal to the number of columns in metadata" + metadata = ["\t".join(str(e) for e in l) for l in [metadata_header] + metadata] + + metadata_bytes = tf.compat.as_bytes("\n".join(metadata) + "\n") + with tf.io.gfile.GFile(_gfile_join(save_path, "metadata.tsv"), "wb") as f: + f.write(metadata_bytes) + + +# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared +def make_sprite(label_img, save_path): + from PIL import Image + from io import BytesIO + + # this ensures the sprite image has correct dimension as described in + # https://www.tensorflow.org/get_started/embedding_viz + nrow = int(math.ceil((label_img.size(0)) ** 0.5)) + arranged_img_CHW = make_grid(make_np(label_img), ncols=nrow) + + # augment images so that #images equals nrow*nrow + arranged_augment_square_HWC = np.zeros( + (arranged_img_CHW.shape[2], arranged_img_CHW.shape[2], 3) + ) + arranged_img_HWC = arranged_img_CHW.transpose(1, 2, 0) # chw -> hwc + arranged_augment_square_HWC[: arranged_img_HWC.shape[0], :, :] = arranged_img_HWC + im = Image.fromarray(np.uint8((arranged_augment_square_HWC * 255).clip(0, 255))) + + with BytesIO() as buf: + im.save(buf, format="PNG") + im_bytes = buf.getvalue() + + with tf.io.gfile.GFile(_gfile_join(save_path, "sprite.png"), "wb") as f: + f.write(im_bytes) + + +def get_embedding_info(metadata, label_img, subdir, global_step, tag): + info = EmbeddingInfo() + info.tensor_name = f"{tag}:{str(global_step).zfill(5)}" + info.tensor_path = _gfile_join(subdir, "tensors.tsv") + if metadata is not None: + info.metadata_path = _gfile_join(subdir, "metadata.tsv") + if label_img is not None: + 
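+        # (Editorial note: label_img is assumed to be NCHW, so size(3) is the
+        # width and size(2) the height recorded in single_image_dim below.)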
info.sprite.image_path = _gfile_join(subdir, "sprite.png") + info.sprite.single_image_dim.extend([label_img.size(3), label_img.size(2)]) + return info + + +def write_pbtxt(save_path, contents): + config_path = _gfile_join(save_path, "projector_config.pbtxt") + with tf.io.gfile.GFile(config_path, "wb") as f: + f.write(tf.compat.as_bytes(contents)) + + +def make_mat(matlist, save_path): + with tf.io.gfile.GFile(_gfile_join(save_path, "tensors.tsv"), "wb") as f: + for x in matlist: + x = [str(i.item()) for i in x] + f.write(tf.compat.as_bytes("\t".join(x) + "\n")) diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..f4274199ffd33ce60792608620973e4b9ca75975 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py @@ -0,0 +1,380 @@ +from collections import OrderedDict +import contextlib +from typing import Dict, Any + +from tensorboard.compat.proto.config_pb2 import RunMetadata +from tensorboard.compat.proto.graph_pb2 import GraphDef +from tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats +from tensorboard.compat.proto.versions_pb2 import VersionDef + +import torch +from ._proto_graph import node_proto + +methods_OP = [ + "attributeNames", + "hasMultipleOutputs", + "hasUses", + "inputs", + "kind", + "outputs", + "outputsSize", + "scopeName", +] +# Some additional methods to explure for methods_IO are +# +# 'unique' (type int) +# 'type' (type >) +# +# But the below are sufficient for now. +methods_IO = ["node", "offset", "debugName"] + +GETATTR_KIND = "prim::GetAttr" +CLASSTYPE_KIND = "ClassType" + + +class NodeBase: + def __init__( + self, + debugName=None, + inputs=None, + scope=None, + tensor_size=None, + op_type="UnSpecified", + attributes="", + ): + # TODO; Specify a __slots__ for this class or potentially + # used namedtuple instead + self.debugName = debugName + self.inputs = inputs + self.tensor_size = tensor_size + self.kind = op_type + self.attributes = attributes + self.scope = scope + + def __repr__(self): + repr = [] + repr.append(str(type(self))) + for m in dir(self): + if "__" not in m: + repr.append( + m + ": " + str(getattr(self, m)) + str(type(getattr(self, m))) + ) + return "\n".join(repr) + "\n\n" + + +class NodePy(NodeBase): + def __init__(self, node_cpp, valid_methods): + super().__init__(node_cpp) + valid_methods = valid_methods[:] + self.inputs = [] + + for m in valid_methods: + if m == "inputs" or m == "outputs": + list_of_node = list(getattr(node_cpp, m)()) + io_unique_names = [] + io_tensor_sizes = [] + for n in list_of_node: + io_unique_names.append(n.debugName()) + if n.isCompleteTensor(): + io_tensor_sizes.append(n.type().sizes()) + else: + io_tensor_sizes.append(None) + + setattr(self, m, io_unique_names) + setattr(self, m + "tensor_size", io_tensor_sizes) + + else: + setattr(self, m, getattr(node_cpp, m)()) + + +class NodePyIO(NodePy): + def __init__(self, node_cpp, input_or_output=None): + super().__init__(node_cpp, methods_IO) + try: + tensor_size = node_cpp.type().sizes() + except RuntimeError: + tensor_size = [ + 1, + ] # fail when constant model is used. + self.tensor_size = tensor_size + # Kind attribute string is purely descriptive and will be shown + # in detailed information for the node in TensorBoard's graph plugin. + # + # NodePyOP nodes get this from their kind() method. 
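+        # (Editorial illustration: traced operator nodes report their JIT kind,
+        # e.g. "aten::_convolution", while parameter values fall through to the
+        # "Parameter" kind set here and real graph inputs/outputs are
+        # overridden to "IO Node" just below.)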
+ self.kind = "Parameter" + if input_or_output: + self.input_or_output = input_or_output + self.kind = "IO Node" + + +class NodePyOP(NodePy): + def __init__(self, node_cpp): + super().__init__(node_cpp, methods_OP) + # Replace single quote which causes strange behavior in TensorBoard + # TODO: See if we can remove this in the future + self.attributes = str( + {k: _node_get(node_cpp, k) for k in node_cpp.attributeNames()} + ).replace("'", " ") + self.kind = node_cpp.kind() + + +class GraphPy: + """Helper class to convert torch.nn.Module to GraphDef proto and visualization with TensorBoard. + + GraphDef generation operates in two passes: + + In the first pass, all nodes are read and saved to two lists. + One list is for input/output nodes (nodes_io), which only have inbound + or outbound connections, but not both. Another list is for internal + operator nodes (nodes_op). The first pass also saves all scope name + appeared in the nodes in scope_name_appeared list for later processing. + + In the second pass, scope names are fully applied to all nodes. + debugNameToScopedName is a mapping from a node's ID to its fully qualified + scope name. e.g. Net1/Linear[0]/1. Unfortunately torch.jit doesn't have + totally correct scope output, so this is nontrivial. The function + populate_namespace_from_OP_to_IO and find_common_root are used to + assign scope name to a node based on the connection between nodes + in a heuristic kind of way. Bookkeeping is done with shallowest_scope_name + and scope_name_appeared. + """ + + def __init__(self): + self.nodes_op = [] + self.nodes_io = OrderedDict() + self.unique_name_to_scoped_name = {} + self.shallowest_scope_name = "default" + self.scope_name_appeared = [] + + def append(self, x): + if isinstance(x, NodePyIO): + self.nodes_io[x.debugName] = x + if isinstance(x, NodePyOP): + self.nodes_op.append(x) + + def printall(self): + print("all nodes") + for node in self.nodes_op: + print(node) + for key in self.nodes_io: + print(self.nodes_io[key]) + + def find_common_root(self): + for fullscope in self.scope_name_appeared: + if fullscope: + self.shallowest_scope_name = fullscope.split("/")[0] + + def populate_namespace_from_OP_to_IO(self): + for node in self.nodes_op: + for node_output, outputSize in zip(node.outputs, node.outputstensor_size): + self.scope_name_appeared.append(node.scopeName) + self.nodes_io[node_output] = NodeBase( + node_output, + node.inputs, + node.scopeName, + outputSize, + op_type=node.kind, + attributes=node.attributes, + ) + + self.find_common_root() + + for node in self.nodes_op: + for input_node_id in node.inputs: + self.unique_name_to_scoped_name[input_node_id] = ( + node.scopeName + "/" + input_node_id + ) + + for key, node in self.nodes_io.items(): + if type(node) == NodeBase: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if hasattr(node, "input_or_output"): + self.unique_name_to_scoped_name[key] = ( + node.input_or_output + "/" + node.debugName + ) + + if hasattr(node, "scope") and node.scope is not None: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if node.scope == "" and self.shallowest_scope_name: + self.unique_name_to_scoped_name[node.debugName] = ( + self.shallowest_scope_name + "/" + node.debugName + ) + + # replace name + for key, node in self.nodes_io.items(): + self.nodes_io[key].inputs = [ + self.unique_name_to_scoped_name[node_input_id] + for node_input_id in node.inputs + ] + if node.debugName in self.unique_name_to_scoped_name: + self.nodes_io[key].debugName = 
self.unique_name_to_scoped_name[ + node.debugName + ] + + def to_proto(self): + """Convert graph representation of GraphPy object to TensorBoard required format.""" + # TODO: compute correct memory usage and CPU time once + # PyTorch supports it + nodes = [] + for v in self.nodes_io.values(): + nodes.append( + node_proto( + v.debugName, + input=v.inputs, + outputsize=v.tensor_size, + op=v.kind, + attributes=v.attributes, + ) + ) + return nodes + + +def parse(graph, trace, args=None, omit_useless_nodes=True): + """Parse an optimized PyTorch model graph and produces a list of nodes and node stats. + + Useful for eventual conversion to TensorBoard protobuf format. + + Args: + graph (PyTorch module): The model graph to be parsed. + trace (PyTorch JIT TracedModule): The model trace to be parsed. + args (tuple): input tensor[s] for the model. + omit_useless_nodes (boolean): Whether to remove nodes from the graph. + """ + n_inputs = len(args) + + scope = {} + nodes_py = GraphPy() + for node in graph.inputs(): + if omit_useless_nodes: + if ( + len(node.uses()) == 0 + ): # number of user of the node (= number of outputs/ fanout) + continue + + if node.type().kind() != CLASSTYPE_KIND: + nodes_py.append(NodePyIO(node, "input")) + + attr_to_scope: Dict[Any, str] = {} + for node in graph.nodes(): + if node.kind() == GETATTR_KIND: + attr_name = node.s("name") + attr_key = node.output().debugName() + parent = node.input().node() + if ( + parent.kind() == GETATTR_KIND + ): # If the parent node is not the top-level "self" node + parent_attr_name = parent.s("name") + parent_attr_key = parent.output().debugName() + parent_scope = attr_to_scope[parent_attr_key] + attr_scope = parent_scope.split("/")[-1] + attr_to_scope[attr_key] = f"{parent_scope}/{attr_scope}.{attr_name}" + else: + attr_to_scope[attr_key] = f"__module.{attr_name}" + # We don't need classtype nodes; scope will provide this information + if node.output().type().kind() != CLASSTYPE_KIND: + node_py = NodePyOP(node) + node_py.scopeName = attr_to_scope[attr_key] # type: ignore[attr-defined] + nodes_py.append(node_py) + else: + nodes_py.append(NodePyOP(node)) + + for i, node in enumerate(graph.outputs()): # Create sink nodes for output ops + node_pyio = NodePyIO(node, "output") + node_pyio.debugName = f"output.{i + 1}" + node_pyio.inputs = [node.debugName()] + nodes_py.append(node_pyio) + + def parse_traced_name(module): + if isinstance(module, torch.jit.TracedModule): + module_name = module._name + else: + module_name = getattr(module, "original_name", "Module") + return module_name + + alias_to_name = {} + base_name = parse_traced_name(trace) + for name, module in trace.named_modules(prefix="__module"): + mod_name = parse_traced_name(module) + attr_name = name.split(".")[-1] + alias_to_name[name] = f"{mod_name}[{attr_name}]" + + for node in nodes_py.nodes_op: + module_aliases = node.scopeName.split("/") + replacements = [ + alias_to_name[alias] if alias in alias_to_name else alias.split(".")[-1] + for alias in module_aliases + ] + node.scopeName = base_name + if any(replacements): + node.scopeName += "/" + "/".join(replacements) + + nodes_py.populate_namespace_from_OP_to_IO() + return nodes_py.to_proto() + + +def graph(model, args, verbose=False, use_strict_trace=True): + """ + Process a PyTorch model and produces a `GraphDef` proto that can be logged to TensorBoard. + + Args: + model (PyTorch module): The model to be parsed. + args (tuple): input tensor[s] for the model. 
+ verbose (bool): Whether to print out verbose information while + processing. + use_strict_trace (bool): Whether to pass keyword argument `strict` to + `torch.jit.trace`. Pass False when you want the tracer to + record your mutable container types (list, dict) + """ + with _set_model_to_eval(model): + try: + trace = torch.jit.trace(model, args, strict=use_strict_trace) + graph = trace.graph + torch._C._jit_pass_inline(graph) + except RuntimeError as e: + print(e) + print("Error occurs, No graph saved") + raise e + + if verbose: + print(graph) + list_of_nodes = parse(graph, trace, args) + # We are hardcoding that this was run on CPU even though it might have actually + # run on GPU. Note this is what is shown in TensorBoard and has no bearing + # on actual execution. + # TODO: See if we can extract GPU vs CPU information from the PyTorch model + # and pass it correctly to TensorBoard. + # + # Definition of StepStats and DeviceStepStats can be found at + # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts + # and + # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto + stepstats = RunMetadata( + step_stats=StepStats(dev_stats=[DeviceStepStats(device="/device:CPU:0")]) + ) + return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats + # The producer version has been reverse engineered from standard + # TensorBoard logged data. + + +@contextlib.contextmanager +def _set_model_to_eval(model): + """Context manager to temporarily set the training mode of ``model`` to eval.""" + if not isinstance(model, torch.jit.ScriptFunction): + originally_training = model.training + model.train(False) + try: + yield + finally: + model.train(originally_training) + else: + # Do nothing for ScriptFunction + try: + yield + finally: + pass + + +def _node_get(node: torch._C.Node, key: str): + """Get attributes of a node which is polymorphic over return type.""" + sel = node.kindOf(key) + return getattr(node, sel)(key) diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py new file mode 100644 index 0000000000000000000000000000000000000000..8211f6e0c80241bebacd19ea5824c5b338018849 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py @@ -0,0 +1,983 @@ +import json +import logging +import os +import struct + +from typing import Any, List, Optional + +import torch +import numpy as np + +from google.protobuf import struct_pb2 + +from tensorboard.compat.proto.summary_pb2 import ( + HistogramProto, + Summary, + SummaryMetadata, +) +from tensorboard.compat.proto.tensor_pb2 import TensorProto +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto +from tensorboard.plugins.custom_scalar import layout_pb2 +from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData +from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData + +from ._convert_np import make_np +from ._utils import _prepare_video, convert_to_HWC + +__all__ = [ + "half_to_int", + "int_to_half", + "hparams", + "scalar", + "histogram_raw", + "histogram", + "make_histogram", + "image", + "image_boxes", + "draw_boxes", + "make_image", + "video", + "make_video", + "audio", + "custom_scalars", + "text", + "tensor_proto", + "pr_curve_raw", + "pr_curve", + "compute_curve", + "mesh", +] + +logger = logging.getLogger(__name__) + +def half_to_int(f: float) -> int: + 
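+    # Editorial example (illustrative, not part of the upstream docstring):
+    #   half_to_int(1.0)        -> 1065353216  (0x3F800000, the float32 bits)
+    #   int_to_half(1065353216) -> 1.0
+    # i.e. the value is packed as a 32-bit float and reinterpreted as int32,
+    # giving a lossless round trip for storage in TensorProto.half_val.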
"""Casts a half-precision float value into an integer. + + Converts a half precision floating point value, such as `torch.half` or + `torch.bfloat16`, into an integer value which can be written into the + half_val field of a TensorProto for storage. + + To undo the effects of this conversion, use int_to_half(). + + """ + buf = struct.pack("f", f) + return struct.unpack("i", buf)[0] + +def int_to_half(i: int) -> float: + """Casts an integer value to a half-precision float. + + Converts an integer value obtained from half_to_int back into a floating + point value. + + """ + buf = struct.pack("i", i) + return struct.unpack("f", buf)[0] + +def _tensor_to_half_val(t: torch.Tensor) -> List[int]: + return [half_to_int(x) for x in t.flatten().tolist()] + +def _tensor_to_complex_val(t: torch.Tensor) -> List[float]: + return torch.view_as_real(t).flatten().tolist() + +def _tensor_to_list(t: torch.Tensor) -> List[Any]: + return t.flatten().tolist() + +# type maps: torch.Tensor type -> (protobuf type, protobuf val field) +_TENSOR_TYPE_MAP = { + torch.half: ("DT_HALF", "half_val", _tensor_to_half_val), + torch.float16: ("DT_HALF", "half_val", _tensor_to_half_val), + torch.bfloat16: ("DT_BFLOAT16", "half_val", _tensor_to_half_val), + torch.float32: ("DT_FLOAT", "float_val", _tensor_to_list), + torch.float: ("DT_FLOAT", "float_val", _tensor_to_list), + torch.float64: ("DT_DOUBLE", "double_val", _tensor_to_list), + torch.double: ("DT_DOUBLE", "double_val", _tensor_to_list), + torch.int8: ("DT_INT8", "int_val", _tensor_to_list), + torch.uint8: ("DT_UINT8", "int_val", _tensor_to_list), + torch.qint8: ("DT_UINT8", "int_val", _tensor_to_list), + torch.int16: ("DT_INT16", "int_val", _tensor_to_list), + torch.short: ("DT_INT16", "int_val", _tensor_to_list), + torch.int: ("DT_INT32", "int_val", _tensor_to_list), + torch.int32: ("DT_INT32", "int_val", _tensor_to_list), + torch.qint32: ("DT_INT32", "int_val", _tensor_to_list), + torch.int64: ("DT_INT64", "int64_val", _tensor_to_list), + torch.complex32: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.chalf: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.complex64: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.cfloat: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.bool: ("DT_BOOL", "bool_val", _tensor_to_list), + torch.complex128: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.cdouble: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.uint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint4x2: ("DT_UINT8", "uint32_val", _tensor_to_list), +} + + +def _calc_scale_factor(tensor): + converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor + return 1 if converted.dtype == np.uint8 else 255 + + +def _draw_single_box( + image, + xmin, + ymin, + xmax, + ymax, + display_str, + color="black", + color_text="black", + thickness=2, +): + from PIL import ImageDraw, ImageFont + + font = ImageFont.load_default() + draw = ImageDraw.Draw(image) + (left, right, top, bottom) = (xmin, xmax, ymin, ymax) + draw.line( + [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], + width=thickness, + fill=color, + ) + if display_str: + text_bottom = bottom + # Reverse list and print from bottom to top. 
+ text_width, text_height = font.getsize(display_str) + margin = np.ceil(0.05 * text_height) + draw.rectangle( + [ + (left, text_bottom - text_height - 2 * margin), + (left + text_width, text_bottom), + ], + fill=color, + ) + draw.text( + (left + margin, text_bottom - text_height - margin), + display_str, + fill=color_text, + font=font, + ) + return image + + +def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None): + """Output three `Summary` protocol buffers needed by hparams plugin. + + `Experiment` keeps the metadata of an experiment, such as the name of the + hyperparameters and the name of the metrics. + `SessionStartInfo` keeps key-value pairs of the hyperparameters + `SessionEndInfo` describes status of the experiment e.g. STATUS_SUCCESS + + Args: + hparam_dict: A dictionary that contains names of the hyperparameters + and their values. + metric_dict: A dictionary that contains names of the metrics + and their values. + hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that + contains names of the hyperparameters and all discrete values they can hold + + Returns: + The `Summary` protobufs for Experiment, SessionStartInfo and + SessionEndInfo + """ + import torch + from tensorboard.plugins.hparams.api_pb2 import ( + DataType, + Experiment, + HParamInfo, + MetricInfo, + MetricName, + Status, + ) + from tensorboard.plugins.hparams.metadata import ( + EXPERIMENT_TAG, + PLUGIN_DATA_VERSION, + PLUGIN_NAME, + SESSION_END_INFO_TAG, + SESSION_START_INFO_TAG, + ) + from tensorboard.plugins.hparams.plugin_data_pb2 import ( + HParamsPluginData, + SessionEndInfo, + SessionStartInfo, + ) + + # TODO: expose other parameters in the future. + # hp = HParamInfo(name='lr',display_name='learning rate', + # type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10, + # max_value=100)) + # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy', + # description='', dataset_type=DatasetType.DATASET_VALIDATION) + # exp = Experiment(name='123', description='456', time_created_secs=100.0, + # hparam_infos=[hp], metric_infos=[mt], user='tw') + + if not isinstance(hparam_dict, dict): + logger.warning("parameter: hparam_dict should be a dictionary, nothing logged.") + raise TypeError( + "parameter: hparam_dict should be a dictionary, nothing logged." + ) + if not isinstance(metric_dict, dict): + logger.warning("parameter: metric_dict should be a dictionary, nothing logged.") + raise TypeError( + "parameter: metric_dict should be a dictionary, nothing logged." + ) + + hparam_domain_discrete = hparam_domain_discrete or {} + if not isinstance(hparam_domain_discrete, dict): + raise TypeError( + "parameter: hparam_domain_discrete should be a dictionary, nothing logged." + ) + for k, v in hparam_domain_discrete.items(): + if ( + k not in hparam_dict + or not isinstance(v, list) + or not all(isinstance(d, type(hparam_dict[k])) for d in v) + ): + raise TypeError( + f"parameter: hparam_domain_discrete[{k}] should be a list of same type as hparam_dict[{k}]." 
+ ) + hps = [] + + ssi = SessionStartInfo() + for k, v in hparam_dict.items(): + if v is None: + continue + if isinstance(v, (int, float)): + ssi.hparams[k].number_value = v + + if k in hparam_domain_discrete: + domain_discrete: Optional[struct_pb2.ListValue] = struct_pb2.ListValue( + values=[ + struct_pb2.Value(number_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_FLOAT64"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, str): + ssi.hparams[k].string_value = v + + if k in hparam_domain_discrete: + domain_discrete = struct_pb2.ListValue( + values=[ + struct_pb2.Value(string_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_STRING"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, bool): + ssi.hparams[k].bool_value = v + + if k in hparam_domain_discrete: + domain_discrete = struct_pb2.ListValue( + values=[ + struct_pb2.Value(bool_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_BOOL"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, torch.Tensor): + v = make_np(v)[0] + ssi.hparams[k].number_value = v + hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_FLOAT64"))) + continue + raise ValueError( + "value should be one of int, float, str, bool, or torch.Tensor" + ) + + content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)]) + + mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()] + + exp = Experiment(hparam_infos=hps, metric_infos=mts) + + content = HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)]) + + sei = SessionEndInfo(status=Status.Value("STATUS_SUCCESS")) + content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)]) + + return exp, ssi, sei + + +def scalar(name, tensor, collections=None, new_style=False, double_precision=False): + """Output a `Summary` protocol buffer containing a single scalar value. + + The generated Summary has a Tensor.proto containing the input Tensor. + Args: + name: A name for the generated node. Will also serve as the series name in + TensorBoard. + tensor: A real numeric Tensor containing a single value. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + new_style: Whether to use new style (tensor field) or old style (simple_value + field). New style could lead to faster data loading. + Returns: + A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf. + Raises: + ValueError: If tensor has the wrong shape or type. 
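+
+    Illustrative example (editorial)::
+
+        scalar("loss", torch.tensor(0.25))
+        # -> Summary(value=[Summary.Value(tag="loss", simple_value=0.25)])
+        scalar("loss", torch.tensor(0.25), new_style=True)
+        # -> the same value carried in a TensorProto with dtype DT_FLOAT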
+ """ + tensor = make_np(tensor).squeeze() + assert ( + tensor.ndim == 0 + ), f"Tensor should contain one element (0 dimensions). Was given size: {tensor.size} and {tensor.ndim} dimensions." + # python float is double precision in numpy + scalar = float(tensor) + if new_style: + tensor_proto = TensorProto(float_val=[scalar], dtype="DT_FLOAT") + if double_precision: + tensor_proto = TensorProto(double_val=[scalar], dtype="DT_DOUBLE") + + plugin_data = SummaryMetadata.PluginData(plugin_name="scalars") + smd = SummaryMetadata(plugin_data=plugin_data) + return Summary( + value=[ + Summary.Value( + tag=name, + tensor=tensor_proto, + metadata=smd, + ) + ] + ) + else: + return Summary(value=[Summary.Value(tag=name, simple_value=scalar)]) + + +def tensor_proto(tag, tensor): + """Outputs a `Summary` protocol buffer containing the full tensor. + The generated Summary has a Tensor.proto containing the input Tensor. + Args: + name: A name for the generated node. Will also serve as the series name in + TensorBoard. + tensor: Tensor to be converted to protobuf + Returns: + A tensor protobuf in a `Summary` protobuf. + Raises: + ValueError: If tensor is too big to be converted to protobuf, or + tensor data type is not supported + """ + if tensor.numel() * tensor.itemsize >= (1 << 31): + raise ValueError( + "tensor is bigger than protocol buffer's hard limit of 2GB in size" + ) + + if tensor.dtype in _TENSOR_TYPE_MAP: + dtype, field_name, conversion_fn = _TENSOR_TYPE_MAP[tensor.dtype] + tensor_proto = TensorProto( + **{ + "dtype": dtype, + "tensor_shape": TensorShapeProto( + dim=[TensorShapeProto.Dim(size=x) for x in tensor.shape] + ), + field_name: conversion_fn(tensor), + }, + ) + else: + raise ValueError(f"{tag} has unsupported tensor dtype {tensor.dtype}") + + plugin_data = SummaryMetadata.PluginData(plugin_name="tensor") + smd = SummaryMetadata(plugin_data=plugin_data) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor_proto)]) + + +def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts): + # pylint: disable=line-too-long + """Output a `Summary` protocol buffer with a histogram. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. + min: A float or int min value + max: A float or int max value + num: Int number of values + sum: Float or int sum of all values + sum_squares: Float or int sum of squares for all values + bucket_limits: A numeric `Tensor` with upper value per bucket + bucket_counts: A numeric `Tensor` with number of values per bucket + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + hist = HistogramProto( + min=min, + max=max, + num=num, + sum=sum, + sum_squares=sum_squares, + bucket_limit=bucket_limits, + bucket=bucket_counts, + ) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def histogram(name, values, bins, max_bins=None): + # pylint: disable=line-too-long + """Output a `Summary` protocol buffer with a histogram. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + This op reports an `InvalidArgument` error if any value is not finite. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. 
+ values: A real numeric `Tensor`. Any shape. Values to use to + build the histogram. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + values = make_np(values) + hist = make_histogram(values.astype(float), bins, max_bins) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def make_histogram(values, bins, max_bins=None): + """Convert values into a histogram proto using logic from histogram.cc.""" + if values.size == 0: + raise ValueError("The input has no element.") + values = values.reshape(-1) + counts, limits = np.histogram(values, bins=bins) + num_bins = len(counts) + if max_bins is not None and num_bins > max_bins: + subsampling = num_bins // max_bins + subsampling_remainder = num_bins % subsampling + if subsampling_remainder != 0: + counts = np.pad( + counts, + pad_width=[[0, subsampling - subsampling_remainder]], + mode="constant", + constant_values=0, + ) + counts = counts.reshape(-1, subsampling).sum(axis=-1) + new_limits = np.empty((counts.size + 1,), limits.dtype) + new_limits[:-1] = limits[:-1:subsampling] + new_limits[-1] = limits[-1] + limits = new_limits + + # Find the first and the last bin defining the support of the histogram: + + cum_counts = np.cumsum(np.greater(counts, 0)) + start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right") + start = int(start) + end = int(end) + 1 + del cum_counts + + # TensorBoard only includes the right bin limits. To still have the leftmost limit + # included, we include an empty bin left. + # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the + # first nonzero-count bin: + counts = ( + counts[start - 1 : end] if start > 0 else np.concatenate([[0], counts[:end]]) + ) + limits = limits[start : end + 1] + + if counts.size == 0 or limits.size == 0: + raise ValueError("The histogram is empty, please file a bug report.") + + sum_sq = values.dot(values) + return HistogramProto( + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limit=limits.tolist(), + bucket=counts.tolist(), + ) + + +def image(tag, tensor, rescale=1, dataformats="NCHW"): + """Output a `Summary` protocol buffer with images. + + The summary has up to `max_images` summary values containing images. The + images are built from `tensor` which must be 3-D with shape `[height, width, + channels]` and where `channels` can be: + * 1: `tensor` is interpreted as Grayscale. + * 3: `tensor` is interpreted as RGB. + * 4: `tensor` is interpreted as RGBA. + The `name` in the outputted Summary.Value protobufs is generated based on the + name, with a suffix depending on the max_outputs setting: + * If `max_outputs` is 1, the summary value tag is '*name*/image'. + * If `max_outputs` is greater than 1, the summary value tags are + generated sequentially as '*name*/image/0', '*name*/image/1', etc. + Args: + tag: A name for the generated node. Will also serve as a series name in + TensorBoard. + tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width, + channels]` where `channels` is 1, 3, or 4. + 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8). + The image() function will scale the image values to [0, 255] by applying + a scale factor of either 1 (uint8) or 255 (float32). Out-of-range values + will be clipped. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
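+
+    Illustrative example (editorial; the array is hypothetical)::
+
+        img = np.random.rand(3, 64, 64)                # float values in [0, 1]
+        image("input/example", img, dataformats="CHW")
+        # the float input is scaled by 255, clipped to [0, 255] and PNG-encoded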
+ """ + tensor = make_np(tensor) + tensor = convert_to_HWC(tensor, dataformats) + # Do not assume that user passes in values in [0, 255], use data type to detect + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + image = make_image(tensor, rescale=rescale) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def image_boxes( + tag, tensor_image, tensor_boxes, rescale=1, dataformats="CHW", labels=None +): + """Output a `Summary` protocol buffer with images.""" + tensor_image = make_np(tensor_image) + tensor_image = convert_to_HWC(tensor_image, dataformats) + tensor_boxes = make_np(tensor_boxes) + tensor_image = tensor_image.astype(np.float32) * _calc_scale_factor(tensor_image) + image = make_image( + tensor_image.clip(0, 255).astype(np.uint8), + rescale=rescale, + rois=tensor_boxes, + labels=labels, + ) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def draw_boxes(disp_image, boxes, labels=None): + # xyxy format + num_boxes = boxes.shape[0] + list_gt = range(num_boxes) + for i in list_gt: + disp_image = _draw_single_box( + disp_image, + boxes[i, 0], + boxes[i, 1], + boxes[i, 2], + boxes[i, 3], + display_str=None if labels is None else labels[i], + color="Red", + ) + return disp_image + + +def make_image(tensor, rescale=1, rois=None, labels=None): + """Convert a numpy representation of an image to Image protobuf.""" + from PIL import Image + + height, width, channel = tensor.shape + scaled_height = int(height * rescale) + scaled_width = int(width * rescale) + image = Image.fromarray(tensor) + if rois is not None: + image = draw_boxes(image, rois, labels=labels) + try: + ANTIALIAS = Image.Resampling.LANCZOS + except AttributeError: + ANTIALIAS = Image.ANTIALIAS + image = image.resize((scaled_width, scaled_height), ANTIALIAS) + import io + + output = io.BytesIO() + image.save(output, format="PNG") + image_string = output.getvalue() + output.close() + return Summary.Image( + height=height, + width=width, + colorspace=channel, + encoded_image_string=image_string, + ) + + +def video(tag, tensor, fps=4): + tensor = make_np(tensor) + tensor = _prepare_video(tensor) + # If user passes in uint8, then we don't need to rescale by 255 + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + video = make_video(tensor, fps) + return Summary(value=[Summary.Value(tag=tag, image=video)]) + + +def make_video(tensor, fps): + try: + import moviepy # noqa: F401 + except ImportError: + print("add_video needs package moviepy") + return + try: + from moviepy import editor as mpy + except ImportError: + print( + "moviepy is installed, but can't import moviepy.editor.", + "Some packages could be missing [imageio, requests]", + ) + return + import tempfile + + t, h, w, c = tensor.shape + + # encode sequence of images into gif string + clip = mpy.ImageSequenceClip(list(tensor), fps=fps) + + filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name + try: # newer version of moviepy use logger instead of progress_bar argument. + clip.write_gif(filename, verbose=False, logger=None) + except TypeError: + try: # older version of moviepy does not support progress_bar argument. 
+ clip.write_gif(filename, verbose=False, progress_bar=False) + except TypeError: + clip.write_gif(filename, verbose=False) + + with open(filename, "rb") as f: + tensor_string = f.read() + + try: + os.remove(filename) + except OSError: + logger.warning("The temporary file used by moviepy cannot be deleted.") + + return Summary.Image( + height=h, width=w, colorspace=c, encoded_image_string=tensor_string + ) + + +def audio(tag, tensor, sample_rate=44100): + array = make_np(tensor) + array = array.squeeze() + if abs(array).max() > 1: + print("warning: audio amplitude out of range, auto clipped.") + array = array.clip(-1, 1) + assert array.ndim == 1, "input tensor should be 1 dimensional." + array = (array * np.iinfo(np.int16).max).astype(" 127: # weird, value > 127 breaks protobuf + num_thresholds = 127 + data = np.stack((tp, fp, tn, fn, precision, recall)) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None): + # weird, value > 127 breaks protobuf + num_thresholds = min(num_thresholds, 127) + data = compute_curve( + labels, predictions, num_thresholds=num_thresholds, weights=weights + ) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py +def compute_curve(labels, predictions, num_thresholds=None, weights=None): + _MINIMUM_COUNT = 1e-7 + + if weights is None: + weights = 1.0 + + # Compute bins of true positives and false positives. + bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) + float_labels = labels.astype(np.float64) + histogram_range = (0, num_thresholds - 1) + tp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=float_labels * weights, + ) + fp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=(1.0 - float_labels) * weights, + ) + + # Obtain the reverse cumulative sum. + tp = np.cumsum(tp_buckets[::-1])[::-1] + fp = np.cumsum(fp_buckets[::-1])[::-1] + tn = fp[0] - fp + fn = tp[0] - tp + precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) + recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) + return np.stack((tp, fp, tn, fn, precision, recall)) + + +def _get_tensor_summary( + name, display_name, description, tensor, content_type, components, json_config +): + """Create a tensor summary with summary metadata. + + Args: + name: Uniquely identifiable name of the summary op. 
Could be replaced by + combination of name and type to make it unique even outside of this + summary. + display_name: Will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. + tensor: Tensor to display in summary. + content_type: Type of content inside the Tensor. + components: Bitmask representing present parts (vertices, colors, etc.) that + belong to the summary. + json_config: A string, JSON-serialized dictionary of ThreeJS classes + configuration. + + Returns: + Tensor summary with metadata. + """ + import torch + from tensorboard.plugins.mesh import metadata + + tensor = torch.as_tensor(tensor) + + tensor_metadata = metadata.create_summary_metadata( + name, + display_name, + content_type, + components, + tensor.shape, + description, + json_config=json_config, + ) + + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=tensor.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=tensor.shape[0]), + TensorShapeProto.Dim(size=tensor.shape[1]), + TensorShapeProto.Dim(size=tensor.shape[2]), + ] + ), + ) + + tensor_summary = Summary.Value( + tag=metadata.get_instance_name(name, content_type), + tensor=tensor, + metadata=tensor_metadata, + ) + + return tensor_summary + + +def _get_json_config(config_dict): + """Parse and returns JSON string from python dictionary.""" + json_config = "{}" + if config_dict is not None: + json_config = json.dumps(config_dict, sort_keys=True) + return json_config + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/mesh/summary.py +def mesh( + tag, vertices, colors, faces, config_dict, display_name=None, description=None +): + """Output a merged `Summary` protocol buffer with a mesh/point cloud. + + Args: + tag: A name for this summary operation. + vertices: Tensor of shape `[dim_1, ..., dim_n, 3]` representing the 3D + coordinates of vertices. + faces: Tensor of shape `[dim_1, ..., dim_n, 3]` containing indices of + vertices within each triangle. + colors: Tensor of shape `[dim_1, ..., dim_n, 3]` containing colors for each + vertex. + display_name: If set, will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. + config_dict: Dictionary with ThreeJS classes names and configuration. + + Returns: + Merged summary for mesh/point cloud representation. 
+ """ + from tensorboard.plugins.mesh import metadata + from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData + + json_config = _get_json_config(config_dict) + + summaries = [] + tensors = [ + (vertices, MeshPluginData.VERTEX), + (faces, MeshPluginData.FACE), + (colors, MeshPluginData.COLOR), + ] + tensors = [tensor for tensor in tensors if tensor[0] is not None] + components = metadata.get_components_bitmask( + [content_type for (tensor, content_type) in tensors] + ) + + for tensor, content_type in tensors: + summaries.append( + _get_tensor_summary( + tag, + display_name, + description, + tensor, + content_type, + components, + json_config, + ) + ) + + return Summary(value=summaries) diff --git a/venv/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a1e039040f3e1c50c8709632052206db87565b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py @@ -0,0 +1,1270 @@ +"""Provide an API for writing protocol buffers to event files to be consumed by TensorBoard for visualization.""" + +import os +import time +from typing import List, Optional, Union, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from matplotlib.figure import Figure +from tensorboard.compat import tf +from tensorboard.compat.proto import event_pb2 +from tensorboard.compat.proto.event_pb2 import Event, SessionLog +from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig +from tensorboard.summary.writer.event_file_writer import EventFileWriter + +from ._convert_np import make_np +from ._embedding import get_embedding_info, make_mat, make_sprite, make_tsv, write_pbtxt +from ._onnx_graph import load_onnx_graph +from ._pytorch_graph import graph +from ._utils import figure_to_image +from .summary import ( + audio, + custom_scalars, + histogram, + histogram_raw, + hparams, + image, + image_boxes, + mesh, + pr_curve, + pr_curve_raw, + scalar, + tensor_proto, + text, + video, +) + +__all__ = ["FileWriter", "SummaryWriter"] + + +class FileWriter: + """Writes protocol buffers to event files to be consumed by TensorBoard. + + The `FileWriter` class provides a mechanism to create an event file in a + given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + + def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""): + """Create a `FileWriter` and an event file. + + On construction the writer creates a new event file in `log_dir`. + The other arguments to the constructor control the asynchronous writes to + the event file. + + Args: + log_dir: A string. Directory where event file will be written. + max_queue: Integer. Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs: Number. How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix: A string. Suffix added to all event filenames + in the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. 
+        """
+        # Sometimes PosixPath is passed in and we need to coerce it to
+        # a string in all cases
+        # TODO: See if we can remove this in the future if we are
+        # actually the ones passing in a PosixPath
+        log_dir = str(log_dir)
+        self.event_writer = EventFileWriter(
+            log_dir, max_queue, flush_secs, filename_suffix
+        )
+
+    def get_logdir(self):
+        """Return the directory where event file will be written."""
+        return self.event_writer.get_logdir()
+
+    def add_event(self, event, step=None, walltime=None):
+        """Add an event to the event file.
+
+        Args:
+            event: An `Event` protocol buffer.
+            step: Number. Optional global step value for training process
+              to record with the event.
+            walltime: float. Optional walltime to override the default (current)
+              walltime (from time.time()) seconds after epoch
+        """
+        event.wall_time = time.time() if walltime is None else walltime
+        if step is not None:
+            # Make sure step is converted from numpy or other formats
+            # since protobuf might not convert depending on version
+            event.step = int(step)
+        self.event_writer.add_event(event)
+
+    def add_summary(self, summary, global_step=None, walltime=None):
+        """Add a `Summary` protocol buffer to the event file.
+
+        This method wraps the provided summary in an `Event` protocol buffer
+        and adds it to the event file.
+
+        Args:
+            summary: A `Summary` protocol buffer.
+            global_step: Number. Optional global step value for training process
+              to record with the summary.
+            walltime: float. Optional walltime to override the default (current)
+              walltime (from time.time()) seconds after epoch
+        """
+        event = event_pb2.Event(summary=summary)
+        self.add_event(event, global_step, walltime)
+
+    def add_graph(self, graph_profile, walltime=None):
+        """Add a `Graph` and step stats protocol buffer to the event file.
+
+        Args:
+            graph_profile: A `Graph` and step stats protocol buffer.
+            walltime: float. Optional walltime to override the default (current)
+              walltime (from time.time()) seconds after epoch
+        """
+        graph = graph_profile[0]
+        stepstats = graph_profile[1]
+        event = event_pb2.Event(graph_def=graph.SerializeToString())
+        self.add_event(event, None, walltime)
+
+        trm = event_pb2.TaggedRunMetadata(
+            tag="step1", run_metadata=stepstats.SerializeToString()
+        )
+        event = event_pb2.Event(tagged_run_metadata=trm)
+        self.add_event(event, None, walltime)
+
+    def add_onnx_graph(self, graph, walltime=None):
+        """Add a `Graph` protocol buffer to the event file.
+
+        Args:
+            graph: A `Graph` protocol buffer.
+            walltime: float. Optional walltime to override the default (current)
+              walltime (from time.time()) seconds after epoch
+        """
+        event = event_pb2.Event(graph_def=graph.SerializeToString())
+        self.add_event(event, None, walltime)
+
+    def flush(self):
+        """Flushes the event file to disk.
+
+        Call this method to make sure that all pending events have been written to
+        disk.
+        """
+        self.event_writer.flush()
+
+    def close(self):
+        """Flushes the event file to disk and closes the file.
+
+        Call this method when you do not need the summary writer anymore.
+        """
+        self.event_writer.close()
+
+    def reopen(self):
+        """Reopens the EventFileWriter.
+
+        Can be called after `close()` to add more events in the same directory.
+        The events will go into a new events file.
+        Does nothing if the EventFileWriter was not closed.
+        """
+        self.event_writer.reopen()
+
+
+class SummaryWriter:
+    """Writes entries directly to event files in the log_dir to be consumed by TensorBoard.
+ + The `SummaryWriter` class provides a high-level API to create an event file + in a given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + + def __init__( + self, + log_dir=None, + comment="", + purge_step=None, + max_queue=10, + flush_secs=120, + filename_suffix="", + ): + """Create a `SummaryWriter` that will write out events and summaries to the event file. + + Args: + log_dir (str): Save directory location. Default is + runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run. + Use hierarchical folder structure to compare + between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc. + for each new experiment to compare across them. + comment (str): Comment log_dir suffix appended to the default + ``log_dir``. If ``log_dir`` is assigned, this argument has no effect. + purge_step (int): + When logging crashes at step :math:`T+X` and restarts at step :math:`T`, + any events whose global_step larger or equal to :math:`T` will be + purged and hidden from TensorBoard. + Note that crashed and resumed experiments should have the same ``log_dir``. + max_queue (int): Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs (int): How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix (str): Suffix added to all event filenames in + the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + + # create a summary writer with automatically generated folder name. + writer = SummaryWriter() + # folder location: runs/May04_22-14-54_s-MacBook-Pro.local/ + + # create a summary writer using the specified folder name. + writer = SummaryWriter("my_experiment") + # folder location: my_experiment + + # create a summary writer with comment appended. + writer = SummaryWriter(comment="LR_0.1_BATCH_16") + # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/ + + """ + torch._C._log_api_usage_once("tensorboard.create.summarywriter") + if not log_dir: + import socket + from datetime import datetime + + current_time = datetime.now().strftime("%b%d_%H-%M-%S") + log_dir = os.path.join( + "runs", current_time + "_" + socket.gethostname() + comment + ) + self.log_dir = log_dir + self.purge_step = purge_step + self.max_queue = max_queue + self.flush_secs = flush_secs + self.filename_suffix = filename_suffix + + # Initialize the file writers, but they can be cleared out on close + # and recreated later as needed. + self.file_writer = self.all_writers = None + self._get_file_writer() + + # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard + v = 1e-12 + buckets = [] + neg_buckets = [] + while v < 1e20: + buckets.append(v) + neg_buckets.append(-v) + v *= 1.1 + self.default_bins = neg_buckets[::-1] + [0] + buckets + + def _check_caffe2_blob(self, item): + """ + Check if the input is a string representing a Caffe2 blob name. + + Caffe2 users have the option of passing a string representing the name of a blob + in the workspace instead of passing the actual Tensor/array containing the numeric values. 
+ Thus, we need to check if we received a string as input + instead of an actual Tensor/array, and if so, we need to fetch the Blob + from the workspace corresponding to that name. Fetching can be done with the + following: + + from caffe2.python import workspace (if not already imported) + workspace.FetchBlob(blob_name) + workspace.FetchBlobs([blob_name1, blob_name2, ...]) + """ + return isinstance(item, str) + + def _get_file_writer(self): + """Return the default FileWriter instance. Recreates it if closed.""" + if self.all_writers is None or self.file_writer is None: + self.file_writer = FileWriter( + self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers = {self.file_writer.get_logdir(): self.file_writer} + if self.purge_step is not None: + most_recent_step = self.purge_step + self.file_writer.add_event( + Event(step=most_recent_step, file_version="brain.Event:2") + ) + self.file_writer.add_event( + Event( + step=most_recent_step, + session_log=SessionLog(status=SessionLog.START), + ) + ) + self.purge_step = None + return self.file_writer + + def get_logdir(self): + """Return the directory where event files will be written.""" + return self.log_dir + + def add_hparams( + self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None, global_step=None + ): + """Add a set of hyperparameters to be compared in TensorBoard. + + Args: + hparam_dict (dict): Each key-value pair in the dictionary is the + name of the hyper parameter and it's corresponding value. + The type of the value can be one of `bool`, `string`, `float`, + `int`, or `None`. + metric_dict (dict): Each key-value pair in the dictionary is the + name of the metric and it's corresponding value. Note that the key used + here should be unique in the tensorboard record. Otherwise the value + you added by ``add_scalar`` will be displayed in hparam plugin. In most + cases, this is unwanted. + hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that + contains names of the hyperparameters and all discrete values they can hold + run_name (str): Name of the run, to be included as part of the logdir. + If unspecified, will use current timestamp. + global_step (int): Global step value to record + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + with SummaryWriter() as w: + for i in range(5): + w.add_hparams({'lr': 0.1*i, 'bsize': i}, + {'hparam/accuracy': 10*i, 'hparam/loss': 10*i}) + + Expected result: + + .. image:: _static/img/tensorboard/add_hparam.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_hparams") + if type(hparam_dict) is not dict or type(metric_dict) is not dict: + raise TypeError("hparam_dict and metric_dict should be dictionary.") + exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete) + + if not run_name: + run_name = str(time.time()) + logdir = os.path.join(self._get_file_writer().get_logdir(), run_name) + with SummaryWriter(log_dir=logdir) as w_hp: + w_hp.file_writer.add_summary(exp, global_step) + w_hp.file_writer.add_summary(ssi, global_step) + w_hp.file_writer.add_summary(sei, global_step) + for k, v in metric_dict.items(): + w_hp.add_scalar(k, v, global_step) + + def add_scalar( + self, + tag, + scalar_value, + global_step=None, + walltime=None, + new_style=False, + double_precision=False, + ): + """Add scalar data to summary. 
+ + Args: + tag (str): Data identifier + scalar_value (float or string/blobname): Value to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + with seconds after epoch of event + new_style (boolean): Whether to use new style (tensor field) or old + style (simple_value field). New style could lead to faster data loading. + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = range(100) + for i in x: + writer.add_scalar('y=2x', i * 2, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_scalar.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalar") + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + + summary = scalar( + tag, scalar_value, new_style=new_style, double_precision=double_precision + ) + self._get_file_writer().add_summary(summary, global_step, walltime) + + def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): + """Add many scalar data to summary. + + Args: + main_tag (str): The parent name for the tags + tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + r = 5 + for i in range(100): + writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), + 'xcosx':i*np.cos(i/r), + 'tanx': np.tan(i/r)}, i) + writer.close() + # This call adds three values to the same scalar plot with the tag + # 'run_14h' in TensorBoard's scalar section. + + Expected result: + + .. image:: _static/img/tensorboard/add_scalars.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalars") + walltime = time.time() if walltime is None else walltime + fw_logdir = self._get_file_writer().get_logdir() + for tag, scalar_value in tag_scalar_dict.items(): + fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag + assert self.all_writers is not None + if fw_tag in self.all_writers.keys(): + fw = self.all_writers[fw_tag] + else: + fw = FileWriter( + fw_tag, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers[fw_tag] = fw + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime) + + def add_tensor( + self, + tag, + tensor, + global_step=None, + walltime=None, + ): + """Add tensor data to summary. 
+
+        Args:
+            tag (str): Data identifier
+            tensor (torch.Tensor): tensor to save
+            global_step (int): Global step value to record
+        Examples::
+
+            from torch.utils.tensorboard import SummaryWriter
+            writer = SummaryWriter()
+            x = torch.tensor([1,2,3])
+            writer.add_tensor('x', x)
+            writer.close()
+
+        Expected result:
+            Summary::tensor::float_val [1,2,3]
+                   ::tensor::shape [3]
+                   ::tag 'x'
+
+        """
+        torch._C._log_api_usage_once("tensorboard.logging.add_tensor")
+        if self._check_caffe2_blob(tensor):
+            from caffe2.python import workspace
+
+            tensor = torch.tensor(workspace.FetchBlob(tensor))
+
+        summary = tensor_proto(tag, tensor)
+        self._get_file_writer().add_summary(summary, global_step, walltime)
+
+    def add_histogram(
+        self,
+        tag,
+        values,
+        global_step=None,
+        bins="tensorflow",
+        walltime=None,
+        max_bins=None,
+    ):
+        """Add histogram to summary.
+
+        Args:
+            tag (str): Data identifier
+            values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram
+            global_step (int): Global step value to record
+            bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find
+              other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
+            walltime (float): Optional override default walltime (time.time())
+              seconds after epoch of event
+
+        Examples::
+
+            from torch.utils.tensorboard import SummaryWriter
+            import numpy as np
+            writer = SummaryWriter()
+            for i in range(10):
+                x = np.random.random(1000)
+                writer.add_histogram('distribution centers', x + i, i)
+            writer.close()
+
+        Expected result:
+
+        .. image:: _static/img/tensorboard/add_histogram.png
+           :scale: 50 %
+
+        """
+        torch._C._log_api_usage_once("tensorboard.logging.add_histogram")
+        if self._check_caffe2_blob(values):
+            from caffe2.python import workspace
+
+            values = workspace.FetchBlob(values)
+        if isinstance(bins, str) and bins == "tensorflow":
+            bins = self.default_bins
+        self._get_file_writer().add_summary(
+            histogram(tag, values, bins, max_bins=max_bins), global_step, walltime
+        )
+
+    def add_histogram_raw(
+        self,
+        tag,
+        min,
+        max,
+        num,
+        sum,
+        sum_squares,
+        bucket_limits,
+        bucket_counts,
+        global_step=None,
+        walltime=None,
+    ):
+        """Add histogram with raw data.
+
+        Args:
+            tag (str): Data identifier
+            min (float or int): Min value
+            max (float or int): Max value
+            num (int): Number of values
+            sum (float or int): Sum of all values
+            sum_squares (float or int): Sum of squares for all values
+            bucket_limits (torch.Tensor, numpy.ndarray): Upper value per bucket.
+              The number of elements of it should be the same as `bucket_counts`.
+ bucket_counts (torch.Tensor, numpy.ndarray): Number of values per bucket + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + dummy_data = [] + for idx, value in enumerate(range(50)): + dummy_data += [idx + 0.001] * value + + bins = list(range(50+2)) + bins = np.array(bins) + values = np.array(dummy_data).astype(float).reshape(-1) + counts, limits = np.histogram(values, bins=bins) + sum_sq = values.dot(values) + writer.add_histogram_raw( + tag='histogram_with_raw_data', + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limits=limits[1:].tolist(), + bucket_counts=counts.tolist(), + global_step=0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram_raw.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw") + if len(bucket_limits) != len(bucket_counts): + raise ValueError( + "len(bucket_limits) != len(bucket_counts), see the document." + ) + self._get_file_writer().add_summary( + histogram_raw( + tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts + ), + global_step, + walltime, + ) + + def add_image( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW" + ): + """Add image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to + convert a batch of tensor into 3xHxW format or call ``add_images`` and let us do the job. + Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also suitable as long as + corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + img = np.zeros((3, 100, 100)) + img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + img_HWC = np.zeros((100, 100, 3)) + img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + writer = SummaryWriter() + writer.add_image('my_image', img, 0) + + # If you have non-default dimension setting, set the dataformats argument. + writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC') + writer.close() + + Expected result: + + .. 
image:: _static/img/tensorboard/add_image.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_image") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_images( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="NCHW" + ): + """Add batched image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be + accepted. e.g. NCHW or NHWC. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + + img_batch = np.zeros((16, 3, 100, 100)) + for i in range(16): + img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i + img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i + + writer = SummaryWriter() + writer.add_images('my_image_batch', img_batch, 0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_images.png + :scale: 30 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_images") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_image_with_boxes( + self, + tag, + img_tensor, + box_tensor, + global_step=None, + walltime=None, + rescale=1, + dataformats="CHW", + labels=None, + ): + """Add image and draw bounding boxes on the image. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects) + box should be represented as [x1, y1, x2, y2]. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + rescale (float): Optional scale override + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + labels (list of string): The label to be shown for each bounding box. + Shape: + img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument. + e.g. CHW or HWC + + box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): NX4, where N is the number of + boxes and each 4 elements in a row represents (xmin, ymin, xmax, ymax). 
+ """ + torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + if self._check_caffe2_blob(box_tensor): + from caffe2.python import workspace + + box_tensor = workspace.FetchBlob(box_tensor) + if labels is not None: + if isinstance(labels, str): + labels = [labels] + if len(labels) != box_tensor.shape[0]: + labels = None + self._get_file_writer().add_summary( + image_boxes( + tag, + img_tensor, + box_tensor, + rescale=rescale, + dataformats=dataformats, + labels=labels, + ), + global_step, + walltime, + ) + + def add_figure( + self, + tag: str, + figure: Union["Figure", List["Figure"]], + global_step: Optional[int] = None, + close: bool = True, + walltime: Optional[float] = None + ) -> None: + """Render matplotlib figure into an image and add it to summary. + + Note that this requires the ``matplotlib`` package. + + Args: + tag: Data identifier + figure: Figure or a list of figures + global_step: Global step value to record + close: Flag to automatically close the figure + walltime: Optional override default walltime (time.time()) + seconds after epoch of event + """ + torch._C._log_api_usage_once("tensorboard.logging.add_figure") + if isinstance(figure, list): + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="NCHW", + ) + else: + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="CHW", + ) + + def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None): + """Add video data to summary. + + Note that this requires the ``moviepy`` package. + + Args: + tag (str): Data identifier + vid_tensor (torch.Tensor): Video data + global_step (int): Global step value to record + fps (float or int): Frames per second + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + """ + torch._C._log_api_usage_once("tensorboard.logging.add_video") + self._get_file_writer().add_summary( + video(tag, vid_tensor, fps), global_step, walltime + ) + + def add_audio( + self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None + ): + """Add audio data to summary. + + Args: + tag (str): Data identifier + snd_tensor (torch.Tensor): Sound data + global_step (int): Global step value to record + sample_rate (int): sample rate in Hz + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1]. + """ + torch._C._log_api_usage_once("tensorboard.logging.add_audio") + if self._check_caffe2_blob(snd_tensor): + from caffe2.python import workspace + + snd_tensor = workspace.FetchBlob(snd_tensor) + self._get_file_writer().add_summary( + audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime + ) + + def add_text(self, tag, text_string, global_step=None, walltime=None): + """Add text data to summary. 
+ + Args: + tag (str): Data identifier + text_string (str): String to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Examples:: + + writer.add_text('lstm', 'This is an lstm', 0) + writer.add_text('rnn', 'This is an rnn', 10) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_text") + self._get_file_writer().add_summary( + text(tag, text_string), global_step, walltime + ) + + def add_onnx_graph(self, prototxt): + torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph") + self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt)) + + def add_graph( + self, model, input_to_model=None, verbose=False, use_strict_trace=True + ): + """Add graph data to summary. + + Args: + model (torch.nn.Module): Model to draw. + input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of + variables to be fed. + verbose (bool): Whether to print graph structure in console. + use_strict_trace (bool): Whether to pass keyword argument `strict` to + `torch.jit.trace`. Pass False when you want the tracer to + record your mutable container types (list, dict) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_graph") + if hasattr(model, "forward"): + # A valid PyTorch model should have a 'forward' method + self._get_file_writer().add_graph( + graph(model, input_to_model, verbose, use_strict_trace) + ) + else: + # Caffe2 models do not have the 'forward' method + from caffe2.proto import caffe2_pb2 + from caffe2.python import core + + from ._caffe2_graph import ( + model_to_graph_def, + nets_to_graph_def, + protos_to_graph_def, + ) + + if isinstance(model, list): + if isinstance(model[0], core.Net): + current_graph = nets_to_graph_def(model) + elif isinstance(model[0], caffe2_pb2.NetDef): + current_graph = protos_to_graph_def(model) + else: + # Handles cnn.CNNModelHelper, model_helper.ModelHelper + current_graph = model_to_graph_def(model) + event = event_pb2.Event(graph_def=current_graph.SerializeToString()) # type: ignore[possibly-undefined] + self._get_file_writer().add_event(event) + + @staticmethod + def _encode(rawstr): + # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc. + retval = rawstr + retval = retval.replace("%", f"%{ord('%'):02x}") + retval = retval.replace("/", f"%{ord('/'):02x}") + retval = retval.replace("\\", "%%%02x" % (ord("\\"))) + return retval + + def add_embedding( + self, + mat, + metadata=None, + label_img=None, + global_step=None, + tag="default", + metadata_header=None, + ): + """Add embedding projector data to summary. 
+ + Args: + mat (torch.Tensor or numpy.ndarray): A matrix which each row is the feature vector of the data point + metadata (list): A list of labels, each element will be convert to string + label_img (torch.Tensor): Images correspond to each data point + global_step (int): Global step value to record + tag (str): Name for the embedding + Shape: + mat: :math:`(N, D)`, where N is number of data and D is feature dimension + + label_img: :math:`(N, C, H, W)` + + Examples:: + + import keyword + import torch + meta = [] + while len(meta)<100: + meta = meta+keyword.kwlist # get some strings + meta = meta[:100] + + for i, v in enumerate(meta): + meta[i] = v+str(i) + + label_img = torch.rand(100, 3, 10, 32) + for i in range(100): + label_img[i]*=i/100.0 + + writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img) + writer.add_embedding(torch.randn(100, 5), label_img=label_img) + writer.add_embedding(torch.randn(100, 5), metadata=meta) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_embedding") + mat = make_np(mat) + if global_step is None: + global_step = 0 + # clear pbtxt? + + # Maybe we should encode the tag so slashes don't trip us up? + # I don't think this will mess us up, but better safe than sorry. + subdir = f"{str(global_step).zfill(5)}/{self._encode(tag)}" + save_path = os.path.join(self._get_file_writer().get_logdir(), subdir) + + fs = tf.io.gfile + if fs.exists(save_path): + if fs.isdir(save_path): + print( + "warning: Embedding dir exists, did you set global_step for add_embedding()?" + ) + else: + raise Exception( + f"Path: `{save_path}` exists, but is a file. Cannot proceed." + ) + else: + fs.makedirs(save_path) + + if metadata is not None: + assert mat.shape[0] == len( + metadata + ), "#labels should equal with #data points" + make_tsv(metadata, save_path, metadata_header=metadata_header) + + if label_img is not None: + assert ( + mat.shape[0] == label_img.shape[0] + ), "#images should equal with #data points" + make_sprite(label_img, save_path) + + assert ( + mat.ndim == 2 + ), "mat should be 2D, where mat.size(0) is the number of data points" + make_mat(mat, save_path) + + # Filesystem doesn't necessarily have append semantics, so we store an + # internal buffer to append to and re-write whole file after each + # embedding is added + if not hasattr(self, "_projector_config"): + self._projector_config = ProjectorConfig() + embedding_info = get_embedding_info( + metadata, label_img, subdir, global_step, tag + ) + self._projector_config.embeddings.extend([embedding_info]) + + from google.protobuf import text_format + + config_pbtxt = text_format.MessageToString(self._projector_config) + write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt) + + def add_pr_curve( + self, + tag, + labels, + predictions, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve. + + Plotting a precision-recall curve lets you understand your model's + performance under different threshold settings. With this function, + you provide the ground truth labeling (T/F) and prediction confidence + (usually the output of your model) for each target. The TensorBoard UI + will let you choose the threshold interactively. + + Args: + tag (str): Data identifier + labels (torch.Tensor, numpy.ndarray, or string/blobname): + Ground truth data. Binary label for each element. + predictions (torch.Tensor, numpy.ndarray, or string/blobname): + The probability that an element be classified as true. 
+ Value should be in [0, 1] + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + labels = np.random.randint(2, size=100) # binary label + predictions = np.random.rand(100) + writer = SummaryWriter() + writer.add_pr_curve('pr_curve', labels, predictions, 0) + writer.close() + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve") + labels, predictions = make_np(labels), make_np(predictions) + self._get_file_writer().add_summary( + pr_curve(tag, labels, predictions, num_thresholds, weights), + global_step, + walltime, + ) + + def add_pr_curve_raw( + self, + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve with raw data. + + Args: + tag (str): Data identifier + true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts + false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts + true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts + false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts + precision (torch.Tensor, numpy.ndarray, or string/blobname): precision + recall (torch.Tensor, numpy.ndarray, or string/blobname): recall + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw") + self._get_file_writer().add_summary( + pr_curve_raw( + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + num_thresholds, + weights, + ), + global_step, + walltime, + ) + + def add_custom_scalars_multilinechart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_multilinechart" + ) + layout = {category: {title: ["Multiline", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars_marginchart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating marginchart. + + Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*, + which should have exactly 3 elements. 
+ + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_marginchart" + ) + assert len(tags) == 3 + layout = {category: {title: ["Margin", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars(self, layout): + """Create special chart by collecting charts tags in 'scalars'. + + NOTE: This function can only be called once for each SummaryWriter() object. + + Because it only provides metadata to tensorboard, the function can be called before or after the training loop. + + Args: + layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary + {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type + (one of **Multiline** or **Margin**) and the second element should be a list containing the tags + you have used in add_scalar function, which will be collected into the new chart. + + Examples:: + + layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, + 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], + 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} + + writer.add_custom_scalars(layout) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars") + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_mesh( + self, + tag, + vertices, + colors=None, + faces=None, + config_dict=None, + global_step=None, + walltime=None, + ): + """Add meshes or 3D point clouds to TensorBoard. + + The visualization is based on Three.js, + so it allows users to interact with the rendered object. Besides the basic definitions + such as vertices, faces, users can further provide camera parameter, lighting condition, etc. + Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for + advanced usage. + + Args: + tag (str): Data identifier + vertices (torch.Tensor): List of the 3D coordinates of vertices. + colors (torch.Tensor): Colors for each vertex + faces (torch.Tensor): Indices of vertices within each triangle. (Optional) + config_dict: Dictionary with ThreeJS classes names and configuration. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Shape: + vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels) + + colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + + faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`. 
+ + Examples:: + + from torch.utils.tensorboard import SummaryWriter + vertices_tensor = torch.as_tensor([ + [1, 1, 1], + [-1, -1, 1], + [1, -1, -1], + [-1, 1, -1], + ], dtype=torch.float).unsqueeze(0) + colors_tensor = torch.as_tensor([ + [255, 0, 0], + [0, 255, 0], + [0, 0, 255], + [255, 0, 255], + ], dtype=torch.int).unsqueeze(0) + faces_tensor = torch.as_tensor([ + [0, 2, 3], + [0, 3, 1], + [0, 1, 2], + [1, 3, 2], + ], dtype=torch.int).unsqueeze(0) + + writer = SummaryWriter() + writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor) + + writer.close() + """ + torch._C._log_api_usage_once("tensorboard.logging.add_mesh") + self._get_file_writer().add_summary( + mesh(tag, vertices, colors, faces, config_dict), global_step, walltime + ) + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. + """ + if self.all_writers is None: + return + for writer in self.all_writers.values(): + writer.flush() + + def close(self): + if self.all_writers is None: + return # ignore double close + for writer in self.all_writers.values(): + writer.flush() + writer.close() + self.file_writer = self.all_writers = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close()
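
For orientation, here is a minimal usage sketch of the SummaryWriter API added in writer.py above. The log directory, tag strings, and values are arbitrary placeholders; only methods defined in this file are exercised, and the snippet assumes numpy and torch are available in the environment.

    import numpy as np
    import torch
    from torch.utils.tensorboard import SummaryWriter

    # SummaryWriter implements __enter__/__exit__, so close() runs automatically on exit.
    with SummaryWriter(log_dir="runs/demo") as writer:
        for step in range(10):
            # One scalar point per global step on the 'loss/train' chart.
            writer.add_scalar("loss/train", 1.0 / (step + 1), global_step=step)
            # Distribution of values, bucketed with the default "tensorflow" bins.
            writer.add_histogram("weights", np.random.randn(1000) + step, global_step=step)
        # CHW float image in [0, 1]; the image summary op rescales it to uint8 internally.
        writer.add_image("sample", torch.rand(3, 64, 64), global_step=0, dataformats="CHW")
        # Hyperparameters paired with result metrics for the HParams plugin.
        writer.add_hparams({"lr": 0.1, "bsize": 16}, {"hparam/accuracy": 0.9})
        writer.flush()

Note that add_hparams writes its summaries into a separate sub-run directory under the writer's log_dir (named by run_name, or the current timestamp by default), so it appears as its own run in the HParams dashboard.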