diff --git a/env-llmeval/lib/python3.10/site-packages/torch/linalg/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73057ab08c86c4395da5908dcf574823654aa9e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/linalg/__init__.py @@ -0,0 +1,2848 @@ +import sys + +import torch +from torch._C import _add_docstr, _linalg # type: ignore[attr-defined] + +LinAlgError = torch._C._LinAlgError # type: ignore[attr-defined] + +Tensor = torch.Tensor + +common_notes = { + "experimental_warning": """This function is "experimental" and it may change in a future PyTorch release.""", + "sync_note": "When inputs are on a CUDA device, this function synchronizes that device with the CPU.", + "sync_note_ex": r"When the inputs are on a CUDA device, this function synchronizes only when :attr:`check_errors`\ `= True`.", + "sync_note_has_ex": ("When inputs are on a CUDA device, this function synchronizes that device with the CPU. " + "For a version of this function that does not synchronize, see :func:`{}`.") +} + + +# Note: This not only adds doc strings for functions in the linalg namespace, but +# also connects the torch.linalg Python namespace to the torch._C._linalg builtins. + +cross = _add_docstr(_linalg.linalg_cross, r""" +linalg.cross(input, other, *, dim=-1, out=None) -> Tensor + + +Computes the cross product of two 3-dimensional vectors. + +Supports input of float, double, cfloat and cdouble dtypes. Also supports batches +of vectors, for which it computes the product along the dimension :attr:`dim`. +It broadcasts over the batch dimensions. + +Args: + input (Tensor): the first input tensor. + other (Tensor): the second input tensor. + dim (int, optional): the dimension along which to take the cross-product. Default: `-1`. + +Keyword args: + out (Tensor, optional): the output tensor. Ignored if `None`. Default: `None`. + +Example: + >>> a = torch.randn(4, 3) + >>> a + tensor([[-0.3956, 1.1455, 1.6895], + [-0.5849, 1.3672, 0.3599], + [-1.1626, 0.7180, -0.0521], + [-0.1339, 0.9902, -2.0225]]) + >>> b = torch.randn(4, 3) + >>> b + tensor([[-0.0257, -1.4725, -1.2251], + [-1.1479, -0.7005, -1.9757], + [-1.3904, 0.3726, -1.1836], + [-0.9688, -0.7153, 0.2159]]) + >>> torch.linalg.cross(a, b) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + >>> a = torch.randn(1, 3) # a is broadcast to match shape of b + >>> a + tensor([[-0.9941, -0.5132, 0.5681]]) + >>> torch.linalg.cross(a, b) + tensor([[ 1.4653, -1.2325, 1.4507], + [ 1.4119, -2.6163, 0.1073], + [ 0.3957, -1.9666, -1.0840], + [ 0.2956, -0.3357, 0.2139]]) +""") + +cholesky = _add_docstr(_linalg.linalg_cholesky, r""" +linalg.cholesky(A, *, upper=False, out=None) -> Tensor + +Computes the Cholesky decomposition of a complex Hermitian or real symmetric positive-definite matrix. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **Cholesky decomposition** of a complex Hermitian or real symmetric positive-definite matrix +:math:`A \in \mathbb{K}^{n \times n}` is defined as + +.. math:: + + A = LL^{\text{H}}\mathrlap{\qquad L \in \mathbb{K}^{n \times n}} + +where :math:`L` is a lower triangular matrix with real positive diagonal (even in the complex case) and +:math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, and the transpose when :math:`L` is real-valued. + +Supports input of float, double, cfloat and cdouble dtypes. 
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+""" + fr"""
+.. note:: {common_notes["sync_note"]}
+""" + r"""
+
+.. seealso::
+
+    :func:`torch.linalg.cholesky_ex` for a version of this operation that
+    skips the (slow) error checking by default and instead returns the debug
+    information. This makes it a faster way to check if a matrix is
+    positive-definite.
+
+    :func:`torch.linalg.eigh` for a different decomposition of a Hermitian matrix.
+    The eigenvalue decomposition gives more information about the matrix but it is
+    slower to compute than the Cholesky decomposition.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+                consisting of symmetric or Hermitian positive-definite matrices.
+
+Keyword args:
+    upper (bool, optional): whether to return an upper triangular matrix.
+        The tensor returned with upper=True is the conjugate transpose of the tensor
+        returned with upper=False.
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Raises:
+    RuntimeError: if the :attr:`A` matrix or any matrix in a batched :attr:`A` is not Hermitian
+                  (resp. symmetric) positive-definite. If :attr:`A` is a batch of matrices,
+                  the error message will include the batch index of the first matrix that fails
+                  to meet this condition.
+
+Examples::
+
+    >>> A = torch.randn(2, 2, dtype=torch.complex128)
+    >>> A = A @ A.T.conj() + torch.eye(2) # creates a Hermitian positive-definite matrix
+    >>> A
+    tensor([[2.5266+0.0000j, 1.9586-2.0626j],
+            [1.9586+2.0626j, 9.4160+0.0000j]], dtype=torch.complex128)
+    >>> L = torch.linalg.cholesky(A)
+    >>> L
+    tensor([[1.5895+0.0000j, 0.0000+0.0000j],
+            [1.2322+1.2976j, 2.4928+0.0000j]], dtype=torch.complex128)
+    >>> torch.dist(L @ L.T.conj(), A)
+    tensor(4.4692e-16, dtype=torch.float64)
+
+    >>> A = torch.randn(3, 2, 2, dtype=torch.float64)
+    >>> A = A @ A.mT + torch.eye(2)  # batch of symmetric positive-definite matrices
+    >>> L = torch.linalg.cholesky(A)
+    >>> torch.dist(L @ L.mT, A)
+    tensor(5.8747e-16, dtype=torch.float64)
+""")
+
+cholesky_ex = _add_docstr(_linalg.linalg_cholesky_ex, r"""
+linalg.cholesky_ex(A, *, upper=False, check_errors=False, out=None) -> (Tensor, Tensor)
+
+Computes the Cholesky decomposition of a complex Hermitian or real
+symmetric positive-definite matrix.
+
+This function skips the (slow) error checking and error message construction
+of :func:`torch.linalg.cholesky`, instead directly returning the LAPACK
+error codes as part of a named tuple ``(L, info)``. This makes this function
+a faster way to check if a matrix is positive-definite, and it provides an
+opportunity to handle decomposition errors more gracefully or performantly
+than :func:`torch.linalg.cholesky` does.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+If :attr:`A` is not a Hermitian positive-definite matrix, or if it's a batch of matrices
+and one or more of them is not a Hermitian positive-definite matrix,
+then ``info`` stores a positive integer for the corresponding matrix.
+The positive integer indicates the order of the leading minor that is not positive-definite,
+and the decomposition could not be completed.
+``info`` filled with zeros indicates that the decomposition was successful.
+If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
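+
+For example, a matrix that is not positive-definite reports the failure through
+``info`` rather than raising (illustrative sketch)::
+
+    >>> A = -torch.eye(2)  # negative-definite, so not positive-definite
+    >>> _, info = torch.linalg.cholesky_ex(A)
+    >>> info  # order of the first leading minor that is not positive-definite
+    tensor(1, dtype=torch.int32)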
+ +""" + fr""" +.. note:: {common_notes["sync_note_ex"]} + +.. warning:: {common_notes["experimental_warning"]} +""" + r""" + +.. seealso:: + :func:`torch.linalg.cholesky` is a NumPy compatible variant that always checks for errors. + +Args: + A (Tensor): the Hermitian `n \times n` matrix or the batch of such matrices of size + `(*, n, n)` where `*` is one or more batch dimensions. + +Keyword args: + upper (bool, optional): whether to return an upper triangular matrix. + The tensor returned with upper=True is the conjugate transpose of the tensor + returned with upper=False. + check_errors (bool, optional): controls whether to check the content of ``infos``. Default: `False`. + out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`. + +Examples:: + + >>> A = torch.randn(2, 2, dtype=torch.complex128) + >>> A = A @ A.t().conj() # creates a Hermitian positive-definite matrix + >>> L, info = torch.linalg.cholesky_ex(A) + >>> A + tensor([[ 2.3792+0.0000j, -0.9023+0.9831j], + [-0.9023-0.9831j, 0.8757+0.0000j]], dtype=torch.complex128) + >>> L + tensor([[ 1.5425+0.0000j, 0.0000+0.0000j], + [-0.5850-0.6374j, 0.3567+0.0000j]], dtype=torch.complex128) + >>> info + tensor(0, dtype=torch.int32) + +""") + +inv = _add_docstr(_linalg.linalg_inv, r""" +linalg.inv(A, *, out=None) -> Tensor + +Computes the inverse of a square matrix if it exists. +Throws a `RuntimeError` if the matrix is not invertible. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +for a matrix :math:`A \in \mathbb{K}^{n \times n}`, +its **inverse matrix** :math:`A^{-1} \in \mathbb{K}^{n \times n}` (if it exists) is defined as + +.. math:: + + A^{-1}A = AA^{-1} = \mathrm{I}_n + +where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix. + +The inverse matrix exists if and only if :math:`A` is `invertible`_. In this case, +the inverse is unique. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices +then the output has the same batch dimensions. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. note:: + Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by + the inverse, as:: + + linalg.solve(A, B) == linalg.inv(A) @ B # When B is a matrix + + It is always preferred to use :func:`~solve` when possible, as it is faster and more + numerically stable than computing the inverse explicitly. + +.. seealso:: + + :func:`torch.linalg.pinv` computes the pseudoinverse (Moore-Penrose inverse) of matrices + of any shape. + + :func:`torch.linalg.solve` computes :attr:`A`\ `.inv() @ \ `:attr:`B` with a + numerically stable algorithm. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of invertible matrices. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Raises: + RuntimeError: if the matrix :attr:`A` or any matrix in the batch of matrices :attr:`A` is not invertible. 
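+
+For example, a singular input raises an error; since :class:`torch.linalg.LinAlgError`
+subclasses `RuntimeError`, it can be caught as such (illustrative sketch)::
+
+    >>> A = torch.zeros(2, 2)  # singular
+    >>> try:
+    ...     torch.linalg.inv(A)
+    ... except RuntimeError:
+    ...     print("not invertible")
+    not invertible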
+
+Examples::
+
+    >>> A = torch.randn(4, 4)
+    >>> Ainv = torch.linalg.inv(A)
+    >>> torch.dist(A @ Ainv, torch.eye(4))
+    tensor(1.1921e-07)
+
+    >>> A = torch.randn(2, 3, 4, 4)  # Batch of matrices
+    >>> Ainv = torch.linalg.inv(A)
+    >>> torch.dist(A @ Ainv, torch.eye(4))
+    tensor(1.9073e-06)
+
+    >>> A = torch.randn(4, 4, dtype=torch.complex128)  # Complex matrix
+    >>> Ainv = torch.linalg.inv(A)
+    >>> torch.dist(A @ Ainv, torch.eye(4))
+    tensor(7.5107e-16, dtype=torch.float64)
+
+.. _invertible:
+    https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
+""")
+
+solve_ex = _add_docstr(_linalg.linalg_solve_ex, r"""
+linalg.solve_ex(A, B, *, left=True, check_errors=False, out=None) -> (Tensor, Tensor)
+
+A version of :func:`~solve` that does not perform error checks unless :attr:`check_errors`\ `= True`.
+It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
+
+""" + fr"""
+.. note:: {common_notes["sync_note_ex"]}
+
+.. warning:: {common_notes["experimental_warning"]}
+""" + r"""
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
+    B (Tensor): right-hand side tensor of shape `(*, n, k)`.
+
+Keyword args:
+    left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
+    check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
+                                   an error if it is non-zero. Default: `False`.
+    out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
+
+Returns:
+    A named tuple `(result, info)`.
+
+Examples::
+
+    >>> A = torch.randn(3, 3)
+    >>> B = torch.randn(3, 3)
+    >>> X, info = torch.linalg.solve_ex(A, B)
+    >>> torch.dist(X, torch.linalg.solve(A, B))
+    tensor(0.)
+    >>> info
+    tensor(0, dtype=torch.int32)
+
+.. _LAPACK's getrf:
+    https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
+""")
+
+inv_ex = _add_docstr(_linalg.linalg_inv_ex, r"""
+linalg.inv_ex(A, *, check_errors=False, out=None) -> (Tensor, Tensor)
+
+Computes the inverse of a square matrix if it is invertible.
+
+Returns a namedtuple ``(inverse, info)``. ``inverse`` contains the result of
+inverting :attr:`A` and ``info`` stores the LAPACK error codes.
+
+If :attr:`A` is not an invertible matrix, or if it's a batch of matrices
+and one or more of them is not an invertible matrix,
+then ``info`` stores a positive integer for the corresponding matrix.
+The positive integer indicates the diagonal element of the LU decomposition of
+the input matrix that is exactly zero.
+``info`` filled with zeros indicates that the inversion was successful.
+If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+""" + fr"""
+.. note:: {common_notes["sync_note_ex"]}
+
+.. warning:: {common_notes["experimental_warning"]}
+""" + r"""
+
+.. seealso::
+
+    :func:`torch.linalg.inv` is a NumPy compatible variant that always checks for errors.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+                consisting of square matrices.
+
+Keyword args:
+    check_errors (bool, optional): controls whether to check the content of ``info``. Default: `False`.
+    out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
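+
+For example, a singular input reports a positive ``info`` instead of raising
+(illustrative values)::
+
+    >>> A = torch.zeros(3, 3)  # singular
+    >>> Ainv, info = torch.linalg.inv_ex(A)
+    >>> info  # first diagonal element of the LU decomposition that is zero
+    tensor(1, dtype=torch.int32)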
+ +Examples:: + + >>> A = torch.randn(3, 3) + >>> Ainv, info = torch.linalg.inv_ex(A) + >>> torch.dist(torch.linalg.inv(A), Ainv) + tensor(0.) + >>> info + tensor(0, dtype=torch.int32) + +""") + +det = _add_docstr(_linalg.linalg_det, r""" +linalg.det(A, *, out=None) -> Tensor + +Computes the determinant of a square matrix. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +.. seealso:: + + :func:`torch.linalg.slogdet` computes the sign and natural logarithm of the absolute + value of the determinant of square matrices. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> torch.linalg.det(A) + tensor(0.0934) + + >>> A = torch.randn(3, 2, 2) + >>> torch.linalg.det(A) + tensor([1.1990, 0.4099, 0.7386]) +""") + +slogdet = _add_docstr(_linalg.linalg_slogdet, r""" +linalg.slogdet(A, *, out=None) -> (Tensor, Tensor) + +Computes the sign and natural logarithm of the absolute value of the determinant of a square matrix. + +For complex :attr:`A`, it returns the sign and the natural logarithm of the modulus of the +determinant, that is, a logarithmic polar decomposition of the determinant. + +The determinant can be recovered as `sign * exp(logabsdet)`. +When a matrix has a determinant of zero, it returns `(0, -inf)`. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +.. seealso:: + + :func:`torch.linalg.det` computes the determinant of square matrices. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions. + +Keyword args: + out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`. + +Returns: + A named tuple `(sign, logabsdet)`. + + `sign` will have the same dtype as :attr:`A`. + + `logabsdet` will always be real-valued, even when :attr:`A` is complex. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> A + tensor([[ 0.0032, -0.2239, -1.1219], + [-0.6690, 0.1161, 0.4053], + [-1.6218, -0.9273, -0.0082]]) + >>> torch.linalg.det(A) + tensor(-0.7576) + >>> torch.logdet(A) + tensor(nan) + >>> torch.linalg.slogdet(A) + torch.return_types.linalg_slogdet(sign=tensor(-1.), logabsdet=tensor(-0.2776)) +""") + +eig = _add_docstr(_linalg.linalg_eig, r""" +linalg.eig(A, *, out=None) -> (Tensor, Tensor) + +Computes the eigenvalue decomposition of a square matrix if it exists. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **eigenvalue decomposition** of a square matrix +:math:`A \in \mathbb{K}^{n \times n}` (if it exists) is defined as + +.. math:: + + A = V \operatorname{diag}(\Lambda) V^{-1}\mathrlap{\qquad V \in \mathbb{C}^{n \times n}, \Lambda \in \mathbb{C}^n} + +This decomposition exists if and only if :math:`A` is `diagonalizable`_. +This is the case when all its eigenvalues are different. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +.. note:: The eigenvalues and eigenvectors of a real matrix may be complex. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. 
warning:: This function assumes that :attr:`A` is `diagonalizable`_ (for example, when all the
+    eigenvalues are different). If it is not diagonalizable, the returned
+    eigenvalues will be correct but :math:`A \neq V \operatorname{diag}(\Lambda)V^{-1}`.
+
+.. warning:: The returned eigenvectors are normalized to have norm `1`.
+    Even then, the eigenvectors of a matrix are not unique, nor are they continuous with respect to
+    :attr:`A`. Due to this lack of uniqueness, different hardware and software may compute
+    different eigenvectors.
+
+    This non-uniqueness is caused by the fact that multiplying an eigenvector by
+    :math:`e^{i \phi}, \phi \in \mathbb{R}` produces another set of valid eigenvectors
+    of the matrix. For this reason, the loss function shall not depend on the phase of the
+    eigenvectors, as this quantity is not well-defined.
+    This is checked when computing the gradients of this function. As such,
+    when inputs are on a CUDA device, the computation of the gradients
+    of this function synchronizes that device with the CPU.
+
+
+.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
+    :attr:`A` has distinct eigenvalues.
+    Furthermore, if the distance between any two eigenvalues is close to zero,
+    the gradient will be numerically unstable, as it depends on the eigenvalues
+    :math:`\lambda_i` through the computation of
+    :math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
+
+.. seealso::
+
+    :func:`torch.linalg.eigvals` computes only the eigenvalues.
+    Unlike :func:`torch.linalg.eig`, the gradients of :func:`~eigvals` are always
+    numerically stable.
+
+    :func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
+    for Hermitian and symmetric matrices.
+
+    :func:`torch.linalg.svd` for a function that computes another type of spectral
+    decomposition that works on matrices of any shape.
+
+    :func:`torch.linalg.qr` for another (much faster) decomposition that works on matrices of
+    any shape.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+                consisting of diagonalizable matrices.
+
+Keyword args:
+    out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
+
+Returns:
+    A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`V` above.
+
+    `eigenvalues` and `eigenvectors` will always be complex-valued, even when :attr:`A` is real. The eigenvectors
+    will be given by the columns of `eigenvectors`.
+
+Examples::
+
+    >>> A = torch.randn(2, 2, dtype=torch.complex128)
+    >>> A
+    tensor([[ 0.9828+0.3889j, -0.4617+0.3010j],
+            [ 0.1662-0.7435j, -0.6139+0.0562j]], dtype=torch.complex128)
+    >>> L, V = torch.linalg.eig(A)
+    >>> L
+    tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128)
+    >>> V
+    tensor([[ 0.9218+0.0000j,  0.1882-0.2220j],
+            [-0.0270-0.3867j,  0.9567+0.0000j]], dtype=torch.complex128)
+    >>> torch.dist(V @ torch.diag(L) @ torch.linalg.inv(V), A)
+    tensor(7.7119e-16, dtype=torch.float64)
+
+    >>> A = torch.randn(3, 2, 2, dtype=torch.float64)
+    >>> L, V = torch.linalg.eig(A)
+    >>> torch.dist(V @ torch.diag_embed(L) @ torch.linalg.inv(V), A)
+    tensor(3.2841e-16, dtype=torch.float64)
+
+.. _diagonalizable:
+    https://en.wikipedia.org/wiki/Diagonalizable_matrix#Definition
+""")
+
+eigvals = _add_docstr(_linalg.linalg_eigvals, r"""
+linalg.eigvals(A, *, out=None) -> Tensor
+
+Computes the eigenvalues of a square matrix.
+ +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **eigenvalues** of a square matrix :math:`A \in \mathbb{K}^{n \times n}` are defined +as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by + +.. math:: + + p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{C}} + +where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +.. note:: The eigenvalues of a real matrix may be complex, as the roots of a real polynomial may be complex. + + The eigenvalues of a matrix are always well-defined, even when the matrix is not diagonalizable. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. seealso:: + + :func:`torch.linalg.eig` computes the full eigenvalue decomposition. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Returns: + A complex-valued tensor containing the eigenvalues even when :attr:`A` is real. + +Examples:: + + >>> A = torch.randn(2, 2, dtype=torch.complex128) + >>> L = torch.linalg.eigvals(A) + >>> L + tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128) + + >>> torch.dist(L, torch.linalg.eig(A).eigenvalues) + tensor(2.4576e-07) +""") + +eigh = _add_docstr(_linalg.linalg_eigh, r""" +linalg.eigh(A, UPLO='L', *, out=None) -> (Tensor, Tensor) + +Computes the eigenvalue decomposition of a complex Hermitian or real symmetric matrix. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **eigenvalue decomposition** of a complex Hermitian or real symmetric matrix +:math:`A \in \mathbb{K}^{n \times n}` is defined as + +.. math:: + + A = Q \operatorname{diag}(\Lambda) Q^{\text{H}}\mathrlap{\qquad Q \in \mathbb{K}^{n \times n}, \Lambda \in \mathbb{R}^n} + +where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex, and the transpose when :math:`Q` is real-valued. +:math:`Q` is orthogonal in the real case and unitary in the complex case. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead: + +- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation. +- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used. + +The eigenvalues are returned in ascending order. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. note:: The eigenvalues of real symmetric or complex Hermitian matrices are always real. + +.. warning:: The eigenvectors of a symmetric matrix are not unique, nor are they continuous with + respect to :attr:`A`. Due to this lack of uniqueness, different hardware and + software may compute different eigenvectors. + + This non-uniqueness is caused by the fact that multiplying an eigenvector by + `-1` in the real case or by :math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex + case produces another set of valid eigenvectors of the matrix. 
+    For this reason, the loss function shall not depend on the phase of the eigenvectors, as
+    this quantity is not well-defined.
+    This is checked for complex inputs when computing the gradients of this function. As such,
+    when inputs are complex and are on a CUDA device, the computation of the gradients
+    of this function synchronizes that device with the CPU.
+
+.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
+    :attr:`A` has distinct eigenvalues.
+    Furthermore, if the distance between any two eigenvalues is close to zero,
+    the gradient will be numerically unstable, as it depends on the eigenvalues
+    :math:`\lambda_i` through the computation of
+    :math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
+
+.. warning:: Users may see PyTorch crash when running `eigh` on CUDA devices with CUDA versions before 12.1 update 1
+    with large, ill-conditioned matrices as inputs.
+    Refer to :ref:`Linear Algebra Numerical Stability` for more details.
+    If this is the case, users may (1) tune their matrix inputs to be less ill-conditioned,
+    or (2) use :func:`torch.backends.cuda.preferred_linalg_library` to
+    try other supported backends.
+
+.. seealso::
+
+    :func:`torch.linalg.eigvalsh` computes only the eigenvalues of a Hermitian matrix.
+    Unlike :func:`torch.linalg.eigh`, the gradients of :func:`~eigvalsh` are always
+    numerically stable.
+
+    :func:`torch.linalg.cholesky` for a different decomposition of a Hermitian matrix.
+    The Cholesky decomposition gives less information about the matrix but is much faster
+    to compute than the eigenvalue decomposition.
+
+    :func:`torch.linalg.eig` for a (slower) function that computes the eigenvalue decomposition
+    of a not necessarily Hermitian square matrix.
+
+    :func:`torch.linalg.svd` for a (slower) function that computes the more general SVD
+    decomposition of matrices of any shape.
+
+    :func:`torch.linalg.qr` for another (much faster) decomposition that works on general
+    matrices.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+                consisting of symmetric or Hermitian matrices.
+    UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
+                               of :attr:`A` in the computations. Default: `'L'`.
+
+Keyword args:
+    out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
+
+Returns:
+    A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`Q` above.
+
+    `eigenvalues` will always be real-valued, even when :attr:`A` is complex.
+    It will also be ordered in ascending order.
+
+    `eigenvectors` will have the same dtype as :attr:`A` and will contain the eigenvectors as its columns.
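+
+.. note:: Since only one triangle of :attr:`A` is read, the other triangle may contain
+    arbitrary values. A minimal sketch of this behavior (illustrative)::
+
+        >>> A = torch.tensor([[2., 9.], [1., 2.]])  # upper triangle ignored with UPLO='L'
+        >>> torch.linalg.eigh(A, UPLO='L').eigenvalues  # eigenvalues of [[2., 1.], [1., 2.]]
+        tensor([1., 3.])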
+
+Examples::
+
+    >>> A = torch.randn(2, 2, dtype=torch.complex128)
+    >>> A = A + A.T.conj()  # creates a Hermitian matrix
+    >>> A
+    tensor([[2.9228+0.0000j, 0.2029-0.0862j],
+            [0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
+    >>> L, Q = torch.linalg.eigh(A)
+    >>> L
+    tensor([0.3277, 2.9415], dtype=torch.float64)
+    >>> Q
+    tensor([[-0.0846-0.0000j, -0.9964+0.0000j],
+            [ 0.9170+0.3898j, -0.0779-0.0331j]], dtype=torch.complex128)
+    >>> torch.dist(Q @ torch.diag(L.cdouble()) @ Q.T.conj(), A)
+    tensor(6.1062e-16, dtype=torch.float64)
+
+    >>> A = torch.randn(3, 2, 2, dtype=torch.float64)
+    >>> A = A + A.mT  # creates a batch of symmetric matrices
+    >>> L, Q = torch.linalg.eigh(A)
+    >>> torch.dist(Q @ torch.diag_embed(L) @ Q.mH, A)
+    tensor(1.5423e-15, dtype=torch.float64)
+""")
+
+eigvalsh = _add_docstr(_linalg.linalg_eigvalsh, r"""
+linalg.eigvalsh(A, UPLO='L', *, out=None) -> Tensor
+
+Computes the eigenvalues of a complex Hermitian or real symmetric matrix.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+the **eigenvalues** of a complex Hermitian or real symmetric matrix :math:`A \in \mathbb{K}^{n \times n}`
+are defined as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
+
+.. math::
+
+    p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{R}}
+
+where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
+The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+The eigenvalues are returned in ascending order.
+
+:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
+
+- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
+- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
+
+""" + fr"""
+.. note:: {common_notes["sync_note"]}
+""" + r"""
+
+.. seealso::
+
+    :func:`torch.linalg.eigh` computes the full eigenvalue decomposition.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
+                consisting of symmetric or Hermitian matrices.
+    UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
+                               of :attr:`A` in the computations. Default: `'L'`.
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Returns:
+    A real-valued tensor containing the eigenvalues even when :attr:`A` is complex.
+    The eigenvalues are returned in ascending order.
+
+Examples::
+
+    >>> A = torch.randn(2, 2, dtype=torch.complex128)
+    >>> A = A + A.T.conj()  # creates a Hermitian matrix
+    >>> A
+    tensor([[2.9228+0.0000j, 0.2029-0.0862j],
+            [0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
+    >>> torch.linalg.eigvalsh(A)
+    tensor([0.3277, 2.9415], dtype=torch.float64)
+
+    >>> A = torch.randn(3, 2, 2, dtype=torch.float64)
+    >>> A = A + A.mT  # creates a batch of symmetric matrices
+    >>> torch.linalg.eigvalsh(A)
+    tensor([[ 2.5797,  3.4629],
+            [-4.1605,  1.3780],
+            [-3.1113,  2.7381]], dtype=torch.float64)
+""")
+
+householder_product = _add_docstr(_linalg.linalg_householder_product, r"""
+householder_product(A, tau, *, out=None) -> Tensor
+
+Computes the first `n` columns of a product of Householder matrices.
+
+Let :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, and
+let :math:`V \in \mathbb{K}^{m \times n}` be a matrix with columns :math:`v_i \in \mathbb{K}^m`
+for :math:`i=1,\ldots,n` with :math:`m \geq n`. Denote by :math:`w_i` the vector resulting from
+zeroing out the first :math:`i-1` components of :math:`v_i` and setting to `1` the :math:`i`-th.
+For a vector :math:`\tau \in \mathbb{K}^k` with :math:`k \leq n`, this function computes the
+first :math:`n` columns of the matrix
+
+.. math::
+
+    H_1H_2 ... H_k \qquad\text{with}\qquad H_i = \mathrm{I}_m - \tau_i w_i w_i^{\text{H}}
+
+where :math:`\mathrm{I}_m` is the `m`-dimensional identity matrix and :math:`w^{\text{H}}` is the
+conjugate transpose when :math:`w` is complex, and the transpose when :math:`w` is real-valued.
+The output matrix is the same size as the input matrix :attr:`A`.
+
+See `Representation of Orthogonal or Unitary Matrices`_ for further details.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if the inputs are batches of matrices then
+the output has the same batch dimensions.
+
+.. seealso::
+
+    :func:`torch.geqrf` can be used together with this function to form the `Q` from the
+    :func:`~qr` decomposition.
+
+    :func:`torch.ormqr` is a related function that computes the matrix multiplication
+    of a product of Householder matrices with another matrix.
+    However, that function is not supported by autograd.
+
+.. warning::
+    Gradient computations are only well-defined if :math:`\tau_i \neq \frac{1}{||v_i||^2}`.
+    If this condition is not met, no error will be thrown, but the gradient produced may contain `NaN`.
+
+Args:
+    A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
+    tau (Tensor): tensor of shape `(*, k)` where `*` is zero or more batch dimensions.
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Raises:
+    RuntimeError: if :attr:`A` doesn't satisfy the requirement `m >= n`,
+                  or :attr:`tau` doesn't satisfy the requirement `n >= k`.
+
+Examples::
+
+    >>> A = torch.randn(2, 2)
+    >>> h, tau = torch.geqrf(A)
+    >>> Q = torch.linalg.householder_product(h, tau)
+    >>> torch.dist(Q, torch.linalg.qr(A).Q)
+    tensor(0.)
+
+    >>> h = torch.randn(3, 2, 2, dtype=torch.complex128)
+    >>> tau = torch.randn(3, 1, dtype=torch.complex128)
+    >>> Q = torch.linalg.householder_product(h, tau)
+    >>> Q
+    tensor([[[ 1.8034+0.4184j,  0.2588-1.0174j],
+             [-0.6853+0.7953j,  2.0790+0.5620j]],
+
+            [[ 1.4581+1.6989j, -1.5360+0.1193j],
+             [ 1.3877-0.6691j,  1.3512+1.3024j]],
+
+            [[ 1.4766+0.5783j,  0.0361+0.6587j],
+             [ 0.6396+0.1612j,  1.3693+0.4481j]]], dtype=torch.complex128)
+
+.. _Representation of Orthogonal or Unitary Matrices:
+    https://www.netlib.org/lapack/lug/node128.html
+""")
+
+ldl_factor = _add_docstr(_linalg.linalg_ldl_factor, r"""
+linalg.ldl_factor(A, *, hermitian=False, out=None) -> (Tensor, Tensor)
+
+Computes a compact representation of the LDL factorization of a Hermitian or symmetric (possibly indefinite) matrix.
+
+When :attr:`A` is complex valued it can be Hermitian (:attr:`hermitian`\ `= True`)
+or symmetric (:attr:`hermitian`\ `= False`).
+
+The factorization is of the form :math:`A = L D L^T`.
+If :attr:`hermitian` is `True`, then the transpose operation is the conjugate transpose.
+
+:math:`L` (or :math:`U`) and :math:`D` are stored in compact form in ``LD``.
+They follow the format specified by `LAPACK's sytrf`_ function.
+These tensors may be used in :func:`torch.linalg.ldl_solve` to solve linear systems. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +""" + fr""" +.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.ldl_factor_ex")} +""" + r""" + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of symmetric or Hermitian matrices. + +Keyword args: + hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric. + For real-valued matrices, this switch has no effect. Default: `False`. + out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`. + +Returns: + A named tuple `(LD, pivots)`. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.mT # make symmetric + >>> A + tensor([[7.2079, 4.2414, 1.9428], + [4.2414, 3.4554, 0.3264], + [1.9428, 0.3264, 1.3823]]) + >>> LD, pivots = torch.linalg.ldl_factor(A) + >>> LD + tensor([[ 7.2079, 0.0000, 0.0000], + [ 0.5884, 0.9595, 0.0000], + [ 0.2695, -0.8513, 0.1633]]) + >>> pivots + tensor([1, 2, 3], dtype=torch.int32) + +.. _LAPACK's sytrf: + https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html +""") + +ldl_factor_ex = _add_docstr(_linalg.linalg_ldl_factor_ex, r""" +linalg.ldl_factor_ex(A, *, hermitian=False, check_errors=False, out=None) -> (Tensor, Tensor, Tensor) + +This is a version of :func:`~ldl_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`. +It also returns the :attr:`info` tensor returned by `LAPACK's sytrf`_. +``info`` stores integer error codes from the backend library. +A positive integer indicates the diagonal element of :math:`D` that is zero. +Division by 0 will occur if the result is used for solving a system of linear equations. +``info`` filled with zeros indicates that the factorization was successful. +If ``check_errors=True`` and ``info`` contains positive integers, then a `RuntimeError` is thrown. + +""" + fr""" +.. note:: {common_notes["sync_note_ex"]} + +.. warning:: {common_notes["experimental_warning"]} +""" + r""" + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of symmetric or Hermitian matrices. + +Keyword args: + hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric. + For real-valued matrices, this switch has no effect. Default: `False`. + check_errors (bool, optional): controls whether to check the content of ``info`` and raise + an error if it is non-zero. Default: `False`. + out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`. + +Returns: + A named tuple `(LD, pivots, info)`. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.mT # make symmetric + >>> A + tensor([[7.2079, 4.2414, 1.9428], + [4.2414, 3.4554, 0.3264], + [1.9428, 0.3264, 1.3823]]) + >>> LD, pivots, info = torch.linalg.ldl_factor_ex(A) + >>> LD + tensor([[ 7.2079, 0.0000, 0.0000], + [ 0.5884, 0.9595, 0.0000], + [ 0.2695, -0.8513, 0.1633]]) + >>> pivots + tensor([1, 2, 3], dtype=torch.int32) + >>> info + tensor(0, dtype=torch.int32) + +.. 
_LAPACK's sytrf:
+    https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html
+""")
+
+ldl_solve = _add_docstr(_linalg.linalg_ldl_solve, r"""
+linalg.ldl_solve(LD, pivots, B, *, hermitian=False, out=None) -> Tensor
+
+Computes the solution of a system of linear equations using the LDL factorization.
+
+:attr:`LD` and :attr:`pivots` are the compact representation of the LDL factorization and
+are expected to be computed by :func:`torch.linalg.ldl_factor_ex`.
+The :attr:`hermitian` argument to this function should be the same
+as the corresponding argument in :func:`torch.linalg.ldl_factor_ex`.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`LD` is a batch of matrices then
+the output has the same batch dimensions.
+
+""" + fr"""
+.. warning:: {common_notes["experimental_warning"]}
+""" + r"""
+
+Args:
+    LD (Tensor): the `n \times n` matrix or the batch of such matrices of size
+                 `(*, n, n)` where `*` is one or more batch dimensions.
+    pivots (Tensor): the pivots corresponding to the LDL factorization of :attr:`LD`.
+    B (Tensor): right-hand side tensor of shape `(*, n, k)`.
+
+Keyword args:
+    hermitian (bool, optional): whether to consider the decomposed matrix to be Hermitian or symmetric.
+                                For real-valued matrices, this switch has no effect. Default: `False`.
+    out (tuple, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
+                           Ignored if `None`. Default: `None`.
+
+Examples::
+
+    >>> A = torch.randn(2, 3, 3)
+    >>> A = A @ A.mT  # make symmetric
+    >>> LD, pivots, info = torch.linalg.ldl_factor_ex(A)
+    >>> B = torch.randn(2, 3, 4)
+    >>> X = torch.linalg.ldl_solve(LD, pivots, B)
+    >>> torch.linalg.norm(A @ X - B)
+    tensor(0.0001)
+""")
+
+lstsq = _add_docstr(_linalg.linalg_lstsq, r"""
+torch.linalg.lstsq(A, B, rcond=None, *, driver=None) -> (Tensor, Tensor, Tensor, Tensor)
+
+Computes a solution to the least squares problem of a system of linear equations.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+the **least squares problem** for a linear system :math:`AX = B` with
+:math:`A \in \mathbb{K}^{m \times n}, B \in \mathbb{K}^{m \times k}` is defined as
+
+.. math::
+
+    \min_{X \in \mathbb{K}^{n \times k}} \|AX - B\|_F
+
+where :math:`\|-\|_F` denotes the Frobenius norm.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if the inputs are batches of matrices then
+the output has the same batch dimensions.
+
+:attr:`driver` chooses the backend function that will be used.
+For CPU inputs the valid values are `'gels'`, `'gelsy'`, `'gelsd'`, `'gelss'`.
+To choose the best driver on CPU consider:
+
+- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss.
+
+  - For a general matrix: `'gelsy'` (QR with pivoting) (default)
+  - If :attr:`A` is full-rank: `'gels'` (QR)
+
+- If :attr:`A` is not well-conditioned.
+
+  - `'gelsd'` (tridiagonal reduction and SVD)
+  - But if you run into memory issues: `'gelss'` (full SVD).
+
+For CUDA input, the only valid driver is `'gels'`, which assumes that :attr:`A` is full-rank.
+
+See also the `full description of these drivers`_.
+
+:attr:`rcond` is used to determine the effective rank of the matrices in :attr:`A`
+when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`).
+In this case, if :math:`\sigma_i` are the singular values of `A` in decreasing order, +:math:`\sigma_i` will be rounded down to zero if :math:`\sigma_i \leq \text{rcond} \cdot \sigma_1`. +If :attr:`rcond`\ `= None` (default), :attr:`rcond` is set to the machine precision of the dtype of :attr:`A` times `max(m, n)`. + +This function returns the solution to the problem and some extra information in a named tuple of +four tensors `(solution, residuals, rank, singular_values)`. For inputs :attr:`A`, :attr:`B` +of shape `(*, m, n)`, `(*, m, k)` respectively, it contains + +- `solution`: the least squares solution. It has shape `(*, n, k)`. +- `residuals`: the squared residuals of the solutions, that is, :math:`\|AX - B\|_F^2`. + It has shape equal to the batch dimensions of :attr:`A`. + It is computed when `m > n` and every matrix in :attr:`A` is full-rank, + otherwise, it is an empty tensor. + If :attr:`A` is a batch of matrices and any matrix in the batch is not full rank, + then an empty tensor is returned. This behavior may change in a future PyTorch release. +- `rank`: tensor of ranks of the matrices in :attr:`A`. + It has shape equal to the batch dimensions of :attr:`A`. + It is computed when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`), + otherwise it is an empty tensor. +- `singular_values`: tensor of singular values of the matrices in :attr:`A`. + It has shape `(*, min(m, n))`. + It is computed when :attr:`driver` is one of (`'gelsd'`, `'gelss'`), + otherwise it is an empty tensor. + +.. note:: + This function computes `X = \ `:attr:`A`\ `.pinverse() @ \ `:attr:`B` in a faster and + more numerically stable way than performing the computations separately. + +.. warning:: + The default value of :attr:`rcond` may change in a future PyTorch release. + It is therefore recommended to use a fixed value to avoid potential + breaking changes. + +Args: + A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions. + rcond (float, optional): used to determine the effective rank of :attr:`A`. + If :attr:`rcond`\ `= None`, :attr:`rcond` is set to the machine + precision of the dtype of :attr:`A` times `max(m, n)`. Default: `None`. + +Keyword args: + driver (str, optional): name of the LAPACK/MAGMA method to be used. + If `None`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs. + Default: `None`. + +Returns: + A named tuple `(solution, residuals, rank, singular_values)`. + +Examples:: + + >>> A = torch.randn(1,3,3) + >>> A + tensor([[[-1.0838, 0.0225, 0.2275], + [ 0.2438, 0.3844, 0.5499], + [ 0.1175, -0.9102, 2.0870]]]) + >>> B = torch.randn(2,3,3) + >>> B + tensor([[[-0.6772, 0.7758, 0.5109], + [-1.4382, 1.3769, 1.1818], + [-0.3450, 0.0806, 0.3967]], + [[-1.3994, -0.1521, -0.1473], + [ 1.9194, 1.0458, 0.6705], + [-1.1802, -0.9796, 1.4086]]]) + >>> X = torch.linalg.lstsq(A, B).solution # A is broadcasted to shape (2, 3, 3) + >>> torch.dist(X, torch.linalg.pinv(A) @ B) + tensor(1.5152e-06) + + >>> S = torch.linalg.lstsq(A, B, driver='gelsd').singular_values + >>> torch.dist(S, torch.linalg.svdvals(A)) + tensor(2.3842e-07) + + >>> A[:, 0].zero_() # Decrease the rank of A + >>> rank = torch.linalg.lstsq(A, B).rank + >>> rank + tensor([2]) + +.. _condition number: + https://pytorch.org/docs/master/linalg.html#torch.linalg.cond +.. 
_full description of these drivers:
+    https://www.netlib.org/lapack/lug/node27.html
+""")
+
+matrix_power = _add_docstr(_linalg.linalg_matrix_power, r"""
+matrix_power(A, n, *, out=None) -> Tensor
+
+Computes the `n`-th power of a square matrix for an integer `n`.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+If :attr:`n`\ `= 0`, it returns the identity matrix (or batch) of the same shape
+as :attr:`A`. If :attr:`n` is negative, it returns the inverse of each matrix
+(if invertible) raised to the power of `abs(n)`.
+
+.. note::
+    Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
+    a negative power as, if :attr:`n`\ `> 0`::
+
+        torch.linalg.solve(matrix_power(A, n), B) == matrix_power(A, -n) @ B
+
+    It is always preferred to use :func:`~solve` when possible, as it is faster and more
+    numerically stable than computing :math:`A^{-n}` explicitly.
+
+.. seealso::
+
+    :func:`torch.linalg.solve` computes :attr:`A`\ `.inverse() @ \ `:attr:`B` with a
+    numerically stable algorithm.
+
+Args:
+    A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
+    n (int): the exponent.
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Raises:
+    RuntimeError: if :attr:`n`\ `< 0` and the matrix :attr:`A` or any matrix in the
+                  batch of matrices :attr:`A` is not invertible.
+
+Examples::
+
+    >>> A = torch.randn(3, 3)
+    >>> torch.linalg.matrix_power(A, 0)
+    tensor([[1., 0., 0.],
+            [0., 1., 0.],
+            [0., 0., 1.]])
+    >>> torch.linalg.matrix_power(A, 3)
+    tensor([[ 1.0756,  0.4980,  0.0100],
+            [-1.6617,  1.4994, -1.9980],
+            [-0.4509,  0.2731,  0.8001]])
+    >>> torch.linalg.matrix_power(A.expand(2, -1, -1), -2)
+    tensor([[[ 0.2640,  0.4571, -0.5511],
+             [-1.0163,  0.3491, -1.5292],
+             [-0.4899,  0.0822,  0.2773]],
+            [[ 0.2640,  0.4571, -0.5511],
+             [-1.0163,  0.3491, -1.5292],
+             [-0.4899,  0.0822,  0.2773]]])
+""")
+
+matrix_rank = _add_docstr(_linalg.linalg_matrix_rank, r"""
+linalg.matrix_rank(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor
+
+Computes the numerical rank of a matrix.
+
+The matrix rank is computed as the number of singular values
+(or eigenvalues in absolute value when :attr:`hermitian`\ `= True`)
+that are greater than the :math:`\max(\text{atol}, \sigma_1 * \text{rtol})` threshold,
+where :math:`\sigma_1` is the largest singular value (or eigenvalue).
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
+symmetric if real, but this is not checked internally. Instead, just the lower
+triangular part of the matrix is used in the computations.
+
+If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
+the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon`
+and :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`).
+If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then
+:attr:`rtol` is set to zero.
+
+If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that
+of the singular values of :attr:`A` as returned by :func:`torch.linalg.svdvals`.
+
+.. note::
+    This function has a NumPy compatible variant `linalg.matrix_rank(A, tol, hermitian=False)`.
+    However, use of the positional argument :attr:`tol` is deprecated in favor of :attr:`atol` and :attr:`rtol`.
+
+""" + fr"""
+.. note:: The matrix rank is computed using a singular value decomposition
+    :func:`torch.linalg.svdvals` if :attr:`hermitian`\ `= False` (default) and the eigenvalue
+    decomposition :func:`torch.linalg.eigvalsh` when :attr:`hermitian`\ `= True`.
+    {common_notes["sync_note"]}
+""" + r"""
+
+Args:
+    A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
+    tol (float, Tensor, optional): [NumPy Compat] Alias for :attr:`atol`. Default: `None`.
+
+Keyword args:
+    atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero.
+                                    Default: `None`.
+    rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`.
+                                    Default: `None`.
+    hermitian (bool): indicates whether :attr:`A` is Hermitian if complex
+                      or symmetric if real. Default: `False`.
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Examples::
+
+    >>> A = torch.eye(10)
+    >>> torch.linalg.matrix_rank(A)
+    tensor(10)
+    >>> B = torch.eye(10)
+    >>> B[0, 0] = 0
+    >>> torch.linalg.matrix_rank(B)
+    tensor(9)
+
+    >>> A = torch.randn(4, 3, 2)
+    >>> torch.linalg.matrix_rank(A)
+    tensor([2, 2, 2, 2])
+
+    >>> A = torch.randn(2, 4, 2, 3)
+    >>> torch.linalg.matrix_rank(A)
+    tensor([[2, 2, 2, 2],
+            [2, 2, 2, 2]])
+
+    >>> A = torch.randn(2, 4, 3, 3, dtype=torch.complex64)
+    >>> torch.linalg.matrix_rank(A)
+    tensor([[3, 3, 3, 3],
+            [3, 3, 3, 3]])
+    >>> torch.linalg.matrix_rank(A, hermitian=True)
+    tensor([[3, 3, 3, 3],
+            [3, 3, 3, 3]])
+    >>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0)
+    tensor([[3, 2, 2, 2],
+            [1, 2, 1, 2]])
+    >>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0, hermitian=True)
+    tensor([[2, 2, 2, 1],
+            [1, 2, 2, 2]])
+""")
+
+norm = _add_docstr(_linalg.linalg_norm, r"""
+linalg.norm(A, ord=None, dim=None, keepdim=False, *, out=None, dtype=None) -> Tensor
+
+Computes a vector or matrix norm.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+
+Whether this function computes a vector or matrix norm is determined as follows:
+
+- If :attr:`dim` is an `int`, the vector norm will be computed.
+- If :attr:`dim` is a `2`-`tuple`, the matrix norm will be computed.
+- If :attr:`dim`\ `= None` and :attr:`ord`\ `= None`,
+  :attr:`A` will be flattened to 1D and the `2`-norm of the resulting vector will be computed.
+- If :attr:`dim`\ `= None` and :attr:`ord` `!= None`, :attr:`A` must be 1D or 2D.
+
+:attr:`ord` defines the norm that is computed. 
The following norms are supported: + +====================== ========================= ======================================================== +:attr:`ord` norm for matrices norm for vectors +====================== ========================= ======================================================== +`None` (default) Frobenius norm `2`-norm (see below) +`'fro'` Frobenius norm -- not supported -- +`'nuc'` nuclear norm -- not supported -- +`inf` `max(sum(abs(x), dim=1))` `max(abs(x))` +`-inf` `min(sum(abs(x), dim=1))` `min(abs(x))` +`0` -- not supported -- `sum(x != 0)` +`1` `max(sum(abs(x), dim=0))` as below +`-1` `min(sum(abs(x), dim=0))` as below +`2` largest singular value as below +`-2` smallest singular value as below +other `int` or `float` -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}` +====================== ========================= ======================================================== + +where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object. + +.. seealso:: + + :func:`torch.linalg.vector_norm` computes a vector norm. + + :func:`torch.linalg.matrix_norm` computes a matrix norm. + + The above functions are often clearer and more flexible than using :func:`torch.linalg.norm`. + For example, `torch.linalg.norm(A, ord=1, dim=(0, 1))` always + computes a matrix norm, but with `torch.linalg.vector_norm(A, ord=1, dim=(0, 1))` it is possible + to compute a vector norm over the two dimensions. + +Args: + A (Tensor): tensor of shape `(*, n)` or `(*, m, n)` where `*` is zero or more batch dimensions + ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `None` + dim (int, Tuple[int], optional): dimensions over which to compute + the vector or matrix norm. See above for the behavior when :attr:`dim`\ `= None`. + Default: `None` + keepdim (bool, optional): If set to `True`, the reduced dimensions are retained + in the result as dimensions with size one. Default: `False` + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to + :attr:`dtype` before performing the operation, and the returned tensor's type + will be :attr:`dtype`. Default: `None` + +Returns: + A real-valued tensor, even when :attr:`A` is complex. + +Examples:: + + >>> from torch import linalg as LA + >>> a = torch.arange(9, dtype=torch.float) - 4 + >>> a + tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.]) + >>> B = a.reshape((3, 3)) + >>> B + tensor([[-4., -3., -2.], + [-1., 0., 1.], + [ 2., 3., 4.]]) + + >>> LA.norm(a) + tensor(7.7460) + >>> LA.norm(B) + tensor(7.7460) + >>> LA.norm(B, 'fro') + tensor(7.7460) + >>> LA.norm(a, float('inf')) + tensor(4.) + >>> LA.norm(B, float('inf')) + tensor(9.) + >>> LA.norm(a, -float('inf')) + tensor(0.) + >>> LA.norm(B, -float('inf')) + tensor(2.) + + >>> LA.norm(a, 1) + tensor(20.) + >>> LA.norm(B, 1) + tensor(7.) + >>> LA.norm(a, -1) + tensor(0.) + >>> LA.norm(B, -1) + tensor(6.) + >>> LA.norm(a, 2) + tensor(7.7460) + >>> LA.norm(B, 2) + tensor(7.3485) + + >>> LA.norm(a, -2) + tensor(0.) + >>> LA.norm(B.double(), -2) + tensor(1.8570e-16, dtype=torch.float64) + >>> LA.norm(a, 3) + tensor(5.8480) + >>> LA.norm(a, -3) + tensor(0.) + +Using the :attr:`dim` argument to compute vector norms:: + + >>> c = torch.tensor([[1., 2., 3.], + ... 
[-1, 1, 4]])
+    >>> LA.norm(c, dim=0)
+    tensor([1.4142, 2.2361, 5.0000])
+    >>> LA.norm(c, dim=1)
+    tensor([3.7417, 4.2426])
+    >>> LA.norm(c, ord=1, dim=1)
+    tensor([6., 6.])
+
+Using the :attr:`dim` argument to compute matrix norms::
+
+    >>> A = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
+    >>> LA.norm(A, dim=(1,2))
+    tensor([ 3.7417, 11.2250])
+    >>> LA.norm(A[0, :, :]), LA.norm(A[1, :, :])
+    (tensor(3.7417), tensor(11.2250))
+""")
+
+vector_norm = _add_docstr(_linalg.linalg_vector_norm, r"""
+linalg.vector_norm(x, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
+
+Computes a vector norm.
+
+If :attr:`x` is complex valued, it computes the norm of :attr:`x`\ `.abs()`
+
+Supports input of float, double, cfloat and cdouble dtypes.
+
+This function does not necessarily treat multidimensional :attr:`x` as a batch of
+vectors, instead:
+
+- If :attr:`dim`\ `= None`, :attr:`x` will be flattened before the norm is computed.
+- If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
+  and the other dimensions will be treated as batch dimensions.
+
+This behavior is for consistency with :func:`torch.linalg.norm`.
+
+:attr:`ord` defines the vector norm that is computed. The following norms are supported:
+
+====================== ===============================
+:attr:`ord`            vector norm
+====================== ===============================
+`2` (default)          `2`-norm (see below)
+`inf`                  `max(abs(x))`
+`-inf`                 `min(abs(x))`
+`0`                    `sum(x != 0)`
+other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`
+====================== ===============================
+
+where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
+
+:attr:`dtype` may be used to perform the computation in a more precise dtype.
+It is semantically equivalent to calling ``linalg.vector_norm(x.to(dtype))``
+but it is faster in some cases.
+
+.. seealso::
+
+    :func:`torch.linalg.matrix_norm` computes a matrix norm.
+
+Args:
+    x (Tensor): tensor, flattened by default, but this behavior can be
+        controlled using :attr:`dim`.
+    ord (int, float, inf, -inf, optional): order of norm. Default: `2`
+    dim (int, Tuple[int], optional): dimensions over which to compute
+        the norm. See above for the behavior when :attr:`dim`\ `= None`.
+        Default: `None`
+    keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
+        in the result as dimensions with size one. Default: `False`
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+    dtype (:class:`torch.dtype`, optional): type used to perform the accumulation and the return.
+        If specified, :attr:`x` is cast to :attr:`dtype` before performing the operation,
+        and the returned tensor's type will be :attr:`dtype` if real and of its real counterpart if complex.
+        :attr:`dtype` may be complex if :attr:`x` is complex, otherwise it must be real.
+        :attr:`x` should be convertible without narrowing to :attr:`dtype`. Default: None
+
+Returns:
+    A real-valued tensor, even when :attr:`x` is complex.
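+
+For example, :attr:`dtype` can be used to accumulate a half-precision input in
+single precision (illustrative)::
+
+    >>> x = torch.ones(3, dtype=torch.float16)
+    >>> torch.linalg.vector_norm(x, dtype=torch.float32)  # sqrt(3), returned in float32
+    tensor(1.7321)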
+
+Examples::
+
+    >>> from torch import linalg as LA
+    >>> a = torch.arange(9, dtype=torch.float) - 4
+    >>> a
+    tensor([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
+    >>> B = a.reshape((3, 3))
+    >>> B
+    tensor([[-4., -3., -2.],
+            [-1.,  0.,  1.],
+            [ 2.,  3.,  4.]])
+    >>> LA.vector_norm(a, ord=3.5)
+    tensor(5.4345)
+    >>> LA.vector_norm(B, ord=3.5)
+    tensor(5.4345)
+""")
+
+matrix_norm = _add_docstr(_linalg.linalg_matrix_norm, r"""
+linalg.matrix_norm(A, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor
+
+Computes a matrix norm.
+
+If :attr:`A` is complex valued, it computes the norm of :attr:`A`\ `.abs()`
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices: the norm will be computed over the
+dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will
+be treated as batch dimensions. The output will have the same batch dimensions.
+
+:attr:`ord` defines the matrix norm that is computed. The following norms are supported:
+
+====================== ========================================================
+:attr:`ord`            matrix norm
+====================== ========================================================
+`'fro'` (default)      Frobenius norm
+`'nuc'`                nuclear norm
+`inf`                  `max(sum(abs(x), dim=1))`
+`-inf`                 `min(sum(abs(x), dim=1))`
+`1`                    `max(sum(abs(x), dim=0))`
+`-1`                   `min(sum(abs(x), dim=0))`
+`2`                    largest singular value
+`-2`                   smallest singular value
+====================== ========================================================
+
+where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
+
+Args:
+    A (Tensor): tensor with two or more dimensions. By default its
+        shape is interpreted as `(*, m, n)` where `*` is zero or more
+        batch dimensions, but this behavior can be controlled using :attr:`dim`.
+    ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
+    dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`
+    keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
+        in the result as dimensions with size one. Default: `False`
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+    dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
+        :attr:`dtype` before performing the operation, and the returned tensor's type
+        will be :attr:`dtype`. Default: `None`
+
+Returns:
+    A real-valued tensor, even when :attr:`A` is complex.
+
+Examples::
+
+    >>> from torch import linalg as LA
+    >>> A = torch.arange(9, dtype=torch.float).reshape(3, 3)
+    >>> A
+    tensor([[0., 1., 2.],
+            [3., 4., 5.],
+            [6., 7., 8.]])
+    >>> LA.matrix_norm(A)
+    tensor(14.2829)
+    >>> LA.matrix_norm(A, ord=-1)
+    tensor(9.)
+    >>> B = A.expand(2, -1, -1)
+    >>> B
+    tensor([[[0., 1., 2.],
+             [3., 4., 5.],
+             [6., 7., 8.]],
+
+            [[0., 1., 2.],
+             [3., 4., 5.],
+             [6., 7., 8.]]])
+    >>> LA.matrix_norm(B)
+    tensor([14.2829, 14.2829])
+    >>> LA.matrix_norm(B, dim=(0, 2))
+    tensor([ 3.1623, 10.0000, 17.2627])
+""")
+
+matmul = _add_docstr(_linalg.linalg_matmul, r"""
+linalg.matmul(input, other, *, out=None) -> Tensor
+
+Alias for :func:`torch.matmul`
+""")
+
+diagonal = _add_docstr(_linalg.linalg_diagonal, r"""
+linalg.diagonal(A, *, offset=0, dim1=-2, dim2=-1) -> Tensor
+
+Alias for :func:`torch.diagonal` with defaults :attr:`dim1`\ `= -2`, :attr:`dim2`\ `= -1`.
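+
+For a 2D input, the defaults select the main diagonal (illustrative)::
+
+    >>> A = torch.arange(9).reshape(3, 3)
+    >>> torch.linalg.diagonal(A)
+    tensor([0, 4, 8])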
+""") + +multi_dot = _add_docstr(_linalg.linalg_multi_dot, r""" +linalg.multi_dot(tensors, *, out=None) + +Efficiently multiplies two or more matrices by reordering the multiplications so that +the fewest arithmetic operations are performed. + +Supports inputs of float, double, cfloat and cdouble dtypes. +This function does not support batched inputs. + +Every tensor in :attr:`tensors` must be 2D, except for the first and last which +may be 1D. If the first tensor is a 1D vector of shape `(n,)` it is treated as a row vector +of shape `(1, n)`, similarly if the last tensor is a 1D vector of shape `(n,)` it is treated +as a column vector of shape `(n, 1)`. + +If the first and last tensors are matrices, the output will be a matrix. +However, if either is a 1D vector, then the output will be a 1D vector. + +Differences with `numpy.linalg.multi_dot`: + +- Unlike `numpy.linalg.multi_dot`, the first and last tensors must either be 1D or 2D + whereas NumPy allows them to be nD + +.. warning:: This function does not broadcast. + +.. note:: This function is implemented by chaining :func:`torch.mm` calls after + computing the optimal matrix multiplication order. + +.. note:: The cost of multiplying two matrices with shapes `(a, b)` and `(b, c)` is + `a * b * c`. Given matrices `A`, `B`, `C` with shapes `(10, 100)`, + `(100, 5)`, `(5, 50)` respectively, we can calculate the cost of different + multiplication orders as follows: + + .. math:: + + \begin{align*} + \operatorname{cost}((AB)C) &= 10 \times 100 \times 5 + 10 \times 5 \times 50 = 7500 \\ + \operatorname{cost}(A(BC)) &= 10 \times 100 \times 50 + 100 \times 5 \times 50 = 75000 + \end{align*} + + In this case, multiplying `A` and `B` first followed by `C` is 10 times faster. + +Args: + tensors (Sequence[Tensor]): two or more tensors to multiply. The first and last + tensors may be 1D or 2D. Every other tensor must be 2D. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Examples:: + + >>> from torch.linalg import multi_dot + + >>> multi_dot([torch.tensor([1, 2]), torch.tensor([2, 3])]) + tensor(8) + >>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([2, 3])]) + tensor([8]) + >>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([[2], [3]])]) + tensor([[8]]) + + >>> A = torch.arange(2 * 3).view(2, 3) + >>> B = torch.arange(3 * 2).view(3, 2) + >>> C = torch.arange(2 * 2).view(2, 2) + >>> multi_dot((A, B, C)) + tensor([[ 26, 49], + [ 80, 148]]) +""") + +svd = _add_docstr(_linalg.linalg_svd, r""" +linalg.svd(A, full_matrices=True, *, driver=None, out=None) -> (Tensor, Tensor, Tensor) + +Computes the singular value decomposition (SVD) of a matrix. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **full SVD** of a matrix +:math:`A \in \mathbb{K}^{m \times n}`, if `k = min(m,n)`, is defined as + +.. math:: + + A = U \operatorname{diag}(S) V^{\text{H}} + \mathrlap{\qquad U \in \mathbb{K}^{m \times m}, S \in \mathbb{R}^k, V \in \mathbb{K}^{n \times n}} + +where :math:`\operatorname{diag}(S) \in \mathbb{K}^{m \times n}`, +:math:`V^{\text{H}}` is the conjugate transpose when :math:`V` is complex, and the transpose when :math:`V` is real-valued. +The matrices :math:`U`, :math:`V` (and thus :math:`V^{\text{H}}`) are orthogonal in the real case, and unitary in the complex case. + +When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**: + +.. 
math::
+
+    A = U \operatorname{diag}(S) V^{\text{H}}
+    \mathrlap{\qquad U \in \mathbb{K}^{m \times k}, S \in \mathbb{R}^k, V \in \mathbb{K}^{k \times n}}
+
+where :math:`\operatorname{diag}(S) \in \mathbb{K}^{k \times k}`.
+In this case, :math:`U` and :math:`V` also have orthonormal columns.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+The returned decomposition is a named tuple `(U, S, Vh)`
+which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
+
+The singular values are returned in descending order.
+
+The parameter :attr:`full_matrices` chooses between the full (default) and reduced SVD.
+
+The :attr:`driver` kwarg may be used in CUDA with a cuSOLVER backend to choose the algorithm used to compute the SVD.
+The choice of a driver is a trade-off between accuracy and speed.
+
+- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss:
+
+  - For a general matrix: `'gesvdj'` (Jacobi method)
+  - If :attr:`A` is tall or wide (`m >> n` or `m << n`): `'gesvda'` (Approximate method)
+
+- If :attr:`A` is not well-conditioned or precision is relevant: `'gesvd'` (QR based)
+
+By default (:attr:`driver`\ `= None`), we call `'gesvdj'` and, if it fails, we fall back to `'gesvd'`.
+
+Differences with `numpy.linalg.svd`:
+
+- Unlike `numpy.linalg.svd`, this function always returns a tuple of three tensors
+  and it does not support the `compute_uv` argument.
+  Please use :func:`torch.linalg.svdvals`, which computes only the singular values,
+  instead of `compute_uv=False`.
+
+.. note:: When :attr:`full_matrices`\ `= True`, the gradients with respect to `U[..., :, min(m, n):]`
+          and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
+          of the corresponding subspaces.
+
+.. warning:: The returned tensors `U` and `V` are not unique, nor are they continuous with
+             respect to :attr:`A`.
+             Due to this lack of uniqueness, different hardware and software may compute
+             different singular vectors.
+
+             This non-uniqueness is caused by the fact that multiplying any pair of singular
+             vectors :math:`u_k, v_k` by `-1` in the real case or by
+             :math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex case produces another two
+             valid singular vectors of the matrix.
+             For this reason, the loss function shall not depend on this :math:`e^{i \phi}` quantity,
+             as it is not well-defined.
+             This is checked for complex inputs when computing the gradients of this function. As such,
+             when inputs are complex and are on a CUDA device, the computation of the gradients
+             of this function synchronizes that device with the CPU.
+
+.. warning:: Gradients computed using `U` or `Vh` will only be finite when
+             :attr:`A` does not have repeated singular values. If :attr:`A` is rectangular,
+             additionally, zero must also not be one of its singular values.
+             Furthermore, if the distance between any two singular values is close to zero,
+             the gradient will be numerically unstable, as it depends on the singular values
+             :math:`\sigma_i` through the computation of
+             :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`.
+             In the rectangular case, the gradient will also be numerically unstable when
+             :attr:`A` has small singular values, as it also depends on the computation of
+             :math:`\frac{1}{\sigma_i}`.
+
+.. seealso::
+
+        :func:`torch.linalg.svdvals` computes only the singular values.
+ Unlike :func:`torch.linalg.svd`, the gradients of :func:`~svdvals` are always + numerically stable. + + :func:`torch.linalg.eig` for a function that computes another type of spectral + decomposition of a matrix. The eigendecomposition works just on square matrices. + + :func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition + for Hermitian and symmetric matrices. + + :func:`torch.linalg.qr` for another (much faster) decomposition that works on general + matrices. + +Args: + A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + full_matrices (bool, optional): controls whether to compute the full or reduced + SVD, and consequently, + the shape of the returned tensors + `U` and `Vh`. Default: `True`. + +Keyword args: + driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs. + Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`. + Default: `None`. + out (tuple, optional): output tuple of three tensors. Ignored if `None`. + +Returns: + A named tuple `(U, S, Vh)` which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above. + + `S` will always be real-valued, even when :attr:`A` is complex. + It will also be ordered in descending order. + + `U` and `Vh` will have the same dtype as :attr:`A`. The left / right singular vectors will be given by + the columns of `U` and the rows of `Vh` respectively. + +Examples:: + + >>> A = torch.randn(5, 3) + >>> U, S, Vh = torch.linalg.svd(A, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + (torch.Size([5, 3]), torch.Size([3]), torch.Size([3, 3])) + >>> torch.dist(A, U @ torch.diag(S) @ Vh) + tensor(1.0486e-06) + + >>> U, S, Vh = torch.linalg.svd(A) + >>> U.shape, S.shape, Vh.shape + (torch.Size([5, 5]), torch.Size([3]), torch.Size([3, 3])) + >>> torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh) + tensor(1.0486e-06) + + >>> A = torch.randn(7, 5, 3) + >>> U, S, Vh = torch.linalg.svd(A, full_matrices=False) + >>> torch.dist(A, U @ torch.diag_embed(S) @ Vh) + tensor(3.0957e-06) + +.. _condition number: + https://pytorch.org/docs/master/linalg.html#torch.linalg.cond +.. _the resulting vectors will span the same subspace: + https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD +""") + +svdvals = _add_docstr(_linalg.linalg_svdvals, r""" +linalg.svdvals(A, *, driver=None, out=None) -> Tensor + +Computes the singular values of a matrix. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +The singular values are returned in descending order. + +.. note:: This function is equivalent to NumPy's `linalg.svd(A, compute_uv=False)`. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. seealso:: + + :func:`torch.linalg.svd` computes the full singular value decomposition. + +Args: + A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + +Keyword args: + driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs. + Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`. + Check :func:`torch.linalg.svd` for details. + Default: `None`. + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Returns: + A real-valued tensor, even when :attr:`A` is complex. 
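+
+.. note:: A minimal sketch with an exact result: for a diagonal matrix the
+          singular values are the absolute values of the diagonal entries in
+          descending order, so ``torch.linalg.svdvals(torch.diag(torch.tensor([-3., 2.])))``
+          returns ``tensor([3., 2.])``.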
+
+Examples::
+
+    >>> A = torch.randn(5, 3)
+    >>> S = torch.linalg.svdvals(A)
+    >>> S
+    tensor([2.5139, 2.1087, 1.1066])
+
+    >>> torch.dist(S, torch.linalg.svd(A, full_matrices=False).S)
+    tensor(2.4576e-07)
+""")
+
+cond = _add_docstr(_linalg.linalg_cond, r"""
+linalg.cond(A, p=None, *, out=None) -> Tensor
+
+Computes the condition number of a matrix with respect to a matrix norm.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+the **condition number** :math:`\kappa` of a matrix
+:math:`A \in \mathbb{K}^{n \times n}` is defined as
+
+.. math::
+
+    \kappa(A) = \|A\|_p\|A^{-1}\|_p
+
+The condition number of :attr:`A` measures the numerical stability of the linear system `AX = B`
+with respect to a matrix norm.
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+:attr:`p` defines the matrix norm that is computed. The following norms are supported:
+
+=========    =================================
+:attr:`p`    matrix norm
+=========    =================================
+`None`       `2`-norm (largest singular value)
+`'fro'`      Frobenius norm
+`'nuc'`      nuclear norm
+`inf`        `max(sum(abs(x), dim=1))`
+`-inf`       `min(sum(abs(x), dim=1))`
+`1`          `max(sum(abs(x), dim=0))`
+`-1`         `min(sum(abs(x), dim=0))`
+`2`          largest singular value
+`-2`         smallest singular value
+=========    =================================
+
+where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
+
+When :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
+:func:`torch.linalg.norm` and :func:`torch.linalg.inv`.
+As such, in this case, the matrix (or every matrix in the batch) :attr:`A` has to be square
+and invertible.
+
+When :attr:`p` is one of `(2, -2)`, the condition number can be computed in terms of the singular values
+:math:`\sigma_1 \geq \ldots \geq \sigma_n`
+
+.. math::
+
+    \kappa_2(A) = \frac{\sigma_1}{\sigma_n}\qquad \kappa_{-2}(A) = \frac{\sigma_n}{\sigma_1}
+
+In these cases, it is computed using :func:`torch.linalg.svdvals`. For these norms, the matrix
+(or every matrix in the batch) :attr:`A` may have any shape.
+
+.. note:: When inputs are on a CUDA device, this function synchronizes that device with the CPU
+          if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
+
+.. seealso::
+
+        :func:`torch.linalg.solve` for a function that solves linear systems of square matrices.
+
+        :func:`torch.linalg.lstsq` for a function that solves linear systems of general matrices.
+
+Args:
+    A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
+                    for :attr:`p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
+                    is invertible for :attr:`p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
+    p (int, inf, -inf, 'fro', 'nuc', optional):
+        the type of the matrix norm to use in the computations (see above). Default: `None`
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Returns:
+    A real-valued tensor, even when :attr:`A` is complex.
+
+Raises:
+    RuntimeError:
+        if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`
+        and the :attr:`A` matrix or any matrix in the batch :attr:`A` is not square
+        or invertible.
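+
+.. note:: A quick sanity check (exact by construction): the identity matrix is
+          perfectly conditioned, so ``torch.linalg.cond(torch.eye(2))`` returns
+          ``tensor(1.)``.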
+ +Examples:: + + >>> A = torch.randn(3, 4, 4, dtype=torch.complex64) + >>> torch.linalg.cond(A) + >>> A = torch.tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> torch.linalg.cond(A) + tensor([1.4142]) + >>> torch.linalg.cond(A, 'fro') + tensor(3.1623) + >>> torch.linalg.cond(A, 'nuc') + tensor(9.2426) + >>> torch.linalg.cond(A, float('inf')) + tensor(2.) + >>> torch.linalg.cond(A, float('-inf')) + tensor(1.) + >>> torch.linalg.cond(A, 1) + tensor(2.) + >>> torch.linalg.cond(A, -1) + tensor(1.) + >>> torch.linalg.cond(A, 2) + tensor([1.4142]) + >>> torch.linalg.cond(A, -2) + tensor([0.7071]) + + >>> A = torch.randn(2, 3, 3) + >>> torch.linalg.cond(A) + tensor([[9.5917], + [3.2538]]) + >>> A = torch.randn(2, 3, 3, dtype=torch.complex64) + >>> torch.linalg.cond(A) + tensor([[4.6245], + [4.5671]]) +""") + +pinv = _add_docstr(_linalg.linalg_pinv, r""" +linalg.pinv(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor + +Computes the pseudoinverse (Moore-Penrose inverse) of a matrix. + +The pseudoinverse may be `defined algebraically`_ +but it is more computationally convenient to understand it `through the SVD`_ + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or +symmetric if real, but this is not checked internally. Instead, just the lower +triangular part of the matrix is used in the computations. + +The singular values (or the norm of the eigenvalues when :attr:`hermitian`\ `= True`) +that are below :math:`\max(\text{atol}, \sigma_1 \cdot \text{rtol})` threshold are +treated as zero and discarded in the computation, +where :math:`\sigma_1` is the largest singular value (or eigenvalue). + +If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`, +the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon` +and :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`). +If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then +:attr:`rtol` is set to zero. + +If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that +of the singular values of :attr:`A` as returned by :func:`torch.linalg.svd`. + +.. note:: This function uses :func:`torch.linalg.svd` if :attr:`hermitian`\ `= False` and + :func:`torch.linalg.eigh` if :attr:`hermitian`\ `= True`. + For CUDA inputs, this function synchronizes that device with the CPU. + +.. note:: + Consider using :func:`torch.linalg.lstsq` if possible for multiplying a matrix on the left by + the pseudoinverse, as:: + + torch.linalg.lstsq(A, B).solution == A.pinv() @ B + + It is always preferred to use :func:`~lstsq` when possible, as it is faster and more + numerically stable than computing the pseudoinverse explicitly. + +.. note:: + This function has NumPy compatible variant `linalg.pinv(A, rcond, hermitian=False)`. + However, use of the positional argument :attr:`rcond` is deprecated in favor of :attr:`rtol`. + +.. warning:: + This function uses internally :func:`torch.linalg.svd` (or :func:`torch.linalg.eigh` + when :attr:`hermitian`\ `= True`), so its derivative has the same problems as those of these + functions. See the warnings in :func:`torch.linalg.svd` and :func:`torch.linalg.eigh` for + more details. + +.. 
seealso:: + + :func:`torch.linalg.inv` computes the inverse of a square matrix. + + :func:`torch.linalg.lstsq` computes :attr:`A`\ `.pinv() @ \ `:attr:`B` with a + numerically stable algorithm. + +Args: + A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + rcond (float, Tensor, optional): [NumPy Compat]. Alias for :attr:`rtol`. Default: `None`. + +Keyword args: + atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero. + Default: `None`. + rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`. + Default: `None`. + hermitian(bool, optional): indicates whether :attr:`A` is Hermitian if complex + or symmetric if real. Default: `False`. + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Examples:: + + >>> A = torch.randn(3, 5) + >>> A + tensor([[ 0.5495, 0.0979, -1.4092, -0.1128, 0.4132], + [-1.1143, -0.3662, 0.3042, 1.6374, -0.9294], + [-0.3269, -0.5745, -0.0382, -0.5922, -0.6759]]) + >>> torch.linalg.pinv(A) + tensor([[ 0.0600, -0.1933, -0.2090], + [-0.0903, -0.0817, -0.4752], + [-0.7124, -0.1631, -0.2272], + [ 0.1356, 0.3933, -0.5023], + [-0.0308, -0.1725, -0.5216]]) + + >>> A = torch.randn(2, 6, 3) + >>> Apinv = torch.linalg.pinv(A) + >>> torch.dist(Apinv @ A, torch.eye(3)) + tensor(8.5633e-07) + + >>> A = torch.randn(3, 3, dtype=torch.complex64) + >>> A = A + A.T.conj() # creates a Hermitian matrix + >>> Apinv = torch.linalg.pinv(A, hermitian=True) + >>> torch.dist(Apinv @ A, torch.eye(3)) + tensor(1.0830e-06) + +.. _defined algebraically: + https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Existence_and_uniqueness +.. _through the SVD: + https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Singular_value_decomposition_(SVD) +""") + +matrix_exp = _add_docstr(_linalg.linalg_matrix_exp, r""" +linalg.matrix_exp(A) -> Tensor + +Computes the matrix exponential of a square matrix. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +this function computes the **matrix exponential** of :math:`A \in \mathbb{K}^{n \times n}`, which is defined as + +.. math:: + \mathrm{matrix\_exp}(A) = \sum_{k=0}^\infty \frac{1}{k!}A^k \in \mathbb{K}^{n \times n}. + +If the matrix :math:`A` has eigenvalues :math:`\lambda_i \in \mathbb{C}`, +the matrix :math:`\mathrm{matrix\_exp}(A)` has eigenvalues :math:`e^{\lambda_i} \in \mathbb{C}`. + +Supports input of bfloat16, float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions. + +Example:: + + >>> A = torch.empty(2, 2, 2) + >>> A[0, :, :] = torch.eye(2, 2) + >>> A[1, :, :] = 2 * torch.eye(2, 2) + >>> A + tensor([[[1., 0.], + [0., 1.]], + + [[2., 0.], + [0., 2.]]]) + >>> torch.linalg.matrix_exp(A) + tensor([[[2.7183, 0.0000], + [0.0000, 2.7183]], + + [[7.3891, 0.0000], + [0.0000, 7.3891]]]) + + >>> import math + >>> A = torch.tensor([[0, math.pi/3], [-math.pi/3, 0]]) # A is skew-symmetric + >>> torch.linalg.matrix_exp(A) # matrix_exp(A) = [[cos(pi/3), sin(pi/3)], [-sin(pi/3), cos(pi/3)]] + tensor([[ 0.5000, 0.8660], + [-0.8660, 0.5000]]) +""") + + +solve = _add_docstr(_linalg.linalg_solve, r""" +linalg.solve(A, B, *, left=True, out=None) -> Tensor + +Computes the solution of a square system of linear equations with a unique solution. 
+ +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to +:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as + +.. math:: AX = B + +If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system + +.. math:: + + XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.} + +This system of linear equations has one solution if and only if :math:`A` is `invertible`_. +This function assumes that :math:`A` is invertible. + +Supports inputs of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if the inputs are batches of matrices then +the output has the same batch dimensions. + +Letting `*` be zero or more batch dimensions, + +- If :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(*, n)` (a batch of vectors) or shape + `(*, n, k)` (a batch of matrices or "multiple right-hand sides"), this function returns `X` of shape + `(*, n)` or `(*, n, k)` respectively. +- Otherwise, if :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(n,)` or `(n, k)`, :attr:`B` + is broadcasted to have shape `(*, n)` or `(*, n, k)` respectively. + This function then returns the solution of the resulting batch of systems of linear equations. + +.. note:: + This function computes `X = \ `:attr:`A`\ `.inverse() @ \ `:attr:`B` in a faster and + more numerically stable way than performing the computations separately. + +.. note:: + It is possible to compute the solution of the system :math:`XA = B` by passing the inputs + :attr:`A` and :attr:`B` transposed and transposing the output returned by this function. + +""" + fr""" +.. note:: {common_notes["sync_note"]} +""" + r""" + +.. seealso:: + + :func:`torch.linalg.solve_triangular` computes the solution of a triangular system of linear + equations with a unique solution. + +Args: + A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions. + B (Tensor): right-hand side tensor of shape `(*, n)` or `(*, n, k)` or `(n,)` or `(n, k)` + according to the rules described above + +Keyword args: + left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`. + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Raises: + RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A` + is not invertible. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> b = torch.randn(3) + >>> x = torch.linalg.solve(A, b) + >>> torch.allclose(A @ x, b) + True + >>> A = torch.randn(2, 3, 3) + >>> B = torch.randn(2, 3, 4) + >>> X = torch.linalg.solve(A, B) + >>> X.shape + torch.Size([2, 3, 4]) + >>> torch.allclose(A @ X, B) + True + + >>> A = torch.randn(2, 3, 3) + >>> b = torch.randn(3, 1) + >>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3, 1) + >>> x.shape + torch.Size([2, 3, 1]) + >>> torch.allclose(A @ x, b) + True + >>> b = torch.randn(3) + >>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3) + >>> x.shape + torch.Size([2, 3]) + >>> Ax = A @ x.unsqueeze(-1) + >>> torch.allclose(Ax, b.unsqueeze(-1).expand_as(Ax)) + True + +.. 
_invertible:
+    https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
+""")
+
+solve_triangular = _add_docstr(_linalg.linalg_solve_triangular, r"""
+linalg.solve_triangular(A, B, *, upper, left=True, unitriangular=False, out=None) -> Tensor
+
+Computes the solution of a triangular system of linear equations with a unique solution.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system**
+associated to the triangular matrix :math:`A \in \mathbb{K}^{n \times n}` without zeros on the diagonal
+(that is, it is `invertible`_) and the rectangular matrix :math:`B \in \mathbb{K}^{n \times k}`,
+which is defined as
+
+.. math:: AX = B
+
+The argument :attr:`upper` signals whether :math:`A` is upper or lower triangular.
+
+If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that
+solves the system
+
+.. math::
+
+    XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
+
+If :attr:`upper`\ `= True` (resp. `False`) just the upper (resp. lower) triangular half of :attr:`A`
+will be accessed. The elements in the other triangular half will be considered to be zero and will not be accessed.
+
+If :attr:`unitriangular`\ `= True`, the diagonal of :attr:`A` is assumed to be ones and will not be accessed.
+
+The result may contain `NaN` s if the diagonal of :attr:`A` contains zeros or elements that
+are very close to zero and :attr:`unitriangular`\ `= False` (default) or if the input matrix
+has very small eigenvalues.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if the inputs are batches of matrices then
+the output has the same batch dimensions.
+
+.. seealso::
+
+        :func:`torch.linalg.solve` computes the solution of a general square system of linear
+        equations with a unique solution.
+
+Args:
+    A (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
+                where `*` is zero or more batch dimensions.
+    B (Tensor): right-hand side tensor of shape `(*, n, k)`.
+
+Keyword args:
+    upper (bool): whether :attr:`A` is an upper or lower triangular matrix.
+    left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
+    unitriangular (bool, optional): if `True`, the diagonal elements of :attr:`A` are assumed to be
+        all equal to `1`. Default: `False`.
+    out (Tensor, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
+        Ignored if `None`. Default: `None`.
+
+Examples::
+
+    >>> A = torch.randn(3, 3).triu_()
+    >>> B = torch.randn(3, 4)
+    >>> X = torch.linalg.solve_triangular(A, B, upper=True)
+    >>> torch.allclose(A @ X, B)
+    True
+
+    >>> A = torch.randn(2, 3, 3).tril_()
+    >>> B = torch.randn(2, 3, 4)
+    >>> X = torch.linalg.solve_triangular(A, B, upper=False)
+    >>> torch.allclose(A @ X, B)
+    True
+
+    >>> A = torch.randn(2, 4, 4).tril_()
+    >>> B = torch.randn(2, 3, 4)
+    >>> X = torch.linalg.solve_triangular(A, B, upper=False, left=False)
+    >>> torch.allclose(X @ A, B)
+    True
+
+.. _invertible:
+    https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
+""")
+
+lu_factor = _add_docstr(_linalg.linalg_lu_factor, r"""
+linalg.lu_factor(A, *, pivot=True, out=None) -> (Tensor, Tensor)
+
+Computes a compact representation of the LU factorization with partial pivoting of a matrix.
+
+This function computes a compact representation of the decomposition given by :func:`torch.linalg.lu`.
+If the matrix is square, this representation may be used in :func:`torch.linalg.lu_solve`
+to solve systems of linear equations that share the matrix :attr:`A`.
+
+The returned decomposition is represented as a named tuple `(LU, pivots)`.
+The ``LU`` matrix has the same shape as the input matrix ``A``. Its upper and lower triangular
+parts encode the non-constant elements of ``L`` and ``U`` of the LU decomposition of ``A``.
+
+The returned permutation matrix is represented by a 1-indexed vector. `pivots[i] == j` represents
+that in the `i`-th step of the algorithm, the `i`-th row was permuted with the `j-1`-th row.
+
+On CUDA, one may use :attr:`pivot`\ `= False`. In this case, this function returns the LU
+decomposition without pivoting if it exists.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if the inputs are batches of matrices then
+the output has the same batch dimensions.
+
+""" + fr"""
+.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.lu_factor_ex")}
+""" + r"""
+.. warning:: The LU decomposition is almost never unique, as often there are different permutation
+             matrices that can yield different LU decompositions.
+             As such, different platforms, like SciPy, or inputs on different devices,
+             may produce different valid decompositions.
+
+             Gradient computations are only supported if the input matrix is full-rank.
+             If this condition is not met, no error will be thrown, but the gradient may not be finite.
+             This is because the LU decomposition with pivoting is not differentiable at these points.
+
+.. seealso::
+
+        :func:`torch.linalg.lu_solve` solves a system of linear equations given the output of this
+        function provided the input matrix was square and invertible.
+
+        :func:`torch.lu_unpack` unpacks the tensors returned by :func:`~lu_factor` into the three
+        matrices `P, L, U` that form the decomposition.
+
+        :func:`torch.linalg.lu` computes the LU decomposition with partial pivoting of a possibly
+        non-square matrix. It is a composition of :func:`~lu_factor` and :func:`torch.lu_unpack`.
+
+        :func:`torch.linalg.solve` solves a system of linear equations. It is a composition
+        of :func:`~lu_factor` and :func:`~lu_solve`.
+
+Args:
+    A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
+
+Keyword args:
+    pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
+        decomposition. :attr:`pivot`\ `= False` not supported on CPU. Default: `True`.
+    out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
+
+Returns:
+    A named tuple `(LU, pivots)`.
+
+Raises:
+    RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A`
+                  is not invertible.
+
+Examples::
+
+    >>> A = torch.randn(2, 3, 3)
+    >>> B1 = torch.randn(2, 3, 4)
+    >>> B2 = torch.randn(2, 3, 7)
+    >>> A_factor = torch.linalg.lu_factor(A)
+    >>> X1 = torch.linalg.lu_solve(A_factor, B1)
+    >>> X2 = torch.linalg.lu_solve(A_factor, B2)
+    >>> torch.allclose(A @ X1, B1)
+    True
+    >>> torch.allclose(A @ X2, B2)
+    True
+
+.. 
_invertible:
+    https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
+""")
+
+lu_factor_ex = _add_docstr(_linalg.linalg_lu_factor_ex, r"""
+linalg.lu_factor_ex(A, *, pivot=True, check_errors=False, out=None) -> (Tensor, Tensor, Tensor)
+
+This is a version of :func:`~lu_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`.
+It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
+
+""" + fr"""
+.. note:: {common_notes["sync_note_ex"]}
+
+.. warning:: {common_notes["experimental_warning"]}
+""" + r"""
+
+Args:
+    A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
+
+Keyword args:
+    pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
+        decomposition. :attr:`pivot`\ `= False` not supported on CPU. Default: `True`.
+    check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
+        an error if it is non-zero. Default: `False`.
+    out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`.
+
+Returns:
+    A named tuple `(LU, pivots, info)`.
+
+.. _LAPACK's getrf:
+    https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
+""")
+
+lu_solve = _add_docstr(_linalg.linalg_lu_solve, r"""
+linalg.lu_solve(LU, pivots, B, *, left=True, adjoint=False, out=None) -> Tensor
+
+Computes the solution of a square system of linear equations with a unique solution given an LU decomposition.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to
+:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as
+
+.. math:: AX = B
+
+where :math:`A` is given factorized as returned by :func:`~lu_factor`.
+
+If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system
+
+.. math::
+
+    XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
+
+If :attr:`adjoint`\ `= True` (and :attr:`left`\ `= True`), given an LU factorization of :math:`A`
+this function returns the :math:`X \in \mathbb{K}^{n \times k}` that solves the system
+
+.. math::
+
+    A^{\text{H}}X = B\mathrlap{\qquad A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}.}
+
+where :math:`A^{\text{H}}` is the conjugate transpose when :math:`A` is complex, and the
+transpose when :math:`A` is real-valued. The :attr:`left`\ `= False` case is analogous.
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if the inputs are batches of matrices then
+the output has the same batch dimensions.
+
+Args:
+    LU (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
+                 where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
+    pivots (Tensor): tensor of shape `(*, n)` (or `(*, k)` if :attr:`left`\ `= False`)
+                 where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
+    B (Tensor): right-hand side tensor of shape `(*, n, k)`.
+
+Keyword args:
+    left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
+    adjoint (bool, optional): whether to solve the system :math:`AX=B` or :math:`A^{\text{H}}X = B`. Default: `False`.
+    out (Tensor, optional): output tensor. Ignored if `None`. 
Default: `None`. + +Examples:: + + >>> A = torch.randn(3, 3) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> B = torch.randn(3, 2) + >>> X = torch.linalg.lu_solve(LU, pivots, B) + >>> torch.allclose(A @ X, B) + True + + >>> B = torch.randn(3, 3, 2) # Broadcasting rules apply: A is broadcasted + >>> X = torch.linalg.lu_solve(LU, pivots, B) + >>> torch.allclose(A @ X, B) + True + + >>> B = torch.randn(3, 5, 3) + >>> X = torch.linalg.lu_solve(LU, pivots, B, left=False) + >>> torch.allclose(X @ A, B) + True + + >>> B = torch.randn(3, 3, 4) # Now solve for A^T + >>> X = torch.linalg.lu_solve(LU, pivots, B, adjoint=True) + >>> torch.allclose(A.mT @ X, B) + True + +.. _invertible: + https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem +""") + +lu = _add_docstr(_linalg.linalg_lu, r""" +lu(A, *, pivot=True, out=None) -> (Tensor, Tensor, Tensor) + +Computes the LU decomposition with partial pivoting of a matrix. + +Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, +the **LU decomposition with partial pivoting** of a matrix +:math:`A \in \mathbb{K}^{m \times n}` is defined as + +.. math:: + + A = PLU\mathrlap{\qquad P \in \mathbb{K}^{m \times m}, L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}} + +where `k = min(m,n)`, :math:`P` is a `permutation matrix`_, :math:`L` is lower triangular with ones on the diagonal +and :math:`U` is upper triangular. + +If :attr:`pivot`\ `= False` and :attr:`A` is on GPU, then the **LU decomposition without pivoting** is computed + +.. math:: + + A = LU\mathrlap{\qquad L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}} + +When :attr:`pivot`\ `= False`, the returned matrix :attr:`P` will be empty. +The LU decomposition without pivoting `may not exist`_ if any of the principal minors of :attr:`A` is singular. +In this case, the output matrix may contain `inf` or `NaN`. + +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :attr:`A` is a batch of matrices then +the output has the same batch dimensions. + +.. seealso:: + + :func:`torch.linalg.solve` solves a system of linear equations using the LU decomposition + with partial pivoting. + +.. warning:: The LU decomposition is almost never unique, as often there are different permutation + matrices that can yield different LU decompositions. + As such, different platforms, like SciPy, or inputs on different devices, + may produce different valid decompositions. + +.. warning:: Gradient computations are only supported if the input matrix is full-rank. + If this condition is not met, no error will be thrown, but the gradient + may not be finite. + This is because the LU decomposition with pivoting is not differentiable at these points. + +Args: + A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + pivot (bool, optional): Controls whether to compute the LU decomposition with partial pivoting or + no pivoting. Default: `True`. + +Keyword args: + out (tuple, optional): output tuple of three tensors. Ignored if `None`. Default: `None`. + +Returns: + A named tuple `(P, L, U)`. 
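+
+A quick way to validate the decomposition (a minimal sketch; the reconstruction
+holds up to floating-point error, hence the tolerance)::
+
+    >>> A = torch.randn(3, 3)
+    >>> P, L, U = torch.linalg.lu(A)
+    >>> torch.allclose(P @ L @ U, A, atol=1e-6)
+    True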
+ +Examples:: + + >>> A = torch.randn(3, 2) + >>> P, L, U = torch.linalg.lu(A) + >>> P + tensor([[0., 1., 0.], + [0., 0., 1.], + [1., 0., 0.]]) + >>> L + tensor([[1.0000, 0.0000], + [0.5007, 1.0000], + [0.0633, 0.9755]]) + >>> U + tensor([[0.3771, 0.0489], + [0.0000, 0.9644]]) + >>> torch.dist(A, P @ L @ U) + tensor(5.9605e-08) + + >>> A = torch.randn(2, 5, 7, device="cuda") + >>> P, L, U = torch.linalg.lu(A, pivot=False) + >>> P + tensor([], device='cuda:0') + >>> torch.dist(A, L @ U) + tensor(1.0376e-06, device='cuda:0') + +.. _permutation matrix: + https://en.wikipedia.org/wiki/Permutation_matrix +.. _may not exist: + https://en.wikipedia.org/wiki/LU_decomposition#Definitions +""") + +tensorinv = _add_docstr(_linalg.linalg_tensorinv, r""" +linalg.tensorinv(A, ind=2, *, out=None) -> Tensor + +Computes the multiplicative inverse of :func:`torch.tensordot`. + +If `m` is the product of the first :attr:`ind` dimensions of :attr:`A` and `n` is the product of +the rest of the dimensions, this function expects `m` and `n` to be equal. +If this is the case, it computes a tensor `X` such that +`tensordot(\ `:attr:`A`\ `, X, \ `:attr:`ind`\ `)` is the identity matrix in dimension `m`. +`X` will have the shape of :attr:`A` but with the first :attr:`ind` dimensions pushed back to the end + +.. code:: text + + X.shape == A.shape[ind:] + A.shape[:ind] + +Supports input of float, double, cfloat and cdouble dtypes. + +.. note:: When :attr:`A` is a `2`-dimensional tensor and :attr:`ind`\ `= 1`, + this function computes the (multiplicative) inverse of :attr:`A` + (see :func:`torch.linalg.inv`). + +.. note:: + Consider using :func:`torch.linalg.tensorsolve` if possible for multiplying a tensor on the left + by the tensor inverse, as:: + + linalg.tensorsolve(A, B) == torch.tensordot(linalg.tensorinv(A), B) # When B is a tensor with shape A.shape[:B.ndim] + + It is always preferred to use :func:`~tensorsolve` when possible, as it is faster and more + numerically stable than computing the pseudoinverse explicitly. + +.. seealso:: + + :func:`torch.linalg.tensorsolve` computes + `torch.tensordot(tensorinv(\ `:attr:`A`\ `), \ `:attr:`B`\ `)`. + +Args: + A (Tensor): tensor to invert. Its shape must satisfy + `prod(\ `:attr:`A`\ `.shape[:\ `:attr:`ind`\ `]) == + prod(\ `:attr:`A`\ `.shape[\ `:attr:`ind`\ `:])`. + ind (int): index at which to compute the inverse of :func:`torch.tensordot`. Default: `2`. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Raises: + RuntimeError: if the reshaped :attr:`A` is not invertible or the product of the first + :attr:`ind` dimensions is not equal to the product of the rest. + +Examples:: + + >>> A = torch.eye(4 * 6).reshape((4, 6, 8, 3)) + >>> Ainv = torch.linalg.tensorinv(A, ind=2) + >>> Ainv.shape + torch.Size([8, 3, 4, 6]) + >>> B = torch.randn(4, 6) + >>> torch.allclose(torch.tensordot(Ainv, B), torch.linalg.tensorsolve(A, B)) + True + + >>> A = torch.randn(4, 4) + >>> Atensorinv = torch.linalg.tensorinv(A, ind=1) + >>> Ainv = torch.linalg.inv(A) + >>> torch.allclose(Atensorinv, Ainv) + True +""") + +tensorsolve = _add_docstr(_linalg.linalg_tensorsolve, r""" +linalg.tensorsolve(A, B, dims=None, *, out=None) -> Tensor + +Computes the solution `X` to the system `torch.tensordot(A, X) = B`. + +If `m` is the product of the first :attr:`B`\ `.ndim` dimensions of :attr:`A` and +`n` is the product of the rest of the dimensions, this function expects `m` and `n` to be equal. 
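+
+.. note:: A shape-only sketch: if :attr:`A` has shape `(2, 3, 2, 3)` and
+          :attr:`B` has shape `(2, 3)`, then `m = 2 * 3 = 6 = n`, so the system
+          is well-defined and the solution has shape `(2, 3)`.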
+
+The returned tensor `X` satisfies
+`tensordot(\ `:attr:`A`\ `, X, dims=X.ndim) == \ `:attr:`B`.
+`X` has shape :attr:`A`\ `.shape[B.ndim:]`.
+
+If :attr:`dims` is specified, :attr:`A` will be reshaped as
+
+.. code:: text
+
+    A = movedim(A, dims, range(len(dims) - A.ndim + 1, 0))
+
+Supports inputs of float, double, cfloat and cdouble dtypes.
+
+.. seealso::
+
+        :func:`torch.linalg.tensorinv` computes the multiplicative inverse of
+        :func:`torch.tensordot`.
+
+Args:
+    A (Tensor): tensor to solve for. Its shape must satisfy
+                    `prod(\ `:attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]) ==
+                    prod(\ `:attr:`A`\ `.shape[\ `:attr:`B`\ `.ndim:])`.
+    B (Tensor): tensor of shape :attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]`.
+    dims (Tuple[int], optional): dimensions of :attr:`A` to be moved.
+        If `None`, no dimensions are moved. Default: `None`.
+
+Keyword args:
+    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
+
+Raises:
+    RuntimeError: if the reshaped :attr:`A`\ `.view(m, m)` with `m` as above is not
+                  invertible or the product of the first :attr:`B`\ `.ndim` dimensions
+                  is not equal to the product of the rest of the dimensions.
+
+Examples::
+
+    >>> A = torch.eye(2 * 3 * 4).reshape((2 * 3, 4, 2, 3, 4))
+    >>> B = torch.randn(2 * 3, 4)
+    >>> X = torch.linalg.tensorsolve(A, B)
+    >>> X.shape
+    torch.Size([2, 3, 4])
+    >>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B)
+    True
+
+    >>> A = torch.randn(6, 4, 4, 3, 2)
+    >>> B = torch.randn(4, 3, 2)
+    >>> X = torch.linalg.tensorsolve(A, B, dims=(0, 2))
+    >>> X.shape
+    torch.Size([6, 4])
+    >>> A = A.permute(1, 3, 4, 0, 2)
+    >>> A.shape[B.ndim:]
+    torch.Size([6, 4])
+    >>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B, atol=1e-6)
+    True
+""")
+
+qr = _add_docstr(_linalg.linalg_qr, r"""
+qr(A, mode='reduced', *, out=None) -> (Tensor, Tensor)
+
+Computes the QR decomposition of a matrix.
+
+Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
+the **full QR decomposition** of a matrix
+:math:`A \in \mathbb{K}^{m \times n}` is defined as
+
+.. math::
+
+    A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times m}, R \in \mathbb{K}^{m \times n}}
+
+where :math:`Q` is orthogonal in the real case and unitary in the complex case,
+and :math:`R` is upper triangular with real diagonal (even in the complex case).
+
+When `m > n` (tall matrix), as `R` is upper triangular, its last `m - n` rows are zero.
+In this case, we can drop the last `m - n` columns of `Q` to form the
+**reduced QR decomposition**:
+
+.. math::
+
+    A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times n}, R \in \mathbb{K}^{n \times n}}
+
+The reduced QR decomposition agrees with the full QR decomposition when `n >= m` (wide matrix).
+
+Supports input of float, double, cfloat and cdouble dtypes.
+Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
+the output has the same batch dimensions.
+
+The parameter :attr:`mode` chooses between the full and reduced QR decomposition.
+If :attr:`A` has shape `(*, m, n)`, denoting `k = min(m, n)`
+
+- :attr:`mode`\ `= 'reduced'` (default): Returns `(Q, R)` of shapes `(*, m, k)`, `(*, k, n)` respectively.
+  It is always differentiable.
+- :attr:`mode`\ `= 'complete'`: Returns `(Q, R)` of shapes `(*, m, m)`, `(*, m, n)` respectively.
+  It is differentiable for `m <= n`.
+- :attr:`mode`\ `= 'r'`: Computes only the reduced `R`. Returns `(Q, R)` with `Q` empty and `R` of shape `(*, k, n)`.
+  It is never differentiable.
+
+Differences with `numpy.linalg.qr`:
+
+- :attr:`mode`\ `= 'raw'` is not implemented.
+- Unlike `numpy.linalg.qr`, this function always returns a tuple of two tensors. + When :attr:`mode`\ `= 'r'`, the `Q` tensor is an empty tensor. + +.. warning:: The elements in the diagonal of `R` are not necessarily positive. + As such, the returned QR decomposition is only unique up to the sign of the diagonal of `R`. + Therefore, different platforms, like NumPy, or inputs on different devices, + may produce different valid decompositions. + +.. warning:: The QR decomposition is only well-defined if the first `k = min(m, n)` columns + of every matrix in :attr:`A` are linearly independent. + If this condition is not met, no error will be thrown, but the QR produced + may be incorrect and its autodiff may fail or produce incorrect results. + +Args: + A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + mode (str, optional): one of `'reduced'`, `'complete'`, `'r'`. + Controls the shape of the returned tensors. Default: `'reduced'`. + +Keyword args: + out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`. + +Returns: + A named tuple `(Q, R)`. + +Examples:: + + >>> A = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]]) + >>> Q, R = torch.linalg.qr(A) + >>> Q + tensor([[-0.8571, 0.3943, 0.3314], + [-0.4286, -0.9029, -0.0343], + [ 0.2857, -0.1714, 0.9429]]) + >>> R + tensor([[ -14.0000, -21.0000, 14.0000], + [ 0.0000, -175.0000, 70.0000], + [ 0.0000, 0.0000, -35.0000]]) + >>> (Q @ R).round() + tensor([[ 12., -51., 4.], + [ 6., 167., -68.], + [ -4., 24., -41.]]) + >>> (Q.T @ Q).round() + tensor([[ 1., 0., 0.], + [ 0., 1., -0.], + [ 0., -0., 1.]]) + >>> Q2, R2 = torch.linalg.qr(A, mode='r') + >>> Q2 + tensor([]) + >>> torch.equal(R, R2) + True + >>> A = torch.randn(3, 4, 5) + >>> Q, R = torch.linalg.qr(A, mode='complete') + >>> torch.dist(Q @ R, A) + tensor(1.6099e-06) + >>> torch.dist(Q.mT @ Q, torch.eye(4)) + tensor(6.2158e-07) +""") + +vander = _add_docstr(_linalg.linalg_vander, r""" +vander(x, N=None) -> Tensor + +Generates a Vandermonde matrix. + +Returns the Vandermonde matrix :math:`V` + +.. math:: + + V = \begin{pmatrix} + 1 & x_1 & x_1^2 & \dots & x_1^{N-1}\\ + 1 & x_2 & x_2^2 & \dots & x_2^{N-1}\\ + 1 & x_3 & x_3^2 & \dots & x_3^{N-1}\\ + \vdots & \vdots & \vdots & \ddots &\vdots \\ + 1 & x_n & x_n^2 & \dots & x_n^{N-1} + \end{pmatrix}. + +for `N > 1`. +If :attr:`N`\ `= None`, then `N = x.size(-1)` so that the output is a square matrix. + +Supports inputs of float, double, cfloat, cdouble, and integral dtypes. +Also supports batches of vectors, and if :attr:`x` is a batch of vectors then +the output has the same batch dimensions. + +Differences with `numpy.vander`: + +- Unlike `numpy.vander`, this function returns the powers of :attr:`x` in ascending order. + To get them in the reverse order call ``linalg.vander(x, N).flip(-1)``. + +Args: + x (Tensor): tensor of shape `(*, n)` where `*` is zero or more batch dimensions + consisting of vectors. + +Keyword args: + N (int, optional): Number of columns in the output. Default: `x.size(-1)` + +Example:: + + >>> x = torch.tensor([1, 2, 3, 5]) + >>> linalg.vander(x) + tensor([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + >>> linalg.vander(x, N=3) + tensor([[ 1, 1, 1], + [ 1, 2, 4], + [ 1, 3, 9], + [ 1, 5, 25]]) +""") + +vecdot = _add_docstr(_linalg.linalg_vecdot, r""" +linalg.vecdot(x, y, *, dim=-1, out=None) -> Tensor + +Computes the dot product of two batches of vectors along a dimension. + +In symbols, this function computes + +.. 
math:: + + \sum_{i=1}^n \overline{x_i}y_i. + +over the dimension :attr:`dim` where :math:`\overline{x_i}` denotes the conjugate for complex +vectors, and it is the identity for real vectors. + +Supports input of half, bfloat16, float, double, cfloat, cdouble and integral dtypes. +It also supports broadcasting. + +Args: + x (Tensor): first batch of vectors of shape `(*, n)`. + y (Tensor): second batch of vectors of shape `(*, n)`. + +Keyword args: + dim (int): Dimension along which to compute the dot product. Default: `-1`. + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Examples:: + + >>> v1 = torch.randn(3, 2) + >>> v2 = torch.randn(3, 2) + >>> linalg.vecdot(v1, v2) + tensor([ 0.3223, 0.2815, -0.1944]) + >>> torch.vdot(v1[0], v2[0]) + tensor(0.3223) +""") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d16ea7f6270bdafcd12c62400e2270415f36991 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/nested/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/nested/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c6292ed9378a5b19d2dc0099a6c6ee5d18192ec1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/nested/__init__.py @@ -0,0 +1,256 @@ +from typing import List, Optional, Union + +import torch +from torch import SymInt, Tensor +from torch._C import _add_docstr, _nested # type: ignore[attr-defined] + +from torch.types import _device as Device, _dtype as DType + +__all__ = [ + "to_padded_tensor", + "as_nested_tensor", + "nested_tensor", + "narrow", +] + +# Nested Tensor constructor functions + + +def as_nested_tensor( + tensor_list: List[Tensor], + dtype: Optional[DType] = None, + device: Optional[Device] = None, + layout=None +) -> Tensor: + r""" + Constructs a nested tensor preserving autograd history from :attr:`tensor_list` a list of tensors. + + .. note:: + Tensors within the list are always copied by this function due to current nested tensor semantics. + + Args: + tensor_list (List[Tensor]): a list of tensors with the same ndim + + Keyword arguments: + dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor. + Default: if None, same :class:`torch.dtype` as leftmost tensor in the list. + device (:class:`torch.device`, optional): the desired device of returned nested tensor. + Default: if None, same :class:`torch.device` as leftmost tensor in the list + layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor. + Only strided and jagged layouts are supported. Default: if None, the strided layout. 
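+
+    The jagged layout can be requested explicitly (a minimal sketch; the
+    strided layout remains the default)::
+
+        >>> nt_j = torch.nested.as_nested_tensor([torch.ones(2), torch.ones(3)], layout=torch.jagged)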
+ + Example:: + + >>> a = torch.arange(3, dtype=torch.float, requires_grad=True) + >>> b = torch.arange(5, dtype=torch.float, requires_grad=True) + >>> nt = torch.nested.as_nested_tensor([a, b]) + >>> nt.is_leaf + False + >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)]) + >>> nt.backward(fake_grad) + >>> a.grad + tensor([1., 1., 1.]) + >>> b.grad + tensor([0., 0., 0., 0., 0.]) + """ + if not isinstance(tensor_list, list) or any( + not isinstance(t, Tensor) for t in tensor_list + ): + raise TypeError( + "as_nested_tensor(): Expected first argument to be a list of tensors " + ) + + if layout is None: + layout = torch.strided + if layout == torch.strided: + return torch._nested_tensor_from_tensor_list(tensor_list, dtype, None, device, None) + elif layout == torch.jagged: + from torch.nested._internal.nested_tensor import jagged_from_list + + nt, _ = jagged_from_list(tensor_list, offsets=None, device=device, dtype=dtype) + return nt + else: + raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}") + + +# Note: This not only adds doc strings for the nested ops, but +# also connects the torch.nested Python namespace to the torch._C._nested builtins. + +to_padded_tensor = _add_docstr( + _nested.nested_to_padded_tensor, + r""" +to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor + +Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor. +The leading entries will be filled with the nested data, +while the trailing entries will be padded. + +.. warning:: + + :func:`to_padded_tensor` always copies the underlying data, + since the nested and the non-nested tensors differ in memory layout. + +Args: + padding (float): The padding value for the trailing entries. + +Keyword args: + output_size (Tuple[int]): The size of the output tensor. + If given, it must be large enough to contain all nested data; + else, will infer by taking the max size of each nested sub-tensor along each dimension. + out (Tensor, optional): the output tensor. + +Example:: + + >>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))]) + nested_tensor([ + tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276], + [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]), + tensor([[-1.8546, -0.7194, -0.2918, -0.1846], + [ 0.2773, 0.8793, -0.5183, -0.6447], + [ 1.8009, 1.8468, -0.9832, -1.5272]]) + ]) + >>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0) + tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276], + [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]], + [[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000], + [ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000], + [ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]]) + >>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6)) + tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000], + [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000], + [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000], + [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]], + [[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000], + [ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000], + [ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000], + [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]]) + >>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2)) + RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported. 
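+    >>> # A sketch: passing the inferred padded size explicitly is also valid
+    >>> # and produces the same result as pt_infer above.
+    >>> pt_exact = torch.nested.to_padded_tensor(nt, 0.0, (2, 3, 5))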
+ +""", +) + +def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor: + r""" +Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see +:ref:`Autograd mechanics `) from :attr:`tensor_list` a list of tensors. + +Args: + tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor, + where each element of the list has the same dimensionality. + +Keyword arguments: + dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor. + Default: if None, same :class:`torch.dtype` as leftmost tensor in the list. + layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor. + Only strided and jagged layouts are supported. Default: if None, the strided layout. + device (:class:`torch.device`, optional): the desired device of returned nested tensor. + Default: if None, same :class:`torch.device` as leftmost tensor in the list + requires_grad (bool, optional): If autograd should record operations on the + returned nested tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned nested tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + +Example:: + + >>> a = torch.arange(3, dtype=torch.float, requires_grad=True) + >>> b = torch.arange(5, dtype=torch.float, requires_grad=True) + >>> nt = torch.nested.nested_tensor([a, b], requires_grad=True) + >>> nt.is_leaf + True + """ + if layout is None: + layout = torch.strided + if layout == torch.strided: + return _nested.nested_tensor( + tensor_list, + dtype=dtype, + device=device, + requires_grad=requires_grad, + pin_memory=pin_memory) + elif layout == torch.jagged: + # Need to: + # * Detach tensors to discard autograd history + # * Wrap lists of scalars as tensors + list_of_tensors = [t.detach() if isinstance(t, Tensor) else torch.as_tensor(t) + for t in tensor_list] + + from torch.nested._internal.nested_tensor import jagged_from_list + + with torch.no_grad(): + nt, _ = jagged_from_list(list_of_tensors, offsets=None, device=device, dtype=dtype) + + nt.requires_grad_(requires_grad) + if pin_memory: + nt = nt.pin_memory() # type: ignore[assignment] + + return nt + else: + raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}") + + +def narrow(tensor: Tensor, dim: int, start: Union[int, Tensor], length: Union[int, Tensor], layout=torch.strided) -> Tensor: + r""" +Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows +similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor +shows only the elements in the interval `[start, start+length)`. As nested representations +allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length` +can also be tensors of shape `tensor.shape[0]`. + +There's some differences depending on the layout you use for the nested tensor. If using strided layout, +torch.narrow will do a copy of the narrowed data into a contiguous NT with strided layout, while +jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular +representation is really useful for representing kv-caches in Transformer models, as specialized +SDPA kernels can deal with format easily, resulting in performance improvements. 
+
+
+Args:
+    tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
+        for the nested tensor if using the jagged layout or will be copied for the strided layout.
+    dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
+        jagged layout, while the strided layout supports all dims.
+    start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation.
+    length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow operation.
+
+Keyword arguments:
+    layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
+        Only strided and jagged layouts are supported. Default: the strided layout.
+
+Example::
+
+    >>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
+    >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
+    >>> narrow_base = torch.randn(5, 10, 20)
+    >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
+    >>> nt_narrowed.is_contiguous()
+    False
+    """
+    if not isinstance(start, (int, SymInt, Tensor)):
+        raise RuntimeError("start must be an integer or a tensor")
+
+    if not isinstance(length, (int, SymInt, Tensor)):
+        raise RuntimeError("length must be an integer or a tensor")
+
+    if layout == torch.strided:
+        if isinstance(start, Tensor) or isinstance(length, Tensor):
+            raise RuntimeError("start and length must be integers for the strided layout NT impl")
+        # TODO: switch to as_nested_tensor(tensor) when it is available
+        nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(dim, start, length)
+    elif layout == torch.jagged:
+        if dim != 1:
+            raise RuntimeError("jagged layout only supports dim=1")
+
+        from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths
+
+        if isinstance(start, (int, SymInt)):
+            start = torch.tensor([start], device=tensor.device, dtype=torch.int64)
+
+        if isinstance(length, (int, SymInt)):
+            length = torch.tensor([length], device=tensor.device, dtype=torch.int64)
+
+        nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
+    else:
+        raise RuntimeError(f"Specified layout is unsupported for nested narrow: {layout}")
+
+    return nt
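A minimal usage sketch of the `narrow` function defined above (assuming only the `torch.nested.narrow` API shown in this file): the jagged layout accepts per-row starts and lengths and returns a non-contiguous view, while the strided layout requires plain ints and copies the narrowed data.

import torch

base = torch.randn(5, 10, 20)
starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)

# Jagged layout: per-row starts/lengths; returns a non-contiguous view of `base`.
nt_view = torch.nested.narrow(base, 1, starts, lengths, layout=torch.jagged)
print(nt_view.is_contiguous())  # False

# Strided layout: start/length must be plain ints; the narrowed data is copied
# into a contiguous nested tensor.
nt_copy = torch.nested.narrow(base, 1, 0, 3, layout=torch.strided)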
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..98db73d74018450bf0fb5d1ca01284cdc4a6dcad
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
@@ -0,0 +1,259 @@
+# If you need to modify this file to make this test pass, please also apply same edits accordingly to
+# https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py
+# and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html
+
+import numpy as np
+from itertools import count
+
+import torch
+import torch.distributed.rpc as rpc
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
+from torch.distributions import Categorical
+
+from torch.testing._internal.dist_utils import dist_init, worker_name
+from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
+
+TOTAL_EPISODE_STEP = 5000
+GAMMA = 0.1
+SEED = 543
+
+def _call_method(method, rref, *args, **kwargs):
+    r"""
+    A helper function to call a method on the value held by the given RRef.
+    """
+    return method(rref.local_value(), *args, **kwargs)
+
+
+def _remote_method(method, rref, *args, **kwargs):
+    r"""
+    A helper function to run a method on the owner of the given RRef and fetch
+    back the result using RPC.
+    """
+    args = [method, rref] + list(args)
+    return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)
+
+
+class Policy(nn.Module):
+    r"""
+    Borrowing the ``Policy`` class from the Reinforcement Learning example.
+    Copying the code makes these two examples independent.
+    See https://github.com/pytorch/examples/tree/master/reinforcement_learning
+    """
+    def __init__(self):
+        super().__init__()
+        self.affine1 = nn.Linear(4, 128)
+        self.dropout = nn.Dropout(p=0.6)
+        self.affine2 = nn.Linear(128, 2)
+
+        self.saved_log_probs = []
+        self.rewards = []
+
+    def forward(self, x):
+        x = self.affine1(x)
+        x = self.dropout(x)
+        x = F.relu(x)
+        action_scores = self.affine2(x)
+        return F.softmax(action_scores, dim=1)
+
+
+class DummyEnv:
+    r"""
+    A dummy environment that implements the required subset of the OpenAI gym
+    interface. It exists only to avoid a dependency on gym for running the
+    tests in this file. It is designed to run for a set max number of iterations,
+    returning random states and rewards at each step.
+    """
+    def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
+        self.state_dim = state_dim
+        self.num_iters = num_iters
+        self.iter = 0
+        self.reward_threshold = reward_threshold
+
+    def seed(self, manual_seed):
+        torch.manual_seed(manual_seed)
+
+    def reset(self):
+        self.iter = 0
+        return torch.randn(self.state_dim)
+
+    def step(self, action):
+        self.iter += 1
+        state = torch.randn(self.state_dim)
+        reward = torch.rand(1).item() * self.reward_threshold
+        done = self.iter >= self.num_iters
+        info = {}
+        return state, reward, done, info
+
+
+class Observer:
+    r"""
+    An observer has exclusive access to its own environment. Each observer
+    captures the state from its environment, and sends the state to the agent to
+    select an action. Then, the observer applies the action to its environment
+    and reports the reward to the agent.
+    """
+    def __init__(self):
+        self.id = rpc.get_worker_info().id
+        self.env = DummyEnv()
+        self.env.seed(SEED)
+
+    def run_episode(self, agent_rref, n_steps):
+        r"""
+        Run one episode of n_steps.
+        Arguments:
+            agent_rref (RRef): an RRef referencing the agent object.
+            n_steps (int): number of steps in this episode
+        """
+        state, ep_reward = self.env.reset(), 0
+        for step in range(n_steps):
+            # send the state to the agent to get an action
+            action = _remote_method(Agent.select_action, agent_rref, self.id, state)
+
+            # apply the action to the environment, and get the reward
+            state, reward, done, _ = self.env.step(action)
+
+            # report the reward to the agent for training purposes
+            _remote_method(Agent.report_reward, agent_rref, self.id, reward)
+
+            if done:
+                break
+
+
+class Agent:
+    def __init__(self, world_size):
+        self.ob_rrefs = []
+        self.agent_rref = RRef(self)
+        self.rewards = {}
+        self.saved_log_probs = {}
+        self.policy = Policy()
+        self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
+        self.eps = np.finfo(np.float32).eps.item()
+        self.running_reward = 0
+        self.reward_threshold = DummyEnv().reward_threshold
+        for ob_rank in range(1, world_size):
+            ob_info = rpc.get_worker_info(worker_name(ob_rank))
+            self.ob_rrefs.append(remote(ob_info, Observer))
+            self.rewards[ob_info.id] = []
+            self.saved_log_probs[ob_info.id] = []
+
+    def select_action(self, ob_id, state):
+        r"""
+        This function is mostly borrowed from the Reinforcement Learning example.
+        See https://github.com/pytorch/examples/tree/master/reinforcement_learning
+        The main difference is that instead of keeping all probs in one list,
+        the agent keeps probs in a dictionary, one key per observer.
+
+        NB: no need to enforce thread-safety here as the GIL will serialize
+        executions.
+        """
+        probs = self.policy(state.unsqueeze(0))
+        m = Categorical(probs)
+        action = m.sample()
+        self.saved_log_probs[ob_id].append(m.log_prob(action))
+        return action.item()
+
+    def report_reward(self, ob_id, reward):
+        r"""
+        Observers call this function to report rewards.
+        """
+        self.rewards[ob_id].append(reward)
+
+    def run_episode(self, n_steps=0):
+        r"""
+        Run one episode. The agent will tell each observer to run n_steps.
+        """
+        futs = []
+        for ob_rref in self.ob_rrefs:
+            # make async RPC to kick off an episode on all observers
+            futs.append(
+                rpc_async(
+                    ob_rref.owner(),
+                    _call_method,
+                    args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
+                )
+            )
+
+        # wait until all observers have finished this episode
+        for fut in futs:
+            fut.wait()
+
+    def finish_episode(self):
+        r"""
+        This function is mostly borrowed from the Reinforcement Learning example.
+        See https://github.com/pytorch/examples/tree/master/reinforcement_learning
+        The main difference is that it joins all probs and rewards from
+        different observers into one list, and uses the minimum observer reward
+        as the reward of the current episode.
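+
+        Concretely, the loop below accumulates the discounted return
+        ``R = r + GAMMA * R`` over the merged reward list (iterated in
+        reverse), normalizes the returns, and forms the standard
+        policy-gradient loss ``-log_prob * R``.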
+ """ + + # joins probs and rewards from different observers into lists + R, probs, rewards = 0, [], [] + for ob_id in self.rewards: + probs.extend(self.saved_log_probs[ob_id]) + rewards.extend(self.rewards[ob_id]) + + # use the minimum observer reward to calculate the running reward + min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards]) + self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward + + # clear saved probs and rewards + for ob_id in self.rewards: + self.rewards[ob_id] = [] + self.saved_log_probs[ob_id] = [] + + policy_loss, returns = [], [] + for r in rewards[::-1]: + R = r + GAMMA * R + returns.insert(0, R) + returns = torch.tensor(returns) + returns = (returns - returns.mean()) / (returns.std() + self.eps) + for log_prob, R in zip(probs, returns): + policy_loss.append(-log_prob * R) + self.optimizer.zero_grad() + policy_loss = torch.cat(policy_loss).sum() + policy_loss.backward() + self.optimizer.step() + return min_reward + + +def run_agent(agent, n_steps): + for i_episode in count(1): + agent.run_episode(n_steps=n_steps) + last_reward = agent.finish_episode() + + if agent.running_reward > agent.reward_threshold: + print(f"Solved! Running reward is now {agent.running_reward}!") + break + + +class ReinforcementLearningRpcTest(RpcAgentTestFixture): + @dist_init(setup_rpc=False) + def test_rl_rpc(self): + if self.rank == 0: + # Rank 0 is the agent. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + agent = Agent(self.world_size) + run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1))) + + # Ensure training was run. We don't really care about whether the task was learned, + # since the purpose of the test is to check the API calls. + self.assertGreater(agent.running_reward, 0.0) + else: + # Other ranks are observers that passively wait for instructions from the agent. 
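+            # Observers do no work of their own; they block in rpc.shutdown()
+            # below until the agent finishes training and tears down RPC.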
+            rpc.init_rpc(
+                name=worker_name(self.rank),
+                backend=self.rpc_backend,
+                rank=self.rank,
+                world_size=self.world_size,
+                rpc_backend_options=self.rpc_backend_options,
+            )
+        rpc.shutdown()
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ad932af9e08b00fd5b75fada9e68cb1919019bb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py
@@ -0,0 +1,114 @@
+from typing import Dict, Tuple
+
+import torch
+import torch.distributed.autograd as dist_autograd
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch.distributed.rpc import rpc_async
+from torch.testing import FileCheck
+from torch.testing._internal.dist_utils import dist_init, worker_name
+from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
+    RpcAgentTestFixture,
+)
+
+
+@torch.jit.script
+def local_add(t1, t2):
+    return torch.add(t1, t2)
+
+
+@torch.jit.script
+def remote_add(t1, t2, dst: str):  # noqa: E999
+    return rpc_async(dst, local_add, (t1, t2)).wait()
+
+
+@torch.jit.script
+def fork_add(t1, t2, dst: str):
+    fut = torch.jit._fork(remote_add, t1, t2, dst)
+    return torch.jit._wait(fut)
+
+
+class JitDistAutogradTest(RpcAgentTestFixture):
+    @dist_init
+    def test_get_gradients(self):
+        dst_rank = self.rank
+
+        @torch.jit.script
+        def dist_get_gradients(context_id: int) -> Dict[Tensor, Tensor]:
+            return dist_autograd.get_gradients(context_id)
+
+        FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
+        with dist_autograd.context() as context_id:
+            t1 = torch.rand((3, 3), requires_grad=True)
+            t2 = torch.rand((3, 3), requires_grad=True)
+            t3 = torch.add(t1, t2)
+
+            dist_autograd.backward(context_id, [t3.sum()])
+            grads = dist_get_gradients(context_id)
+
+            self.assertEqual(2, len(grads))
+            self.assertIn(t1, grads)
+            self.assertIn(t2, grads)
+            self.assertEqual(torch.ones(3, 3), grads[t1])
+            self.assertEqual(torch.ones(3, 3), grads[t2])
+
+    @dist_init
+    def test_dist_backward(self):
+        if self.rank != 0:
+            return
+
+        @torch.jit.script
+        def dist_backward_script(context_id: int, loss: torch.Tensor):
+            dist_autograd.backward(context_id, [loss])
+
+        FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
+        with dist_autograd.context() as context_id:
+            t1 = torch.rand(3, 3, requires_grad=True)
+            t2 = torch.rand(3, 3, requires_grad=True)
+            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+            loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
+            dist_backward_script(context_id, loss)
+
+    @dist_init
+    def test_jit_fork_within_context(self):
+        with dist_autograd.context() as context_id:
+            t1 = torch.rand((3, 3), requires_grad=True)
+            t2 = torch.rand((3, 3), requires_grad=True)
+            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+            res = fork_add(t1, t2, dst_worker_name)
+            loss = res.sum()
+            dist_autograd.backward(context_id, [loss])
+
+            grads = dist_autograd.get_gradients(context_id)
+            self.assertEqual(2, len(grads))
+            self.assertIn(t1, grads)
+            self.assertIn(t2, grads)
+
+    @dist_init
+    def test_restore_context_after_switch_to_jit_thread(self):
+        if self.rank != 0:
+            return
+
+        @torch.jit.script
+        def forward_script(
+            context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
+        ) -> Tuple[Tensor, Tensor]:
+            res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
+            res1 = res1_fut.wait()  # After this, the script runs in a new JIT thread.
+            loss1 = res1.sum()
+
+            # SendRpcBackward is not attached, since DistAutogradContext is lost here.
+            res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
+            res2 = res2_fut.wait()
+            loss2 = res2.sum()
+
+            return loss1, loss2
+
+        with dist_autograd.context() as context_id:
+            t1 = torch.ones((2, 3), requires_grad=True)
+            t2 = torch.ones((2, 3), requires_grad=True)
+            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+            loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
+            dist_autograd.backward(context_id, [loss0, loss1])
+            grad0, grad1 = dist_autograd.get_gradients(context_id)
+            self.assertEqual(grad0, grad1)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..267adb5a09ab11a1c843639ccea733b6397257b5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py
@@ -0,0 +1,1383 @@
+import time
+import io
+from typing import Dict, List, Tuple, Any
+
+import torch
+import torch.distributed as dist
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch.autograd.profiler import record_function
+from torch.distributed.rpc import RRef
+from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key
+from torch.futures import Future
+from torch.testing._internal.common_utils import TemporaryFileName
+from torch.testing._internal.dist_utils import (
+    dist_init,
+    get_function_event,
+    initialize_pg,
+    worker_name,
+)
+from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
+    RpcAgentTestFixture,
+)
+
+from torch.autograd.profiler_legacy import profile as _profile
+
+def rref_isinstance(rref, cls_to_check):
+    return isinstance(rref.local_value(), cls_to_check)
+
+def sleep(t):
+    time.sleep(t)
+
+
+def rpc_return_rref(dst):
+    return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
+
+
+@torch.jit.script
+def rref_local_value(rref: RRef[Tensor]) -> Tensor:
+    return rref.local_value()
+
+
+@torch.jit.script
+def list_create() -> List[int]:
+    global_list = [1, 2, 3]
+    return global_list
+
+
+@torch.jit.script
+def rref_list_mutate(rref: RRef[List[int]]) -> None:
+    rref.local_value().append(4)
+    rref.to_here().append(5)
+    rref.to_here(5.0).append(6)
+
+
+def return_value(value: int) -> int:
+    return value
+
+
+class RRefAPITest:
+    @dist_init
+    def test_rref_is_owner(self):
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+        rref_var = rpc_return_rref(dst_worker_name)
+
+        @torch.jit.script
+        def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
+            return rref_var.is_owner()
+
+        res = rref_tensor_is_owner(rref_var)
+        self.assertEqual(res, False)
+
+    @dist_init
+    def test_rref_local_value(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+        rref = rpc_return_rref(dst_worker_name)
+
+        with self.assertRaisesRegex(
+            RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
+        ):
+            rref_local_value(rref)
+
+        ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
+        self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
+
+    @dist_init
+    def test_local_rref_local_value(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name(self.rank)
+        rref = rpc.remote(dst_worker_name, return_value, (5,), {})
+
+        ret = rref_local_value(rref)
+        self.assertEqual(ret, 5)
+
+    def _create_rref(self):
+        owner_rank = (self.rank + 2) % self.world_size
+        return rpc.remote(
+            worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1)
+        )
+
+    @dist_init
+    def test_user_rrefs_confirmed(self):
+        dst_rank = (self.rank + 1) % self.world_size
+        rref = self._create_rref()
+        ret = rpc.rpc_sync(
+            worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
+        )
+        self.assertEqual(ret, True)
+
+    @dist_init
+    def test_user_rrefs_confirmed_remote(self):
+        dst_rank = (self.rank + 1) % self.world_size
+        rref = self._create_rref()
+        ret_rref = rpc.remote(
+            worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
+        )
+        self.assertEqual(ret_rref.to_here(), True)
+
+    @dist_init
+    def test_rref_list_mutate(self):
+        dst = worker_name((self.rank + 1) % self.world_size)
+        list_rref = rpc.remote(dst, list_create)
+
+        rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,))
+        self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6])
+
+
+@torch.jit.script
+def no_arg():
+    return 0
+
+
+@torch.jit.script
+def one_arg(value):
+    return value + 1
+
+@torch.jit.script
+def script_add_ones(x):
+    return torch.add(x, torch.ones(1))
+
+@torch.jit.script
+def script_add_ones_with_record_function(x, block: str):
+    with record_function(block):
+        return torch.add(x, torch.ones(1))
+
+
+@torch.jit.script
+def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor:
+    t: Tensor = torch.ones(1)
+    with record_function(block) as rf:
+        fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
+        # Extra operator call to avoid de-duplication of the next async call
+        # see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279
+        zero = torch.zeros_like(t)
+        fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
+        res = fut1.wait() + fut2.wait() + zero
+    return res
+
+
+
+@torch.jit.script
+def script_fork_wait_udf(tensor):
+    fut = torch.jit._fork(script_add_ones, tensor)
+    x = torch.jit._wait(fut)
+    return x
+
+
+@torch.jit.script
+def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
+    return rref_var.to_here()
+
+
+@torch.jit.script
+def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]:
+    return rref_var
+
+
+@torch.jit.script
+def script_raise_func(value):
+    if value.numel() == 2:
+        raise ValueError("Expected error")
+    return value + 1
+
+
+@torch.jit.script
+def script_fork_wait_throw(invalue):
+    fut = torch.jit._fork(script_raise_func, invalue)
+    value = torch.jit._wait(fut)
+    return value
+
+
+@torch.jit.script
+def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor:
+    # Call rpc_async from within ScriptFunction and ensure that we can attach
+    # profiling callbacks. Note that `record` here is an instance of the
+    # profiler's _RecordFunction custom class.
+    fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),))
+    torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
+    ret = fut.wait()
+    return ret
+
+@torch.jit.script
+def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
+    fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
+    return fut.wait()
+
+
+@torch.jit.script
+def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor:
+    # Call fork from within ScriptFunction and ensure that we can attach profiling
+    # callbacks to the resulting future. Note that `record` here is an instance
+    # of the profiler's _RecordFunction custom class.
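+    # _call_end_callbacks_on_jit_fut below closes the profiling record only
+    # when the forked future completes, so the fork's runtime is captured.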
+ fut = torch.jit._fork(one_arg, torch.tensor(1)) + torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut) + ret = fut.wait() + return ret + + +class MyScriptModuleWithRRefs(torch.jit.ScriptModule): + def __init__(self, dst_worker): + super().__init__() + self.rrefs = [] + for _ in range(4): + self.rrefs.append(rpc_return_rref(dst_worker)) + + @torch.jit.script_method + def forward(self) -> Tensor: + res_tensor = torch.ones(2, 2) + for rref in self.rrefs: + res_tensor += rref.to_here() + + return res_tensor + + +@torch.jit.ignore +def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]: + return rref_var + + +@torch.jit.script +def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor: + return rref_python_annotation(rref_var).to_here() + + +class RRefTypingTest: + @dist_init + def test_rref_as_arg_and_return(self): + n = self.rank + 1 + dst_rank = n % self.world_size + local_ret = one_arg(torch.ones(2, 2)) + + # create rref on current rank + rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)) + + # pass rref to another user in rpc call + ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(ret, local_ret) + + # return rref in rpc call + rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,)) + self.assertEqual(rref1.to_here(), local_ret) + + # pass rref to another user in remote call + rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(rref2.to_here(), local_ret) + + # return rref in remote call + rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,)) + self.assertEqual(rref3.to_here().to_here(), local_ret) + + @dist_init + def test_my_script_module_with_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank)) + res = module_with_rrefs() + self.assertEqual(res, torch.ones(2, 2) * 9) + + @dist_init + def test_rref_python_annotation(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + + res = rref_script_annotation(rref_var) + self.assertEqual(res, torch.ones(2, 2) + 1) + + +class FutureTypingTest: + @dist_init + def test_future_passed_between_python_and_jit(self): + dst_rank = (self.rank + 1) % self.world_size + inputs = (torch.tensor([1, 1]), torch.tensor([2, 2])) + ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs) + expected_res = torch.tensor([10, 10]) + + @torch.jit.script + def future_wait_in_script(fut: Future[Tensor]) -> Tensor: + return fut.wait() + + self.assertEqual(future_wait_in_script(ret_fut), expected_res) + + @torch.jit.script + def future_return_to_python( + dst_rank: int, inputs: Tuple[Tensor, Tensor] + ) -> Future[Tensor]: + return rpc.rpc_async( + f"worker{dst_rank}", two_args_two_kwargs, inputs + ) + + fut_res = future_return_to_python(dst_rank, inputs) + self.assertEqual(fut_res.wait(), expected_res) + + @dist_init + def test_future_python_annotation(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + input_0 = torch.ones(2, 2) + input_1 = 1 + expected_res = torch.add(input_0, input_1) + + @torch.jit.ignore + def python_return_future() -> Future[Tensor]: + fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {}) + return fut + + @torch.jit.script + def script_use_future() -> Tensor: + fut = python_return_future() + return fut.wait() + + res = script_use_future() + 
self.assertEqual(res, expected_res) + + +@torch.jit.script +class MyScriptClass: + def __init__(self, a: int): + self.a = a + + def get_value(self) -> int: + return self.a + + +@torch.jit.interface +class MyModuleInterface(torch.nn.Module): + def forward(self) -> Tensor: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + +class MyScriptModule(torch.jit.ScriptModule): + def __init__(self, rank): + super().__init__() + self.a = torch.ones(rank) + + @torch.jit.script_method + def forward(self) -> Tensor: + return self.a + + @torch.jit.script_method + def custom_func(self) -> Tensor: + return self.a + + +def owner_create_rref_my_script_class(a): + return rpc.RRef(MyScriptClass(a)) + + +def owner_create_rref_my_script_module(a): + return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface) + + +@torch.jit.script +def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int: + return rref.to_here().get_value() + + +@torch.jit.script +def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor: + return rref.to_here().forward() + + +class LocalRRefTest: + @dist_init + def test_create_local_script_class_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. + rref_script_class = rpc.RRef(MyScriptClass(self.rank)) + ret = rref_script_class.to_here().get_value() + self.assertEqual(ret, self.rank) + + @dist_init + def test_create_local_script_module_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. + rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface) + ret = rref_script_module.to_here().forward() + self.assertEqual(ret, torch.ones(self.rank)) + + # Create a local RRef without type hint. + with self.assertRaisesRegex( + RuntimeError, + ( + "The RRef being created contains a ScriptModule, " + "must provide its ModuleInterface type hint." + ), + ): + rref_script_module = rpc.RRef(MyScriptModule(self.rank)) + + @dist_init + def test_return_local_script_class_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner(), script_rref_get_value_my_script_class, args, kwargs + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. + ret = use_rref_on_owner(rref) + self.assertEqual(ret, self.rank) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, self.rank) + + @dist_init + def test_return_local_script_module_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner_name(), + script_rref_run_forward_my_script_module, + args, + kwargs, + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. 
+ ret = use_rref_on_owner(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + +def python_function(): + return 0 + + +@torch.jit.script +def two_args_two_kwargs( + first_arg, + second_arg, + first_kwarg=torch.tensor([3, 3]), + second_kwarg=torch.tensor([4, 4]), +): + return first_arg + second_arg + first_kwarg + second_kwarg + + +@torch.jit.script +def assorted_types_args_kwargs( + tensor_arg: Tensor, # noqa: E999 + str_arg: str, + int_arg: int, + tensor_kwarg: Tensor = torch.tensor([2, 2]), + str_kwarg: str = "str_kwarg", + int_kwarg: int = 2, +): + return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg + + +@torch.jit.script +def raise_script(): + raise RuntimeError("Expected error") + + +@torch.jit.script +def script_rpc_async_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + +@torch.jit.script +def script_rpc_sync_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs) + return res + +@torch.jit.script +def script_rpc_remote_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs) + return rref_res.to_here() + +class JitRpcOpTest: + # Call functions remotely from Script. + @dist_init + def test_all_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([10, 10])) + + @dist_init + def test_some_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"first_kwarg": torch.tensor([2, 2])} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([9, 9])) + + @dist_init + def test_no_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([8, 8])) + + @dist_init + def test_args_and_kwargs_contain_different_types(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_with_assorted_types( + dst_worker_name: str, + ): + args = (torch.tensor([1, 1]), "str_arg", 1) + # Must annotate the value type as `Any`, because JIT type inference + # does not support multiple types when defining a Dict. 
+            # The error JIT gives is,
+            # "Dict values must contain only a single type, "
+            # "expected: Tensor but found str instead."
+            kwargs: Dict[str, Any] = {
+                "tensor_kwarg": torch.tensor([3, 3]),
+                "str_kwarg": "_str_kwarg",
+                "int_kwarg": 3,
+            }
+            fut = rpc.rpc_async(
+                dst_worker_name, assorted_types_args_kwargs, args, kwargs
+            )
+            ret = fut.wait()
+            return ret
+
+        ret = script_rpc_async_call_with_assorted_types(
+            dst_worker_name
+        )
+        self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4))
+
+    @dist_init
+    def test_kwargs_not_passed(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        @torch.jit.script
+        def script_rpc_async_call_without_kwargs_passed(
+            dst_worker_name: str,
+        ):
+            args = ()
+            fut = rpc.rpc_async(dst_worker_name, no_arg, args)
+            ret = fut.wait()
+            return ret
+
+        ret = script_rpc_async_call_without_kwargs_passed(
+            dst_worker_name
+        )
+        self.assertEqual(ret, 0)
+
+    @dist_init
+    def test_args_kwargs_are_neither_passed(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        @torch.jit.script
+        def script_rpc_async_call_without_args_kwargs_passed(
+            dst_worker_name: str,
+        ):
+            fut = rpc.rpc_async(dst_worker_name, no_arg)
+            ret = fut.wait()
+            return ret
+
+        ret = script_rpc_async_call_without_args_kwargs_passed(
+            dst_worker_name
+        )
+        self.assertEqual(ret, 0)
+
+    @dist_init
+    def test_less_than_needed_args_are_specified(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        # Note: positional-arg matching happens during scripting.
+        with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"):
+
+            @torch.jit.script
+            def script_rpc_async_call_with_less_args(
+                dst_worker_name: str,  # noqa: E999
+            ):
+                args = (torch.tensor([1, 1]),)
+                kwargs = {}
+                fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
+                ret = fut.wait()
+                return ret
+
+    @dist_init
+    def test_more_than_needed_args_are_specified(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        # Note: positional-arg matching happens during scripting.
+        with self.assertRaisesRegex(
+            RuntimeError,
+            "Expected at most 4 arguments but found 5 positional arguments",
+        ):
+
+            @torch.jit.script
+            def script_rpc_async_call_with_more_args(
+                dst_worker_name: str,
+            ):
+                args = (
+                    torch.tensor([1, 1]),
+                    torch.tensor([2, 2]),
+                    torch.tensor([3, 3]),
+                    torch.tensor([4, 4]),
+                    torch.tensor([5, 5]),
+                )
+                kwargs = {}
+                fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
+                ret = fut.wait()
+                return ret
+
+    @dist_init
+    def test_unexpected_kwarg_is_specified(self):
+        if self.rank != 0:
+            return
+
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        # Note: kwargs matching happens during execution.
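+        # (unlike positional-arg matching above, so the error surfaces when the
+        # scripted function is called, not when it is compiled)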
+ @torch.jit.script + def script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name: str, # noqa: E999 + ): + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"third_kwarg": torch.tensor([1, 1])} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "Unknown keyword argument 'third_kwarg'" + ): + ret = script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_call_python_function_remotely_from_script_not_supported(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function" + ): + ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name) + self.assertEqual(ret, 0) + + @dist_init + def test_call_script_function_that_raises_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, TorchScript always translates(emits) Python `raise` statement, + # as the exception message string, "Exception", + # no matter what exception type and exception message are in the statement, + @torch.jit.script + def rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex(RuntimeError, "Expected error"): + ret = rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_call_script_function_that_not_exists_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def nonexisting_script(): + return 0 + + @torch.jit.script + def rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function nonexisting_script" + ): + ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + +@torch.jit.ignore +def my_script_module_init(rank: int) -> MyModuleInterface: + return MyScriptModule(rank) + + +@torch.jit.script +def construct_my_script_module(rank: int) -> MyModuleInterface: + return my_script_module_init(rank) + + +@torch.jit.script +def run_ref_script_module( + ref_script_module: RRef[MyModuleInterface], t: Tensor +) -> Tensor: + module = ref_script_module.to_here() + return module.forward() + t + + +@torch.jit.script +def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool: + return rref.confirmed_by_owner() + + +@torch.jit.script +def save_rref(rref_var: RRef[Tensor], fname: str) -> None: + torch.save(rref_var, fname) + + +@torch.jit.script +def script_add(x: Tensor, y: Tensor) -> Tensor: + return x + y + + +@rpc.functions.async_execution +@torch.jit.script +def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]: + return 
rpc.rpc_async(to, script_add, (x, y))
+
+
+@rpc.functions.async_execution
+@torch.jit.script
+def async_wrong_type() -> Tensor:
+    return torch.zeros(2)
+
+
+def load_script_module_with_pickled_rref(pickled_script_module):
+    f = io.BytesIO(pickled_script_module)
+    m = torch.jit.load(f)
+    return m()
+
+
+class JitRpcTest(
+    RRefAPITest,
+    RRefTypingTest,
+    LocalRRefTest,
+    JitRpcOpTest,
+    FutureTypingTest,
+    RpcAgentTestFixture,
+):
+    @dist_init
+    def test_torchscript_function(self):
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+        local_ret = one_arg(torch.ones(2, 2))
+        ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
+        self.assertEqual(ret, local_ret)
+        rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
+        self.assertEqual(rref.to_here(), local_ret)
+        # create rref to itself
+        local_rref = rpc.remote(
+            worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
+        )
+        self.assertEqual(local_rref.to_here(), local_ret)
+
+    @dist_init
+    def test_torchscript_function_exception(self):
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+        with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
+            ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))
+
+        with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
+            rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20))
+
+    @dist_init
+    def test_torchscript_functions_not_supported(self):
+        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
+
+        my_local_script_module = MyScriptModule(self.rank)
+
+        # It is not thread safe to instantiate MyScriptModule in multiple threads,
+        # so wait for the local MyScriptModule instantiation to finish;
+        # otherwise it could be instantiated in parallel with the
+        # server thread below.
+        initialize_pg(self.file_init_method, self.rank, self.world_size)
+        dist.barrier()
+
+        # rpc_sync still accepts a script class and runs it in
+        # the same code path as a Python call.
+        ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))
+
+        # rpc_sync does not accept script module methods.
+        # Python 3.5 and Python 3.6 throw different error messages; the only
+        # common word that can be grepped for is "pickle".
+        with self.assertRaisesRegex(TypeError, "pickle"):
+            ret = rpc.rpc_async(
+                dst_worker_name, my_local_script_module.forward, args=()
+            )
+
+    @dist_init
+    def test_remote_script_module(self):
+        # TODO: needs more investigation.
+        # There is an RRef leak when shutting down; we suspect it is because
+        # the RRef passed as an arg crosses the pybind boundary and is not
+        # garbage collected by Python when calling shutdown().
+        import torch.distributed.rpc.api as api
+
+        api._ignore_rref_leak = True
+
+        local_ret = torch.ones(self.rank) + torch.ones(self.rank)
+
+        n = self.rank + 1
+        dst_rank = n % self.world_size
+        remote_ref = rpc.remote(
+            worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
+        )
+
+        # pass rref arg to owner
+        ret = rpc.rpc_sync(
+            worker_name(dst_rank),
+            run_ref_script_module,
+            args=(remote_ref, torch.ones(self.rank)),
+        )
+        self.assertEqual(ret, local_ret)
+
+        # pass rref arg to self/user
+        with self.assertRaisesRegex(
+            RuntimeError,
+            "is an RRef to a ScriptModule. 
It can't be sent through RPC from owner,", + ): + ret = rpc.rpc_sync( + worker_name(self.rank), + run_ref_script_module, + args=(remote_ref, torch.ones(self.rank)), + ) + + @dist_init + def test_create_script_module_on_remote(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + # Construct on remote end with rpc_sync + created_script_module = rpc.rpc_sync( + dst_name, MyScriptModule, args=(self.rank,) + ) + # Forward should output a ones tensor of self.rank. + self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = created_script_module() + self.assertEqual(torch.ones(self.rank), rank_ones_tensor) + + # Construct ScriptModule with rpc.remote. + remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,)) + # Verify it is an instance of ScriptModule on remote end. + remote_end_is_script = rpc.rpc_sync( + remote_script_module.owner(), + rref_isinstance, + args=(remote_script_module, torch.jit.ScriptModule), + ) + self.assertTrue(remote_end_is_script) + # Run forward pass remotely. + remote_forward_output = remote_script_module.rpc_sync().forward() + self.assertEqual(remote_forward_output, torch.ones(self.rank)) + # Run function defined on ScriptModule remotely. + remote_func_output = remote_script_module.rpc_sync().custom_func() + self.assertEqual(remote_func_output, torch.ones(self.rank)) + # Ensure we can transfer ScriptModule RRef to this rank and run + # forward pass. + local_script_module = remote_script_module.to_here() + self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = local_script_module() + self.assertEqual(rank_ones_tensor, torch.ones(self.rank)) + local_script_func_output = local_script_module.custom_func() + self.assertEqual(local_script_func_output, torch.ones(self.rank)) + + @dist_init + def test_load_script_module_with_pickled_rref(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + m1 = MyScriptModuleWithRRefs(dst_name) + m2 = MyScriptModuleWithRRefs(dst_name) + + f = io.BytesIO() + + rpc._enable_jit_rref_pickle() + torch.jit.save(m1, f) + rpc._disable_jit_rref_pickle() + + out1 = rpc.rpc_sync( + dst_name, + load_script_module_with_pickled_rref, + args=(f.getvalue(),) + ) + out2 = m2() + self.assertEqual(out1, out2) + + @dist_init + def test_rref_jit_pickle_not_supported(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, "RRef jit pickling is only allowed inside RPC calls" + ): + save_rref(rref_var, fname) + + @dist_init + def test_remote_script_throw(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_raise_func, + args=(torch.ones(2),), + ) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_remote_script_udf(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2) * 2) + + @dist_init + def test_async_script_udf(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(future.wait(), torch.ones(2) * 2) + + @dist_init + def test_callback_simple(self): + def callback(fut): + return fut.wait() + 1 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, 
+ args=(torch.ones(2),), + ).then(callback) + self.assertEqual(future.wait(), torch.ones(2) * 2 + 1) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_add_done_callback(self): + callback_called = None + + def callback(fut): + nonlocal callback_called + callback_called = fut.wait() * 2 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + + future.add_done_callback(callback) + future_then = future.then(lambda _: True) + + self.assertEqual(future.wait(), torch.ones(2) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. + # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + future_then.wait() + self.assertEqual(callback_called, torch.ones(2) * 4) + + @dist_init + def test_async_script_throw(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + future.wait() + + @dist_init + def test_callback_with_exception(self): + def callback(fut): + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + fut.wait() + raise RuntimeError("Another expected error") + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ).then(callback) + + with self.assertRaisesRegex(RuntimeError, "Another expected error"): + future.wait() + + @dist_init + def test_call_rpc_with_profiling(self): + # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit + # future from within a script function that calls rpc_async + if self.rank == 0: + with _profile() as prof: + prof_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(one_arg), + "worker0", + "worker1", + ) + with torch.autograd.profiler.record_function(prof_key) as rf: + ret = call_rpc_with_profiling(rf.record, "worker1") + # TODO: Can't get a reliable time for this profiling event since + # it's hard to estimate the execution time on the remote end for non-UDFs. + # This can be resolved by https://github.com/pytorch/pytorch/issues/36272. + # After that, this test should be modified to validate the function time. + events = prof.function_events + function_event = get_function_event(events, prof_key) + self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name) + + @dist_init + def test_rpc_async_jit_profiled(self): + # Tests that rpc_async calls made from within a TorchScript function are + # profiled. 
+ if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + with _profile() as prof: + script_rpc_async_call( + dst_worker_name, args, kwargs + ) + + # Ensure rpc_async call is profiled + function_events = prof.function_events + qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs) + rpc_async_jit_event = [ + event + for event in function_events + if qual_name in event.name and event.node_id == self.rank + ] + self.assertEqual(len(rpc_async_jit_event), 1) + rpc_async_jit_event = rpc_async_jit_event[0] + profiled_name = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + qual_name, + worker_name(self.rank), + dst_worker_name, + ) + self.assertEqual(profiled_name, rpc_async_jit_event.name) + remote_events = [event for event in function_events if event.is_remote] + # All remote events should have taken place on dst_rank + remote_event_node_ids = { + remote_event.node_id for remote_event in remote_events + } + self.assertEqual(remote_event_node_ids, {dst_rank}) + # script_rpc_async_call invokes add operator + # so we should see this as a remote event. + remote_add = next( + remote_event + for remote_event in remote_events + if "aten::add" in remote_event.name + ) + remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add" + self.assertEqual(remote_add.name, remote_add_profiled_name) + + @dist_init + def test_record_function_on_caller_rpc_async(self): + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + # Runs 2 rpc_async calls within JIT under record_function. + record_function_on_caller_rpc_async(dst_worker_name, block_scope) + + # Ensure record_function event is profiled. + function_events = prof.function_events + record_function_scope_event = [ + event for event in function_events if event.name == block_scope + ] + self.assertEqual(1, len(record_function_scope_event)) + record_function_scope_event = record_function_scope_event[0] + # Ensure RPC future is profiled. + expected_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + torch._jit_internal._qualified_name(script_add_ones), + worker_name(self.rank), + dst_worker_name, + ) + jit_rpc_events = [ + event for event in function_events if event.name == expected_key + ] + self.assertEqual(2, len(jit_rpc_events)) + # Validate that the record_function scope time is greater than both + # of the individual RPC async call times. The reason it is not necessarily + # greater than the sum is because the two can execute in parallel. + for jit_rpc_event in jit_rpc_events: + self.assertTrue( + record_function_scope_event.cpu_time_total + > jit_rpc_event.cpu_time_total + ) + + @dist_init + def test_rpc_torchscript_record_function(self): + # tests that torchscript functions can be profiled using with + # record_function(...) over RPC. + REMOTE_OP_STR = "#remote_op: " + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + call_rpc_torchscript_with_record_function(dst_worker_name, block_scope) + + # Need to call below to populate CPU children. 
+            prof.key_averages()
+            function_events = prof.function_events
+            expected_key = (
+                _build_rpc_profiling_key(
+                    RPCExecMode.ASYNC_JIT,
+                    torch._jit_internal._qualified_name(
+                        script_add_ones_with_record_function
+                    ),
+                    worker_name(self.rank),
+                    dst_worker_name,
+                )
+                + REMOTE_OP_STR
+                + block_scope
+            )
+            remote_record_function_event = next(
+                evt for evt in function_events if evt.name == expected_key
+            )
+            self.assertTrue(block_scope in remote_record_function_event.name)
+            remote_children = remote_record_function_event.cpu_children
+            # any() is required here; a bare generator passed to assertTrue
+            # would always be truthy and the check would never fail.
+            self.assertTrue(
+                any("aten::add" in child.name for child in remote_children)
+            )
+
+    def test_record_function_jit_end_callbacks_with_fork(self):
+        # Ensures that we can call rf._call_end_callbacks_on_future on a jit
+        # future in python eager mode with torch.jit.fork
+        sleep_interval = 1
+        with _profile() as prof:
+            with torch.autograd.profiler.record_function("foo") as rf:
+                fut = torch.jit._fork(sleep, sleep_interval)
+                rf._call_end_callbacks_on_future(fut)
+            fut.wait()
+
+        function_events = prof.function_events
+        sleep_event = get_function_event(function_events, "foo")
+        self.assertEqual(sleep_event.name, "foo")
+        # Validate that callbacks were fired at the right time by checking the
+        # profiling event cpu time
+        self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)
+
+    def test_call_fork_in_jit_with_profiling(self):
+        # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
+        # future from within a script function with torch.jit.fork
+        with _profile() as prof:
+            with torch.autograd.profiler.record_function("foo") as rf:
+                ret = call_fork_with_profiling(rf.record)
+
+        events = prof.function_events
+        function_event = get_function_event(events, "foo")
+        self.assertEqual(function_event.name, "foo")
+
+    @dist_init
+    def test_async_function_simple(self):
+        dst1 = worker_name((self.rank + 1) % self.world_size)
+        dst2 = worker_name((self.rank + 2) % self.world_size)
+
+        ret = rpc.rpc_sync(
+            dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
+        )
+        self.assertEqual(ret, torch.ones(2, 2) + 1)
+
+    @dist_init
+    def test_async_function_wrong_return_type(self):
+        with self.assertRaisesRegex(
+            RuntimeError,
+            "Async functions must return an IValue of Future type, but got Tensor",
+        ):
+            rpc.rpc_sync(
+                worker_name((self.rank + 1) % self.world_size), async_wrong_type
+            )
+
+    @dist_init
+    def test_async_function_wrong_decorator_order(self):
+        # @torch.jit.script complains about undefined value rpc. The error is shown
+        # below. The reason for not checking the error string is to avoid making
+        # JIT error handling code depend on RPC tests, as we don't have any
+        # restrictions on the error message here. 
+ # + # RuntimeError: + # undefined value rpc: + # def async_wrong_decorator_order(to, x, y): + # # type: (str, Tensor, Tensor) -> Future[Tensor] + # return rpc.rpc_async(to, script_add, (x, y)) + # ~~~ <--- HERE + with self.assertRaises(RuntimeError): + + @torch.jit.script + @rpc.functions.async_execution + def async_wrong_decorator_order( + to: str, x: Tensor, y: Tensor + ) -> Future[Tensor]: + return rpc.rpc_async(to, script_add, (x, y)) + + @dist_init + def test_async_function_remote(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + rref = rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2)) + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_async_function_remote_multi(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + rrefs = [] + for i in range(num): + rrefs.append( + rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i) + ) + ) + + for i in range(num): + self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), async_wrong_type + ) + + with self.assertRaisesRegex( + RuntimeError, + "Async functions must return an IValue of Future type, but got Tensor", + ): + rref.to_here() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py new file mode 100644 index 0000000000000000000000000000000000000000..2e4eea3a36517668e7a330d062ca7b7012b05ae0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py @@ -0,0 +1,216 @@ +from typing import Dict, Tuple + +import torch +import torch.distributed.rpc as rpc +from torch import Tensor +from torch.distributed.rpc import RRef +from torch.testing._internal.dist_utils import ( + dist_init, + worker_name, + wait_until_pending_futures_and_users_flushed +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +@torch.jit.script +def two_args_two_kwargs( + first_arg, + second_arg, + first_kwarg=torch.tensor([3, 3]), + second_kwarg=torch.tensor([4, 4]), +): + return first_arg + second_arg + first_kwarg + second_kwarg + + +@torch.jit.script +def script_rpc_async_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + +@torch.jit.script +def rpc_async_call_with_timeout( + dst_worker_name: str, + args: Tuple[Tensor, Tensor], + kwargs: Dict[str, Tensor], + timeout: float, +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout) + ret = fut.wait() + return ret + + +@torch.jit.script +def rpc_async_call_with_timeout_future_ret( + dst_worker_name: str, + args: Tuple[Tensor, Tensor], + kwargs: Dict[str, Tensor], + timeout: float, +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout) + return fut + + +@torch.jit.script +def rpc_async_call_future_ret( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, 
kwargs) + return fut + +@torch.jit.script +def rref_to_here(rref_var: RRef[Tensor]) -> Tensor: + return rref_var.to_here() + +@torch.jit.script +def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor: + return rref_var.to_here(timeout) + +@torch.jit.script +def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor: + fut = rpc.rpc_async(dst_worker_name, rref_to_here, args) + ret = fut.wait() + return ret + + +class JitFaultyAgentRpcTest(RpcAgentTestFixture): + """ + Run tests for rpc_async in JIT under the faulty agent test fixture to test + arbitrary timeouts. + """ + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_timeout_in_torchscript_function(self): + # Call rpc_async + fut.wait() in torchscript function and ensure that + # timeout is raised. + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + expected_error = self.get_timeout_error_regex() + # Ensure that we get a timeout if we override the default timeout and + # the RPC takes longer to execute. + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5) + + # Ensure that we timeout if we don't specify a timeout but the default + # is less than the RPC takes to execute. + rpc._set_rpc_timeout(0.001) + with self.assertRaisesRegex(RuntimeError, expected_error): + script_rpc_async_call( + dst_worker_name, args, kwargs + ) + + # Ensure that we run to completion if zero timeout is specified. + ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0) + self.assertEqual(ret, torch.tensor([8, 8])) + # reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_timeout_in_python(self): + # Ensures timeouts are raised if we call rpc_async from within a + # torchscript function, but wait on the future in python. + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + expected_error = self.get_timeout_error_regex() + + fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure timeout if we don't specify but the default is less than the + # RPC takes to execute. + rpc._set_rpc_timeout(0.001) + fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if zero timeout is specified + fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0) + result = fut.wait() + self.assertEqual(result, torch.tensor([8, 8])) + # reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_remote_timeout_to_here_in_jit(self): + # Test that calling to_here() in JIT will raise timeout error if + # rpc.remote failed. 
+ if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Call to_here() within a ScriptFunction and ensure it raises + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref_to_here(rref) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout_in_jit(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref_to_here_with_timeout(rref, 0.01) + + rref_to_here_with_timeout(rref, 100) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_rref_timeout_pickle_in_jit(self): + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Call RPC with RRef arg in JIT, which will go through JIT pickling and + # ensure error is raised. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc_async_with_rref_arg(dst_worker, (rref, )) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_rref_timeout_pickle_script_func(self): + # Similar to above test, but calls python rpc with script function. + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. 
+ wait_until_pending_futures_and_users_flushed() + # Call RPC with script function that takes RRef, ensure timeout during pickling + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, )) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4afd4147f10f43a6a452a2917519c894c97c4c83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py @@ -0,0 +1,2 @@ +import torch.testing._internal.opinfo.core +import torch.testing._internal.opinfo.definitions diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..937cee31d1bb033c4db1f15f544f5b33598977e7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa6317ba8fd7116a3abfe42cd8e473f84e68e639 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0762b6984925855bee86da21d4d49634257494ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b567e4599b0dc88c56502ff54a73e2db2dbe6f72 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py new file mode 100644 index 0000000000000000000000000000000000000000..f100865f9d5d58cec7bfb65e2c54264cf6657f1f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py @@ -0,0 +1,2855 @@ +import collections +import collections.abc +import math +import operator +import unittest +from dataclasses import asdict, dataclass +from enum import Enum +from functools import partial +from itertools import product +from typing import Any, Callable, Iterable, List, Optional, Tuple + +from torchgen.utils import dataclass_repr + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + skipCPUIfNoFFT, + tol, + 
toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, +) +from torch.testing._internal.common_utils import ( + is_iterable_of_tensors, + noncontiguous_like, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, + TrackedInputIter, +) +from torch.testing._internal.opinfo import utils + +# Reasonable testing sizes for dimensions +L = 20 +M = 10 +S = 5 +XS = 3 + +# Unique value to distinguish default from anything else +_NOTHING = object() + + +# Extension of getattr to support qualified names +# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm +def _getattr_qual(obj, name, default=_NOTHING): + try: + for path in name.split("."): + obj = getattr(obj, path) + return obj + except AttributeError: + if default is not _NOTHING: + return default + else: + raise + + +class DecorateInfo: + """Describes which test, or type of tests, should be wrapped in the given + decorators when testing an operator. Any test that matches all provided + arguments will be decorated. The decorators will only be applied if the + active_if argument is True.""" + + __slots__ = [ + "decorators", + "cls_name", + "test_name", + "device_type", + "dtypes", + "active_if", + ] + + def __init__( + self, + decorators, + cls_name=None, + test_name=None, + *, + device_type=None, + dtypes=None, + active_if=True, + ): + self.decorators = ( + list(decorators) + if isinstance(decorators, collections.abc.Sequence) + else [decorators] + ) + self.cls_name = cls_name + self.test_name = test_name + self.device_type = device_type + self.dtypes = dtypes + self.active_if = active_if + + # Validate dtypes + if self.dtypes is not None: + for dtype in self.dtypes: + assert isinstance(dtype, torch.dtype) + + def is_active(self, cls_name, test_name, device_type, dtype, param_kwargs): + return ( + self.active_if + and (self.cls_name is None or self.cls_name == cls_name) + and (self.test_name is None or self.test_name == test_name) + and (self.device_type is None or self.device_type == device_type) + and (self.dtypes is None or dtype in self.dtypes) + # Support callables over kwargs to determine if the decorator is active. + and ( + self.active_if(param_kwargs) + if isinstance(self.active_if, Callable) + else self.active_if + ) + ) + + +# FIXME +# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying +# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor +# or TensorList, however. +class SampleInput: + """Represents sample inputs to a function.""" + + __slots__ = [ + "input", + "args", + "kwargs", + "output_process_fn_grad", + "broadcasts_input", + "name", + ] + + def __init__( + self, + input, + *var_args, + args=None, + kwargs=None, + output_process_fn_grad=None, + broadcasts_input=None, + name=None, + **var_kwargs, + ): + # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]). + # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...). 
+ self.input = input + + # Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as + # SampleInput(input, *args, **kwargs) but not to mix the two forms + if args is not None or kwargs is not None: + assert ( + not var_args and not var_kwargs + ), """ +A SampleInput can be constructed "naturally" with *args and **kwargs or by +explicitly setting the "args" and "kwargs" parameters, but the two +methods of construction cannot be mixed!""" + elif len(var_args) or len(var_kwargs): + assert ( + output_process_fn_grad is None + and broadcasts_input is None + and name is None + ), """ +A SampleInput constructed "naturally" with *args and **kwargs +cannot specify additional metadata in keyword arguments""" + + self.args = args if args is not None else var_args + assert isinstance(self.args, tuple) + self.kwargs = kwargs if kwargs is not None else var_kwargs + assert isinstance(self.kwargs, dict) + + self.output_process_fn_grad = ( + output_process_fn_grad + if output_process_fn_grad is not None + else lambda x: x + ) + self.name = name if name is not None else "" + + # Specifies if `self.input` is broadcasted or not, + # given that the operator supports broadcasting. + # This field is used to verify the behavior for inplace variant. + # + # If a SampleInput is marked with `broadcasts_input=True`, + # it is verified that we get a `RuntimeError` with this sample, + # and inplace variant. Also inplace grad{grad} tests are skipped, + # for such inputs (as they will error out otherwise). + self.broadcasts_input = ( + broadcasts_input if broadcasts_input is not None else False + ) + + def with_metadata( + self, *, output_process_fn_grad=None, broadcasts_input=None, name=None + ): + if output_process_fn_grad is not None: + self.output_process_fn_grad = output_process_fn_grad + if broadcasts_input is not None: + self.broadcasts_input = broadcasts_input + if name is not None: + self.name = name + return self + + def _repr_helper(self, formatter): + # Helper function to return the details of the SampleInput as `str` + # It consolidates all the fields of SampleInput and allows, + # formatting the fields like `input`, `args`, etc with `formatter` + # callable to customize the representation. + # Look at `summary` method for example. + arguments = [ + f"input={formatter(self.input)}", + f"args={formatter(self.args)}", + f"kwargs={formatter(self.kwargs)}", + f"broadcasts_input={self.broadcasts_input}", + f"name={repr(self.name)}", + ] + + return f'SampleInput({", ".join(a for a in arguments if a is not None)})' + + def __repr__(self): + return self._repr_helper(lambda x: x) + + def summary(self): + # Returns the SampleInput details in a more + # friendly format. + # It formats `Tensor` and `TensorList` + # in a more condensed representation. + def formatter(arg): + # Format any instance of `Tensor` (standalone, in list, or in dict) + # by Tensor[TensorShape] + # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4] + if isinstance(arg, torch.Tensor): + shape = str(tuple(arg.shape)) + dtype = str(arg.dtype) + device = str(arg.device) + contiguity_suffix = "" + # NB: sparse CSR tensors annoyingly return is_sparse=False + is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr + if not is_sparse and not arg.is_contiguous(): + contiguity_suffix = ", contiguous=False" + return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]' + elif isinstance(arg, dict): + return {k: formatter(v) for k, v in arg.items()} + elif is_iterable_of_tensors(arg): + return "TensorList[" + ", ".join(map(formatter, arg)) + "]" + elif isinstance(arg, (list, tuple)): # Handle list, tuple + return "(" + ",".join(map(formatter, arg)) + ")" + + return repr(arg) + + return self._repr_helper(formatter) + + # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput + def transform(self, f): + def tt(t): + def _tt(t): + with torch.no_grad(): + return f(t) + + if isinstance(t, torch.Tensor): + return _tt(t) + elif isinstance(t, torch.dtype): + return _tt(t) + elif isinstance(t, list): + return list(map(tt, t)) + elif isinstance(t, tuple): + return tuple(map(tt, t)) + elif isinstance(t, dict): + return {k: tt(v) for k, v in t.items()} + else: + return t + + sample_tt_input, tt_args, tt_kwargs = ( + tt(self.input), + tt(self.args), + tt(self.kwargs), + ) + + # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! + return SampleInput( + sample_tt_input, + args=tt_args, + kwargs=tt_kwargs, + output_process_fn_grad=self.output_process_fn_grad, + broadcasts_input=self.broadcasts_input, + name=self.name + "_transformed", + ) + + # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) + # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them + # Converts dtypes by remapping them using torch_to_numpy_dtype_dict + def numpy(self): + def to_numpy(t): + if isinstance(t, torch.Tensor): + if t.dtype is torch.bfloat16: + return t.detach().cpu().to(torch.float32).numpy() + if t.dtype is torch.chalf: + return t.detach().cpu().to(torch.cfloat).numpy() + return t.detach().cpu().numpy() + elif isinstance(t, torch.dtype): + return torch_to_numpy_dtype_dict[t] + + return t + + return self.transform(to_numpy) + + def noncontiguous(self): + def to_noncontiguous(t): + if isinstance(t, torch.Tensor): + return noncontiguous_like(t) + elif isinstance(t, torch.dtype): + return t + + return t + + return self.transform(to_noncontiguous) + + +NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"]) + + +class ErrorInput: + """ + A SampleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["sample_input", "error_type", "error_regex"] + + def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): + self.sample_input = sample_input + self.error_type = error_type + self.error_regex = error_regex + + +class AliasInfo: + """Class holds alias information. 
For example, torch.abs ->
+    torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
+    """
+
+    def __init__(self, alias_name):
+        self.name = alias_name
+        self.op = _getattr_qual(torch, alias_name)
+        self.method_variant = getattr(torch.Tensor, alias_name, None)
+        self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
+
+    def __call__(self, *args, **kwargs):
+        return self.op(*args, **kwargs)
+
+
+# Note [OpInfos]
+# ~~~~~~~~~~~~~~
+#
+# The majority of this note was written shortly after the PyTorch 1.9 release.
+# If you notice it's out-of-date or think it could be improved then please
+# file an issue.
+#
+# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
+# See also: "Writing Test Templates" in common_device_type.py to learn how to
+#   parametrize a test template using OpInfos.
+# See also: PyTorch's GitHub wiki on running and writing tests
+#   https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
+# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
+#
+# An OpInfo is a collection of metadata related to a PyTorch operator. This
+# metadata is used to generate tests that validate properties of the operator,
+# like if it implements the correct gradient formula.
+#
+# WHY OPINFOS?
+# ~~~~~~~~~~~~
+#
+# OpInfos are principally intended to do three things:
+#
+# 1) to allow systematic testing over all PyTorch's operators
+# 2) to simplify operator testing by autogenerating many tests
+# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
+#    against every PyTorch operator
+#
+# All these goals are still a work in progress. Not every operator has an
+# OpInfo, and some operator tests that could be automatically generated
+# still have to be written manually.
+#
+# It's helpful to understand that OpInfos are both about test simplification and
+# modularity. PyTorch is a complicated framework with many interrelated systems,
+# too many for any one person to keep track of. An OpInfo can be thought of as the
+# interface between an operator implementer and those other systems. Instead of
+# requiring the implementer of torch.foo to understand how to test its forward
+# mode AD or NNC support, that testing is typically handled automatically just
+# by defining an OpInfo.
+#
+# It's often surprising to OpInfo writers that just implementing an OpInfo
+# typically can't verify an operator is actually implemented correctly:
+#
+# "If an OpInfo doesn't validate my op works as expected, what's the point
+#     of it?"
+#
+# But the point of OpInfos is the above. OpInfos are intended to let you focus on
+# testing the operator logic you're familiar with instead of having to write tests
+# for how the operator interacts with each of PyTorch's many systems.
+#
+# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
+# validate your op works as expected, but that's only in special
+# cases. See below for details.
+#
+# WHAT'S AN OPINFO?
+# ~~~~~~~~~~~~~~~~~
+#
+# So what is an OpInfo? It's a Python class that describes an operator's properties,
+# like which dtypes it supports on the CPU and whether it has any aliases.
+# These properties can be divided into three categories:
+#
+#   1) Metadata describing the operator, like the operator's name and if it
+#        "supports" the out kwarg.
+#   2) Test directives, like "skips" that tell the test suite to skip some
+#        tests.
+#   3) A "sample inputs" function that generates valid inputs for the operator.
+#
+# OpInfo attributes are described in more detail below. 
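+#
+# As a rough, illustrative sketch only (the operator name "foo" and its sample
+# inputs function are hypothetical; `floating_types` is imported above), an
+# OpInfo entry in the operator database typically looks something like:
+#
+#   OpInfo(
+#       "foo",
+#       dtypes=floating_types(),
+#       supports_out=False,
+#       sample_inputs_func=sample_inputs_foo,
+#   )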
+#
+# THE SAMPLE INPUTS FUNCTION
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# The "sample inputs" function merits special elaboration. This function is
+# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
+# as a black box. There's no structure for the test to understand or exploit.
+# Without "sample inputs" it wouldn't even know how to call the OpInfo's
+# operator. The sample input function saves the day by providing different
+# "SampleInputs" that can be used to call the operator. A sample input
+# function should have the following signature:
+#
+#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
+#
+# And should return an iterable of SampleInputs (see the class description
+# above). Each SampleInput defines an "input", "args", "kwargs", an
+# "output_process_fn_grad" function, the "broadcasts_input" bool and a
+# "name".
+#
+# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
+# environment for efficiency and correctness. As such, remember to set the
+# "requires_grad" flag on the inputs **after** performing any transformations
+# on them.
+#
+# The "input" is the first argument to the operator, or the tensor that
+# the method or inplace variants of the operator should be called on, and
+# should be on the requested device, of the requested dtype, and its
+# requires_grad attribute should be set to the requires_grad argument.
+#
+# "args" should contain positional arguments, and "kwargs" keyword arguments.
+#
+# "output_process_fn_grad" has an interesting name. It's a function that maps
+# the operator's output (when given the input, args, and kwargs) to the
+# portion of the output to gradcheck. For example, consider an operator
+# like torch.linalg.slogdet
+# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
+# This operator returns a tuple of two tensors, but the first tensor
+# cannot be backwarded through. Its "output_process_fn_grad" filters
+# this output tuple to just the second argument, which we can call backward
+# on. Functions that produce a single tensor can ignore this argument.
+#
+# "broadcasts_input" is a bool indicating whether the SampleInput causes the operator
+# to broadcast the "input" argument. This is important for tests to understand
+# because inplace variants of operations throw a runtime error if they
+# would broadcast their input arguments, so tests that work with inplace
+# variants filter SampleInputs that broadcast their input.
+#
+# "name" is a string that's just used for debugging. It appears when printing
+# the SampleInput.
+#
+# Sample inputs are designed to be used with many tests, some
+# that are very time consuming, so they should be a small
+# set with small tensors. An elaborated set of sample inputs
+# can be specified using the "reference_inputs_func" attribute.
+# The "reference inputs" for an operation are an extended
+# set of sample inputs that can more exhaustively test an
+# operator. They are used by only a few tests that are careful
+# not to take too long to run. Adding reference inputs
+# is highly encouraged!
+#
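+# To make the signature above concrete, here is a minimal sketch of a sample
+# inputs function (the name "sample_inputs_foo" and the shapes are
+# illustrative; `make_tensor`, `partial`, and the size constant `S` are all
+# imported or defined in this file):
+#
+#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
+#       make_arg = partial(
+#           make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
+#       )
+#       # a single small tensor input
+#       yield SampleInput(make_arg((S, S)))
+#       # the same input with one positional tensor argument
+#       yield SampleInput(make_arg((S, S)), args=(make_arg((S, S)),))
+#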
+# THE (OPTIONAL) ERROR INPUTS FUNCTION
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# OpInfos may optionally specify "error inputs" through an error function. If
+# specified, test_errors in test_ops.py will call the op with these inputs
+# and validate that the desired error is thrown.
+#
+# Error inputs automate a common testing pattern where multiple inputs are
+# passed to an operation and the errors they throw are reviewed. Tests
+# written in this style should be ported to the new OpInfo pattern.
+#
+# Error inputs are specified using the ErrorInput class, which contains
+# a SampleInput (see above) and data about the expected error.
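+#
+# A minimal sketch, assuming a hypothetical op that rejects a negative
+# dimension argument (the function name and the expected error text are
+# illustrative only):
+#
+#   def error_inputs_foo(op_info, device, **kwargs):
+#       t = make_tensor((S,), device=device, dtype=torch.float32)
+#       yield ErrorInput(
+#           SampleInput(t, args=(-1,)),
+#           error_type=ValueError,
+#           error_regex="expected a positive dimension",
+#       )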
+#
+# OPINFO FILE ORGANIZATION
+# ~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# All OpInfos are currently defined in this file. Most OpInfo tests are defined
+# in test_ops.py, but some system-specific tests are defined in those
+# systems' test files, and subclass-specific tests are defined in the test
+# file that corresponds to that subclass (see below).
+# Expect a reorganization in the future.
+#
+# WHAT'S TESTED?
+# ~~~~~~~~~~~~~~
+#
+# Every OpInfo in the op_db sequence has the following properties validated in
+# test_ops.py:
+#
+#   - that its supported dtypes are specified correctly
+#   - that the operation produces the same results when called with noncontiguous inputs
+#   - that it supports the out= argument properly (if it allows out=),
+#       see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
+#   - that it works with the conjugate view bit properly
+#   - that its function, method, and inplace variants perform the same operation
+#       (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
+#       do the same thing).
+#   - that its inplace variant preserves the input's storage
+#   - that its gradient formula is implemented correctly, and that it supports
+#       gradgrad, complex grad and gradgrad, and forward mode AD properly for
+#       the op's function and inplace variants (method variants are skipped
+#       to reduce test time).
+#   - that the operation performs the same operation when traced or scripted
+#       using the jit
+#   - that the operation is autodifferentiated by the jit as expected
+#   - that the operator's aliases, if any, perform the same operation and that
+#       the jit understands the alias
+#   - that the operator throws the correct errors (if error_inputs is defined)
+#   - that the operator produces the same results as a NumPy reference (if ref is defined)
+#   - that the operator produces the same results as a NumPy reference on an extended
+#       set of "reference inputs" (if both ref and reference_inputs_func are defined)
+#       (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
+#       ref is defined, because they effectively autogenerate reference inputs)
+#   - that the operator works on different CUDA devices
+#
+# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
+# and test_fx.py. These tests validate that operators work with NNC and FX
+# as expected.
+#
+# For performance, some of the above tests may only run on the first
+# SampleInput returned by an OpInfo's sample input function.
+#
+# In addition to these tests, some subclasses (discussed in the next section)
+# define additional tests.
+#
+# Critically, as mentioned above, what's not necessarily tested is that the operator
+# works as expected. When implementing an OpInfo an engineer must still
+# typically write one or more tests validating the operator's behavior.
+# The exception to this is if reference testing is sufficient, or if
+# the operation belongs to an OpInfo subclass that has more exhaustive
+# operator testing. Elementwise unary and elementwise binary operators,
+# in particular, usually don't require additional testing beyond
+# writing an OpInfo.
+#
+#
+# OPINFO (SUB)CLASSES
+# ~~~~~~~~~~~~~~~~~~~
+#
+# In addition to the OpInfo base class there are several specialized OpInfo
+# subclasses. For example, the UnaryUfuncInfo subclass is used for
+# unary elementwise operations. These operations have a common structure
+# that test_unary_ufuncs.py exploits with additional automated testing.
+# The automated testing in test_unary_ufuncs.py is so thorough, comparing
+# the operator to a NumPy reference function on a plethora of values, that
+# just implementing an OpInfo for a unary elementwise operation is often
+# sufficient testing.
+#
+# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
+# very particular class of operations. These OpInfos aren't included in the
+# op_db sequence and have their own tests.
+#
+# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
+# when writing OpInfos.
+#
+# TESTING A NEW OPERATOR
+# ~~~~~~~~~~~~~~~~~~~~~~
+#
+# If you're adding a new operator to any of the following namespaces:
+#   - torch
+#   - torch.fft
+#   - torch.linalg
+#   - torch.special
+#   - torch.nn.functional
+# then you should typically add an OpInfo for it.
+#
+# As mentioned a couple times above, implementing an OpInfo is not
+# usually sufficient testing (unless the operator is a unary or binary elementwise
+# operator). The OpInfo will only test the properties described in the
+# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
+# implemented correctly.
+#
+# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
+# be consumed by a variety of systems it can be hard to understand how to
+# deal with test failures or how to set the OpInfo metadata properly.
+#
+# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
+# function must be defined, and the operator's dtypes must be specified.
+# Once that's done you should run the operator's tests in test_ops.py
+# (these can be filtered using the "-k" argument in pytest). Tests that
+# fail should provide an error message that describes what to change about
+# your OpInfo. You don't need to worry about changing an OpInfo's default
+# values unless a test yells at you.
+#
+# Similarly, if you're writing a test that consumes OpInfos then it's critical
+# your test provides a clear error message describing what to do when it
+# fails. You should not assume the OpInfo implementer is familiar with your
+# system.
+#
+# If you see a confusing error message while developing an OpInfo then please
+# file an issue describing what happened.
+#
+# This trial-and-error approach to writing an OpInfo can be frustrating,
+# but it's probably necessary as long as OpInfos don't require
+# learning about all the systems that consume them. One thing that can help
+# is the get_supported_dtypes() function defined in utils.py. This
+# function can be used to programmatically specify the dtypes an operator
+# supports, and is especially useful if writing an OpInfo on a machine
+# without a CUDA device. See its documentation for more details.
+#
+# THE FUTURE OF OPINFOS AND OPINFO TESTING
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# In the future we expect OpInfo coverage to improve and cover
+# the great majority of PyTorch's (public) operators.
+#
+
+
+# Classes and methods for the operator database
+@dataclass
+class OpInfo:
+    """Operator information and helper functions for acquiring it."""
+
+    # the string name of the function
+    name: str
+
+    # An optional reference function that accepts ndarrays (AKA "NumPy arrays"). 
+    # If given, the op will be compared with its reference on each of its sample inputs.
+    ref: Optional[Callable] = None
+
+    # the following metadata describes the operator, its variants, and its aliases, if any
+
+    # iterable of aliases, e.g. ("absolute",) for torch.abs
+    aliases: Iterable = None
+
+    # additional string to include in the test name
+    # this is useful when an op needs multiple OpInfos,
+    # like divide does, often because it's really several
+    # different ops behind the scenes
+    variant_test_name: str = ""
+
+    # the function variant of the operation, populated as torch.<name> if None
+    op: Callable = None
+
+    # allows the method variant of this operation to be specified as follows:
+    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
+    # - if None, then the OpInfo explicitly specifies it has no associated method
+    # - if a Callable, then that callable should be the method associated with this operation
+    method_variant: Callable = _NOTHING
+
+    # allows the inplace variant of this operation to be specified as follows:
+    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
+    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
+    # - if a Callable, then that callable should be the inplace variant associated with this operation
+    inplace_variant: Callable = _NOTHING
+
+    # allows the operator variant of this operation to be specified as follows:
+    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
+    # - if None, then the OpInfo explicitly specifies it has no associated operator
+    # - if a Callable, then that callable should be the operator associated with this operation
+    operator_variant: Callable = _NOTHING
+
+    # allows the inplace operator variant of this operation to be specified as follows:
+    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
+    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
+    # - if a Callable, then that callable should be the inplace operator associated with this operation
+    inplace_operator_variant: Callable = _NOTHING
+
+    # the following metadata are test directives for skipping or modifying tests
+
+    # information about which tests to skip
+    skips: Tuple = tuple()
+
+    # decorators to apply to generated tests
+    decorators: Tuple = tuple()
+
+    # the following are pointers to functions to generate certain classes of inputs
+
+    # function to generate sample inputs with strided layouts
+    sample_inputs_func: Callable = None
+
+    # function to generate a more thorough set of sample inputs with strided layouts
+    reference_inputs_func: Callable = None
+
+    # function to generate inputs that will throw errors
+    error_inputs_func: Callable = None
+
+    # function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors
+    error_inputs_sparse_func: Callable = None
+
+    # function to generate sample inputs with sparse coo layouts
+    sample_inputs_sparse_coo_func: Callable = None
+
+    # function to generate sample inputs with sparse csr layouts
+    sample_inputs_sparse_csr_func: Callable = None
+
+    # function to generate sample inputs with sparse csc layouts
+    sample_inputs_sparse_csc_func: Callable = None
+
+    # function to generate sample inputs with sparse bsr layouts
+    sample_inputs_sparse_bsr_func: Callable = None
+
+    # function to generate sample inputs with sparse bsc layouts
+    sample_inputs_sparse_bsc_func: Callable = None
+
+    # 
the following metadata relates to dtype support and is tested for correctness in test_ops.py
+
+    # dtypes this function works with on the CPU,
+    # inherited by other device types that don't specify their own dtypes
+    dtypes: _dispatch_dtypes = None
+
+    # the following dtypesIf... options override the dtypes value on their respective device types
+
+    # dtypes this function is expected to work with on CUDA
+    dtypesIfCUDA: _dispatch_dtypes = None
+
+    # dtypes this function is expected to work with on ROCM
+    dtypesIfROCM: _dispatch_dtypes = None
+
+    # backward dtypes this function is expected to work with
+    backward_dtypes: _dispatch_dtypes = None
+
+    # backward dtypes this function is expected to work with on CUDA
+    backward_dtypesIfCUDA: _dispatch_dtypes = None
+
+    # backward dtypes this function is expected to work with on ROCM
+    backward_dtypesIfROCM: _dispatch_dtypes = None
+
+    # the following metadata describes the operator's out= support
+
+    # whether the op supports the out kwarg
+    # defaults to True; if the op does not allow the out kwarg or
+    # supports it incorrectly then test_out in test_ops.py should fail
+    supports_out: bool = True
+
+    # the following metadata relates to autograd support
+    # whether the operation supports backward mode AD
+    # if true, gradient correctness is tested in test_ops.py
+    # using the op's sample inputs
+    supports_autograd: bool = True
+
+    # whether the op supports second order gradients
+    # if true, gradgrad correctness is tested in test_ops.py
+    # defaults to support_autograd's value
+    # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
+    supports_gradgrad: bool = None
+
+    # whether the op supports second order gradients via
+    # forward-over-reverse. If True, forward-over-reverse gradgrad correctness
+    # is tested. If False, test that forward grad is not implemented.
+    # Defaults to False.
+    supports_fwgrad_bwgrad: bool = False
+
+    # whether the operation supports inplace autograd
+    # if true, tested in test_ops.py
+    # defaults to supports_autograd's value
+    supports_inplace_autograd: bool = None
+
+    # Whether the operation supports forward mode AD
+    # If the value is True, we check that the gradients are correct
+    # If the value is False, we test that forward grad is not implemented
+    supports_forward_ad: bool = False
+
+    # Whether the operation has a varargs variant
+    # (e.g. functions like ones, zeros, methods like view, permute)
+    supports_varargs: bool = False
+
+    # wrapper function for gradcheck
+    gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)
+
+    # whether to check batched grad when doing gradcheck
+    # defaults to support_autograd's value
+    check_batched_grad: bool = None
+
+    # whether to check batched grad grad when doing gradgradcheck
+    # defaults to support_gradgrad's value
+    check_batched_gradgrad: bool = None
+
+    # whether to check batched forward grad when doing gradcheck
+    # defaults to the value of `supports_forward_ad`
+    check_batched_forward_grad: bool = None
+
+    # whether to check inplace batched forward grad when doing gradcheck
+    # defaults to the value of `check_batched_forward_grad`
+    check_inplace_batched_forward_grad: bool = None
+
+    # tolerance for nondeterminism while performing gradcheck
+    gradcheck_nondet_tol: float = 0.0
+
+    # Whether to use the fast implementation for gradcheck/gradgradcheck.
+    # When set to None, defers to the default value provided by the wrapper
+    # function around gradcheck (testing._internal.common_utils.gradcheck)
+    gradcheck_fast_mode: bool = None
+
+    # the following metadata relates to JIT support and is tested for correctness in test_ops.py
+
+    # name of the corresponding aten:: operator
+    aten_name: str = None
+
+    # if this is a composite implicit autograd op, the decomposed op
+    decomp_aten_name: Optional[str] = None
+
+    # name of the corresponding aten:: operator for backwards
+    aten_backward_name: Optional[str] = None
+
+    # if an op's aten:: node is expected to be symbolically autodiffed
+    assert_autodiffed: bool = False
+
+    # a list of strings with node names that are expected to be in a
+    # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
+    # default is populated to be ['aten::(name of Python operator)']
+    autodiff_nonfusible_nodes: List[str] = None
+
+    # a list of strings with node names that are expected to be in FusionGroups
+    # inside of DifferentiableGraphs when this operation is autodiffed.
+    # Ex: ['aten::add', 'aten::mm'], defaults to an empty list
+    # Note: currently no ops use fusible nodes
+    autodiff_fusible_nodes: List[str] = None
+
+    # the following metadata relates to sparse support and is used in test_sparse.py
+
+    # whether the op supports sparse coo inputs, defaults to False
+    # TODO: rename supports_sparse to supports_sparse_coo
+    supports_sparse: bool = None
+
+    # whether the op can be scripted; if False, only tracing tests are run
+    supports_scripting: bool = True
+
+    # if the operator can be traced
+    supports_tracing: bool = True
+
+    # the following metadata relates to sparse compressed support and
+    # is used in test_sparse_csr.py and test_sparse.py
+
+    # whether the op supports sparse csr inputs, defaults to False
+    supports_sparse_csr: bool = None
+    # whether the op supports sparse csc inputs, defaults to False
+    supports_sparse_csc: bool = None
+    # whether the op supports sparse bsr inputs, defaults to False
+    supports_sparse_bsr: bool = None
+    # whether the op supports sparse bsc inputs, defaults to False
+    supports_sparse_bsc: bool = None
+
+    # whether the op promotes integer inputs to float
+    promotes_int_to_float: bool = False
+
+    # the following metadata relates to complex support and is checked in test_ops.py
+
+    test_conjugated_samples: bool = True
+
+    test_neg_view: bool = True
+
+    # assert that jit shape analysis fully propagates shape
+    assert_jit_shape_analysis: bool = False
+
+    # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py
+
+    supports_expanded_weight: bool = False
+
+    is_factory_function: bool = False
+
+    def __post_init__(self):
+        self._original_opinfo_args = asdict(self).copy()
+
+        assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!"
+
+        dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM)
+
+        # Validates the dtypes are generated from the dispatch-related functions
+        for dtype_list in dtypes_args:
+            assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
+
+        if self.aten_name is None:
+            self.aten_name = self.name
+
+        # Attribute to verify dynamic dtypes are used.
+        self.dynamic_dtypes = any(
+            isinstance(dtypes, utils._dynamic_dispatch_dtypes) for dtypes in dtypes_args
+        )
+
+        if self.dynamic_dtypes:
+            # Make sure `dtypesIfCUDA` is dynamic if dynamic dispatch is used for CPU.
+            # This is because, below, we set dtypesIfCUDA to dtypes if it is None.
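+            # For instance (an illustrative scenario, not a specific op): if an
+            # OpInfo gathered its CPU dtypes at runtime but left dtypesIfCUDA as
+            # None, the fallback below would silently reuse the CPU list for
+            # CUDA and mask any CPU/CUDA support differences.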
+            assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), (
+                f"To use dynamic dtypes for operator {self.name}, "
+                "acquire the dtypes dynamically for argument `dtypesIfCUDA`. "
+                "This is to ensure that CUDA dtypes are acquired correctly as they "
+                "differ from CPU dtypes occasionally"
+            )
+
+        self.dtypes = set(self.dtypes)
+
+        # NOTE: backward dtypes must be acquired before forward dtypes
+        # since they fall back to explicit (not implicit!) specifications of
+        # forward dtypes
+        self.backward_dtypesIfROCM = (
+            set(self.backward_dtypesIfROCM)
+            if self.backward_dtypesIfROCM is not None
+            else (
+                self.backward_dtypesIfCUDA
+                if self.backward_dtypesIfCUDA is not None
+                else self.backward_dtypes
+                if self.backward_dtypes is not None
+                else self.dtypesIfROCM
+                if self.dtypesIfROCM is not None
+                else self.dtypesIfCUDA
+                if self.dtypesIfCUDA is not None
+                else self.dtypes
+            )
+        )
+        self.backward_dtypesIfCUDA = (
+            set(self.backward_dtypesIfCUDA)
+            if self.backward_dtypesIfCUDA is not None
+            else (
+                self.backward_dtypes
+                if self.backward_dtypes is not None
+                else self.dtypesIfCUDA
+                if self.dtypesIfCUDA is not None
+                else self.dtypes
+            )
+        )
+        self.backward_dtypes = (
+            set(self.backward_dtypes)
+            if self.backward_dtypes is not None
+            else self.dtypes
+        )
+
+        self.dtypesIfCUDA = (
+            set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes
+        )
+        self.dtypesIfROCM = (
+            set(self.dtypesIfROCM)
+            if self.dtypesIfROCM is not None
+            else self.dtypesIfCUDA
+        )
+
+        # NOTE: if the op is unspecified it is assumed to be under the torch namespace
+        if not self.op:
+            self.op = _getattr_qual(torch, self.name)
+
+        if self.method_variant is _NOTHING:
+            self.method_variant = getattr(torch.Tensor, self.name, None)
+
+        # attributes like real, imag are not callable
+        if not callable(self.method_variant):
+            self.method_variant = None
+
+        if self.inplace_variant is _NOTHING:
+            inplace_name = self.name + "_"
+            self.inplace_variant = getattr(torch.Tensor, inplace_name, None)
+
+        if self.operator_variant is _NOTHING:
+            self.operator_variant = getattr(operator, self.name, None)
+
+        if self.inplace_operator_variant is _NOTHING:
+            # Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
+            # __i<op>__ method is found. This results in the appearance of an inplace operator variant which
+            # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace
+            # operator with a check that an inplace variant exists.
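+            # An illustrative example of that pitfall (plain Python, no torch):
+            #
+            #   import operator
+            #   t = (1, 2)                  # tuples define no __iadd__
+            #   u = operator.iadd(t, (3,))  # falls back to +, returns a NEW tuple
+            #   assert u == (1, 2, 3) and t == (1, 2)
+            #
+            # so looking up "i" + name on the operator module alone would
+            # wrongly suggest a genuine inplace operator exists.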
+            if self.inplace_variant is not None:
+                inplace_operator_name = "i" + self.name
+                self.inplace_operator_variant = getattr(
+                    operator, inplace_operator_name, None
+                )
+            else:
+                self.inplace_operator_variant = None
+
+        self.decorators = (*self.decorators, *self.skips)
+
+        # Specifying a sample inputs function without specifying the
+        # corresponding layout support implies the layout support:
+        if self.supports_sparse is None:
+            self.supports_sparse = self.sample_inputs_sparse_coo_func is not None
+        if self.sample_inputs_sparse_coo_func is None:
+            self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified
+
+        if self.supports_sparse_csr is None:
+            self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None
+        if self.sample_inputs_sparse_csr_func is None:
+            self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified
+
+        if self.supports_sparse_csc is None:
+            self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None
+        if self.sample_inputs_sparse_csc_func is None:
+            self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified
+
+        if self.supports_sparse_bsr is None:
+            self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None
+        if self.sample_inputs_sparse_bsr_func is None:
+            self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified
+
+        if self.supports_sparse_bsc is None:
+            self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None
+        if self.sample_inputs_sparse_bsc_func is None:
+            self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified
+
+        # We run the sampling functions without tracking gradients for the creation of inputs
+        self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
+        self.sample_inputs_sparse_coo_func = torch.no_grad()(
+            self.sample_inputs_sparse_coo_func
+        )
+        self.sample_inputs_sparse_csr_func = torch.no_grad()(
+            self.sample_inputs_sparse_csr_func
+        )
+        self.sample_inputs_sparse_csc_func = torch.no_grad()(
+            self.sample_inputs_sparse_csc_func
+        )
+        self.sample_inputs_sparse_bsr_func = torch.no_grad()(
+            self.sample_inputs_sparse_bsr_func
+        )
+        self.sample_inputs_sparse_bsc_func = torch.no_grad()(
+            self.sample_inputs_sparse_bsc_func
+        )
+        if self.reference_inputs_func is not None:
+            self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)
+
+        if not self.autodiff_fusible_nodes:
+            self.autodiff_fusible_nodes = []
+
+        if self.autodiff_nonfusible_nodes is None:
+            self.autodiff_nonfusible_nodes = ["aten::" + self.name]
+
+        # Autograd support
+
+        # Autograd flags that depend on backward AD only
+        # - If a setting has been explicitly set, raise an error if it is inconsistent
+        if self.supports_gradgrad is None:
+            self.supports_gradgrad = self.supports_autograd
+        else:
+            assert not (self.supports_gradgrad and not self.supports_autograd), (
+                "supports_gradgrad refines the part of autograd that is supported, so it should "
+                "not be set if supports_autograd is False"
+            )
+        if self.check_batched_grad is None:
+            self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
+        else:
+            assert not (
+                self.check_batched_grad
+                and not (self.supports_autograd or self.supports_forward_ad)
+            ), (
+                "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
+                "it should not be set if supports_autograd is False"
+            )
+        if self.check_batched_gradgrad is None:
+            self.check_batched_gradgrad = self.supports_gradgrad
+        else:
+            assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
+                "check_batched_gradgrad refines the part of autograd that will be checked (by "
+                "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
+                "is False."
+            )
+        if self.check_batched_forward_grad is None:
+            self.check_batched_forward_grad = self.supports_forward_ad
+        else:
+            assert not (
+                self.check_batched_forward_grad and not self.supports_forward_ad
+            ), (
+                "check_batched_forward_grad should only be used when supports_forward_ad "
+                "is True. It is used to disable the test in the specific cases "
+                "where the op supports forward ad but fails to compute "
+                "batched forward grad."
+            )
+
+        if self.check_inplace_batched_forward_grad is None:
+            self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
+        else:
+            assert not (
+                self.check_inplace_batched_forward_grad
+                and not self.check_batched_forward_grad
+            ), (
+                "check_inplace_batched_forward_grad should only be used when check_batched_forward_grad "
+                "is True. It is used to disable the test in the specific cases "
+                "where the op supports batched forward grad but fails to compute batched forward "
+                "grad for the inplace variant of the op."
+            )
+
+        assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
+            "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
+            "True if backward ad is also checked, i.e., supports_autograd should be True.",
+            self.name,
+        )
+
+        # Autograd flags that depend on both forward AD and backward AD
+        if self.supports_inplace_autograd is None:
+            self.supports_inplace_autograd = (
+                self.supports_autograd or self.supports_forward_ad
+            )
+        else:
+            assert not (
+                self.supports_inplace_autograd
+                and not self.supports_autograd
+                and not self.supports_forward_ad
+            ), (
+                "supports_inplace_autograd refines the part of autograd that is supported, so "
+                "it should not be set if both supports_autograd and supports_forward_ad are False"
+            )
+
+        if self.aliases is not None:
+            self.aliases = tuple(AliasInfo(a) for a in self.aliases)  # type: ignore[assignment]
+        else:
+            self.aliases = ()
+
+    def __call__(self, *args, **kwargs):
+        """Calls the function variant of the operator."""
+        return self.op(*args, **kwargs)
+
+    def __str__(self):
+        return dataclass_repr(self)
+
+    def get_op(self):
+        """Returns the function variant of the operator, torch.<op_name>."""
+        return self.op
+
+    def get_method(self):
+        """Returns the method variant of the operator, torch.Tensor.<op_name>.
+        Returns None if the operator has no method variant.
+        """
+        return self.method_variant
+
+    def get_inplace(self):
+        """Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
+        Returns None if the operator has no inplace variant.
+        """
+        return self.inplace_variant
+
+    def get_operator(self):
+        """Returns the operator variant of the operator, e.g. operator.neg.
+        Returns None if the operator has no operator variant.
+        """
+        return self.operator_variant
+
+    def get_inplace_operator(self):
+        """Returns the inplace operator variant of the operator, e.g. operator.iadd.
+        Returns None if the operator has no inplace operator variant."""
+        return self.inplace_operator_variant
+
+    def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
+        """Returns an iterable of SampleInputs but with the tensor input or first
+        tensor in a sequence input conjugated.
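+
+        A minimal usage sketch (illustrative; ``opinfo`` is any OpInfo whose op
+        supports complex dtypes):
+
+            # for sample in opinfo.conjugate_sample_inputs("cpu", torch.cfloat):
+            #     assert sample.input.is_conj()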
+ """ + + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + conj_samples = list(samples) + + def conjugate(tensor): + _requires_grad = tensor.requires_grad + tensor = tensor.conj() + return tensor.requires_grad_(_requires_grad) + + for i, sample in enumerate(samples): + sample = conj_samples[i] + # Note: it is assumed that the input here is either a tensor or tensorlist + if isinstance(sample.input, torch.Tensor): + sample.input = conjugate(sample.input) + else: + sample.input[0] = conjugate(sample.input[0]) + + return TrackedInputIter(iter(conj_samples), "conjugate sample input") + + def sample_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + These samples should be sufficient to test the function works correctly + with autograd, TorchScript, etc. + """ + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + + if kwargs.get("include_conjugated_inputs", False): + conj_samples = self.conjugate_sample_inputs( + device, dtype, requires_grad, **kwargs + ) + samples_list = list(samples) + samples_list.extend(conj_samples) + samples = tuple(samples_list) + + return TrackedInputIter(iter(samples), "sample input") + + def reference_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + Distinct from sample_inputs() above because this returns an expanded set + of inputs when reference_inputs_func is defined. If undefined this returns + the sample inputs. + """ + if self.reference_inputs_func is None: + samples = self.sample_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter(iter(samples), "sample input") + + if kwargs.get("include_conjugated_inputs", False): + raise NotImplementedError + + references = self.reference_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter(iter(references), "reference input") + + def error_inputs(self, device, **kwargs): + """ + Returns an iterable of ErrorInputs. + """ + errs = self.error_inputs_func(self, device, **kwargs) + return TrackedInputIter( + iter(errs), "error input", callback=lambda e: e.sample_input + ) + + def error_inputs_sparse(self, device, layout, **kwargs): + """ + Returns an iterable of ErrorInputs that contain sparse sample + inputs with a specified layout. + """ + if not self.supports_sparse_layout(layout): + raise unittest.SkipTest("unsupported sparse layout") + return self.error_inputs_sparse_func(self, device, layout, **kwargs) + + def supports_sparse_layout(self, layout): + """Return True if OpInfo supports the specified sparse layout.""" + layout_name = str(layout).split(".")[-1] + # map torch.sparse_coo to OpInfo.supports_sparse: + layout_name = layout_name.replace("_coo", "") + return getattr(self, f"supports_{layout_name}") + + def sample_inputs_sparse( + self, layout, device, dtype, requires_grad=False, **kwargs + ): + """Returns an iterable of SampleInputs that contain inputs with a + specified sparse layout. 
+ """ + layout_name = str(layout).split(".")[-1] + sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name) + + def non_empty_sampler(op, generator): + found_sample = False + for sample in generator: + found_sample = True + yield sample + if not found_sample: + raise unittest.SkipTest("NO SAMPLES!") + + return non_empty_sampler( + self, + sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs), + ) + + def _sample_inputs_unspecified(self, *args, **kwargs): + """Raises an NotImplemented exception in a OpInfo instance creation + that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True + without specifying the corresponding sample function as + sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func. + + To avoid this, either define the corresponding sample function, + or re-map unsupported samples to error inputs in an appropiate + + opinfo/definitions/sparse.py:_validate_sample_input_sparse_ + + function. + """ + raise NotImplementedError("no sample function specified") + + def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + coo layout. + """ + return self.sample_inputs_sparse_coo_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csr layout. + """ + return self.sample_inputs_sparse_csr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csc layout. + """ + return self.sample_inputs_sparse_csc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsr layout. + """ + return self.sample_inputs_sparse_bsr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsc layout. 
+ """ + return self.sample_inputs_sparse_bsc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + """Returns the decorators targeting the given test.""" + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + device_type = torch.device(device_type).type + if device_type == "cuda": + return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA + return self.dtypes + + def supported_backward_dtypes(self, device_type): + if not self.supports_autograd: + return set() + + device_type = torch.device(device_type).type + backward_dtypes = None + if device_type == "cuda": + backward_dtypes = ( + self.backward_dtypesIfROCM + if TEST_WITH_ROCM + else self.backward_dtypesIfCUDA + ) + else: + backward_dtypes = self.backward_dtypes + + allowed_backward_dtypes = floating_and_complex_types_and( + torch.bfloat16, torch.float16, torch.complex32 + ) + return set(allowed_backward_dtypes).intersection(backward_dtypes) + + def supports_dtype(self, dtype, device_type) -> bool: + return dtype in self.supported_dtypes(device_type) + + @property + def formatted_name(self): + """Returns a formatted full name for this OpInfo that can be used in test names.""" + variant = ( + "_" + self.variant_test_name.replace(".", "_") + if self.variant_test_name + else "" + ) + return f"{self.name.replace('.', '_')}{variant}" + + +def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs): + """Generates input tensors for testing reduction operators""" + yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor( + [3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad + ) + + +def _generate_reduction_kwargs(ndim, supports_multiple_dims=True): + """Generates a subset of all valid dim and keepdim kwargs given ndim that + is appropriate for testing reduction operators. + """ + + # Test default dim and keepdim + yield {} + + # Test reducing inner and outer most dimensions + yield {"dim": 0, "keepdim": True} + yield {"dim": -1, "keepdim": False} + + # Test reducing middle dimension + if ndim > 2: + yield {"dim": ndim // 2, "keepdim": True} + + if supports_multiple_dims: + # Test reducing all dimensions + yield {"dim": tuple(range(ndim)), "keepdim": False} + + # Test reducing both first and last dimensions + if ndim > 1: + yield {"dim": (0, -1), "keepdim": True} + + # Test reducing every other dimension starting with the second + if ndim > 3: + yield {"dim": tuple(range(1, ndim, 2)), "keepdim": False} + + +def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for reduction operators.""" + + # TODO(@heitorschueroff) Once all reduction operators are using + # ReductionOpInfo use op_info.supports_multiple_dims directly. + supports_multiple_dims: bool = kwargs.get("supports_multiple_dims", True) + + # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo + # use op_info.generate_args_kwargs directly. 
+ generate_args_kwargs = kwargs.get( + "generate_args_kwargs", lambda *args, **kwargs: (yield tuple(), {}) + ) + + for t in _generate_reduction_inputs(device, dtype, requires_grad): + for reduction_kwargs in _generate_reduction_kwargs( + t.ndim, supports_multiple_dims + ): + for args, kwargs in generate_args_kwargs(t, **reduction_kwargs): + kwargs.update(reduction_kwargs) + yield SampleInput( + t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs + ) + + +# NOTE [Reductions]: +# +# For testing purposes, we relax the definition of a reduction operator +# as defined in the docstring below. We do this to capture operators with +# a similar API so they can be tested automatically. However... +# +# Strictly speaking a reduction operator is an operator that can reduce an +# array to a single scalar value and that can be computed from the partial +# result of reducing subarrays. This usually means that the reduction operation +# should be commutative and associative. This definition is important when it +# comes to implementation as it determines how a reduction can be parallelized. +# +# For example, many summary statistics such as median, mode and quantile cannot +# be computed from partial results because these are sorting and counting based +# algorithms that need information that would be lost in the reduced value. +class ReductionOpInfo(OpInfo): + """Reduction operator information. + + An operator is a reduction operator if it reduces one or more dimensions of + the input tensor to a single value. Reduction operators must implement the + following signature: + + - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor` + + ReductionOpInfo tests that reduction operators implement a consistent API. + Optional features such as reducing over multiple dimensions are captured in + the optional keyword parameters of the ReductionOpInfo constructor. + + If a reduction operator does not yet implement the full required API of + reduction operators, this should be documented by xfailing the failing + tests rather than adding optional parameters to ReductionOpInfo. + + NOTE + The API for reduction operators has not yet been finalized and some + requirements may change. + + See tests in test/test_reductions.py + """ + + def __init__( + self, + name, + *, + # The identity value for the operator if it has one. + identity: Optional[Any] = None, + # The nan policy for the operator if it implements one. + # - propagate: NaN values are propagated to the output + # - omit: NaN values are discarded during the reduction + nan_policy: Optional[str] = None, + # Whether the operator supports reducing multiple dimensions. + supports_multiple_dims: bool = True, + # Whether the operator promotes integral to floating point dtypes. + promotes_int_to_float: bool = False, + # Whether the operator promotes all integral dtypes to int64. + promotes_int_to_int64: bool = False, + # If a specific dtype is given, then the operator always returns that + # dtype irrespective of the input dtype. If None, the operator returns + # the dtype according to the type promotion rules above. + result_dtype: Optional[torch.dtype] = None, + # Casts complex results to real (e.g. linalg.norm or torch.var) + complex_to_real: bool = False, + # ReductionOpInfo tests generate their own input, dim and keepdim + # arguments and call this function to generate tuples of extra args and + # kwargs to use when calling the op. This is required for operators that + # have other required parameters besides the input tensor. 
+        generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (
+            yield tuple(),
+            {},
+        ),
+        # Options from the OpInfo base class
+        **kwargs,
+    ):
+        self._original_reduction_args = locals().copy()
+        assert nan_policy in (None, "propagate", "omit")
+
+        # These are mutually exclusive options
+        assert not (result_dtype and promotes_int_to_float)
+        assert not (result_dtype and promotes_int_to_int64)
+        assert not (result_dtype and complex_to_real)
+        assert not (promotes_int_to_float and promotes_int_to_int64)
+
+        # Default sample_inputs_func for ReductionOpInfo which augments sample
+        # inputs from sample_inputs_reduction with the args and kwargs from
+        # generate_args_kwargs. This is only used if sample_inputs_func is None.
+        def sample_inputs_func(*args, **kwargs):
+            kwargs["supports_multiple_dims"] = supports_multiple_dims
+            kwargs["generate_args_kwargs"] = generate_args_kwargs
+            yield from sample_inputs_reduction(*args, **kwargs)
+
+        # Override OpInfo defaults and call base class __init__
+        kwargs.setdefault("inplace_variant", None)
+        kwargs.setdefault("sample_inputs_func", sample_inputs_func)
+        super().__init__(name, promotes_int_to_float=promotes_int_to_float, **kwargs)
+
+        self.identity = identity
+        self.nan_policy = nan_policy
+        self.supports_multiple_dims = supports_multiple_dims
+        self.promotes_int_to_int64 = promotes_int_to_int64
+        self.complex_to_real = complex_to_real
+        self.result_dtype = result_dtype
+        self.generate_args_kwargs = generate_args_kwargs
+
+
+# The base reference input generation for elementwise binary operations
+def _reference_inputs_elementwise_binary(
+    op, device, dtype, requires_grad, exclude_zero, **kwargs
+):
+    yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
+    yield from generate_elementwise_binary_tensors(
+        op,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+    if dtype is not torch.bool:
+        yield from generate_elementwise_binary_small_value_tensors(
+            op, device=device, dtype=dtype, requires_grad=requires_grad
+        )
+    if dtype not in (torch.bool, torch.uint8, torch.int8):
+        yield from generate_elementwise_binary_large_value_tensors(
+            op, device=device, dtype=dtype, requires_grad=requires_grad
+        )
+    yield from generate_elementwise_binary_broadcasting_tensors(
+        op,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+    yield from generate_elementwise_binary_with_scalar_samples(
+        op, device=device, dtype=dtype, requires_grad=requires_grad
+    )
+
+    yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples(
+        op, device=device, dtype=dtype, requires_grad=requires_grad
+    )
+
+    if dtype.is_floating_point or dtype.is_complex:
+        yield from generate_elementwise_binary_extremal_value_tensors(
+            op, device=device, dtype=dtype, requires_grad=requires_grad
+        )
+
+
+# Note that these reference inputs use scalars for the SampleInput.input value,
+# and many tests require SampleInput.input be a tensor or a list of tensors
+def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
+    # default to including zeros unless the op opts out via rhs_make_tensor_kwargs
+    exclude_zero = False
+    if hasattr(op, "rhs_make_tensor_kwargs"):
+        exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
+
+    gen = partial(
+        _reference_inputs_elementwise_binary,
+        op,
+        device,
+        dtype,
+        requires_grad,
+        exclude_zero,
+        **kwargs,
+    )
+
+    # yields "normal" samples
+    yield from gen()
+
+    # yields noncontiguous samples
+    for sample in gen():
+        yield sample.noncontiguous()
+
+    yield from 
generate_elementwise_binary_noncontiguous_tensors(
+        op,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+
+    yield from generate_elementwise_binary_arbitrarily_strided_tensors(
+        op,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+
+
+# A functional that extends an elementwise binary operator's bespoke error inputs
+# with generic error inputs for the class of elementwise binary operations
+def make_error_inputs_elementwise_binary(error_inputs_func):
+    def error_inputs_func_wrapper(op, device, **kwargs):
+        if error_inputs_func is not None:
+            yield from error_inputs_func(op, device, **kwargs)
+
+        if not op.supports_rhs_python_scalar:
+            si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,))
+            yield ErrorInput(si, error_type=Exception, error_regex="")
+
+        if not op.supports_one_python_scalar:
+            si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),))
+            yield ErrorInput(si, error_type=Exception, error_regex="")
+
+        if (
+            not kwargs.get("skip_two_python_scalars", False)
+            and not op.supports_two_python_scalars
+        ):
+            si = SampleInput(2, args=(3,))
+            yield ErrorInput(si, error_type=Exception, error_regex="")
+
+    return error_inputs_func_wrapper
+
+
+# The following functions and classes are for testing elementwise binary operators.
+
+
+# Returns a generator of pairs of contiguous tensors on the requested device
+# and with the requested dtype.
+#
+# This function is intended to test the non-vectorized and vectorized code
+# paths of elementwise binary functions, as well as their handling of odd tensor
+# sizes (like zero-dim tensors and tensors with zero elements).
+#
+# Each iterable will include a tensor with no elements,
+# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and
+# a large 2D tensor.
+def generate_elementwise_binary_tensors(
+    op, *, device, dtype, requires_grad=False, exclude_zero=False
+):
+    shapes = (
+        # tensors with no elements
+        (0,),
+        (1, 0, 3),
+        # zero dim (scalar) tensor
+        (),
+        # small 1D tensor
+        (20,),
+        # medium 1D tensor
+        (812,),
+        # large 2D tensor
+        (1029, 917),
+    )
+
+    make_arg = partial(
+        make_tensor,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+    for shape in shapes:
+        lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
+        rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
+        yield SampleInput(lhs, args=(rhs,))
+
+
+def generate_elementwise_binary_arbitrarily_strided_tensors(
+    op, *, device, dtype, requires_grad=False, exclude_zero=False
+):
+    # shape, strides, offset
+    strided_cases = (
+        ((5, 6, 2), (1, 1, 7), 2),
+        ((5, 5, 4), (1, 1, 7), 2),
+        ((5, 5, 2), (4, 5, 7), 3),
+        ((5, 5, 2), (5, 5, 7), 3),
+        ((5, 5, 2), (5, 5, 5), 3),
+        ((9, 5, 2), (0, 1, 7), 3),
+    )
+
+    make_arg = partial(
+        make_tensor,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+    for shape, strides, offset in strided_cases:
+        a = make_arg(
+            500,
+        ).as_strided(shape, strides, offset)
+        b = make_arg(shape)
+        yield SampleInput(a, args=(b,))
+
+
+# Returns a generator of pairs of contiguous tensors on the requested device and with
+# the requested dtype.
+#
+# Unlike the previous function, the values in these tensors are specified manually. 
+def generate_elementwise_binary_small_value_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=None +): + if exclude_zero is None: + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + # defines interesting values + _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) + _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) + _float_vals = ( + 0.0, + -0.0, + -0.001, + 0.001, + -0.25, + 0.25, + -1.0, + 1.0, + -math.pi / 2, + math.pi / 2, + -math.pi + 0.00001, + math.pi - 0.00001, + -math.pi, + math.pi, + -math.pi - 0.00001, + math.pi + 0.00001, + ) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_vals, _float_vals) + elif dtype.is_complex: + complex_vals = product(_float_vals, _float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): + prod = product(_int_vals, _int_vals) + elif dtype is torch.uint8: + prod = product(_unsigned_int_vals, _unsigned_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + if r == 0 and exclude_zero: + r_vals.append(1) + else: + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + _large_int_vals = (-1113, 1113, -10701, 10701) + _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) + _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) + + l_vals = [] + r_vals = [] + + if dtype == torch.float16: + prod = product(_large_float16_vals, _large_float16_vals) + elif dtype.is_floating_point: + prod = product(_large_float_vals, _large_float_vals) + elif dtype.is_complex: + complex_vals = product(_large_float_vals, _large_float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int16, torch.int32, torch.int64): + prod = product(_large_int_vals, _large_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + _float_extremals = (float("inf"), float("-inf"), float("nan")) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_extremals, _float_extremals) + elif dtype.is_complex: + complex_vals = product(_float_extremals, _float_extremals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, 
complex_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + # Test case for NaN propagation + nan = ( + float("nan") if dtype.is_floating_point else complex(float("nan"), float("nan")) + ) + lhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + lhs.view(-1)[::3] = nan + rhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + rhs.view(-1)[::3] = nan + + yield SampleInput(lhs, args=(rhs,)) + + +# Returns a generator of pairs of contiguous and noncontiguous tensors that +# require broadcasting +def generate_elementwise_binary_broadcasting_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + ((1,), ()), + ((2,), ()), + ((1,), (2,)), + ((2, 1), (2,)), + ((1, 2), (2,)), + ((3, 2), (2,)), + ((1, 3, 2), (2,)), + ((1, 3, 2), (3, 2)), + ((3, 1, 2), (3, 2)), + ((2, 3, 2), ()), + ((3, 1, 2), (1, 3, 2)), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, noncontiguous in product(shapes, [True, False]): + shape_lhs, shape_rhs = shape + lhs = make_arg( + shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs + ) + rhs = make_arg( + shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs + ) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=True) + + +# Returns a generator of pairs of contiguous tensors and scalars +def generate_elementwise_binary_with_scalar_samples( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5)) + if op.supports_rhs_python_scalar: + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs, args=(rhs_scalar,)) + + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(lhs_scalar, args=(rhs,)) + + if op.supports_two_python_scalars: + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs_scalar, args=(rhs_scalar,)) + + +# Returns a generator of pairs of contiguous tensors and 0d tensors and scalars and type promotion +def generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, *, device, dtype, requires_grad=False +): + # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars + if op.name in ( + "eq", + "ne", + "gt", + "ge", + "lt", + "le", + "logical_and", + "logical_or", + "logical_xor", + ): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + shape = ( + 23, + ) # this shape is big enough to trigger vectorization, and has non-vectorized tail + values = (float("nan"), float("inf"), -float("inf")) + scalar_tensors = tuple(torch.tensor(val) for val in values) + if op.supports_rhs_python_scalar: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = 
make_arg(shape, **op.rhs_make_tensor_kwargs) + for scalar in values + scalar_tensors: + yield SampleInput(lhs, args=(scalar,)) + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(scalar, args=(rhs,)) + + +# Returns a generator of pairs of noncontiguous tensors +def generate_elementwise_binary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + # Generic noncontiguity + lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs) + rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.clone(), args=(rhs.clone(),)) + yield SampleInput(lhs.contiguous(), args=(rhs,)) + + # Transposed + lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs) + rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.T, args=(rhs.T,)) + + # More noncontiguity + shapes = ((5, 7), (1024,)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + lhs_non_contig.copy_(lhs) + + rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + rhs_non_contig.copy_(rhs) + + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Noncontiguous indices + shape = (2, 2, 1, 2) + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs[:, 1, ...] + rhs_non_contig = rhs[:, 1, ...] 
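+    # Selecting index 1 of the size-2 second dimension keeps the original
+    # strides, so both views are noncontiguous without permuting any data.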
+
+    yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
+    yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))
+
+    # Expanded tensors
+    shapes = ((1, 3), (1, 7), (5, 7))
+
+    for shape in shapes:
+        lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
+        rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
+
+        lhs_non_contig = lhs.expand(3, -1, -1)
+        rhs_non_contig = rhs.expand(3, -1, -1)
+
+        yield SampleInput(lhs_non_contig, args=(rhs_non_contig,))
+
+
+# Sample inputs for elementwise binary operators, like add
+def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
+    _M = S if kwargs.get("small_inputs_only", False) else M
+    _S = XS if kwargs.get("small_inputs_only", False) else S
+
+    # default to including zeros unless the op opts out via rhs_make_tensor_kwargs
+    exclude_zero = False
+    if hasattr(op, "rhs_make_tensor_kwargs"):
+        exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
+
+    make_arg = partial(
+        make_tensor,
+        device=device,
+        dtype=dtype,
+        requires_grad=requires_grad,
+        exclude_zero=exclude_zero,
+    )
+
+    shapes = (
+        ((), ()),
+        ((_S,), ()),
+        ((_S, 1), (_S,)),
+        ((_M, _S), ()),
+        ((_S, _M, _S), (_M, _S)),
+        ((_S, _M, _S), (_S, _M, _S)),
+        ((_M, 1, _S), (_M, _S)),
+        ((_M, 1, _S), (1, _M, _S)),
+        ((0, 1, XS), (0, _M, XS)),
+    )
+
+    sample_kwargs = kwargs.get("sample_kwargs", {})
+
+    for shape_lhs, shape_rhs in shapes:
+        lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
+        rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
+        broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
+
+        yield SampleInput(
+            lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input
+        )
+
+
+# Metadata class for binary "universal functions (ufuncs)" that accept two
+# tensors and have common properties
+class BinaryUfuncInfo(OpInfo):
+    """Operator information for 'universal binary functions (binary ufuncs).'
+    These are functions of two tensors with common properties like:
+    - they are elementwise functions
+    - the output shape is determined by the input shape
+    - they typically have method and inplace variants
+    - they typically support the out kwarg
+    - they typically have NumPy or SciPy references
+    See NumPy's universal function documentation
+    (https://numpy.org/doc/stable/reference/ufuncs.html) for more details
+    about the concept of ufuncs.
+    """
+
+    def __init__(
+        self,
+        name,
+        *,
+        sample_inputs_func=sample_inputs_elementwise_binary,
+        reference_inputs_func=reference_inputs_elementwise_binary,
+        error_inputs_func=None,
+        lhs_make_tensor_kwargs=None,
+        rhs_make_tensor_kwargs=None,
+        always_returns_bool=False,  # Set to True if the op always returns bool tensors
+        supports_rhs_python_scalar=True,  # Whether the operator allows Tensor x scalar inputs
+        supports_one_python_scalar=False,  # Whether the operator allows scalar x tensor and tensor x scalar inputs
+        supports_two_python_scalars=False,  # Whether the operator allows scalar x scalar inputs
+        **kwargs,
+    ):
+        self._original_binary_ufunc_args = locals().copy()
+
+        # Elementwise binary operations perform the equivalent of test_numpy_refs
+        # in test_binary_ufuncs, but with additional test granularity. So the
+        # generic test_ops.py test is skipped because it's redundant. 
+ common_skips = ( + DecorateInfo( + unittest.skip("Skipping redundant test."), + "TestCommon", + "test_numpy_refs", + ), + ) + kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips + super().__init__( + name, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), + **kwargs, + ) + + # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. + if lhs_make_tensor_kwargs is None: + lhs_make_tensor_kwargs = {} + self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs + + if rhs_make_tensor_kwargs is None: + rhs_make_tensor_kwargs = {} + self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs + + self.always_returns_bool = always_returns_bool + self.supports_rhs_python_scalar = supports_rhs_python_scalar + self.supports_one_python_scalar = supports_one_python_scalar + self.supports_two_python_scalars = supports_two_python_scalars + + if self.supports_two_python_scalars: + self.supports_one_python_scalar = True + + if self.supports_one_python_scalar: + assert ( + supports_rhs_python_scalar + ), "Can't support lhs and rhs Python scalars but not rhs scalars!" + + +# The following functions and classes are for testing elementwise unary operators. +def sample_inputs_elementwise_unary( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + _L = S if kwargs.get("small_inputs_only", False) else L + + low, high = op_info.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op_info._domain_eps + high = high if high is None or not is_floating else high - op_info._domain_eps + if ( + op_info.supports_sparse_csr + or op_info.supports_sparse_csc + or op_info.supports_sparse_bsr + or op_info.supports_sparse_bsc + ): + # Tensors with dim=2 for sparse compressed testing + yield SampleInput( + make_tensor( + (_L, _L), + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + else: + # Creates a 1D, empty, and scalar tensor + for shape in ((_L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + + +# Replace values satisfying condition with a safe value. 
This is used to block +# out values the could cause singularity like tan(pi/2) +def _replace_values_in_tensor(tensor, condition, safe_value): + mask = condition(tensor) + tensor.masked_fill_(mask, safe_value) + + +# Helper to create a unary elementwise tensor with valid inputs +def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs): + low, high = op.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs) + + if op.reference_numerics_filter is not None and dtype is not torch.bool: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + return a + + +# Restricts the values in the tensor to the domain of the +# given elementwise unary operator +def _filter_unary_elementwise_tensor(a, *, op): + # short-circuits for boolean tensors + if a.dtype is torch.bool: + return a + + low, high = op.domain + is_floating = a.dtype.is_floating_point or a.dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + if a.dtype is torch.uint8 and low is not None: + low = max(low, 0) + + if not a.dtype.is_floating_point and not a.dtype.is_complex: + low = math.ceil(low) if low is not None else None + high = math.floor(high) if high is not None else None + + if op.reference_numerics_filter is not None: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + if low is not None or high is not None: + if a.dtype.is_complex: + a.real.clamp_(low, high) + a.imag.clamp_(low, high) + else: + a.clamp_(min=low, max=high) + + return a + + +def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs): + # Special-cases bool + if dtype is torch.bool: + tensors = ( + torch.empty(0, device=device, dtype=torch.bool), + torch.tensor(True, device=device), + torch.tensor(False, device=device), + torch.tensor((True, False), device=device), + make_tensor((812,), device=device, dtype=dtype), + make_tensor((1029, 917), device=device, dtype=dtype), + ) + for a in tensors: + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + shapes = ( + (1029, 917), + (812,), + # Empty sizes + (0,), + (0, 3, 3), + (1, 0, 5), + (6, 0, 0, 0), + (3, 0, 1, 0), + ) + + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + for shape in shapes: + a = make_arg(shape) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_small_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def 
generate_elementwise_unary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + yield SampleInput( + sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] + ) + + +def generate_elementwise_unary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + + # Generic noncontiguity + t = make_arg((1026,), noncontiguous=True) + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Transposed + t = make_arg((1024, 1024)).T + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + t = make_arg(shape) + t_non_contig = t.expand(3, -1, -1) + yield SampleInput( + t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] + ) + + +def generate_elementwise_unary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +# Reuses the elementwise binary generators for consistency +# TODO: in the future generalize the reference generators to handle n-ary elementwise operations +def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + yield from generate_elementwise_unary_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype is not torch.bool: + yield from generate_elementwise_unary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + if dtype not in (torch.bool, torch.uint8, torch.int8) and ( + op.handles_large_floats + or (not dtype.is_floating_point and not dtype.is_complex) + ): + yield from generate_elementwise_unary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype.is_floating_point or ( + op.handles_complex_extremal_values and dtype.is_complex + ): + yield from generate_elementwise_unary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + gen = partial( + _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_unary_noncontiguous_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield from generate_elementwise_unary_arbitrarily_strided_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +# Metadata class for unary "universal functions (ufuncs)" that accept a single +# tensor and 
have common properties like: +class UnaryUfuncInfo(OpInfo): + """Operator information for 'universal unary functions (unary ufuncs).' + These are functions of a single tensor with common properties like: + - they are elementwise functions + - the input shape is the output shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details + about the concept of ufuncs. + """ + + def __init__( + self, + name, # the string name of the function + *, + dtypes=floating_types(), + domain=(None, None), # the [low, high) domain of the function + handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) + handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) + supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle + sample_inputs_func=sample_inputs_elementwise_unary, + reference_inputs_func=reference_inputs_elementwise_unary, + sample_kwargs=lambda device, dtype, input: ({}, {}), + reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested + **kwargs, + ): + self._original_unary_ufunc_args = locals().copy() + + super().__init__( + name, + dtypes=dtypes, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + **kwargs, + ) + self.domain = domain + self.handles_complex_extremal_values = handles_complex_extremal_values + self.handles_large_floats = handles_large_floats + self.supports_complex_to_float = supports_complex_to_float + self.reference_numerics_filter = reference_numerics_filter + + # test_unary_ufuncs.py generates its own inputs to test the consistency + # of the operator on sliced tensors, non-contig tensors, etc. + # `sample_kwargs` is a utility function to provide kwargs + # along with those inputs if required (eg. clamp). + # It should return two dictionaries, first holding kwarg for + # torch operator and second one for reference NumPy operator. + self.sample_kwargs = sample_kwargs + + # Epsilon to ensure grad and gradgrad checks don't test values + # outside a function's domain. 
+ self._domain_eps = 1e-5 + + +def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs): + is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half + if not is_fp16_or_chalf: + nd_tensor = partial( + make_tensor, + (S, S + 1, S + 2), + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, (31,), device=device, dtype=dtype, requires_grad=requires_grad + ) + else: + # cuFFT supports powers of 2 for half and complex half precision + # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args + # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two + low = None + high = None + if self.name in ["fft.hfft", "fft.irfft", "_refs.fft.hfft", "_refs.fft.irfft"]: + shapes = ((2, 9, 9), (33,)) + elif self.name in [ + "fft.hfft2", + "fft.irfft2", + "_refs.fft.hfft2", + "_refs.fft.irfft2", + ]: + shapes = ((2, 8, 9), (33,)) + elif self.name in [ + "fft.hfftn", + "fft.irfftn", + "_refs.fft.hfftn", + "_refs.fft.irfftn", + ]: + shapes = ((2, 2, 33), (33,)) + # Adjusting the limits because the test would be flaky due to over-saturation of float16 + # See: https://github.com/pytorch/pytorch/pull/81416 + low = -1.0 + high = 1.0 + else: + shapes = ((2, 8, 16), (32,)) + nd_tensor = partial( + make_tensor, + shapes[0], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, + shapes[1], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + + if self.ndimensional == SpectralFuncType.ND: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(8,)) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3, (0, -1)]) + elif self.ndimensional == SpectralFuncType.TwoD: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(6, 8) if not is_fp16_or_chalf else (4, 8)) + yield SampleInput(nd_tensor(), dim=0) + yield SampleInput(nd_tensor(), dim=(0, -1)) + yield SampleInput(nd_tensor(), dim=(-3, -2, -1)) + else: + yield SampleInput( + nd_tensor(), + n=10 if not is_fp16_or_chalf else 8, + dim=1, + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), n=7 if not is_fp16_or_chalf else 8) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3]) + + +SpectralFuncType = Enum("SpectralFuncType", ("OneD", "TwoD", "ND")) + + +# Metadata class for Fast Fourier Transforms in torch.fft. 
+class SpectralFuncInfo(OpInfo): + """Operator information for torch.fft transforms.""" + + def __init__( + self, + name, # the string name of the function + *, + ref=None, # Reference implementation (probably in np.fft namespace) + dtypes=floating_and_complex_types(), + ndimensional: SpectralFuncType, + sample_inputs_func=sample_inputs_spectral_ops, + decorators=None, + **kwargs, + ): + self._original_spectral_func_args = dict(locals()).copy() + self._original_spectral_func_args.update(kwargs) + + decorators = list(decorators) if decorators is not None else [] + decorators += [ + skipCPUIfNoFFT, + DecorateInfo( + toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}), + "TestCommon", + "test_complex_half_reference_testing", + ), + ] + + super().__init__( + name=name, + dtypes=dtypes, + decorators=decorators, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + self.ndimensional = ndimensional + + +class ShapeFuncInfo(OpInfo): + """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll""" + + def __init__( + self, + name, # the string name of the function + *, + ref, # a reference function + dtypes=floating_types(), + dtypesIfCUDA=None, + dtypesIfROCM=None, + sample_inputs_func=None, + **kwargs, + ): + super().__init__( + name, + dtypes=dtypes, + dtypesIfCUDA=dtypesIfCUDA, + dtypesIfROCM=dtypesIfROCM, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + + +def sample_inputs_foreach( + self, + device, + dtype, + N, + *, + noncontiguous=False, + same_size=False, + low=None, + high=None, + zero_size: bool, + requires_grad: bool, + # mutually exclusive from same_size and zero_size, which are all or nothing + intersperse_empty_tensors: bool = False, +): + if zero_size: + return [torch.empty(0, dtype=dtype, device=device) for _ in range(N)] + if same_size: + return [ + make_tensor( + (N, N), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for _ in range(N) + ] + else: + # interweave some empty tensors + have the last 2 tensors be empty (see #100701) + return [ + torch.empty(0, dtype=dtype, device=device, requires_grad=requires_grad) + if (i % 3 == 0 or i >= N - 2) and intersperse_empty_tensors + else make_tensor( + (N - i, N - i), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for i in range(N) + ] + + +def get_foreach_method_names(name): + # get torch inplace reference function + op_name = "_foreach_" + name + inplace_op_name = op_name + "_" + + op = getattr(torch, op_name, None) + inplace_op = getattr(torch, inplace_op_name, None) + + ref = getattr(torch, name, None) + ref_inplace = getattr(torch.Tensor, name + "_", None) + return op, inplace_op, ref, ref_inplace + + +class ForeachFuncInfo(OpInfo): + """Early version of a specialized OpInfo for foreach functions""" + + def __init__( + self, + name, + sample_inputs_func, + *, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + dtypesIfROCM=None, + supports_alpha_param=False, + supports_autograd=True, + supports_inplace_autograd=True, + supports_scalar_self_arg=False, + supports_forward_ad=True, + backward_requires_result=False, + supports_out=True, + **kwargs, + ): + ( + foreach_method, + foreach_method_inplace, + torch_ref_method, + torch_ref_inplace, + ) = get_foreach_method_names(name) + if not supports_out: + # note(crcrpar): `foreach_method` for `"zero"` is `None` 
+            # but `None` would call `_getattr_qual` in `OpInfo.__post_init__`,
+            # which would fail since `_foreach_zero` is not defined at the
+            # moment. Thus, to skip the qualification, set a similar torch
+            # function.
+            assert foreach_method is None
+            assert torch_ref_method is None
+            foreach_method = foreach_method_inplace
+            torch_ref_method = torch_ref_inplace
+        super().__init__(
+            name="_foreach_" + name,
+            op=foreach_method,
+            ref=torch_ref_method,
+            method_variant=foreach_method,
+            inplace_variant=foreach_method_inplace,
+            dtypes=dtypes,
+            dtypesIfCUDA=dtypesIfCUDA,
+            dtypesIfROCM=dtypesIfROCM,
+            sample_inputs_func=sample_inputs_func,
+            supports_autograd=supports_autograd,
+            supports_forward_ad=supports_forward_ad,
+            supports_out=supports_out,
+            **kwargs,
+        )
+        self.supports_scalar_self_arg = supports_scalar_self_arg
+
+        self.ref_inplace = torch_ref_inplace
+        self.supports_alpha_param = supports_alpha_param
+        self.backward_requires_result = backward_requires_result
+        self.has_no_in_place = self.inplace_variant is None
+        self.supports_inplace_autograd = supports_inplace_autograd
+
+        if name == "norm":
+            self.ref = torch.linalg.vector_norm
+        elif name == "minimum":
+            # because the minimum ref does not support inplace or scalar
+            self.ref = torch.clamp_max
+            self.ref_inplace = torch.Tensor.clamp_max_
+        elif name == "maximum":
+            # because the maximum ref does not support inplace or scalar
+            self.ref = torch.clamp_min
+            self.ref_inplace = torch.Tensor.clamp_min_
+
+    def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs):
+        if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"):
+            return []
+        return self.sample_inputs_func.sample_zero_size_tensor_inputs(
+            self, device, dtype, requires_grad, **kwargs
+        )
+
+
+def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
+    """Gradcheck wrapper for functions that take Hermitian matrices as input.
+
+    They require a modified function because the finite-difference algorithm
+    for calculating derivatives does not preserve the Hermitian property of the input.
+    """
+    return op(input + input.mH, *args, **kwargs)
+
+
+def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
+    """Gradcheck wrapper for functions that take lower or upper triangular matrices as input.
+
+    They require a modified function because the finite-difference algorithm
+    for calculating derivatives does not preserve the triangular property of the input.
+    `idx` is used to specify which `args[idx]` is to be triangularized.
+    """
+    triangular_arg = args[idx].triu() if upper else args[idx].tril()
+    return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs)
+
+
+def gradcheck_wrapper_triangular_input_real_positive_diagonal(
+    op, *args, upper=False, idx=0, **kwargs
+):
+    """Gradcheck wrapper for functions that take lower/upper triangular matrices
+    with real and positive diagonals, for example, cholesky-like operations.
+    """
+    arg = args[idx]
+    arg_diag = arg.diagonal(0, -2, -1)
+    arg_diag_embed = torch.diag_embed(arg_diag)
+    id_diag_tensor = torch.ones_like(arg_diag)
+    id_tensor = torch.diag_embed(id_diag_tensor)
+    # new_arg = arg - diag(arg) + I
+    new_arg = arg - arg_diag_embed + id_tensor
+    return gradcheck_wrapper_triangular_input(
+        op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs
+    )
+
+
+def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
+    """Gradcheck wrapper for masked operations.
+
+    When mask is specified, replaces masked-out elements with zeros.
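+
+    A rough sketch of the effect (illustrative only; it mirrors the body below):
+
+        out = op(input, mask=mask)                 # may hold -inf/nan where masked out
+        out = torch.where(output_mask, out, zero)  # masked-out entries replaced by zero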
+
+    Use for operations that produce non-finite masked-out elements,
+    for instance, for minimum and maximum reductions.
+    """
+    output = op(input, *args, **kwargs)
+    mask = kwargs.get("mask")
+    if mask is not None:
+        output_mask = torch.masked._output_mask(op, input, *args, **kwargs)
+        output = torch.where(output_mask, output, output.new_zeros([]))
+    return output
+
+
+def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs):
+    """Gradcheck wrapper for masked pointwise operations. Assumes that the result
+    will be masked iff both tensors are masked at a given index.
+
+    When mask is specified, replaces masked-out elements with zeros.
+
+    Use for operations that produce non-finite masked-out elements,
+    for instance, for minimum and maximum reductions.
+    """
+    output = op(input, *args, **kwargs)
+    input_mask = kwargs.get("input_mask")
+    other_mask = kwargs.get("other_mask")
+    if input_mask is not None and other_mask is not None:
+        combined_mask = torch.logical_and(input_mask, other_mask)
+        new_kwargs = dict(mask=combined_mask, **kwargs)
+        output_mask = torch.masked._input_mask(input, *args, **new_kwargs)
+        output = torch.where(output_mask, output, output.new_zeros([]))
+    return output
+
+
+def clone_sample(sample, **kwargs):
+    """
+    Given a SampleInput, this function analyzes its input, args and kwargs,
+    and produces a copy with each non-Tensor entry copied by reference and
+    each Tensor entry cloned with `t.detach().clone().requires_grad_(t.requires_grad)`.
+    """
+
+    def clone_tensor(t):
+        if isinstance(t, torch.Tensor):
+            return t.detach().clone().requires_grad_(t.requires_grad)
+        else:
+            return t
+
+    sample_kwargs = kwargs if kwargs else sample.kwargs
+
+    return SampleInput(
+        clone_tensor(sample.input),
+        args=tuple(map(clone_tensor, sample.args)),
+        kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()},
+    )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b2ed8d391b4f8ff4caf2d390e2cf84790eb2877
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py
@@ -0,0 +1,25 @@
+from typing import List
+
+from torch.testing._internal.opinfo.core import OpInfo
+from torch.testing._internal.opinfo.definitions import (
+    _masked,
+    fft,
+    linalg,
+    signal,
+    special,
+)
+
+# Operator database
+op_db: List[OpInfo] = [
+    *fft.op_db,
+    *linalg.op_db,
+    *signal.op_db,
+    *special.op_db,
+    *_masked.op_db,
+]
+
+python_ref_db: List[OpInfo] = [
+    *fft.python_ref_db,
+    *linalg.python_ref_db,
+    *special.python_ref_db,
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py
new file mode 100644
index 0000000000000000000000000000000000000000..11298ddfe3bd98155096fa309c9011fdde25d211
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py
@@ -0,0 +1,1183 @@
+import unittest
+from collections.abc import Sequence
+from functools import partial
+from typing import List
+
+import numpy as np
+
+import torch
+from torch.testing import make_tensor
+from torch.testing._internal.common_device_type import tol, toleranceOverride
+from torch.testing._internal.common_dtype import (
+    all_types_and,
+    all_types_and_complex_and,
+    complex_types,
+    floating_and_complex_types_and,
+    floating_types_and,
+    integral_types,
+)
+from torch.testing._internal.opinfo.core import (
+    DecorateInfo,
+    gradcheck_wrapper_masked_operation,
+    gradcheck_wrapper_masked_pointwise_operation,
+    M,
+    OpInfo,
+    ReductionOpInfo,
+    S,
+    sample_inputs_reduction,
+    SampleInput,
+)
+from torch.testing._internal.opinfo.utils import prod_numpy, reference_reduction_numpy
+
+
+# Used for log_softmax, softmax, softmin
+def sample_inputs_softmax_variant(
+    op_info,
+    device,
+    dtype,
+    requires_grad,
+    with_dtype=False,
+    use_zero_dimensions=True,
+    **kwargs,
+):
+    make_arg = partial(
+        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
+    )
+    cases = [
+        ((S,), (0,)),
+        ((S, S), (0,)),
+        ((S, S), (1,)),
+        ((S, S), (-1,)),
+        ((S, M, S), (2,)),
+        *([((S, 0, 0), (-1,))] if use_zero_dimensions else []),
+    ]
+    kwargs = dict(dtype=torch.float64) if with_dtype else None
+
+    # PyTorch on XLA throws an error when passed a dim argument for a 0d tensor.
+    # See https://github.com/pytorch/xla/issues/3061 for more details.
+    if torch.device(device).type != "xla":
+        cases.append(((), (0,)))
+
+    return (
+        SampleInput(make_arg(shape), args=dim, kwargs=kwargs) for shape, dim in cases
+    )
+
+
+def _generate_masked_op_mask(input_shape, device, **kwargs):
+    make_arg = partial(
+        make_tensor, dtype=torch.bool, device=device, requires_grad=False
+    )
+    yield None
+    yield make_arg(input_shape)
+    if len(input_shape) > 2:
+        # broadcast last mask dimension:
+        yield make_arg(input_shape[:-1] + (1,))
+        # broadcast middle mask dimension:
+        yield make_arg(input_shape[:1] + (1,) + input_shape[2:])
+        # broadcast first mask dimension:
+        yield make_arg((1,) + input_shape[1:])
+        # mask.ndim < input.ndim
+        yield make_arg(input_shape[1:])
+        # mask.ndim == 1
+        yield make_arg(input_shape[-1:])
+    # masks that would require broadcasting the input (mask.ndim >
+    # input.ndim) are not supported; however, we may reconsider
+    # this if there is demand for such degenerate cases.
+
+
+def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
+    """Sample inputs for masked reduction operators.
+
+    A masked reduction operator is a reduction operator with a trailing
+    optional mask argument. A mask is a bool tensor with the same shape
+    as the input or a shape that is broadcastable to the input shape.
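+
+    For illustration (a hypothetical call of the kind exercised here, with
+    concrete values; masks below come from `_generate_masked_op_mask`):
+
+        input = torch.arange(6.0).reshape(2, 3)
+        mask = torch.tensor([[True, False, True],
+                             [False, True, False]])
+        torch.masked.sum(input, 1, mask=mask)  # sums only the unmasked elements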
+ """ + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_sparse_coo_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse coo layouts. + """ + if op_info.supports_sparse: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse()) + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in {"prod", "amax", "amin"}: + # FIXME: for now reductions with non-zero reduction identity and + # unspecified mask are not supported for sparse COO + # tensors, see torch.masked.prod implementation + # for details. + continue + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def sample_inputs_sparse_csr_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse csr layouts. + """ + if op_info.supports_sparse_csr: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if not ( + sample_input.input.ndim == 2 and sample_input.kwargs.get("keepdim") + ): + # - sparse CSR tensors are always 2-D tensors + # - masked reduction on CSR tensors are defined only if keepdim is True. + continue + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse_csr()) + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in ["prod", "amax", "amin", "mean"]: + # reductions with non-zero reduction identity and + # unspecified mask is not supported for sparse CSR + # tensors, see torch.masked.prod implementation + # for details. + continue + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + yield new_sample + if sample_input.kwargs["dim"] == 0: + # Reductions of CSR tensors use different implementations for + # inner and/or outer dimensions. 
So, as a minimum of testing CSR + # implementations the following kwargs must be generated: + # dict(dim=0, keepdim=True) + # dict(dim=1, keepdim=True) + # dict(dim=(0, 1), keepdim=True) + # Here we generate the dim=1 case from the dim=0 case. + sample_input_kwargs = new_sample.kwargs.copy() + sample_input_kwargs.update(dim=1) + yield SampleInput( + new_sample.input.clone(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked norm.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + sample_input_args, sample_input_kwargs = ( + ord, + ) + sample_input.args, sample_input.kwargs.copy() + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def reference_masked_std_var( + numpy_fn, +): + ref = reference_reduction_numpy(numpy_fn) + + # Translate unbiased or correction arguments into ddof + def func( + input, + dim=None, + unbiased=None, + *, + correction=None, + **kwargs, + ): + ddof = 1 + if unbiased is not None: + ddof = 1 if unbiased else 0 + if correction is not None: + ddof = correction + + if isinstance(dim, Sequence): + dim = tuple(dim) + + return ref(input, dim, ddof=ddof, **kwargs) + + return func + + +def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked std/var.""" + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + from torch.testing._internal.common_methods_invocations import sample_inputs_std_var + + def masked_samples(): + for sample_input in sample_inputs_std_var( + op_info, device, dtype, requires_grad, **kwargs + ): + if len(sample_input.args) and isinstance(sample_input.args[0], bool): + continue # masked.{std, var} doesn't support `.var(unbiased)` + + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + for sample_input in masked_samples(): + correction = sample_input.kwargs.get("correction") + if correction is None: + correction = int(sample_input.kwargs.get("unbiased", True)) + + dim = sample_input.kwargs.get("dim", None) + + if sample_input.kwargs.get("mask") is None: + orig_count = torch.masked.sum( + torch.ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + ) + else: + inmask = torch.masked._input_mask( + sample_input.input, *sample_input.args, **sample_input.kwargs + ) + orig_count = torch.masked.sum( + inmask.new_ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + if orig_count.min() <= correction + 1: + # Skip samples that lead to nans in var computation + continue + + yield sample_input + + +def sample_inputs_masked_softmax( + 
op_info, device, dtype, requires_grad, with_dtype=False, **kwargs +): + """Sample inputs for masked softmax, log_softmax, and softmin. + + Masked normalization operator is a reduction operator with + trailing mask optional argument. A mask is a bool tensor with the + same shape as input or a shape that is broadcastable to input + shape. + """ + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input.args, + mask=mask, + **sample_input.kwargs, + ) + + +def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked cumsum and cumprod.""" + inputs: List[SampleInput] = [] + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + if type(mask) != torch.Tensor: + continue + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + if "keepdim" in sample_input_kwargs: + sample_input_kwargs.pop("keepdim") + # dimension is required + if sample_input_args: + dim = sample_input.args[0] + else: + if "dim" not in sample_input_kwargs: + continue + dim = sample_input_kwargs.pop("dim") + sample_input_args = (dim,) + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input_args, + **sample_input_kwargs, + ) + + +def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked logaddexp.""" + shapes = [(S,), (S, S), (S, M, S)] + input_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + other_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for shape, input_masks, other_masks in zip( + shapes, input_mask_lists, other_mask_lists + ): + for input_mask, other_mask in zip(input_masks, other_masks): + yield SampleInput( + make_arg(shape), + make_arg(shape), + input_mask=input_mask, + other_mask=other_mask, + ) + + +def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked normalize.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, use_zero_dimensions=False, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + ord, + *sample_input.args, + **sample_input.kwargs, + ) + + +op_db: List[OpInfo] = [ + ReductionOpInfo( + "masked.sum", + ref=reference_reduction_numpy(np.sum), + method_variant=None, + identity=0, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + 
"test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=5e-2), + torch.float16: tol(atol=1e-03, rtol=5e-3), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=0.1, rtol=0.1), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestMasked", + "test_mask_layout", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + ReductionOpInfo( + "masked.prod", + ref=prod_numpy, + method_variant=None, + identity=1, + nan_policy="propagate", + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + "TestReductions", + "test_ref_small_input", + dtypes=(torch.int8, torch.int16, torch.int32), + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... 
(used for sparse_coo inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + device_type="cuda", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_duplicate_values", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1.5e-03)}), + "TestMasked", + "test_mask_layout", + device_type="cpu", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + OpInfo( + "masked.cumsum", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.cumprod", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCompositeCompliance", + "test_backward", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=2e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amax", + nan_policy="propagate", + supports_out=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amax), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amin", + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amin), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmax", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmax + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmin", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmin + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.mean", + ref=reference_reduction_numpy(np.mean) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_sparse_csr=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_duplicate_values", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_small_input", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=0.05), + torch.float16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=2e-03)}), + "TestSparseCompressed", + "test_consistency", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.median", + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16), + method_variant=None, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=partial( + sample_inputs_masked_softmax, use_zero_dimensions=False + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.norm", + identity=0, + method_variant=None, + nan_policy="propagate", + supports_out=False, + promotes_int_to_float=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # torch.jit.frontend.NotSupportedError: Compiled functions + # can't take variable number of arguments or use + # keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_norm, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.var", + ref=reference_masked_std_var(np.var) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + 
unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + ReductionOpInfo( + "masked.std", + ref=reference_masked_std_var(np.std) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-02, rtol=1e-02), + torch.float16: tol(atol=1e-02, rtol=1e-02), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=5e-03, rtol=5e-04), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + OpInfo( + "masked.softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.log_softmax", + 
method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + ], + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.softmin", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.normalize", + method_variant=None, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_normalize, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.logaddexp", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestFwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestBwdGradients", "test_fn_gradgrad" + ), + ), + sample_inputs_func=sample_inputs_masked_logaddexp, + gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation, + ), + ReductionOpInfo( + "masked.logsumexp", + dtypes=all_types_and(torch.half, torch.bfloat16), + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.skip("Skipped!"), "TestReductions", "test_dim_empty_keepdim" + ), + # Identity can't be -torch.inf without overflow + DecorateInfo( + unittest.skip("Skipped!"), + "TestReductions", + "test_empty_tensor_empty_slice", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults
+            DecorateInfo(
+                unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"
+            ),
+            # all the values are the same except for -inf vs nan
+            DecorateInfo(unittest.skip("Skipped!"), "TestDecomp", "test_comprehensive"),
+        ),
+        sample_inputs_func=sample_inputs_masked_reduction,
+        gradcheck_wrapper=gradcheck_wrapper_masked_operation,
+    ),
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py
new file mode 100644
index 0000000000000000000000000000000000000000..621930811056918d246e7fefe7ef5f5a60ad6d4e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py
@@ -0,0 +1,830 @@
+import unittest
+from functools import partial
+from typing import List
+
+import numpy as np
+
+import torch
+
+from torch.testing import make_tensor
+from torch.testing._internal.common_cuda import SM53OrLater
+from torch.testing._internal.common_device_type import precisionOverride
+from torch.testing._internal.common_dtype import (
+    all_types_and,
+    all_types_and_complex_and,
+)
+from torch.testing._internal.common_utils import TEST_SCIPY, TEST_WITH_ROCM
+from torch.testing._internal.opinfo.core import (
+    DecorateInfo,
+    ErrorInput,
+    OpInfo,
+    sample_inputs_spectral_ops,
+    SampleInput,
+    SpectralFuncInfo,
+    SpectralFuncType,
+)
+from torch.testing._internal.opinfo.refs import (
+    _find_referenced_opinfo,
+    _inherit_constructor_args,
+    PythonRefInfo,
+)
+
+has_scipy_fft = False
+if TEST_SCIPY:
+    try:
+        import scipy.fft
+
+        has_scipy_fft = True
+    except ModuleNotFoundError:
+        pass
+
+
+class SpectralFuncPythonRefInfo(SpectralFuncInfo):
+    """
+    An OpInfo for a Python reference of a spectral function.
+    """
+
+    def __init__(
+        self,
+        name,  # the string name of the callable Python reference
+        *,
+        op=None,  # the function variant of the operation, populated as torch.
if None + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant="", + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant, op_db=op_db + ) + assert isinstance(self.torch_opinfo, SpectralFuncInfo) + + inherited = self.torch_opinfo._original_spectral_func_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +def error_inputs_fft(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Zero-dimensional tensor has no dimension to take FFT of + yield ErrorInput( + SampleInput(make_arg()), + error_type=IndexError, + error_regex="Dimension specified as -1 but tensor has no dimensions", + ) + + +def error_inputs_fftn(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Specifying a dimension on a zero-dimensional tensor + yield ErrorInput( + SampleInput(make_arg(), dim=(0,)), + error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions", + ) + + +def sample_inputs_fft_with_min( + op_info, device, dtype, requires_grad=False, *, min_size, **kwargs +): + yield from sample_inputs_spectral_ops( + op_info, device, dtype, requires_grad, **kwargs + ) + if TEST_WITH_ROCM: + # FIXME: Causes floating point exception on ROCm + return + + # Check the "Invalid number of data points" error isn't too strict + # https://github.com/pytorch/pytorch/pull/109083 + a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(a) + + +def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor( + shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield SampleInput(mt((9, 10))) + yield SampleInput(mt((50,)), kwargs=dict(dim=0)) + yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,))) + yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1))) + yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2))) + + +# Operator database +op_db: List[OpInfo] = [ + SpectralFuncInfo( + "fft.fft", + aten_name="fft_fft", + decomp_aten_name="_fft_c2c", + ref=np.fft.fft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + SpectralFuncInfo( + "fft.fft2", + aten_name="fft_fft2", + ref=np.fft.fft2, + decomp_aten_name="_fft_c2c", + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, 
torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + ), + SpectralFuncInfo( + "fft.fftn", + aten_name="fft_fftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.fftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + ), + SpectralFuncInfo( + "fft.hfft", + aten_name="fft_hfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.hfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + check_batched_gradgrad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + ), + ), + SpectralFuncInfo( + "fft.hfft2", + aten_name="fft_hfft2", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfft2 if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + skips=( + # Issue with conj and torch 
dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + "fft.hfftn", + aten_name="fft_hfftn", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + "fft.rfft", + aten_name="fft_rfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + skips=(), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.rfft2", + aten_name="fft_rfft2", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft2, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.rfftn", + aten_name="fft_rfftn", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # 
https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.ifft", + aten_name="fft_ifft", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + ), + SpectralFuncInfo( + "fft.ifft2", + aten_name="fft_ifft2", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ifftn", + aten_name="fft_ifftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ihfft", + aten_name="fft_ihfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.ihfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fft, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA 
supports Half/ComplexHalf Precision FFT only on SM53 or later archs
+ dtypesIfCUDA=all_types_and(
+ torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,))
+ ),
+ skips=(),
+ check_batched_grad=False,
+ ),
+ SpectralFuncInfo(
+ "fft.ihfft2",
+ aten_name="fft_ihfft2",
+ decomp_aten_name="_fft_r2c",
+ ref=scipy.fft.ihfftn if has_scipy_fft else None,
+ ndimensional=SpectralFuncType.TwoD,
+ sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
+ error_inputs_func=error_inputs_fftn,
+ # https://github.com/pytorch/pytorch/issues/80411
+ gradcheck_fast_mode=True,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # See https://github.com/pytorch/pytorch/pull/78358
+ check_batched_forward_grad=False,
+ dtypes=all_types_and(torch.bool),
+ # rocFFT doesn't support Half/Complex Half Precision FFT
+ # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
+ dtypesIfCUDA=all_types_and(
+ torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,))
+ ),
+ check_batched_grad=False,
+ check_batched_gradgrad=False,
+ decorators=(
+ # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
+ DecorateInfo(
+ precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd"
+ ),
+ # Mismatched elements!
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"),
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warnings"),
+ ),
+ ),
+ SpectralFuncInfo(
+ "fft.ihfftn",
+ aten_name="fft_ihfftn",
+ decomp_aten_name="_fft_r2c",
+ ref=scipy.fft.ihfftn if has_scipy_fft else None,
+ ndimensional=SpectralFuncType.ND,
+ sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)),
+ error_inputs_func=error_inputs_fftn,
+ # https://github.com/pytorch/pytorch/issues/80411
+ gradcheck_fast_mode=True,
+ supports_forward_ad=True,
+ supports_fwgrad_bwgrad=True,
+ # See https://github.com/pytorch/pytorch/pull/78358
+ check_batched_forward_grad=False,
+ dtypes=all_types_and(torch.bool),
+ # rocFFT doesn't support Half/Complex Half Precision FFT
+ # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
+ dtypesIfCUDA=all_types_and(
+ torch.bool, *(() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,))
+ ),
+ check_batched_grad=False,
+ check_batched_gradgrad=False,
+ decorators=[
+ # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"),
+ # Mismatched elements!
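+ # (presumably the same out= shape mismatch as in fft.ihfft2 above)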
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + ], + ), + SpectralFuncInfo( + "fft.irfft", + aten_name="fft_irfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.irfft2", + aten_name="fft_irfft2", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.irfftn", + aten_name="fft_irfftn", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # rocFFT doesn't support Half/Complex Half Precision FFT + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *( + () + if (TEST_WITH_ROCM or not SM53OrLater) + else (torch.half, torch.complex32) + ), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + OpInfo( + "fft.fftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "fft.ifftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + ), +] + +python_ref_db: List[OpInfo] = [ + SpectralFuncPythonRefInfo( + "_refs.fft.fft", + torch_opinfo_name="fft.fft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft", + torch_opinfo_name="fft.ifft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft", + torch_opinfo_name="fft.rfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft", + torch_opinfo_name="fft.irfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft", + torch_opinfo_name="fft.hfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft", + torch_opinfo_name="fft.ihfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fftn", + torch_opinfo_name="fft.fftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifftn", + torch_opinfo_name="fft.ifftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfftn", + torch_opinfo_name="fft.rfftn", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfftn", + torch_opinfo_name="fft.irfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfftn", + torch_opinfo_name="fft.hfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfftn", + torch_opinfo_name="fft.ihfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fft2", + torch_opinfo_name="fft.fft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft2", + torch_opinfo_name="fft.ifft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft2", + torch_opinfo_name="fft.rfft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft2", + torch_opinfo_name="fft.irfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft2", + torch_opinfo_name="fft.hfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft2", + torch_opinfo_name="fft.ihfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + PythonRefInfo( + "_refs.fft.fftshift", + op_db=op_db, + torch_opinfo_name="fft.fftshift", + ), + PythonRefInfo( + "_refs.fft.ifftshift", + op_db=op_db, + torch_opinfo_name="fft.ifftshift", + ), +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..1b668042964e3fad7e6cad1d79cc7d519fed4439 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py @@ -0,0 +1,2454 @@ +import itertools +import random +import unittest +from 
functools import partial +from itertools import chain, product +from typing import Iterable, List + +import numpy as np +from numpy import inf + +import torch + +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import ( + _get_magma_version, + _get_torch_cuda_version, + with_tf32_off, +) +from torch.testing._internal.common_device_type import ( + has_cusolver, + skipCPUIfNoLapack, + skipCUDAIf, + skipCUDAIfNoCusolver, + skipCUDAIfNoMagma, + skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoMagmaAndNoLinalgsolver, + skipCUDAIfRocm, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + all_types_and_complex, + all_types_and_complex_and, + floating_and_complex_types, + floating_and_complex_types_and, + get_all_complex_dtypes, +) +from torch.testing._internal.common_utils import ( + GRADCHECK_NONDET_TOL, + IS_MACOS, + make_fullrank_matrices_with_distinct_singular_values, + skipIfSlowGradcheckEnv, + slowTest, + TEST_WITH_ROCM, +) +from torch.testing._internal.opinfo.core import ( + clone_sample, + DecorateInfo, + ErrorInput, + gradcheck_wrapper_hermitian_input, + L, + M, + OpInfo, + ReductionOpInfo, + S, + SampleInput, +) +from torch.testing._internal.opinfo.refs import PythonRefInfo, ReductionPythonRefInfo + + +def sample_kwargs_vector_norm(t, **kwargs): + # orders with / without identity + def ords(): + has_id = (6, 4, 2, 1, 0, 0.9) + no_id = (inf, -2.1, -inf) + if t.numel() == 0: + dim = kwargs.get("dim") + if dim is None: + return has_id + if not isinstance(dim, Iterable): + dim = (dim,) + for d in dim: + if t.size(d) == 0: + return has_id + return has_id + no_id + + return (((), dict(ord=o)) for o in ords()) + + +def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + is_linalg_svd = "linalg.svd" in op_info.name + batches = [(), (0,), (3,)] + ns = [0, 3, 5] + + def uniformize(usv): + S = usv[1] + k = S.shape[-1] + U = usv[0][..., :k] + Vh = usv[2] if is_linalg_svd else usv[2].mH + Vh = Vh[..., :k, :] + return U, S, Vh + + def fn_U(usv): + U, _, _ = uniformize(usv) + return U.abs() + + def fn_S(usv): + return uniformize(usv)[1] + + def fn_Vh(usv): + # We also return S to test + _, S, Vh = uniformize(usv) + return S, Vh.abs() + + def fn_UVh(usv): + U, S, Vh = uniformize(usv) + return U @ Vh, S + + fns = (fn_U, fn_S, fn_Vh, fn_UVh) + + fullmat = "full_matrices" if is_linalg_svd else "some" + + for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns): + shape = batch + (n, k) + yield SampleInput( + make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn + ) + + +def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + yield SampleInput(make_arg((S, 3)), args=(make_arg((S, 3)),)) + yield SampleInput( + make_arg((S, 3, S)), args=(make_arg((S, 3, S)),), kwargs=dict(dim=1) + ) + yield SampleInput(make_arg((1, 3)), args=(make_arg((S, 3)),), kwargs=dict(dim=-1)) + + +def error_inputs_cross(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + sample = SampleInput(input=make_arg((S, 3)), args=(make_arg((S, 1)),)) + err = "inputs dimension -1 must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = 
SampleInput(input=make_arg((5, S, 3)), args=(make_arg((S, 3)),))
+ err = "inputs must have the same number of dimensions"
+ yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
+
+ sample = SampleInput(input=make_arg((S, 2)), args=(make_arg((S, 2)),))
+ err = "must have length 3"
+ yield ErrorInput(sample, error_regex=err, error_type=RuntimeError)
+
+ sample = SampleInput(
+ input=make_arg((S, 2)), args=(make_arg((S, 2)),), kwargs=dict(dim=2)
+ )
+ err = "Dimension out of range"
+ yield ErrorInput(sample, error_regex=err, error_type=IndexError)
+
+
+ def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
+ """
+ This function generates input for torch.linalg.householder_product (torch.orgqr).
+ The first argument should be a square matrix or batch of square matrices, while the second argument is a vector or batch of vectors.
+ Empty, square, rectangular, batched square and batched rectangular input is generated.
+ """
+ make_arg = partial(
+ make_tensor,
+ device=device,
+ dtype=dtype,
+ requires_grad=requires_grad,
+ low=-2,
+ high=2,
+ )
+ # Each column of the matrix is getting multiplied many times leading to very large values for
+ # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
+ # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
+ yield SampleInput(make_arg((S, S)), make_arg((S,)))
+ yield SampleInput(make_arg((S + 1, S)), make_arg((S,)))
+ yield SampleInput(make_arg((2, 1, S, S)), make_arg((2, 1, S)))
+ yield SampleInput(make_arg((2, 1, S + 1, S)), make_arg((2, 1, S)))
+ yield SampleInput(
+ make_arg((0, 0), low=None, high=None),
+ make_arg((0,), low=None, high=None),
+ )
+ yield SampleInput(make_arg((S, S)), make_arg((0,), low=None, high=None))
+ # m = n = S, k = S - 2
+ yield SampleInput(make_arg((S, S)), make_arg((S - 2,), low=None, high=None))
+ # m = S, n = S - 1, k = S - 2
+ yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None))
+
+
+ def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs):
+ make_arg = partial(make_tensor, device=device, dtype=dtype)
+
+ def make_singular_matrix_batch_base(size, rank):
+ assert size[-1] == size[-2]
+ assert rank > 0 and rank < size[-1]
+
+ n = size[-1]
+ a = make_arg(size[:-2] + (n, rank)) / 10
+ b = make_arg(size[:-2] + (rank, n)) / 10
+ x = a @ b
+ lu, pivs, _ = torch.linalg.lu_factor_ex(x)
+ p, l, u = torch.lu_unpack(lu, pivs)
+ u_diag_abs = u.diagonal(0, -2, -1).abs()
+ u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
+ u_diag_abs_smallest_idxs = torch.topk(
+ u_diag_abs, k=(n - rank), largest=False
+ ).indices
+ u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
+ u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
+ matrix = p @ l @ u
+
+ matrix.requires_grad_(requires_grad)
+ return matrix
+
+ for batch, size in product(((), (2,), (2, 2)), range(6)):
+ shape = batch + (size, size)
+ for rank in range(1, size):
+ yield SampleInput(make_singular_matrix_batch_base(shape, rank))
+
+
+ def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs):
+ make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+ make_arg = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+ make_arg_fullrank = partial(
+ make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+ # (matrix_size, (batch_sizes, ...))
+ test_sizes = [
+ (1, ()),
+ (2, (0,)),
+ (2, (2,)),
+ ]
+
+ for
matrix_size, batch_sizes in test_sizes: + size = batch_sizes + (matrix_size, matrix_size) + for n in (0, 3, 5): + yield SampleInput(make_arg(size), args=(n,)) + for n in [-4, -2, -1]: + yield SampleInput(make_arg_fullrank(*size), args=(n,)) + + +def sample_inputs_linalg_det_logdet_slogdet( + op_info, device, dtype, requires_grad, **kwargs +): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + batches = [(), (0,), (3,)] + ns = [0, 1, 5] + + is_logdet = op_info.name == "logdet" + + for ( + batch, + n, + ) in product(batches, ns): + shape = batch + (n, n) + A = make_arg(*shape) + # Need to make the matrices in A have positive determinant for autograd + # To do so, we multiply A by its determinant to flip the sign of its determinant + if is_logdet and not A.is_complex() and A.numel() > 0: + s = torch.linalg.slogdet(A).sign + A = A * s.unsqueeze(-1).unsqueeze(-1) + A.requires_grad_(requires_grad) + yield SampleInput(A) + + +def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """Samples the inputs for both linalg.lu_solve and lu_solve""" + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_a = partial(make_fn, dtype=dtype, device=device) + make_b = partial(make_tensor, dtype=dtype, device=device) + + def clone(X, requires_grad): + Y = X.clone() + Y.requires_grad_(requires_grad) + return Y + + is_linalg_lu_solve = op_info.name == "linalg.lu_solve" + + batches = ((), (0,), (2,)) + ns = (3, 1, 0) + nrhs = (4, 1, 0) + + for n, batch, rhs in product(ns, batches, nrhs): + A = make_a(*(batch + (n, n))) + LU, pivots = torch.linalg.lu_factor(A) + + B = make_b(batch + (n, rhs)) + + grads = (False,) if not requires_grad else (True, False) + # we try all possible combinations of requires_grad for each input + for LU_grad, B_grad in product(grads, grads): + # when requires_grad == True, at least one input has to have requires_grad enabled + if requires_grad and not LU_grad and not B_grad: + continue + + if is_linalg_lu_solve: + for adjoint, left in product((True, False), repeat=2): + yield SampleInput( + clone(LU, LU_grad), + args=(pivots, clone(B if left else B.mT, B_grad)), + kwargs=dict(adjoint=adjoint, left=left), + ) + else: + yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots)) + + +def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): + # Each test case consists of the sizes in the chain of multiplications + # e.g. 
[2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) + test_cases = [ + [1, 2, 1], + [2, 0, 2], + [0, 2, 2], + [2, 2, 2, 2], + [2, 3, 4, 5], + [5, 4, 0, 2], + [2, 4, 3, 5, 3, 2], + ] + + for sizes in test_cases: + tensors = [] + for size in zip(sizes[:-1], sizes[1:]): + t = make_tensor( + size, dtype=dtype, device=device, requires_grad=requires_grad + ) + tensors.append(t) + yield SampleInput(tensors) + + +def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): + low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + sizes = ((2, 2), (2, 3, 2)) + if dtype in low_precision_dtypes: + # svdvals not supported for low precision dtypes + ords = ("fro", inf, -inf, 1, -1) + else: + ords = ("fro", "nuc", inf, -inf, 1, -1, 2, -2) + dims = ((-2, -1), (-1, 0)) + + for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): + yield SampleInput(make_arg(size), args=(ord, dim, keepdim)) + + +def sample_inputs_linalg_norm( + op_info, device, dtype, requires_grad, *, variant=None, **kwargs +): + if variant is not None and variant not in ("subgradient_at_zero",): + raise ValueError( + f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}" + ) + + test_sizes = [ + (S,), + (0,), + (S, S), + (0, 0), + (S, 0), + (0, S), + (S, S, S), + (0, S, S), + (S, 0, S), + (0, 0, 0), + ] + + vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) + if dtype in {torch.float16, torch.bfloat16, torch.complex32}: + # svdvals not supported for low precision dtypes + matrix_ords = ("fro", inf, -inf, 1, -1) + else: + matrix_ords = (None, "fro", "nuc", inf, -inf, 1, -1, 2, -2) + + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + low=None, + high=None, + ) + + for test_size in test_sizes: + is_vector_norm = len(test_size) == 1 + is_matrix_norm = len(test_size) == 2 + + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. + is_valid_for_p2 = is_vector_norm or (test_size[-1] != 0 and test_size[-2] != 0) + + for keepdim in [False, True]: + if variant != "subgradient_at_zero" and is_valid_for_p2: + yield SampleInput(make_arg(test_size), keepdim=keepdim) + + if not (is_vector_norm or is_matrix_norm): + continue + + ords = vector_ords if is_vector_norm else matrix_ords + + for ord in ords: + if is_vector_norm and test_size[-1] == 0: + if ord == np.inf or (ord is not None and ord < 0): + # RuntimeError: linalg.vector_norm cannot compute the + # {ord} norm on an empty tensor because the operation + # does not have an identity + continue + elif is_matrix_norm: + dims_to_check = { + None: (0,), + np.inf: (0,), + 2: (0, 1), + 1: (1,), + -1: (1,), + -2: (0, 1), + -np.inf: (0,), + }.get(ord, ()) + + if any(test_size[d] == 0 for d in dims_to_check): + # IndexError: amax(): Expected reduction dim {dim} to + # have non-zero size. 
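+ # For instance, ord=2 and ord=-2 reduce over singular values, so both
+ # matrix dims must be non-zero, while ord=inf only needs rows to exist:
+ # a (3, 0) input works for ord=inf (amax over the three zero row-sums)
+ # but is skipped for ord=1 and ord=2.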
+ continue
+
+ if variant == "subgradient_at_zero":
+ yield SampleInput(
+ torch.zeros(
+ test_size,
+ dtype=dtype,
+ device=device,
+ requires_grad=requires_grad,
+ ),
+ ord,
+ keepdim=keepdim,
+ )
+ else:
+ yield SampleInput(make_arg(test_size), ord, keepdim=keepdim)
+
+ if ord in ["nuc", "fro"]:
+ yield SampleInput(
+ make_arg(test_size), ord=ord, keepdim=keepdim, dim=(0, 1)
+ )
+
+
+ def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs):
+ make_arg = partial(
+ make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
+ )
+ batches = ((), (0,), (1,), (5,))
+ ns = (0, 1, 3, 5)
+ for b, n in product(batches, ns):
+ shape = b + (n,)
+ yield SampleInput(make_arg(shape), args=(make_arg(shape),))
+ for i in range(len(shape)):
+ yield SampleInput(
+ make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i)
+ )
+
+
+ def sample_inputs_linalg_invertible(
+ op_info, device, dtype, requires_grad=False, **kwargs
+ ):
+ """
+ This function generates invertible inputs for linear algebra ops.
+ The input is generated as the itertools.product of 'batches' and 'ns'.
+ In total this function generates 8 SampleInputs
+ 'batches' cases include:
+ () - single input,
+ (0,) - zero batched dimension,
+ (2,) - batch of two matrices,
+ (1, 1) - 1x1 batch of matrices
+ 'ns' gives 0x0 and 5x5 matrices.
+ Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
+ """
+ make_fn = make_fullrank_matrices_with_distinct_singular_values
+ make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)
+
+ batches = [(), (0,), (2,), (1, 1)]
+ ns = [5, 0]
+
+ for batch, n in product(batches, ns):
+ yield SampleInput(make_arg(*batch, n, n))
+
+
+ def sample_inputs_matrix_rank(op_info, device, dtype, requires_grad=False, **kwargs):
+ """
+ This function produces inputs for matrix rank that test
+ all possible combinations for atol and rtol
+ """
+
+ def make_tol_arg(kwarg_type, inp):
+ if kwarg_type == "none":
+ return None
+ if kwarg_type == "float":
+ return 1.0
+ assert kwarg_type == "tensor"
+ return torch.ones(inp.shape[:-2], device=device)
+
+ for tol_type in ["float", "tensor"]:
+ for atol_type, rtol_type in product(["none", tol_type], repeat=2):
+ if (
+ atol_type == "none" and rtol_type == "none"
+ ): # default behavior, so skipped here so it's not tested 2 extra times
+ continue
+ for sample in sample_inputs_linalg_invertible(
+ op_info, device, dtype, requires_grad
+ ):
+ assert sample.kwargs == {}
+ sample.kwargs = {
+ "atol": make_tol_arg(atol_type, sample.input),
+ "rtol": make_tol_arg(rtol_type, sample.input),
+ }
+ yield sample
+
+ # default kwargs
+ yield from sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
+
+
+ def sample_inputs_linalg_pinv_singular(
+ op_info, device, dtype, requires_grad=False, **kwargs
+ ):
+ """
+ This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to
+ test the backward method of `linalg_pinv`. That way we always preserve the rank of the
+ input no matter the perturbations applied to it by the gradcheck.
+ Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.
+ """
+ batches = [(), (0,), (2,), (1, 1)]
+ # a size of at least 30 is required to cause failures for the previous implicit implementation
+ # of the pinv's backward method, albeit it is slow.
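+ # With k < 3, the factors below give products of rank at most 2, so every
+ # generated sample is genuinely rank-deficient (k=0 even yields an
+ # all-zero product).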
+ size = [0, 3, 50] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + # Note that by making the columns of `a` and `b` orthonormal we make sure that + # the product matrix `a @ b.t()` has condition number 1 when restricted to its image + a = ( + torch.rand(*batch, m, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + b = ( + torch.rand(*batch, n, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + yield SampleInput(a, args=(b,)) + + +def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # autograd is not supported for inputs with zero number of elements + shapes = ( + (S, S), + (2, S, S), + (2, 1, S, S), + ) + + for shape in shapes: + yield SampleInput(make_arg(shape)) + + +def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + shapes = ( + (), + (1,), + (S,), + (2, S), + ) + + for shape in shapes: + if len(shape) > 0 and shape[-1] > 1: + yield SampleInput(make_arg(shape)) + n = shape[-1] if len(shape) > 0 else 1 + for i in range(3): + # n-1, n, n+1 + N = n + i - 1 + if N < 2: + continue + yield SampleInput(make_arg(shape), kwargs=dict(N=N)) + + +def np_vander_batched(x, N=None): + # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests) + if x.ndim == 0: + x = x[np.newaxis] + if x.ndim == 1: + y = np.vander(x, N=N, increasing=True) + return y + else: + if N is None: + N = x.shape[-1] + y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N)) + return y + + +def sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + # Cholesky factorization is for positive-definite matrices + single_well_conditioned_matrix = random_well_conditioned_matrix( + S, S, dtype=dtype, device=device + ) + batch_well_conditioned_matrices = random_well_conditioned_matrix( + 2, S, S, dtype=dtype, device=device + ) + single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH + batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH + + inputs = ( + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + single_pd, + batch_pd, + ) + test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs) + for l in test_cases: + # generated lower-triangular samples + l.requires_grad = requires_grad + yield SampleInput(l) # upper=False by default + yield SampleInput( + l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False) + ) + + # generate upper-triangular inputs + u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad) + yield SampleInput(u, kwargs=dict(upper=True)) + + +def sample_inputs_linalg_ldl_factor( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + + # Symmetric inputs + yield SampleInput( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=False), + ) # single matrix + yield SampleInput( + random_symmetric_pd_matrix(S, 2, dtype=dtype, 
device=device), + kwargs=dict(hermitian=False), + ) # batch of matrices + yield SampleInput( + torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # 0x0 matrix + yield SampleInput( + torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # zero batch of matrices + + # Hermitian inputs + # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+ + magma_254_available = device.type == "cuda" and _get_magma_version() >= (2, 5, 4) + if dtype.is_complex and (device.type == "cpu" or magma_254_available): + yield SampleInput( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # single matrix + yield SampleInput( + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # batch of matrices + + +def sample_inputs_linalg_ldl_solve( + op_info, device, dtype, requires_grad=False, **kwargs +): + # Generate LDL factors of symmetric (and Hermitian on CPU) matrices + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + symmetric_inputs = ( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix + random_symmetric_pd_matrix( + S, 2, dtype=dtype, device=device + ), # batch of matrices + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + ) + hermitian_inputs = ( + ( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + ) + if device.type == "cpu" and dtype.is_complex + else () + ) + test_cases1 = ( + torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs + ) + test_cases2 = ( + torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs + ) + + # Symmetric case + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for test_case in test_cases1: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=False) + ) + + # Hermitian case + for test_case in test_cases2: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=True) + ) + + +def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + device = torch.device(device) + + drivers: Tuple[str, ...] + if device.type == "cuda": + drivers = ("gels",) + else: + drivers = ("gels", "gelsy", "gelss", "gelsd") + + # we generate matrices of shape (..., n + delta, n) + deltas: Tuple[int, ...] 
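+ # e.g. with the base size of 3 used below, delta = -1 yields wide (2, 3)
+ # systems and delta = +1 tall (4, 3) least-squares problems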
+ if device.type == "cpu" or has_cusolver(): + deltas = (-1, 0, +1) + # only square systems if Cusolver is not available + # becase we solve a lstsq problem with a transposed matrix in the backward + else: + deltas = (0,) + + for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas): + shape = batch + (3 + delta, 3) + a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device) + a.requires_grad_(requires_grad) + b = make_tensor( + shape, + dtype=dtype, + device=device, + low=None, + high=None, + requires_grad=requires_grad, + ) + yield SampleInput(a, b, driver=driver) + + +def error_inputs_lstsq(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d,)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d, None)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # Shapes for 2D Tensors + shapes_2d = ((S, S), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((S, S, S),) + + kwargs_2d = (dict(), dict(offset=2), dict(offset=2), dict(offset=1)) + kwargs_3d = ( + dict(offset=1, dim1=1, dim2=2), + dict(offset=2, dim1=0, dim2=1), + dict(offset=-2, dim1=0, dim2=1), + ) + + for shape, kwarg in chain( + product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d) + ): + yield SampleInput(make_arg(shape), kwargs=kwarg) + + +def error_inputs_diagonal_diag_embed(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + shapes1d = (0, 1, (0,), (1,)) + shapes2d = ((M, L),) + shapes3d = ((M, S, L),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 == dim2 is not allowed + dict(dim1=1, dim2=1), + # out of bounds dims are not allowed + dict(dim1=10000), + dict(dim2=10000), + ) + + kwargs3d = kwargs2d + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + arg = make_arg(shape) + sample = SampleInput(input=arg, kwargs=kwargs) + + dim1 = kwargs.get("dim1") + dim2 = kwargs.get("dim2") + + if "diagonal" in op_info.name: + num_dim = arg.dim() + elif op_info.name in ("diag_embed", "_refs.diag_embed"): + # these are valid inputs for diag_embed + if shape in ((0,), (1,)): + continue + num_dim = arg.dim() + 1 + else: + raise RuntimeError("should be unreachable") + + bound1 = -num_dim + bound2 = num_dim - 1 + dim_range = range(bound1, bound2 + 1) + dim1_cond = dim1 and dim1 not in dim_range + dim2_cond = dim2 and dim2 not in dim_range + + if dim1 == dim2: + err = f"diagonal dimensions cannot be identical {dim1}, {dim2}" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + elif dim1_cond or dim2_cond: + err_dim = dim1 if dim1_cond else dim2 + err = ( + r"Dimension out of range \(expected to be in range of " + rf"\[{bound1}, {bound2}\], but got {err_dim}\)" + ) + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + else: + raise RuntimeError("should be unreachable") + + +def sample_inputs_linalg_cholesky( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates always 
positive-definite input for torch.linalg.cholesky using
+ random_hermitian_pd_matrix.
+ The input is generated as the itertools.product of 'batches', 'ns' and the 'upper' flag.
+ In total this function generates 16 SampleInputs
+ 'batches' cases include:
+ () - single input,
+ (0,) - zero batched dimension,
+ (2,) - batch of two matrices,
+ (1, 1) - 1x1 batch of matrices
+ 'ns' gives 0x0 and 5x5 matrices.
+ Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
+ """
+ from torch.testing._internal.common_utils import random_hermitian_pd_matrix
+
+ batches = [(), (0,), (2,), (1, 1)]
+ ns = [5, 0]
+ for batch, n, upper in product(batches, ns, [True, False]):
+ a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
+ a.requires_grad = requires_grad
+ yield SampleInput(a, upper=upper)
+
+
+ def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs):
+ """
+ This function generates input for torch.linalg.eig.
+ """
+
+ def out_fn(output):
+ return output[0], abs(output[1])
+
+ samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
+ for sample in samples:
+ sample.output_process_fn_grad = out_fn
+ yield sample
+
+
+ def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
+ """
+ This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
+ """
+
+ def out_fn(output):
+ if isinstance(output, tuple):
+ # eigh function
+ return output[0], abs(output[1])
+ else:
+ # eigvalsh function
+ return output
+
+ # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input
+ samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
+ for sample in samples:
+ # Note: we cannot use np.random.choice here as TorchDynamo
+ # does not support tensors of strings.
+ sample.kwargs = {"UPLO": random.choice(["L", "U"])}
+ sample.output_process_fn_grad = out_fn
+ yield sample
+
+
+ def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):
+ """
+ This function generates input for torch.linalg.pinv with hermitian=False keyword argument.
+ """
+ for o in sample_inputs_linalg_invertible(
+ op_info, device, dtype, requires_grad, **kwargs
+ ):
+ real_dtype = o.input.real.dtype if dtype.is_complex else dtype
+ # requires_grad path for rtol tensor is not implemented
+ for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):
+ o = clone_sample(o)
+ o.kwargs = {"rtol": rtol}
+ yield o
+
+
+ def sample_inputs_linalg_pinv_hermitian(
+ op_info, device, dtype, requires_grad=False, **kwargs
+ ):
+ """
+ This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
+ """
+ for o in sample_inputs_linalg_invertible(
+ op_info, device, dtype, requires_grad, **kwargs
+ ):
+ o.kwargs = {"hermitian": True}
+ yield o
+
+
+ def sample_inputs_linalg_solve(
+ op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs
+ ):
+ """
+ This function always generates solvable input for torch.linalg.solve
+ We sample a full-rank square (i.e. invertible) matrix A
+ The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
+ The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
+ In total this function generates 18 SampleInputs
+ 'batches' cases include:
+ () - single input,
+ (0,) - zero batched dimension,
+ (2,) - batch of two matrices.
+ 'ns' gives 0x0 and 5x5 matrices.
+ and 'nrhs' controls the number of vectors to solve for:
+ () - using 1 as the number of vectors implicitly
+ (1,) - same as () but explicit
+ (3,) - solve for 3 vectors.
+ Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
+ 'vector_rhs_allowed' controls whether to include nrhs = () in the list of SampleInputs.
+ torch.solve / triangular_solve / cholesky_solve (as opposed to torch.linalg.solve) do not allow
+ 1D tensors (vectors) as the right-hand-side.
+ Once torch.solve / triangular_solve / cholesky_solve and their testing are removed,
+ 'vector_rhs_allowed' may be removed here as well.
+ """
+ make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+ make_a = partial(
+ make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+ make_b = partial(
+ make_tensor, dtype=dtype, device=device, requires_grad=requires_grad
+ )
+
+ batches = [(), (0,), (2,)]
+ ns = [5, 0]
+ if vector_rhs_allowed:
+ nrhs = [(), (1,), (3,)]
+ else:
+ nrhs = [(1,), (3,)]
+
+ for n, batch, rhs in product(ns, batches, nrhs):
+ yield SampleInput(make_a(*batch, n, n), args=(make_b(batch + (n,) + rhs),))
+
+
+ def sample_inputs_linalg_solve_triangular(
+ op_info, device, dtype, requires_grad=False, **kwargs
+ ):
+ make_arg = partial(make_tensor, dtype=dtype, device=device)
+ bs = (1, 2, 0)
+ ns = (3, 0)
+ ks = (1, 3, 0)
+
+ for b, n, k, (left, upper, uni) in product(
+ bs, ns, ks, product((True, False), repeat=3)
+ ):
+ if b == 1:
+ A = make_arg((n, n)) if left else make_arg((k, k))
+ B = make_arg((n, k))
+ else:
+ A = make_arg((b, n, n)) if left else make_arg((b, k, k))
+ B = make_arg((b, n, k))
+ if uni:
+ # Not really necessary, but writing it for consistency
+ A.diagonal(0, -2, -1).fill_(1.0)
+ else:
+ d = A.diagonal(0, -2, -1)
+ d[d.abs() < 1e-6] = 1.0
+ if upper:
+ A.triu_()
+ else:
+ A.tril_()
+ kwargs = {"upper": upper, "left": left, "unitriangular": uni}
+ if requires_grad:
+ for grad_A, grad_B in product((True, False), repeat=2):
+ # Either A or B needs to have a gradient
+ if not grad_A and not grad_B:
+ continue
+ yield SampleInput(
+ A.clone().requires_grad_(grad_A),
+ args=(B.clone().requires_grad_(grad_B),),
+ kwargs=kwargs,
+ )
+ else:
+ yield SampleInput(A, args=(B,), kwargs=kwargs)
+
+
+ def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
+ """
+ This function always generates solvable input for legacy solve functions
+ (the ones that are not in the torch.linalg module).
+ The difference from sample_inputs_linalg_solve is that here the right-hand-side of the A x = b equation
+ should have b.ndim >= 2, vectors are not allowed.
+ Also the argument order is swapped.
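+ For example, a sample (A, args=(B,)) coming from sample_inputs_linalg_solve
+ is yielded here as (B, args=(A,)).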
+ """ + out = sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False + ) + + def out_fn(output): + return output[0] + + # Reverses tensor order + for sample in out: + sample.input, sample.args = sample.args[0], (sample.input,) + if op_info.name == "solve": + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs): + full_rank = op_info.name == "linalg.lu_factor" + make_fn = ( + make_tensor + if not full_rank + else make_fullrank_matrices_with_distinct_singular_values + ) + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + def out_fn(output): + if op_info.name == "linalg.lu": + return output[1], output[2] + else: + return output + + batch_shapes = ((), (3,), (3, 3)) + # pivot=False only supported in CUDA + pivots = (True, False) if torch.device(device).type == "cuda" else (True,) + deltas = (-2, -1, 0, +1, +2) + for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas): + shape = batch_shape + (S + delta, S) + # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple! + A = make_arg(shape) if not full_rank else make_arg(*shape) + yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn) + + +def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, m, n in product(batches, ns, ns): + yield SampleInput(make_arg(batch + (m, n))) + + +def sample_inputs_linalg_qr_geqrf( + op_info, device, dtype, requires_grad=False, **kwargs +): + # QR is just well defined when the matrix is full rank + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, (m, n) in product(batches, product(ns, ns)): + shape = batch + (m, n) + yield SampleInput(make_arg(*shape)) + + +def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): + a_shapes = [(2, 3, 6), (3, 4, 4, 3)] + # Zero-dim tensors are not supported in NumPy, so we skip them for now. + # NumPy is used in reference check tests. + # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
+ # a_shapes += [(0, 0, 1, 2, 3, 0)] + dimss = [None, (0, 2)] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for a_shape, dims in itertools.product(a_shapes, dimss): + a = make_arg(a_shape) + b = make_arg(a_shape[:2]) + yield SampleInput(a, b, dims=dims) + + +def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = make_fullrank_matrices_with_distinct_singular_values + + def make_input(): + return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) + + # lhs / rhs shape can have any number of dimensions as long as their product equals 12 + shapes = [ + ((2, 2, 3), (12, 1)), + ((4, 3), (6, 1, 2)), + ] + + for shape_lhs, shape_rhs in shapes: + inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() + inp.requires_grad_(requires_grad) + yield SampleInput(inp, ind=len(shape_lhs)) + + +op_db: List[OpInfo] = [ + OpInfo( + "linalg.cross", + ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim), + op=torch.linalg.cross, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + aten_name="linalg_cross", + sample_inputs_func=sample_inputs_cross, + error_inputs_func=error_inputs_cross, + supports_out=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + check_batched_gradgrad=False, + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + variant_test_name="singular", + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_det_singular, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("The backward may give different results"), + "TestCommon", + "test_noncontiguous_samples", + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestBwdGradients", + "test_fn_grad", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + # Both Hessians are incorrect on complex inputs?? 
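+ # (hence the expectedFailure entries below for complex128, covering both
+ # test_fn_gradgrad and test_fn_fwgrad_bwgrad)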
+ DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93044" + ), + "TestBwdGradients", + "test_fn_grad", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93045" + ), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.diagonal", + aten_name="linalg_diagonal", + aten_backward_name="diagonal_backward", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.float16, torch.chalf + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed, + ), + OpInfo( + "linalg.cholesky", + aten_name="linalg_cholesky", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.cholesky_ex", + aten_name="linalg_cholesky_ex", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vecdot", + aten_name="linalg_vecdot", + ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim), + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_linalg_vecdot, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.cond", + aten_name="linalg_cond", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cond, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + 
dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.eig", + aten_name="linalg_eig", + op=torch.linalg.eig, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eig, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_out", device_type="cpu" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + ), + OpInfo( + "linalg.eigvals", + aten_name="linalg_eigvals", + op=torch.linalg.eigvals, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigh", + aten_name="linalg_eigh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigvalsh", + aten_name="linalg_eigvalsh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + 
"TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.householder_product", + aten_name="linalg_householder_product", + op=torch.linalg.householder_product, + aliases=("orgqr",), + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + # TODO: backward uses in-place operations that vmap doesn't like + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_householder_product, + decorators=[ + skipCUDAIfNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}) + ), + DecorateInfo( + unittest.skip("Skipped! Flaky"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cpu", + dtypes=(torch.complex128,), + ), + ], + ), + OpInfo( + "linalg.ldl_factor", + aten_name="linalg_ldl_factor", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_factor_ex", + aten_name="linalg_ldl_factor_ex", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_solve", + aten_name="linalg_ldl_solve", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_solve, + decorators=[ + skipCUDAIf( + _get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1" + ), + skipCUDAIfNoCusolver, + skipCUDAIfRocm, + skipCPUIfNoLapack, + ], + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + dtypes=floating_and_complex_types(), + supports_out=True, + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # we skip gradient checks for this suite as they are tested in + # variant_test_name='grad_oriented' + DecorateInfo(unittest.skip("Skipped!"), "TestFwdGradients"), + DecorateInfo(unittest.skip("Skipped!"), "TestBwdGradients"), + # The values for attribute 'shape' do not match + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + variant_test_name="grad_oriented", + # gradchecks for forward AD fails with multi-Tensor outputs + op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], + supports_out=False, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq_grad_oriented, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + 
supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # tests do not work with passing lambda for op + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + ), + ), + OpInfo( + "linalg.matrix_power", + aliases=("matrix_power",), + aten_name="linalg_matrix_power", + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_power, + ), + OpInfo( + "linalg.multi_dot", + # Need this lambda because gradcheck does not work with TensorList inputs + aten_name="linalg_multi_dot", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_inplace_autograd=False, + # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_multi_dot, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo( + unittest.skip("67470!"), "TestCommon", "test_noncontiguous_samples" + ), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestOpInfo", + device_type="xla", + dtypes=(torch.long,), + ), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + device_type="cpu", + dtypes=(torch.long,), + ), + ), + ), + # NB: linalg.norm has two variants so that different skips can be used for different sample inputs + OpInfo( + "linalg.norm", + aten_name="linalg_norm", + op=torch.linalg.norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_norm, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.norm", + op=torch.linalg.norm, + variant_test_name="subgradients_at_zero", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=partial( + sample_inputs_linalg_norm, variant="subgradient_at_zero" + ), + aten_name="linalg_norm", + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # [NEW] Skips specifically for sample inputs at zero + # norm's vjp/jvp are not well-conditioned near zero + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_fn_fwgrad_bwgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_forward_mode_AD" + ), + DecorateInfo(unittest.expectedFailure, "TestBwdGradients", "test_fn_grad"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.matrix_norm", + aten_name="linalg_matrix_norm", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_norm, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.qr", + aten_name="linalg_qr", + op=torch.linalg.qr, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.slogdet", + aten_name="linalg_slogdet", + op=torch.linalg.slogdet, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vander", + aten_name="linalg_vander", + ref=np_vander_batched, + op=torch.linalg.vander, + dtypes=all_types_and_complex(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_vander, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + ReductionOpInfo( + "linalg.vector_norm", + op=torch.linalg.vector_norm, + identity=0, + nan_policy="propagate", + supports_multiple_dims=True, + complex_to_real=True, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! 
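+        # (same workaround as in the linalg.norm "subgradients_at_zero" entry above)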
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + generate_args_kwargs=sample_kwargs_vector_norm, + aten_name="linalg_vector_norm", + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + OpInfo( + "linalg.lu_factor", + aten_name="linalg_lu_factor", + op=torch.linalg.lu_factor, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_factor_ex", + aten_name="linalg_lu_factor_ex", + op=torch.linalg.lu_factor_ex, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu", + aten_name="linalg_lu", + op=torch.linalg.lu, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_solve", + op=torch.linalg.lu_solve, + aten_name="linalg_lu_solve", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo( + unittest.skip("Tests different backward paths"), + "TestCommon", + "test_floating_inputs_are_differentiable", + ), + ), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + ), + OpInfo( + "linalg.inv", + aten_name="linalg_inv", + op=torch.linalg.inv, + aliases=("inverse",), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + 
"test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.inv_ex", + aten_name="linalg_inv_ex", + op=torch.linalg.inv_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve", + aten_name="linalg_solve", + op=torch.linalg.solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_ex", + aten_name="linalg_solve_ex", + op=torch.linalg.solve_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_triangular", + aten_name="linalg_solve_triangular", + op=torch.linalg.solve_triangular, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve_triangular, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result); + supports_forward_ad=True, + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_matrix_rank, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + # jit doesn't accept tensor inputs for matrix rank + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=[torch.complex64, 
torch.float32], + ), + ), + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + op=torch.linalg.pinv, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # errors with "leaked XXXX bytes CUDA memory on device 0" + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="singular", + # pinv is Frechet-differentiable in a rank-preserving neighborhood, + # so we feed inputs that are the products of two full-rank factors, + # to avoid any rank changes caused by the perturbations in the gradcheck + op=lambda a, b: torch.linalg.pinv(a @ b.mT), + dtypes=floating_and_complex_types(), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv_singular, + # Only large tensors show issues with implicit backward used prior to + # explicit backward implementation. + decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # CUDA runs out of memory + DecorateInfo( + unittest.skip("Skipped!"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + # This test takes almost 2 hours to run! 
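+            # (hence the plain skip below rather than an expectedFailure marker)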
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + # This test is flaky under slow gradcheck, likely due to rounding issues + DecorateInfo( + skipIfSlowGradcheckEnv, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.svd", + op=torch.linalg.svd, + aten_name="linalg_svd", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_svd, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.svdvals", + op=torch.linalg.svdvals, + aten_name="linalg_svdvals", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # We're using at::allclose, which does not have a batching rule + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_svdvals, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + 
unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.tensorinv", + ref=np.linalg.tensorinv, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorinv, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.tensorsolve", + ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorsolve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), +] + +python_ref_db: List[OpInfo] = [ + # + # torch.linalg + # + PythonRefInfo( + "_refs.linalg.diagonal", + torch_opinfo_name="linalg.diagonal", + supports_out=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.vecdot", + torch_opinfo_name="linalg.vecdot", + op_db=op_db, + ), + ReductionPythonRefInfo( + "_refs.linalg.vector_norm", + torch_opinfo_name="linalg.vector_norm", + supports_out=True, + op_db=op_db, + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.matrix_norm", + torch_opinfo_name="linalg.matrix_norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.norm", + torch_opinfo_name="linalg.norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svd", + torch_opinfo_name="linalg.svd", + supports_out=True, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svdvals", + torch_opinfo_name="linalg.svdvals", + supports_out=True, + op_db=op_db, + ), +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..35cb3aec1934dac3aa78c69a6e0eac1573a98f56 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py @@ -0,0 +1,456 @@ +import unittest +from functools import partial + +from itertools import product +from typing import Callable, List, Tuple + +import numpy + +import torch +from torch.testing._internal.common_dtype import floating_types +from torch.testing._internal.common_utils import TEST_SCIPY +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + SampleInput, +) + +if 
TEST_SCIPY:
+    import scipy.signal
+
+
+def sample_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs):
+    r"""Base function used to create sample inputs for windows.
+
+    Additional positional arguments required by a specific window should be
+    passed via *args, and additional keyword arguments via **kwargs; both are
+    forwarded to every generated SampleInput.
+    """
+
+    # Tests window sizes up to 5 samples.
+    for size, sym in product(range(6), (True, False)):
+        yield SampleInput(
+            size,
+            *args,
+            sym=sym,
+            device=device,
+            dtype=dtype,
+            requires_grad=requires_grad,
+            **kwargs,
+        )
+
+
+def reference_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs):
+    r"""Reference inputs function for windows that share the common signature,
+    i.e., that take only a window size and the sym flag.
+
+    Windows with a more specific signature need their own reference inputs
+    function; see the exponential and gaussian windows, for instance.
+    """
+    yield from sample_inputs_window(
+        op_info, device, dtype, requires_grad, *args, **kwargs
+    )
+
+    cases = (8, 16, 32, 64, 128, 256)
+
+    for size in cases:
+        yield SampleInput(size, sym=False)
+        yield SampleInput(size, sym=True)
+
+
+def reference_inputs_exponential_window(
+    op_info, device, dtype, requires_grad, **kwargs
+):
+    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
+
+    cases = (
+        (8, {"center": 4, "tau": 0.5}),
+        (16, {"center": 8, "tau": 2.5}),
+        (32, {"center": 16, "tau": 43.5}),
+        (64, {"center": 20, "tau": 3.7}),
+        (128, {"center": 62, "tau": 99}),
+        (256, {"tau": 10}),
+    )
+
+    for size, kw in cases:
+        yield SampleInput(size, sym=False, **kw)
+        kw["center"] = None
+        yield SampleInput(size, sym=True, **kw)
+
+
+def reference_inputs_gaussian_window(op_info, device, dtype, requires_grad, **kwargs):
+    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
+
+    cases = (
+        (8, {"std": 0.1}),
+        (16, {"std": 1.2}),
+        (32, {"std": 2.1}),
+        (64, {"std": 3.9}),
+        (128, {"std": 4.5}),
+        (256, {"std": 10}),
+    )
+
+    for size, kw in cases:
+        yield SampleInput(size, sym=False, **kw)
+        yield SampleInput(size, sym=True, **kw)
+
+
+def reference_inputs_kaiser_window(op_info, device, dtype, requires_grad, **kwargs):
+    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
+
+    cases = (
+        (8, {"beta": 2}),
+        (16, {"beta": 12}),
+        (32, {"beta": 30}),
+        (64, {"beta": 35}),
+        (128, {"beta": 41.2}),
+        (256, {"beta": 100}),
+    )
+
+    for size, kw in cases:
+        yield SampleInput(size, sym=False, **kw)
+        yield SampleInput(size, sym=True, **kw)
+
+
+def reference_inputs_general_cosine_window(
+    op_info, device, dtype, requires_grad, **kwargs
+):
+    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
+
+    cases = (
+        (8, {"a": [0.5, 0.5]}),
+        (16, {"a": [0.46, 0.54]}),
+        (32, {"a": [0.46, 0.23, 0.31]}),
+        (64, {"a": [0.5]}),
+        (128, {"a": [0.1, 0.8, 0.05, 0.05]}),
+        (256, {"a": [0.2, 0.2, 0.2, 0.2, 0.2]}),
+    )
+
+    for size, kw in cases:
+        yield SampleInput(size, sym=False, **kw)
+        yield SampleInput(size, sym=True, **kw)
+
+
+def reference_inputs_general_hamming_window(
+    op_info, device, dtype, requires_grad, **kwargs
+):
+    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)
+
+    cases = (
+        (8, {"alpha": 0.54}),
+        (16, {"alpha": 0.5}),
+        (32, {"alpha": 0.23}),
+        (64, {"alpha": 0.8}),
+        (128, {"alpha": 0.9}),
+        (256, {"alpha": 0.05}),
+    )
+
+    for size, kw in cases:
+        yield SampleInput(size, sym=False, **kw)
+        yield SampleInput(size, sym=True, **kw)
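+
+
+# For illustration, a hypothetical call (this mirrors how the kaiser entry in
+# op_db below uses partial(sample_inputs_window, beta=12.0)):
+#
+#     samples = list(
+#         sample_inputs_window(op_info, "cpu", torch.float32, False, beta=12.0)
+#     )
+#
+# yields one SampleInput per (size, sym) pair, for size in range(6) and sym in
+# (True, False), each forwarding beta=12.0 to the window op under test.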
+
+
+def error_inputs_window(op_info, device, *args, **kwargs):
+    # Tests for windows that have a negative size
+    yield ErrorInput(
+        SampleInput(-1, *args, dtype=torch.float32, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="requires non-negative window length, got M=-1",
+    )
+
+    # Tests for window tensors that are not torch.strided, for instance, torch.sparse_coo.
+    yield ErrorInput(
+        SampleInput(
+            3,
+            *args,
+            layout=torch.sparse_coo,
+            device=device,
+            dtype=torch.float32,
+            **kwargs,
+        ),
+        error_type=ValueError,
+        error_regex="is implemented for strided tensors only, got: torch.sparse_coo",
+    )
+
+    # Tests for window tensors that are not floating point dtypes, for instance, torch.long.
+    yield ErrorInput(
+        SampleInput(3, *args, dtype=torch.long, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="expects float32 or float64 dtypes, got: torch.int64",
+    )
+
+    # Tests for window tensors that are bfloat16
+    yield ErrorInput(
+        SampleInput(3, *args, dtype=torch.bfloat16, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="expects float32 or float64 dtypes, got: torch.bfloat16",
+    )
+
+    # Tests for window tensors that are float16
+    yield ErrorInput(
+        SampleInput(3, *args, dtype=torch.float16, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="expects float32 or float64 dtypes, got: torch.float16",
+    )
+
+
+def error_inputs_exponential_window(op_info, device, **kwargs):
+    # Yield common error inputs
+    yield from error_inputs_window(op_info, device, **kwargs)
+
+    # Tests for negative decay values.
+    yield ErrorInput(
+        SampleInput(3, tau=-1, dtype=torch.float32, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="Tau must be positive, got: -1 instead.",
+    )
+
+    # Tests for symmetric windows and a given center value.
+    yield ErrorInput(
+        SampleInput(3, center=1, sym=True, dtype=torch.float32, device=device),
+        error_type=ValueError,
+        error_regex="Center must be None for symmetric windows",
+    )
+
+
+def error_inputs_gaussian_window(op_info, device, **kwargs):
+    # Yield common error inputs
+    yield from error_inputs_window(op_info, device, std=0.5, **kwargs)
+
+    # Tests for negative standard deviations
+    yield ErrorInput(
+        SampleInput(3, std=-1, dtype=torch.float32, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="Standard deviation must be positive, got: -1 instead.",
+    )
+
+
+def error_inputs_kaiser_window(op_info, device, **kwargs):
+    # Yield common error inputs
+    yield from error_inputs_window(op_info, device, beta=12, **kwargs)
+
+    # Tests for negative beta
+    yield ErrorInput(
+        SampleInput(3, beta=-1, dtype=torch.float32, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="beta must be non-negative, got: -1 instead.",
+    )
+
+
+def error_inputs_general_cosine_window(op_info, device, **kwargs):
+    # Yield common error inputs
+    yield from error_inputs_window(op_info, device, a=[0.54, 0.46], **kwargs)
+
+    # Tests for invalid coefficients: not a list/tuple, and empty
+    yield ErrorInput(
+        SampleInput(3, a=None, dtype=torch.float32, device=device, **kwargs),
+        error_type=TypeError,
+        error_regex="Coefficients must be a list/tuple",
+    )
+
+    yield ErrorInput(
+        SampleInput(3, a=[], dtype=torch.float32, device=device, **kwargs),
+        error_type=ValueError,
+        error_regex="Coefficients cannot be empty",
+    )
+
+
+def reference_signal_window(fn: Callable):
+    r"""Wrapper for scipy signal window references.
+
+    Discards the torch-only keyword arguments so that reference functions whose
+    signatures do not match torch's (e.g., the gaussian window) can still be
+    called with torch-style arguments.
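+
+    A minimal usage sketch (hypothetical values; assumes scipy is available):
+
+        ref = reference_signal_window(scipy.signal.windows.gaussian)
+        # device/layout/requires_grad are accepted and dropped; dtype is
+        # applied to the returned numpy array via astype
+        win = ref(8, std=1.92, dtype=numpy.float32, device="cpu")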
+ """ + + def _fn( + *args, + dtype=numpy.float64, + device=None, + layout=torch.strided, + requires_grad=False, + **kwargs, + ): + r"""The unused arguments are defined to disregard those values""" + return fn(*args, **kwargs).astype(dtype) + + return _fn + + +def make_signal_windows_opinfo( + name: str, + ref: Callable, + sample_inputs_func: Callable, + reference_inputs_func: Callable, + error_inputs_func: Callable, + *, + skips: Tuple[DecorateInfo, ...] = (), +): + r"""Helper function to create OpInfo objects related to different windows.""" + return OpInfo( + name=name, + ref=ref if TEST_SCIPY else None, + dtypes=floating_types(), + dtypesIfCUDA=floating_types(), + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=error_inputs_func, + supports_out=False, + supports_autograd=False, + skips=( + # TODO: same as this? + # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # skip these tests since we have non tensor input + DecorateInfo( + unittest.skip("Skipped!"), "TestCommon", "test_noncontiguous_samples" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_conj_view"), + DecorateInfo( + unittest.skip("Skipped!"), "TestMathBits", "test_neg_conj_view" + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_neg_view"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_vmap_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_op_has_batch_rule", + ), + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + *skips, + ), + ) + + +op_db: List[OpInfo] = [ + make_signal_windows_opinfo( + name="signal.windows.hamming", + ref=reference_signal_window(scipy.signal.windows.hamming) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.hann", + ref=reference_signal_window(scipy.signal.windows.hann) if TEST_SCIPY else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.bartlett", + ref=reference_signal_window(scipy.signal.windows.bartlett) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.blackman", + ref=reference_signal_window(scipy.signal.windows.blackman) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.cosine", + ref=reference_signal_window(scipy.signal.windows.cosine) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + 
reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.exponential", + ref=reference_signal_window(scipy.signal.windows.exponential) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, tau=2.78), + reference_inputs_func=partial(reference_inputs_exponential_window, tau=2.78), + error_inputs_func=error_inputs_exponential_window, + ), + make_signal_windows_opinfo( + name="signal.windows.gaussian", + ref=reference_signal_window(scipy.signal.windows.gaussian) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, std=1.92), + reference_inputs_func=partial(reference_inputs_gaussian_window, std=1.92), + error_inputs_func=error_inputs_gaussian_window, + skips=( + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + make_signal_windows_opinfo( + name="signal.windows.kaiser", + ref=reference_signal_window(scipy.signal.windows.kaiser) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, beta=12.0), + reference_inputs_func=partial(reference_inputs_kaiser_window, beta=12.0), + error_inputs_func=error_inputs_kaiser_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_cosine", + ref=reference_signal_window(scipy.signal.windows.general_cosine) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, a=[0.54, 0.46]), + reference_inputs_func=partial( + reference_inputs_general_cosine_window, a=[0.54, 0.46] + ), + error_inputs_func=error_inputs_general_cosine_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_hamming", + ref=reference_signal_window(scipy.signal.windows.general_hamming) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, alpha=0.54), + reference_inputs_func=partial( + reference_inputs_general_hamming_window, alpha=0.54 + ), + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.nuttall", + ref=reference_signal_window(scipy.signal.windows.nuttall) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..26a5b9ca3b2dce1bc03bcce092141dd0b46d7d8a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py @@ -0,0 +1,928 @@ +import os + +import torch +from torch.testing import make_tensor # noqa: F401 +from torch.testing._internal.opinfo.core import ( # noqa: F401 + BinaryUfuncInfo, + ErrorInput, + generate_elementwise_binary_tensors, + ReductionOpInfo, + sample_inputs_reduction, + SampleInput, +) + + +def _check_validate(op_info, sample): + def _check_fail(sample): + try: + op_info( + sample.sample_input.input, + *sample.sample_input.args, + **sample.sample_input.kwargs, + ) + except sample.error_type: + pass + except Exception as msg: + raise AssertionError( # noqa: TRY200 + f"{op_info.name} on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}" + ) + else: + raise AssertionError( + f"{op_info.name} 
on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got none." + ) + + def _check_success(sample): + try: + op_info(sample.input, *sample.args, **sample.kwargs) + except Exception as msg: + raise AssertionError( # noqa: TRY200 + f"{op_info.name} on {sample=} expected to succeed " + f", got {type(msg).__name__}: {msg}" + ) + + if isinstance(sample, ErrorInput): + _check_fail(sample) + else: + _check_success(sample) + + +def _sample_inputs_sparse( + sample_inputs, + maybe_failing_sample_inputs, + validate_sample_input, + op_info, + *args, + **kwargs, +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + # Error inputs are handled in error_inputs_sparse + + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + + +def _error_inputs_sparse( + maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, ErrorInput): + yield sample + # Sample inputs are handled in sample_inputs_sparse + + +def _apply_requires_grad_to_samples(sample_inputs): + """Decorator to _maybe_failing_sample_inputs_... generator functions + that clones and sets requires_grad argument to tensors in sample + input arguments. This is needed when the generated samples share + tensor instances. + """ + + def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs): + def apply_requires_grad(x): + if ( + not isinstance(x, torch.Tensor) + or x.requires_grad + or not requires_grad + or not (x.is_floating_point() or x.is_complex()) + ): + return x + return x.detach().clone().requires_grad_(requires_grad) + + if requires_grad: + for sample_input in sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ): + yield sample_input.transform(apply_requires_grad) + else: + yield from sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ) + + return wrapper + + +def sample_inputs_sparse_reduction( + op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs +): + """Sample inputs for reduction operations on sparse tensors.""" + layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0] + op_supports_layout = getattr(op_info, "supports_" + layout_name) + if not op_supports_layout: + return + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if sample_input.input.ndim == 0: + # scalar sparse tensors are not supported + continue + + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if sample_input.input.ndim < 2: + # conversion to sparse compressed tensors requires at + # least 2 dimensional tensors + continue + if sample_input.input.ndim > 2 and (sample_input.input == 0).any(): + # Skip batched sparse compressed samples that contain + # explicit zeros because to_sparse(layout=..) will + # fail, see gh-98495. 
+ # TODO: remove this if-block after gh-98495 is fixed. + continue + + if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None: + blocksize = (1, 1) + + yield SampleInput( + sample_input.input.detach() + .to_sparse(layout=layout, blocksize=blocksize) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex): + # uncoalesced samples + inp = sample_input.input.detach().to_sparse(layout=layout) + inp = torch.sparse_coo_tensor( + inp.indices().repeat(1, 2), + inp.values().repeat(2), + inp.shape, + dtype=inp.dtype, + device=inp.device, + ) + assert not inp.is_coalesced() + yield SampleInput( + inp.requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if sample_input.input.ndim > 2: + # hybrid samples + yield SampleInput( + sample_input.input.detach() + .to_sparse( + layout=layout, + blocksize=blocksize, + dense_dim=sample_input.input.ndim - 2, + ) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False): + """Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. + """ + UNSPECIFIED = object() + if op_info.name == "sum": + sample = _validate_sample_input_sparse_reduction_sum(sample) + + if op_info.name in {"masked.sum"}: + mask = sample.kwargs.get("mask", UNSPECIFIED) + if ( + mask not in {None, UNSPECIFIED} + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. + sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif mask is UNSPECIFIED: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + + if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}: + t_inp = sample.input + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + mask = sample.kwargs.get("mask") + if ( + mask is not None + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. 
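+            # (mirrors the strided-mask branch in the masked.sum case above)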
+ sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif mask is None: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif ( + mask.layout is sample.input.layout + and mask.ndim > 2 + and op_info.name == "masked.mean" + ): + sample = ErrorInput( + sample, + error_type=TypeError, + error_regex=( + "where[(][)] received an invalid combination of arguments" + " - got [(]Tensor, Tensor, NoneType[)]" + ), + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif ( + sample.input.ndim > 2 + and (sample.kwargs.get("dim") not in {0, 1}) + and mask.ndim > 2 + and mask.layout is not torch.strided + ): + if sample.kwargs.get("dim") == (0, -1): + sample = ErrorInput( + sample, + error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities", + ) + elif op_info.name == "masked.prod": + sample = ErrorInput( + sample, + error_regex="input_dim == 2 INTERNAL ASSERT FAILED at", + ) + else: + sample = ErrorInput( + sample, + error_type=AssertionError, + error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + elif ( + mask.layout is t_inp.layout + and mask._nnz() != t_inp._nnz() + and t_inp.dense_dim() > 0 + ): + sample = ErrorInput( + sample, + error_regex="Index tensor must have the same number of dimensions as src tensor", + ) + + if check_validate: + _check_validate(op_info, sample) + + return sample + + +def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + dim = t_kwargs.get("dim") + keepdim = t_kwargs.get("keepdim") + layout = t_inp.layout + if isinstance(dim, (int, list, tuple)): + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}: + return ErrorInput( + sample, + error_regex=( + "Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout" + ), + ) + if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim: + return ErrorInput( + sample, + error_regex=( + "reduction operations on CSR tensors with keepdim=False is unsupported" + ), + ) + if t_inp.dim() != 2: + return ErrorInput( + sample, + error_regex=("input_dim == 2 INTERNAL ASSERT"), + ) + if layout == torch.sparse_csr: + if t_inp.dtype == torch.bool: + return ErrorInput( + sample, + error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"), + ) + if t_inp.dtype == torch.complex32: + return ErrorInput( + sample, + error_regex=( + "_sparse_csr_sum_cuda not implemented for 'ComplexHalf'" + ), + ) + return sample + + +def _maybe_failing_sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. 
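+    # How these samples flow: _sample_inputs_sparse and _error_inputs_sparse
+    # (defined above) pass each one through
+    # _validate_sample_input_sparse_reduction; samples that come back as
+    # SampleInput are yielded by sample_inputs_sparse_reduction_sum, while
+    # those wrapped into ErrorInput are yielded by
+    # error_inputs_sparse_reduction_sum.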
+ if layout in [ + torch.sparse_csr, + torch.sparse_csc, + ]: + # NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend. + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + if layout in [ + torch.sparse_bsr, + torch.sparse_bsc, + ]: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(2, 2)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + +def sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for sum on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_reduction, + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs): + """Error inputs for sum on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def sample_inputs_sparse_elementwise_binary_operation( + op_info, device, dtype, 
requires_grad, layout, **kwargs +): + """Sample inputs for elementwise binary operations on sparse tensors. + + The samples include regular, zero-sized, batched, and hybrid + sparse tensors as well as rhs scalars. All tensors are full tensors. + """ + + def _to_sparse(tensor, **kwargs): + return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad) + + for sample_input in generate_elementwise_binary_tensors( + op_info, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=True, + **kwargs, + ): + lhs, rhs = sample_input.input, sample_input.args[0] + min_dense_dim = 0 + max_dense_dim = lhs.ndim - 1 + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if lhs.ndim < 2: + # sparse compressed tensors sparse_dim must be 2 + continue + max_dense_dim = lhs.ndim - 2 + + for dense_dim in range(min_dense_dim, max_dense_dim + 1): + if layout in {torch.sparse_bsr, torch.sparse_bsc}: + blocksizes = [(1, 1)] + if lhs.numel() > 0: + blocksizes.append( + ( + lhs.shape[lhs.ndim - 2 - dense_dim], + lhs.shape[lhs.ndim - 1 - dense_dim], + ) + ) + else: + blocksizes = [None] + for blocksize in blocksizes: + to_sparse_kwargs = dict( + layout=layout, dense_dim=dense_dim, blocksize=blocksize + ) + lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs) + rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs) + # op(sparse, sparse) + yield SampleInput( + lhs_sparse, + args=(rhs_sparse, *sample_input.args[1:]), + kwargs=sample_input.kwargs, + ) + # op(sparse, scalar) + yield SampleInput( + lhs_sparse, + args=( + make_tensor( + (), dtype=dtype, device=device, requires_grad=requires_grad + ), + *sample_input.args[1:], + ), + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_elementwise_binary_sparse_mul(sample): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + layout = t_inp.layout + dtype = t_inp.dtype + if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 2 dimensional tensor", + ) + elif layout is torch.sparse_csc and t_args[0].ndim > 0: + return ErrorInput( + sample, error_regex="Expected result Tensor to be of format CSR" + ) + elif layout is torch.sparse_bsr and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr", + ) + elif layout is torch.sparse_bsc and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc", + ) + elif ( + layout is torch.sparse_coo + and dtype is torch.bool + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + and t_inp.dense_dim() > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'" + ) + elif ( + layout in {torch.sparse_coo, torch.sparse_csr} + and dtype is torch.bool + and t_inp._nnz() > 0 + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + ): + return ErrorInput( + sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'" + ) + elif ( + layout is torch.sparse_csr + and t_args[0].layout is torch.strided + and 0 < t_args[0].ndim + and t_args[0].ndim < t_inp.ndim + ): + return ErrorInput( + sample, 
error_regex="sparse_mask_sparse_csr expects self to be 2D"
+        )
+    elif layout is torch.sparse_csr and (
+        (t_args[0].layout is torch.strided and 0 < t_args[0].ndim)
+        or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape)
+    ):
+        return ErrorInput(
+            sample,
+            error_regex=(
+                "expects sparse inputs with equal dimensionality, number of sparse dimensions,"
+                " and shape of sparse dimensions"
+            ),
+        )
+    elif (
+        layout is torch.sparse_csr
+        and t_inp.dense_dim() > 0
+        and t_inp._nnz() > 0
+        and t_inp.is_cpu
+        and dtype is torch.float16
+        and t_args[0].ndim > 0
+    ):
+        return ErrorInput(
+            sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Half'"
+        )
+    return sample
+
+
+@_apply_requires_grad_to_samples
+def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul(
+    op_info, device, dtype, requires_grad, layout, **kwargs
+):
+    """Generator of samples that are known to fail or that were failing in the past."""
+    # NOTE: When fixing a failing case, remove the Exception comment
+    # but keep the `yield sample` statement.
+
+    blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None
+    regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse(
+        layout=layout, dense_dim=0, blocksize=blocksize
+    )
+    batch = torch.tensor(
+        [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype
+    ).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize)
+    hybrid = torch.tensor(
+        [[[1], [2]], [[3], [4]]], device=device, dtype=dtype
+    ).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize)
+
+    if layout is torch.sparse_csr:
+        # RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor
+        yield SampleInput(batch, args=(batch,))
+        # RuntimeError: Only tensors with two sparse dimensions can be
+        # converted to the SparseCsr layout, got self with 3 sparse
+        # dimensions.
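+        # A rough sketch of the hybrid pattern exercised below (illustrative
+        # only; `hybrid` is constructed above with dense_dim=1):
+        #
+        #   h = torch.tensor([[[1], [2]], [[3], [4]]]).to_sparse(
+        #       layout=torch.sparse_csr, dense_dim=1)
+        #   torch.zeros_like(h) * torch.zeros_like(h)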
+ yield SampleInput( + torch.zeros_like(hybrid).requires_grad_(requires_grad), + args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),), + ) + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_csc: + # RuntimeError: Expected result Tensor to be of format CSR + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsr: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsc: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_coo: + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if dtype in {torch.bool, torch.float16} and regular.is_cpu: + # RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)' + yield SampleInput(hybrid, args=(hybrid,)) + + +def _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=False +): + if op_info.name == "mul": + sample = _validate_sample_input_elementwise_binary_sparse_mul(sample) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs): + """Sample inputs for mul operation on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_elementwise_binary_operation, + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_mul(op_info, device, layout, **kwargs): + """Error inputs for mul operation on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + from torch.testing._internal.common_utils import TestCase + + for tensor in TestCase().generate_simple_inputs( + layout, + device=device, + dtype=dtype, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=False, + enable_non_contiguous_values=False, + ): + yield SampleInput(tensor, args=(), kwargs={}) + yield SampleInput( + tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout) + ) + + if dtype is not torch.float64: + yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64)) + + if torch.cuda.is_available(): + other_device = "cuda" if tensor.device.type == "cpu" else "cpu" + yield SampleInput(tensor, args=(), kwargs=dict(device=other_device)) + + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + 
other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout)) + + if layout is not torch.sparse_coo: + yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo)) + + +def _validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False): + if sample.input.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if sample.kwargs.get("device", sample.input.device) != sample.input.device: + return ErrorInput( + sample, + error_regex=( + "device of (ccol|crow)_indices \\(=(cpu|cuda.*)\\) must" + " match device of values \\(=(cuda.*|cpu)\\)" + ), + ) + if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout: + return ErrorInput( + sample, + error_regex=( + "empty_like with different sparse layout is not supported" + " \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)" + ), + ) + if sample.input.layout is torch.sparse_coo: + return ErrorInput( + sample, + error_regex=( + "Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend." + ), + ) + if check_validate: + _check_validate(op_info, sample) + return sample + + +def _maybe_failing_sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + if torch.cuda.is_available() and layout is not torch.sparse_coo: + other_device = "cuda" if torch.device(device).type == "cpu" else "cpu" + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(device=other_device), + ) + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(layout=other_layout), + ) + + +def sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for like-functions on sparse tensors.""" + yield from _sample_inputs_sparse( + _sample_inputs_sparse_like_fns, + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs): + """Error inputs for like-functions on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _validate_sample_input_sparse_default(op_info, sample, check_validate=False): + if op_info.name == "to_sparse": + if ( + sample.input.layout + in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + and len(sample.args) == 1 + and isinstance(sample.args[0], int) + and sample.args[0] != 2 + ): + sample = ErrorInput( + sample, + error_regex="sparse dim argument 
must be 2 for sparse_compressed_to_sparse",
+            )
+
+    if check_validate:
+        _check_validate(op_info, sample)
+    return sample
+
+
+def validate_sample_input_sparse(op_info, sample, check_validate=False):
+    """Return the specified sample when it is valid and supported by the
+    operation. Otherwise, return the sample as an ErrorInput instance.
+
+    When check_validate is True, the result is validated against
+    calling the op on the sample.
+    """
+    if isinstance(op_info, ReductionOpInfo):
+        return _validate_sample_input_sparse_reduction(
+            op_info, sample, check_validate=check_validate
+        )
+    elif isinstance(op_info, BinaryUfuncInfo):
+        return _validate_sample_input_sparse_elementwise_binary_operation(
+            op_info, sample, check_validate=check_validate
+        )
+    else:
+        return _validate_sample_input_sparse_default(
+            op_info, sample, check_validate=check_validate
+        )
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2f110ec2442fa9982a395bee8c9cd7247dd69e1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py
@@ -0,0 +1,817 @@
+import unittest
+from functools import partial
+from itertools import product
+from typing import List
+
+import numpy as np
+
+import torch
+from torch.testing import make_tensor
+from torch.testing._internal.common_device_type import (
+    precisionOverride,
+    tol,
+    toleranceOverride,
+)
+from torch.testing._internal.common_dtype import all_types_and, floating_types
+from torch.testing._internal.common_utils import TEST_SCIPY, torch_to_numpy_dtype_dict
+from torch.testing._internal.opinfo.core import (
+    BinaryUfuncInfo,
+    DecorateInfo,
+    L,
+    NumericsFilter,
+    OpInfo,
+    S,
+    SampleInput,
+    UnaryUfuncInfo,
+)
+from torch.testing._internal.opinfo.refs import (
+    ElementwiseBinaryPythonRefInfo,
+    ElementwiseUnaryPythonRefInfo,
+)
+from torch.testing._internal.opinfo.utils import (
+    np_unary_ufunc_integer_promotion_wrapper,
+)
+
+
+if TEST_SCIPY:
+    import scipy.special
+
+
+# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`
+# supports `exclude` argument.
+# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617 +def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs): + exclude_zero = requires_grad and op_info.op == torch.special.i0e + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield SampleInput(make_arg((S,))) + yield SampleInput(make_arg(())) + + if requires_grad and not exclude_zero: + # Special Case for gradient + # Sample with `0` in the input + t = make_arg((S,)) + t[0] = 0 + + yield SampleInput(t) + + +def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + for shape, n in product(tensor_shapes, ns): + yield SampleInput(make_arg(shape), args=(n,)) + + +def reference_polygamma(x, n): + # WEIRD `scipy.special.polygamma` behavior + # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype + # dtype('float64') + # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype + # dtype('float32') + # + # Thus we cast output to the default torch dtype or preserve double + result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + if x.dtype == np.double: + result_dtype = np.double + return scipy.special.polygamma(n, x).astype(result_dtype) + + +def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs): + low, _ = op_info.domain + + if requires_grad: + low = 0 + op_info._domain_eps + + make_arg = partial( + make_tensor, dtype=dtype, device=device, low=low, requires_grad=requires_grad + ) + yield SampleInput(make_arg((L,))) + yield SampleInput(make_arg(())) + + +op_db: List[OpInfo] = [ + UnaryUfuncInfo( + "special.i0e", + aten_name="special_i0e", + ref=scipy.special.i0e if TEST_SCIPY else None, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.i1", + aten_name="special_i1", + ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) + if TEST_SCIPY + else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + ), + UnaryUfuncInfo( + "special.i1e", + aten_name="special_i1e", + ref=scipy.special.i1e if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.ndtr", + aten_name="special_ndtr", + decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),), + ref=scipy.special.ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatch stub: unsupported device typemeta + DecorateInfo( + 
unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="meta", + ), + ), + ), + # A separate OpInfo entry for special.polygamma is needed to reorder the arguments + # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939 + UnaryUfuncInfo( + "special.polygamma", + op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs), + variant_test_name="special_polygamma_n_0", + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # lambda impl + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + sample_kwargs=lambda device, dtype, input: ({"n": 0}, {"n": 0}), + # polygamma functions have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter( + condition=lambda x: x < 0.1, safe_val=1 + ), + ), + BinaryUfuncInfo( + "special.xlog1py", + aten_name="special_xlog1py", + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test -1 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=-0.99), + ), + BinaryUfuncInfo( + "special.zeta", + aten_name="special_zeta", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + # TODO: FIXME + # OpInfo entry to verify the gradient formula of `other`/`q` + # BinaryUfuncInfo('special.zeta', + # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), + # aten_name='special_zeta', + # variant_test_name='grad', + # dtypes=all_types_and(torch.bool), + # promotes_int_to_float=True, + # supports_autograd=True, + # supports_rhs_python_scalar=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable") + # ], + # skips=( + # # Lambda doesn't work in JIT test + # # AssertionError: JIT Test does not execute any logic + # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), + # )), + UnaryUfuncInfo( + "special.entr", + ref=scipy.special.entr if TEST_SCIPY else None, + aten_name="special_entr", + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_entr, + ), + UnaryUfuncInfo( + "special.ndtri", + ref=scipy.special.ndtri if TEST_SCIPY else None, + domain=(0, 1), + aten_name="special_ndtri", + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.log_ndtr", + 
aten_name="special_log_ndtr", + ref=scipy.special.log_ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.erfcx", + ref=scipy.special.erfcx if TEST_SCIPY else None, + aten_name="special_erfcx", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.airy_ai", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.airy(x)[0] if TEST_SCIPY else None, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + ), + ), + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y1 if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + 
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_h", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + # Greatest absolute difference: inf + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_he", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.laguerre_polynomial_l", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.legendre_polynomial_p", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0e if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + 
"special.scaled_modified_bessel_k1", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1e if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.spherical_bessel_j0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.spherical_jn(0, x) if TEST_SCIPY else None, + supports_autograd=False, + ), +] + +python_ref_db: List[OpInfo] = [ + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j0", + torch_opinfo_name="special.bessel_j0", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j1", + torch_opinfo_name="special.bessel_j1", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.entr", + torch_opinfo_name="special.entr", + 
op_db=op_db,
+        decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),),
+        skips=(
+            DecorateInfo(
+                unittest.skip("Skipped!"),
+                "TestUnaryUfuncs",
+                "test_reference_numerics_large",
+                dtypes=[torch.bfloat16, torch.float16],
+            ),
+        ),
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.erfcx",
+        torch_opinfo_name="special.erfcx",
+        op_db=op_db,
+        decorators=(
+            toleranceOverride(
+                {
+                    torch.float32: tol(atol=0, rtol=4e-6),
+                }
+            ),
+        ),
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.i0e",
+        torch_opinfo_name="special.i0e",
+        op_db=op_db,
+        decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),),
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.i1",
+        torch_opinfo_name="special.i1",
+        op_db=op_db,
+        decorators=(
+            DecorateInfo(
+                toleranceOverride(
+                    {
+                        torch.float32: tol(atol=1e-4, rtol=0),
+                        torch.bool: tol(atol=1e-4, rtol=0),
+                    }
+                )
+            ),
+        ),
+        skips=(
+            DecorateInfo(
+                unittest.skip("Incorrect result!"),
+                "TestUnaryUfuncs",
+                "test_reference_numerics_large",
+                dtypes=(torch.int8,),
+            ),
+        ),
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.i1e",
+        torch_opinfo_name="special.i1e",
+        op_db=op_db,
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.log_ndtr",
+        torch_opinfo_name="special.log_ndtr",
+        op_db=op_db,
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.ndtr",
+        torch_opinfo_name="special.ndtr",
+        op_db=op_db,
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.ndtri",
+        torch_opinfo_name="special.ndtri",
+        op_db=op_db,
+    ),
+    ElementwiseUnaryPythonRefInfo(
+        "_refs.special.spherical_bessel_j0",
+        torch_opinfo_name="special.spherical_bessel_j0",
+        op_db=op_db,
+        decorators=(
+            toleranceOverride(
+                {
+                    torch.float32: tol(atol=1e-03, rtol=1e-03),
+                    torch.float64: tol(atol=1e-05, rtol=1e-03),
+                }
+            ),
+        ),
+    ),
+    #
+    # Elementwise Binary Special OpInfos
+    #
+    ElementwiseBinaryPythonRefInfo(
+        "_refs.special.zeta",
+        torch_opinfo_name="special.zeta",
+        supports_one_python_scalar=True,
+        op_db=op_db,
+        skips=(
+            # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu
+            DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
+        ),
+    ),
+]
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py
new file mode 100644
index 0000000000000000000000000000000000000000..d720442e39987853b3fbe2c7e863f2d0bbed9c16
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py
@@ -0,0 +1,204 @@
+from torch.testing._internal.opinfo.core import (
+    BinaryUfuncInfo,
+    OpInfo,
+    ReductionOpInfo,
+    UnaryUfuncInfo,
+)
+
+# NOTE [Python References]
+# Python References emulate existing PyTorch operations, but can ultimately
+# be expressed in terms of "primitive" operations from torch._prims.
+#
+# These references are experimental.
+# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577
+# for additional context.
+#
+# Python Reference OpInfos should be added to the python_ref_db list below.
+# Tests can opt into running on these references by including
+# that list in the Sequence they pass to the @ops decorator.
+#
+# When a Python Reference OpInfo is constructed, a pointer to an
+# existing OpInfo must be provided using the torch_opinfo_name kwarg.
+# The existing OpInfo with that name and variant name will be found
+# to inherit from.
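+#
+# For example, a unary reference entry is declared roughly like this
+# (an illustrative excerpt mirroring definitions/special.py above):
+#
+#   ElementwiseUnaryPythonRefInfo(
+#       "_refs.special.ndtr",
+#       torch_opinfo_name="special.ndtr",
+#       op_db=op_db,
+#   )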
+#
+# Instead of just inheriting the existing OpInfo's metadata, the
+# Python Reference OpInfos inherit the existing OpInfo's
+# construction arguments. These arguments can be overridden
+# by adding kwargs to the constructor.
+
+
+def _find_referenced_opinfo(referenced_name, variant_name, *, op_db=None):
+    """
+    Finds the OpInfo with the given name and variant name.
+    """
+    # NOTE: searching the global op_db doesn't work when OpInfos are split into
+    # different modules, as otherwise the op_db will not be fully constructed
+    # yet. So, instead the local op_db must be passed in explicitly.
+    if op_db is None:
+        from torch.testing._internal.common_methods_invocations import op_db
+
+    for opinfo in op_db:
+        if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name:
+            return opinfo
+
+
+def _inherit_constructor_args(name, op, inherited, overrides):
+    # inherits metadata
+    common_kwargs = {
+        "name": name,
+        "op": op,
+        "aliases": None,  # TODO add a check for alias coverage
+        "method_variant": None,
+        "inplace_variant": None,  # TODO: add a check for inplace coverage
+        "supports_scripting": False,
+    }
+
+    # Acquires inherited kwargs
+    kwargs = inherited.copy()
+
+    # Fixes metadata
+    if "kwargs" in kwargs:
+        kwargs.update(kwargs["kwargs"])
+        del kwargs["kwargs"]
+    if "self" in kwargs:
+        del kwargs["self"]
+    if "__class__" in kwargs:
+        del kwargs["__class__"]
+    if "skips" in kwargs:
+        del kwargs["skips"]
+    if "decorators" in kwargs:
+        del kwargs["decorators"]
+
+    # Overrides metadata
+    kwargs.update(common_kwargs)
+    kwargs.update(overrides)
+
+    # At the moment no prims support autograd, so we must not run autograd
+    # tests e.g. when testing dtype support. Once we start writing autograd
+    # formulas for prims this can be removed.
+    kwargs["supports_autograd"] = False
+    kwargs["supports_gradgrad"] = False
+    kwargs["supports_fwgrad_bwgrad"] = False
+    kwargs["supports_inplace_autograd"] = False
+    kwargs["supports_forward_ad"] = False
+
+    return kwargs
+
+
+class PythonRefInfo(OpInfo):
+    """
+    An OpInfo for a Python reference of an OpInfo base class operation.
+    """
+
+    def __init__(
+        self,
+        name,  # the string name of the callable Python reference
+        *,
+        op=None,  # the function variant of the operation, populated as torch.<name> if None
+        op_db=None,  # The database of opinfos to search for the parent opinfo
+        torch_opinfo_name,  # the string name of the corresponding torch opinfo
+        torch_opinfo_variant_name="",  # the variant name for corresponding torch opinfo
+        validate_view_consistency=True,
+        **kwargs,
+    ):  # additional kwargs override kwargs inherited from the torch opinfo
+        self.torch_opinfo_name = torch_opinfo_name
+        self.torch_opinfo_variant_name = torch_opinfo_variant_name
+        self.torch_opinfo = _find_referenced_opinfo(
+            torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
+        )
+        self.validate_view_consistency = validate_view_consistency
+        assert isinstance(self.torch_opinfo, OpInfo)
+
+        inherited = self.torch_opinfo._original_opinfo_args
+        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
+        super().__init__(**ukwargs)
+
+
+class ReductionPythonRefInfo(ReductionOpInfo):
+    """
+    An OpInfo for a Python reference of a reduction operation.
+    """
+
+    def __init__(
+        self,
+        name,  # the string name of the callable Python reference
+        *,
+        op=None,  # the function variant of the operation, populated as torch.<name> if None
+        op_db=None,  # The database of opinfos to search for the parent opinfo
+        torch_opinfo_name,  # the string name of the corresponding torch opinfo
+        torch_opinfo_variant_name="",  # the variant name for corresponding torch opinfo
+        **kwargs,
+    ):  # additional kwargs override kwargs inherited from the torch opinfo
+        self.torch_opinfo_name = torch_opinfo_name
+        self.torch_opinfo_variant_name = torch_opinfo_variant_name
+        self.torch_opinfo = _find_referenced_opinfo(
+            torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
+        )
+        assert isinstance(self.torch_opinfo, ReductionOpInfo)
+
+        inherited = self.torch_opinfo._original_reduction_args
+        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
+
+        # See https://github.com/pytorch/pytorch/issues/77216
+        self.validate_view_consistency = False
+
+        super().__init__(**ukwargs)
+
+
+class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo):
+    """
+    An OpInfo for a Python reference of an elementwise unary operation.
+    """
+
+    def __init__(
+        self,
+        name,  # the string name of the callable Python reference
+        *,
+        op=None,  # the function variant of the operation, populated as torch.<name> if None
+        op_db=None,  # The database of opinfos to search for the parent opinfo
+        torch_opinfo_name,  # the string name of the corresponding torch opinfo
+        torch_opinfo_variant_name="",  # the variant name for corresponding torch opinfo
+        validate_view_consistency=True,
+        **kwargs,
+    ):  # additional kwargs override kwargs inherited from the torch opinfo
+        self.torch_opinfo_name = torch_opinfo_name
+        self.torch_opinfo_variant_name = torch_opinfo_variant_name
+        self.torch_opinfo = _find_referenced_opinfo(
+            torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
+        )
+        self.validate_view_consistency = validate_view_consistency
+        assert isinstance(self.torch_opinfo, UnaryUfuncInfo)
+
+        inherited = self.torch_opinfo._original_unary_ufunc_args
+        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
+
+        super().__init__(**ukwargs)
+
+
+class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo):
+    """
+    An OpInfo for a Python reference of an elementwise binary operation.
+    """
+
+    def __init__(
+        self,
+        name,  # the string name of the callable Python reference
+        *,
+        op=None,  # the function variant of the operation, populated as torch.<name> if None
+        op_db=None,  # The database of opinfos to search for the parent opinfo
+        torch_opinfo_name,  # the string name of the corresponding torch opinfo
+        torch_opinfo_variant_name="",  # the variant name for corresponding torch opinfo
+        **kwargs,
+    ):  # additional kwargs override kwargs inherited from the torch opinfo
+        self.torch_opinfo_name = torch_opinfo_name
+        self.torch_opinfo_variant_name = torch_opinfo_variant_name
+        self.torch_opinfo = _find_referenced_opinfo(
+            torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
+        )
+        assert isinstance(self.torch_opinfo, BinaryUfuncInfo)
+
+        inherited = self.torch_opinfo._original_binary_ufunc_args
+        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
+
+        super().__init__(**ukwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc46fe141c165569aa46ef08acdaca1343553e95
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py
@@ -0,0 +1,271 @@
+import collections
+import warnings
+from functools import partial, wraps
+from typing import Sequence
+
+import numpy as np
+
+import torch
+from torch.testing._internal.common_cuda import TEST_CUDA
+from torch.testing._internal.common_dtype import (
+    _dispatch_dtypes,
+    all_types,
+    all_types_and,
+    all_types_and_complex,
+    all_types_and_complex_and,
+    all_types_and_half,
+    complex_types,
+    floating_and_complex_types,
+    floating_and_complex_types_and,
+    floating_types,
+    floating_types_and,
+    floating_types_and_half,
+    integral_types,
+    integral_types_and,
+)
+from torch.testing._internal.common_utils import torch_to_numpy_dtype_dict
+
+
+COMPLETE_DTYPES_DISPATCH = (
+    all_types,
+    all_types_and_complex,
+    all_types_and_half,
+    floating_types,
+    floating_and_complex_types,
+    floating_types_and_half,
+    integral_types,
+    complex_types,
+)
+
+EXTENSIBLE_DTYPE_DISPATCH = (
+    all_types_and_complex_and,
+    floating_types_and,
+    floating_and_complex_types_and,
+    integral_types_and,
+    all_types_and,
+)
+
+# Better way to acquire devices?
+DEVICES = ["cpu"] + (["cuda"] if TEST_CUDA else [])
+
+
+class _dynamic_dispatch_dtypes(_dispatch_dtypes):
+    # Class to tag the dynamically generated types.
+    pass
+
+
+def get_supported_dtypes(op, sample_inputs_fn, device_type):
+    # Returns the supported dtypes for the given operator and device_type pair.
+    assert device_type in ["cpu", "cuda"]
+    if not TEST_CUDA and device_type == "cuda":
+        warnings.warn(
+            "WARNING: CUDA is not available, empty_dtypes dispatch will be returned!"
+        )
+        return _dynamic_dispatch_dtypes(())
+
+    supported_dtypes = set()
+    for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half):
+        try:
+            samples = sample_inputs_fn(op, device_type, dtype, False)
+        except RuntimeError:
+            # If `sample_inputs_fn` doesn't support sampling for a given
+            # `dtype`, we assume that the `dtype` is not supported.
+            # We raise a warning, so that the user knows that this was the case
+            # and can investigate if there was an issue with the `sample_inputs_fn`.
+            warnings.warn(
+                f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}"
+            )
+            continue
+
+        # We assume the dtype is supported
+        # only if all samples pass for the given dtype.
+        supported = True
+        for sample in samples:
+            try:
+                op(sample.input, *sample.args, **sample.kwargs)
+            except RuntimeError as re:
+                # dtype is not supported
+                supported = False
+                break
+
+        if supported:
+            supported_dtypes.add(dtype)
+
+    return _dynamic_dispatch_dtypes(supported_dtypes)
+
+
+def dtypes_dispatch_hint(dtypes):
+    # Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH
+    # and EXTENSIBLE_DTYPE_DISPATCH) and its string representation for the passed `dtypes`.
+    return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str")
+
+    # If CUDA is not available, dtypes will be empty.
+    if len(dtypes) == 0:
+        return return_type((), str(tuple()))
+
+    set_dtypes = set(dtypes)
+    for dispatch in COMPLETE_DTYPES_DISPATCH:
+        # Short circuit if we get an exact match.
+        if set(dispatch()) == set_dtypes:
+            return return_type(dispatch, dispatch.__name__ + "()")
+
+    chosen_dispatch = None
+    chosen_dispatch_score = 0.0
+    for dispatch in EXTENSIBLE_DTYPE_DISPATCH:
+        dispatch_dtypes = set(dispatch())
+        if not dispatch_dtypes.issubset(set_dtypes):
+            continue
+
+        score = len(dispatch_dtypes)
+        if score > chosen_dispatch_score:
+            chosen_dispatch_score = score
+            chosen_dispatch = dispatch
+
+    # If the user passed dtypes that are lower than the lowest available
+    # dispatch type (not likely, but possible in this code path).
+    if chosen_dispatch is None:
+        return return_type((), str(dtypes))
+
+    # Use the chosen dispatch function, extended with the dtypes it is missing.
+    return return_type(
+        partial(chosen_dispatch, *tuple(set(dtypes) - set(chosen_dispatch()))),
+        chosen_dispatch.__name__ + str(tuple(set(dtypes) - set(chosen_dispatch()))),
+    )
+
+
+def is_dynamic_dtype_set(op):
+    # Detect if the OpInfo entry acquired dtypes dynamically
+    # using `get_supported_dtypes`.
+    return op.dynamic_dtypes
+
+
+def str_format_dynamic_dtype(op):
+    fmt_str = f"""
+        OpInfo({op.name},
+               dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str},
+               dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str},
+        )
+        """
+
+    return fmt_str
+
+
+def np_unary_ufunc_integer_promotion_wrapper(fn):
+    # Wrapper that passes PyTorch's default scalar
+    # type as an argument to the wrapped NumPy
+    # unary ufunc when given an integer input.
+    # This mimics PyTorch's integer->floating point
+    # type promotion.
+    #
+    # This is necessary when NumPy promotes
+    # integer types to double, since PyTorch promotes
+    # integer types to the default scalar type.
+
+    # Helper to determine if promotion is needed
+    def is_integral(dtype):
+        return dtype in [
+            np.bool_,
+            bool,
+            np.uint8,
+            np.int8,
+            np.int16,
+            np.int32,
+            np.int64,
+        ]
+
+    @wraps(fn)
+    def wrapped_fn(x):
+        # As the default dtype can change, acquire it when the function is called.
+        # NOTE: Promotion in PyTorch is from integer types to the default dtype
+        np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
+
+        if is_integral(x.dtype):
+            return fn(x.astype(np_dtype))
+        return fn(x)
+
+    return wrapped_fn
+
+
+def reference_reduction_numpy(f, supports_keepdims=True):
+    """Wraps a NumPy reduction operator.
+
+    The wrapper function will forward dim, keepdim, mask, and identity
+    kwargs to the wrapped function as the NumPy equivalent axis,
+    keepdims, where, and initial kwargs, respectively.
+
+    Args:
+        f: NumPy reduction operator to wrap
+        supports_keepdims (bool, optional): Whether the NumPy operator accepts
+            keepdims parameter. If it does not, the wrapper will manually unsqueeze
+            the reduced dimensions if it was called with keepdim=True. Defaults to True.
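+
+    Example (an illustrative sketch)::
+
+        ref_sum = reference_reduction_numpy(np.sum)
+        ref_sum(np.arange(6).reshape(2, 3), dim=0, keepdim=True)
+        # -> array([[3, 5, 7]])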
+
+    Returns:
+        Wrapped function
+
+    """
+
+    @wraps(f)
+    def wrapper(x: np.ndarray, *args, **kwargs):
+        # Copy keys into a set
+        keys = set(kwargs.keys())
+
+        dim = kwargs.pop("dim", None)
+        keepdim = kwargs.pop("keepdim", False)
+
+        if "dim" in keys:
+            dim = tuple(dim) if isinstance(dim, Sequence) else dim
+
+            # NumPy reductions don't accept dim=0 for scalar inputs, so we
+            # convert it to None when dim is equivalent to reducing over all
+            # dimensions of a zero-dimensional input
+            if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:
+                kwargs["axis"] = None
+            else:
+                kwargs["axis"] = dim
+
+        if "keepdim" in keys and supports_keepdims:
+            kwargs["keepdims"] = keepdim
+
+        if "mask" in keys:
+            mask = kwargs.pop("mask")
+            if mask is not None:
+                assert mask.layout == torch.strided
+                kwargs["where"] = mask.cpu().numpy()
+
+        if "identity" in keys:
+            identity = kwargs.pop("identity")
+            if identity is not None:
+                if identity.dtype is torch.bfloat16:
+                    # NumPy has no bfloat16 dtype, so round-trip via float32
+                    identity = identity.cpu().to(torch.float32)
+                else:
+                    identity = identity.cpu()
+                kwargs["initial"] = identity.numpy()
+
+        result = f(x, *args, **kwargs)
+
+        # Unsqueeze reduced dimensions if NumPy does not support keepdims
+        if keepdim and not supports_keepdims and x.ndim > 0:
+            dim = list(range(x.ndim)) if dim is None else dim
+            result = np.expand_dims(result, dim)
+
+        return result
+
+    return wrapper
+
+
+def prod_numpy(a, *args, **kwargs):
+    """
+    Calls np.prod with dtype np.int64 if the input type is a signed integer,
+    or np.uint64 if it is unsigned. This is necessary because np.prod
+    defaults to int32 on Windows while it uses int64 on Linux, which caused
+    the integer overflow reported in
+    https://github.com/pytorch/pytorch/issues/77320.
+
+    Returns:
+        np.prod of input
+    """
+    if "dtype" not in kwargs:
+        if np.issubdtype(a.dtype, np.signedinteger):
+            a = a.astype(np.int64)
+        elif np.issubdtype(a.dtype, np.unsignedinteger):
+            a = a.astype(np.uint64)
+
+    fn = reference_reduction_numpy(np.prod)
+    return fn(a, *args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb6631bd2e5b557cba1f9da144e2a599a51245af
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2aba373b2a45525cbf4a4bb1ee68c6297c6e438
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b81ab87074662dbc5e646b2fa2e034ad768d18b6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc differ
diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py new file mode 100644 index 0000000000000000000000000000000000000000..32e008e7f5ed445b09d7658e7024c50f8abbbce9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py @@ -0,0 +1,9 @@ +import torch # noqa: F401 + + +def div_int_nofuture(): + return 1 / 2 + + +def div_float_nofuture(): + return 3.14 / 0.125
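+
+
+# Expected values under Python 3 true division (an illustrative note):
+#   div_int_nofuture()   -> 0.5
+#   div_float_nofuture() -> 25.12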