applied-ai-018 committed on
Commit
f374d72
·
verified ·
1 Parent(s): 5cda27d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/9.attention.dense.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/testing/__init__.py +3 -0
  6. venv/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/testing/_comparison.py +1575 -0
  10. venv/lib/python3.10/site-packages/torch/testing/_creation.py +267 -0
  11. venv/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py +270 -0
  12. venv/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py +1525 -0
  13. venv/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py +111 -0
  14. venv/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py +134 -0
  15. venv/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py +0 -0
  16. venv/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py +2033 -0
  17. venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py +0 -0
  18. venv/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py +581 -0
  19. venv/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py +77 -0
  20. venv/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py +456 -0
  21. venv/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py +206 -0
  22. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py +1 -0
  24. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py +66 -0
  25. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py +42 -0
  26. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py +51 -0
  27. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py +113 -0
  28. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py +733 -0
  29. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py +0 -0
  30. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py +66 -0
  31. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py +32 -0
  32. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py +494 -0
  33. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py +149 -0
  34. venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py +185 -0
  35. venv/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py +87 -0
  36. venv/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py +722 -0
  37. venv/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py +893 -0
  38. venv/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py +208 -0
  39. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py +4 -0
  40. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py +0 -0
  45. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py +27 -0
  46. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7a8eb4ef8b3a4658ebc534a4b6dc5432f9eebd027d6bb12ceda7668ffdab87d
+ size 16778396
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5ebc1128a44ae3c445a0a95f50ffb143ebf08760b3a98925f758af7b9ebea88
+ size 33555612
ckpts/universal/global_step120/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5345922f7704d6731c97aa71da69444eeff5bfce98eb2a060ff29452a7c49fab
+ size 33555627
ckpts/universal/global_step120/zero/9.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ef841dd22ee9d4328da498d9ab5d68f09462ad5ea07251cb984e42c4e2e5c90
+ size 16778317
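Each checkpoint file added above is tracked by Git LFS, so the commit records only a three-line pointer (spec version, SHA-256 object id, and payload size) rather than the tensor data itself; the real payloads are fetched with `git lfs pull`. A minimal sketch of reading such a pointer — the helper below is hypothetical and not part of the commit:

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields.

    A pointer file looks like the three-line blocks above:
        version https://git-lfs.github.com/spec/v1
        oid sha256:<hex digest>
        size <bytes>
    """
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, value = line.split(" ", 1)
            fields[key] = value
    return fields

# Hypothetical usage; the actual tensor data must first be pulled with `git lfs pull`.
# pointer = parse_lfs_pointer("ckpts/universal/global_step120/zero/10.attention.dense.weight/exp_avg.pt")
# print(pointer["oid"], pointer["size"])
```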
venv/lib/python3.10/site-packages/torch/testing/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from torch._C import FileCheck as FileCheck
+ from ._comparison import assert_allclose, assert_close as assert_close
+ from ._creation import make_tensor as make_tensor
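This `__init__.py` re-exports the public helpers added by this commit — `assert_close` (and the deprecated `assert_allclose`) from `_comparison.py` and `make_tensor` from `_creation.py`. A short sketch of typical usage in a test (illustrative, not part of the diff):

```python
import torch
from torch.testing import assert_close, make_tensor

# Build a random tensor with a given shape, dtype, and device.
x = make_tensor((2, 3), dtype=torch.float32, device="cpu")

# A perturbation within the default float32 tolerances still compares as close.
y = x + 1e-7
assert_close(y, x)

# Mismatches raise an AssertionError describing the greatest absolute/relative differences.
# assert_close(x + 1.0, x)  # would raise
```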
venv/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (347 Bytes).
 
venv/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc ADDED
Binary file (52.2 kB).
 
venv/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc ADDED
Binary file (9.22 kB).
 
venv/lib/python3.10/site-packages/torch/testing/_comparison.py ADDED
@@ -0,0 +1,1575 @@
+ import abc
+ import cmath
+ import collections.abc
+ import contextlib
+ import warnings
+ from typing import (
+     Any,
+     Callable,
+     Collection,
+     Dict,
+     List,
+     NoReturn,
+     Optional,
+     Sequence,
+     Tuple,
+     Type,
+     Union,
+ )
+
+ import torch
+
+ try:
+     import numpy as np
+
+     NUMPY_AVAILABLE = True
+ except ModuleNotFoundError:
+     NUMPY_AVAILABLE = False
+
+
+ class ErrorMeta(Exception):
+     """Internal testing exception that carries error metadata."""
+
+     def __init__(
+         self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
+     ) -> None:
+         super().__init__(
+             "If you are a user and see this message during normal operation "
+             "please file an issue at https://github.com/pytorch/pytorch/issues. "
+             "If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` "
+             "for user facing errors."
+         )
+         self.type = type
+         self.msg = msg
+         self.id = id
+
+     def to_error(
+         self, msg: Optional[Union[str, Callable[[str], str]]] = None
+     ) -> Exception:
+         if not isinstance(msg, str):
+             generated_msg = self.msg
+             if self.id:
+                 generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}"
+
+             msg = msg(generated_msg) if callable(msg) else generated_msg
+
+         return self.type(msg)
+
+
+ # Some analysis of tolerance by logging tests from test_torch.py can be found in
+ # https://github.com/pytorch/pytorch/pull/32538.
+ # {dtype: (rtol, atol)}
+ _DTYPE_PRECISIONS = {
+     torch.float16: (0.001, 1e-5),
+     torch.bfloat16: (0.016, 1e-5),
+     torch.float32: (1.3e-6, 1e-5),
+     torch.float64: (1e-7, 1e-7),
+     torch.complex32: (0.001, 1e-5),
+     torch.complex64: (1.3e-6, 1e-5),
+     torch.complex128: (1e-7, 1e-7),
+ }
+ # The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in
+ # their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values`
+ _DTYPE_PRECISIONS.update(
+     dict.fromkeys(
+         (torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32),
+         _DTYPE_PRECISIONS[torch.float32],
+     )
+ )
+
+
+ def default_tolerances(
+     *inputs: Union[torch.Tensor, torch.dtype],
+     dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None,
+ ) -> Tuple[float, float]:
+     """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype.
+
+     See :func:`assert_close` for a table of the default tolerance for each dtype.
+
+     Returns:
+         (Tuple[float, float]): Loosest tolerances of all input dtypes.
+     """
+     dtypes = []
+     for input in inputs:
+         if isinstance(input, torch.Tensor):
+             dtypes.append(input.dtype)
+         elif isinstance(input, torch.dtype):
+             dtypes.append(input)
+         else:
+             raise TypeError(
+                 f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead."
+             )
+     dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS
+     rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes])
+     return max(rtols), max(atols)
+
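As the `_DTYPE_PRECISIONS` table and `default_tolerances` above show, when inputs of several dtypes are compared the loosest rtol/atol among the dtypes involved wins. A small illustration using the internal helper added in this file (internal API, so treat the import path as an assumption that may change):

```python
import torch
from torch.testing._comparison import default_tolerances

# float16 (rtol=1e-3, atol=1e-5) vs. float32 (rtol=1.3e-6, atol=1e-5):
# the loosest value per component is used.
rtol, atol = default_tolerances(torch.float16, torch.float32)
print(rtol, atol)  # expected: 0.001 1e-05

# Tensors work too; their dtypes are taken.
rtol, atol = default_tolerances(torch.ones(2, dtype=torch.bfloat16), torch.float64)
print(rtol, atol)  # expected: 0.016 1e-05
```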
+
+ def get_tolerances(
+     *inputs: Union[torch.Tensor, torch.dtype],
+     rtol: Optional[float],
+     atol: Optional[float],
+     id: Tuple[Any, ...] = (),
+ ) -> Tuple[float, float]:
+     """Gets the absolute and relative tolerances to be used for numeric comparisons.
+
+     If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of
+     :func:`default_tolerances` is used.
+
+     Raises:
+         ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified.
+
+     Returns:
+         (Tuple[float, float]): Valid absolute and relative tolerances.
+     """
+     if (rtol is None) ^ (atol is None):
+         # We require both tolerances to be omitted or specified, because specifying only one might lead to surprising
+         # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0.
+         raise ErrorMeta(
+             ValueError,
+             f"Both 'rtol' and 'atol' must be either specified or omitted, "
+             f"but got no {'rtol' if rtol is None else 'atol'}.",
+             id=id,
+         )
+     elif rtol is not None and atol is not None:
+         return rtol, atol
+     else:
+         return default_tolerances(*inputs)
+
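`get_tolerances` enforces that `rtol` and `atol` are either both given or both omitted, which is the behaviour users see through `assert_close`. An illustrative sketch of the three cases:

```python
import torch
from torch.testing import assert_close

a = torch.tensor([1.0, 2.0])
b = torch.tensor([1.0, 2.0 + 1e-4])

# Both tolerances given: explicit control over the closeness check.
assert_close(a, b, rtol=0.0, atol=1e-3)

# Neither given: dtype-based defaults from _DTYPE_PRECISIONS are used
# (this particular pair is too far apart for float32 defaults and would raise).
# assert_close(a, b)

# Only one given: rejected with a ValueError, to avoid surprising
# half-specified comparisons.
# assert_close(a, b, atol=1e-3)
```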
138
+
139
+ def _make_mismatch_msg(
140
+ *,
141
+ default_identifier: str,
142
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
143
+ extra: Optional[str] = None,
144
+ abs_diff: float,
145
+ abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
146
+ atol: float,
147
+ rel_diff: float,
148
+ rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
149
+ rtol: float,
150
+ ) -> str:
151
+ """Makes a mismatch error message for numeric values.
152
+
153
+ Args:
154
+ default_identifier (str): Default description of the compared values, e.g. "Tensor-likes".
155
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides
156
+ ``default_identifier``. Can be passed as callable in which case it will be called with
157
+ ``default_identifier`` to create the description at runtime.
158
+ extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics.
159
+ abs_diff (float): Absolute difference.
160
+ abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference.
161
+ atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are
162
+ ``> 0``.
163
+ rel_diff (float): Relative difference.
164
+ rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference.
165
+ rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are
166
+ ``> 0``.
167
+ """
168
+ equality = rtol == 0 and atol == 0
169
+
170
+ def make_diff_msg(
171
+ *,
172
+ type: str,
173
+ diff: float,
174
+ idx: Optional[Union[int, Tuple[int, ...]]],
175
+ tol: float,
176
+ ) -> str:
177
+ if idx is None:
178
+ msg = f"{type.title()} difference: {diff}"
179
+ else:
180
+ msg = f"Greatest {type} difference: {diff} at index {idx}"
181
+ if not equality:
182
+ msg += f" (up to {tol} allowed)"
183
+ return msg + "\n"
184
+
185
+ if identifier is None:
186
+ identifier = default_identifier
187
+ elif callable(identifier):
188
+ identifier = identifier(default_identifier)
189
+
190
+ msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n"
191
+
192
+ if extra:
193
+ msg += f"{extra.strip()}\n"
194
+
195
+ msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol)
196
+ msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol)
197
+
198
+ return msg.strip()
199
+
200
+
201
+ def make_scalar_mismatch_msg(
202
+ actual: Union[bool, int, float, complex],
203
+ expected: Union[bool, int, float, complex],
204
+ *,
205
+ rtol: float,
206
+ atol: float,
207
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
208
+ ) -> str:
209
+ """Makes a mismatch error message for scalars.
210
+
211
+ Args:
212
+ actual (Union[bool, int, float, complex]): Actual scalar.
213
+ expected (Union[bool, int, float, complex]): Expected scalar.
214
+ rtol (float): Relative tolerance.
215
+ atol (float): Absolute tolerance.
216
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed
217
+ as callable in which case it will be called by the default value to create the description at runtime.
218
+ Defaults to "Scalars".
219
+ """
220
+ abs_diff = abs(actual - expected)
221
+ rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected)
222
+ return _make_mismatch_msg(
223
+ default_identifier="Scalars",
224
+ identifier=identifier,
225
+ extra=f"Expected {expected} but got {actual}.",
226
+ abs_diff=abs_diff,
227
+ atol=atol,
228
+ rel_diff=rel_diff,
229
+ rtol=rtol,
230
+ )
231
+
232
+
233
+ def make_tensor_mismatch_msg(
234
+ actual: torch.Tensor,
235
+ expected: torch.Tensor,
236
+ matches: torch.Tensor,
237
+ *,
238
+ rtol: float,
239
+ atol: float,
240
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
241
+ ):
242
+ """Makes a mismatch error message for tensors.
243
+
244
+ Args:
245
+ actual (torch.Tensor): Actual tensor.
246
+ expected (torch.Tensor): Expected tensor.
247
+ matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the
248
+ location of matches.
249
+ rtol (float): Relative tolerance.
250
+ atol (float): Absolute tolerance.
251
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed
252
+ as callable in which case it will be called by the default value to create the description at runtime.
253
+ Defaults to "Tensor-likes".
254
+ """
255
+
256
+ def unravel_flat_index(flat_index: int) -> Tuple[int, ...]:
257
+ if not matches.shape:
258
+ return ()
259
+
260
+ inverse_index = []
261
+ for size in matches.shape[::-1]:
262
+ div, mod = divmod(flat_index, size)
263
+ flat_index = div
264
+ inverse_index.append(mod)
265
+
266
+ return tuple(inverse_index[::-1])
267
+
268
+ number_of_elements = matches.numel()
269
+ total_mismatches = number_of_elements - int(torch.sum(matches))
270
+ extra = (
271
+ f"Mismatched elements: {total_mismatches} / {number_of_elements} "
272
+ f"({total_mismatches / number_of_elements:.1%})"
273
+ )
274
+
275
+ actual_flat = actual.flatten()
276
+ expected_flat = expected.flatten()
277
+ matches_flat = matches.flatten()
278
+
279
+ if not actual.dtype.is_floating_point and not actual.dtype.is_complex:
280
+ # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid
281
+ # overflow
282
+ actual_flat = actual_flat.to(torch.int64)
283
+ expected_flat = expected_flat.to(torch.int64)
284
+
285
+ abs_diff = torch.abs(actual_flat - expected_flat)
286
+ # Ensure that only mismatches are used for the max_abs_diff computation
287
+ abs_diff[matches_flat] = 0
288
+ max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0)
289
+
290
+ rel_diff = abs_diff / torch.abs(expected_flat)
291
+ # Ensure that only mismatches are used for the max_rel_diff computation
292
+ rel_diff[matches_flat] = 0
293
+ max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0)
294
+ return _make_mismatch_msg(
295
+ default_identifier="Tensor-likes",
296
+ identifier=identifier,
297
+ extra=extra,
298
+ abs_diff=max_abs_diff.item(),
299
+ abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)),
300
+ atol=atol,
301
+ rel_diff=max_rel_diff.item(),
302
+ rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)),
303
+ rtol=rtol,
304
+ )
305
+
306
+
307
+ class UnsupportedInputs(Exception): # noqa: B903
308
+ """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs."""
309
+
310
+
311
+ class Pair(abc.ABC):
312
+ """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`.
313
+
314
+ Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison.
315
+
316
+ Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the
317
+ super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to
318
+ handle the inputs and the next pair type will be tried.
319
+
320
+ All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can
321
+ be used to automatically handle overwriting the message with a user supplied one and id handling.
322
+ """
323
+
324
+ def __init__(
325
+ self,
326
+ actual: Any,
327
+ expected: Any,
328
+ *,
329
+ id: Tuple[Any, ...] = (),
330
+ **unknown_parameters: Any,
331
+ ) -> None:
332
+ self.actual = actual
333
+ self.expected = expected
334
+ self.id = id
335
+ self._unknown_parameters = unknown_parameters
336
+
337
+ @staticmethod
338
+ def _inputs_not_supported() -> NoReturn:
339
+ raise UnsupportedInputs()
340
+
341
+ @staticmethod
342
+ def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]):
343
+ """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise."""
344
+ if not all(isinstance(input, cls) for input in inputs):
345
+ Pair._inputs_not_supported()
346
+
347
+ def _fail(
348
+ self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
349
+ ) -> NoReturn:
350
+ """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id.
351
+
352
+ .. warning::
353
+
354
+ If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id``
355
+ explicitly.
356
+ """
357
+ raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id)
358
+
359
+ @abc.abstractmethod
360
+ def compare(self) -> None:
361
+ """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch."""
362
+
363
+ def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]:
364
+ """Returns extra information that will be included in the representation.
365
+
366
+ Should be overwritten by all subclasses that use additional options. The representation of the object will only
367
+ be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of
368
+ key-value-pairs or attribute names.
369
+ """
370
+ return []
371
+
372
+ def __repr__(self) -> str:
373
+ head = f"{type(self).__name__}("
374
+ tail = ")"
375
+ body = [
376
+ f" {name}={value!s},"
377
+ for name, value in [
378
+ ("id", self.id),
379
+ ("actual", self.actual),
380
+ ("expected", self.expected),
381
+ *[
382
+ (extra, getattr(self, extra)) if isinstance(extra, str) else extra
383
+ for extra in self.extra_repr()
384
+ ],
385
+ ]
386
+ ]
387
+ return "\n".join((head, *body, *tail))
388
+
389
+
390
+ class ObjectPair(Pair):
391
+ """Pair for any type of inputs that will be compared with the `==` operator.
392
+
393
+ .. note::
394
+
395
+ Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs
396
+ couldn't handle the inputs.
397
+
398
+ """
399
+
400
+ def compare(self) -> None:
401
+ try:
402
+ equal = self.actual == self.expected
403
+ except Exception as error:
404
+ # We are not using `self._raise_error_meta` here since we need the exception chaining
405
+ raise ErrorMeta(
406
+ ValueError,
407
+ f"{self.actual} == {self.expected} failed with:\n{error}.",
408
+ id=self.id,
409
+ ) from error
410
+
411
+ if not equal:
412
+ self._fail(AssertionError, f"{self.actual} != {self.expected}")
413
+
414
+
415
+ class NonePair(Pair):
416
+ """Pair for ``None`` inputs."""
417
+
418
+ def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
419
+ if not (actual is None or expected is None):
420
+ self._inputs_not_supported()
421
+
422
+ super().__init__(actual, expected, **other_parameters)
423
+
424
+ def compare(self) -> None:
425
+ if not (self.actual is None and self.expected is None):
426
+ self._fail(
427
+ AssertionError, f"None mismatch: {self.actual} is not {self.expected}"
428
+ )
429
+
430
+
431
+ class BooleanPair(Pair):
432
+ """Pair for :class:`bool` inputs.
433
+
434
+ .. note::
435
+
436
+ If ``numpy`` is available, also handles :class:`numpy.bool_` inputs.
437
+
438
+ """
439
+
440
+ def __init__(
441
+ self,
442
+ actual: Any,
443
+ expected: Any,
444
+ *,
445
+ id: Tuple[Any, ...],
446
+ **other_parameters: Any,
447
+ ) -> None:
448
+ actual, expected = self._process_inputs(actual, expected, id=id)
449
+ super().__init__(actual, expected, **other_parameters)
450
+
451
+ @property
452
+ def _supported_types(self) -> Tuple[Type, ...]:
453
+ cls: List[Type] = [bool]
454
+ if NUMPY_AVAILABLE:
455
+ cls.append(np.bool_)
456
+ return tuple(cls)
457
+
458
+ def _process_inputs(
459
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
460
+ ) -> Tuple[bool, bool]:
461
+ self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
462
+ actual, expected = (
463
+ self._to_bool(bool_like, id=id) for bool_like in (actual, expected)
464
+ )
465
+ return actual, expected
466
+
467
+ def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool:
468
+ if isinstance(bool_like, bool):
469
+ return bool_like
470
+ elif isinstance(bool_like, np.bool_):
471
+ return bool_like.item()
472
+ else:
473
+ raise ErrorMeta(
474
+ TypeError, f"Unknown boolean type {type(bool_like)}.", id=id
475
+ )
476
+
477
+ def compare(self) -> None:
478
+ if self.actual is not self.expected:
479
+ self._fail(
480
+ AssertionError,
481
+ f"Booleans mismatch: {self.actual} is not {self.expected}",
482
+ )
483
+
484
+
485
+ class NumberPair(Pair):
486
+ """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs.
487
+
488
+ .. note::
489
+
490
+ If ``numpy`` is available, also handles :class:`numpy.number` inputs.
491
+
492
+ Kwargs:
493
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
494
+ values based on the type are selected with the below table.
495
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
496
+ values based on the type are selected with the below table.
497
+ equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
498
+ check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``.
499
+
500
+ The following table displays correspondence between Python number type and the ``torch.dtype``'s. See
501
+ :func:`assert_close` for the corresponding tolerances.
502
+
503
+ +------------------+-------------------------------+
504
+ | ``type`` | corresponding ``torch.dtype`` |
505
+ +==================+===============================+
506
+ | :class:`int` | :attr:`~torch.int64` |
507
+ +------------------+-------------------------------+
508
+ | :class:`float` | :attr:`~torch.float64` |
509
+ +------------------+-------------------------------+
510
+ | :class:`complex` | :attr:`~torch.complex128` |
511
+ +------------------+-------------------------------+
512
+ """
513
+
514
+ _TYPE_TO_DTYPE = {
515
+ int: torch.int64,
516
+ float: torch.float64,
517
+ complex: torch.complex128,
518
+ }
519
+ _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys())
520
+
521
+ def __init__(
522
+ self,
523
+ actual: Any,
524
+ expected: Any,
525
+ *,
526
+ id: Tuple[Any, ...] = (),
527
+ rtol: Optional[float] = None,
528
+ atol: Optional[float] = None,
529
+ equal_nan: bool = False,
530
+ check_dtype: bool = False,
531
+ **other_parameters: Any,
532
+ ) -> None:
533
+ actual, expected = self._process_inputs(actual, expected, id=id)
534
+ super().__init__(actual, expected, id=id, **other_parameters)
535
+
536
+ self.rtol, self.atol = get_tolerances(
537
+ *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)],
538
+ rtol=rtol,
539
+ atol=atol,
540
+ id=id,
541
+ )
542
+ self.equal_nan = equal_nan
543
+ self.check_dtype = check_dtype
544
+
545
+ @property
546
+ def _supported_types(self) -> Tuple[Type, ...]:
547
+ cls = list(self._NUMBER_TYPES)
548
+ if NUMPY_AVAILABLE:
549
+ cls.append(np.number)
550
+ return tuple(cls)
551
+
552
+ def _process_inputs(
553
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
554
+ ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]:
555
+ self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
556
+ actual, expected = (
557
+ self._to_number(number_like, id=id) for number_like in (actual, expected)
558
+ )
559
+ return actual, expected
560
+
561
+ def _to_number(
562
+ self, number_like: Any, *, id: Tuple[Any, ...]
563
+ ) -> Union[int, float, complex]:
564
+ if NUMPY_AVAILABLE and isinstance(number_like, np.number):
565
+ return number_like.item()
566
+ elif isinstance(number_like, self._NUMBER_TYPES):
567
+ return number_like # type: ignore[return-value]
568
+ else:
569
+ raise ErrorMeta(
570
+ TypeError, f"Unknown number type {type(number_like)}.", id=id
571
+ )
572
+
573
+ def compare(self) -> None:
574
+ if self.check_dtype and type(self.actual) is not type(self.expected):
575
+ self._fail(
576
+ AssertionError,
577
+ f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.",
578
+ )
579
+
580
+ if self.actual == self.expected:
581
+ return
582
+
583
+ if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected):
584
+ return
585
+
586
+ abs_diff = abs(self.actual - self.expected)
587
+ tolerance = self.atol + self.rtol * abs(self.expected)
588
+
589
+ if cmath.isfinite(abs_diff) and abs_diff <= tolerance:
590
+ return
591
+
592
+ self._fail(
593
+ AssertionError,
594
+ make_scalar_mismatch_msg(
595
+ self.actual, self.expected, rtol=self.rtol, atol=self.atol
596
+ ),
597
+ )
598
+
599
+ def extra_repr(self) -> Sequence[str]:
600
+ return (
601
+ "rtol",
602
+ "atol",
603
+ "equal_nan",
604
+ "check_dtype",
605
+ )
606
+
607
+
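Per the `NumberPair` mapping above, plain Python scalars are compared under the tolerances of their corresponding dtype (`int` → `int64`, `float` → `float64`, `complex` → `complex128`), and differing Python number types count as a dtype mismatch. An illustrative sketch of how this surfaces through `assert_close`:

```python
import torch
from torch.testing import assert_close

# Python floats are compared with float64 tolerances.
assert_close(0.1 + 0.2, 0.3)

# Different Python number types are a dtype mismatch unless the check is disabled.
assert_close(1, 1.0, check_dtype=False)
# assert_close(1, 1.0)  # would raise: int vs. float
```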
608
+ class TensorLikePair(Pair):
609
+ """Pair for :class:`torch.Tensor`-like inputs.
610
+
611
+ Kwargs:
612
+ allow_subclasses (bool):
613
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
614
+ values based on the type are selected. See :func:assert_close: for details.
615
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
616
+ values based on the type are selected. See :func:assert_close: for details.
617
+ equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
618
+ check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
619
+ :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
620
+ :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
621
+ check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
622
+ check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
623
+ :func:`torch.promote_types`) before being compared.
624
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
625
+ check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
626
+ compared.
627
+ check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
628
+ """
629
+
630
+ def __init__(
631
+ self,
632
+ actual: Any,
633
+ expected: Any,
634
+ *,
635
+ id: Tuple[Any, ...] = (),
636
+ allow_subclasses: bool = True,
637
+ rtol: Optional[float] = None,
638
+ atol: Optional[float] = None,
639
+ equal_nan: bool = False,
640
+ check_device: bool = True,
641
+ check_dtype: bool = True,
642
+ check_layout: bool = True,
643
+ check_stride: bool = False,
644
+ **other_parameters: Any,
645
+ ):
646
+ actual, expected = self._process_inputs(
647
+ actual, expected, id=id, allow_subclasses=allow_subclasses
648
+ )
649
+ super().__init__(actual, expected, id=id, **other_parameters)
650
+
651
+ self.rtol, self.atol = get_tolerances(
652
+ actual, expected, rtol=rtol, atol=atol, id=self.id
653
+ )
654
+ self.equal_nan = equal_nan
655
+ self.check_device = check_device
656
+ self.check_dtype = check_dtype
657
+ self.check_layout = check_layout
658
+ self.check_stride = check_stride
659
+
660
+ def _process_inputs(
661
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool
662
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
663
+ directly_related = isinstance(actual, type(expected)) or isinstance(
664
+ expected, type(actual)
665
+ )
666
+ if not directly_related:
667
+ self._inputs_not_supported()
668
+
669
+ if not allow_subclasses and type(actual) is not type(expected):
670
+ self._inputs_not_supported()
671
+
672
+ actual, expected = (self._to_tensor(input) for input in (actual, expected))
673
+ for tensor in (actual, expected):
674
+ self._check_supported(tensor, id=id)
675
+ return actual, expected
676
+
677
+ def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
678
+ if isinstance(tensor_like, torch.Tensor):
679
+ return tensor_like
680
+
681
+ try:
682
+ return torch.as_tensor(tensor_like)
683
+ except Exception:
684
+ self._inputs_not_supported()
685
+
686
+ def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None:
687
+ if tensor.layout not in {
688
+ torch.strided,
689
+ torch.sparse_coo,
690
+ torch.sparse_csr,
691
+ torch.sparse_csc,
692
+ torch.sparse_bsr,
693
+ torch.sparse_bsc,
694
+ }:
695
+ raise ErrorMeta(
696
+ ValueError, f"Unsupported tensor layout {tensor.layout}", id=id
697
+ )
698
+
699
+ def compare(self) -> None:
700
+ actual, expected = self.actual, self.expected
701
+
702
+ self._compare_attributes(actual, expected)
703
+ if any(input.device.type == "meta" for input in (actual, expected)):
704
+ return
705
+
706
+ actual, expected = self._equalize_attributes(actual, expected)
707
+ self._compare_values(actual, expected)
708
+
709
+ def _compare_attributes(
710
+ self,
711
+ actual: torch.Tensor,
712
+ expected: torch.Tensor,
713
+ ) -> None:
714
+ """Checks if the attributes of two tensors match.
715
+
716
+ Always checks
717
+
718
+ - the :attr:`~torch.Tensor.shape`,
719
+ - whether both inputs are quantized or not,
720
+ - and if they use the same quantization scheme.
721
+
722
+ Checks for
723
+
724
+ - :attr:`~torch.Tensor.layout`,
725
+ - :meth:`~torch.Tensor.stride`,
726
+ - :attr:`~torch.Tensor.device`, and
727
+ - :attr:`~torch.Tensor.dtype`
728
+
729
+ are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
730
+ """
731
+
732
+ def raise_mismatch_error(
733
+ attribute_name: str, actual_value: Any, expected_value: Any
734
+ ) -> NoReturn:
735
+ self._fail(
736
+ AssertionError,
737
+ f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
738
+ )
739
+
740
+ if actual.shape != expected.shape:
741
+ raise_mismatch_error("shape", actual.shape, expected.shape)
742
+
743
+ if actual.is_quantized != expected.is_quantized:
744
+ raise_mismatch_error(
745
+ "is_quantized", actual.is_quantized, expected.is_quantized
746
+ )
747
+ elif actual.is_quantized and actual.qscheme() != expected.qscheme():
748
+ raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())
749
+
750
+ if actual.layout != expected.layout:
751
+ if self.check_layout:
752
+ raise_mismatch_error("layout", actual.layout, expected.layout)
753
+ elif (
754
+ actual.layout == torch.strided
755
+ and self.check_stride
756
+ and actual.stride() != expected.stride()
757
+ ):
758
+ raise_mismatch_error("stride()", actual.stride(), expected.stride())
759
+
760
+ if self.check_device and actual.device != expected.device:
761
+ raise_mismatch_error("device", actual.device, expected.device)
762
+
763
+ if self.check_dtype and actual.dtype != expected.dtype:
764
+ raise_mismatch_error("dtype", actual.dtype, expected.dtype)
765
+
766
+ def _equalize_attributes(
767
+ self, actual: torch.Tensor, expected: torch.Tensor
768
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
769
+ """Equalizes some attributes of two tensors for value comparison.
770
+
771
+ If ``actual`` and ``expected`` are ...
772
+
773
+ - ... not on the same :attr:`~torch.Tensor.device`, they are moved to CPU memory.
774
+ - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
775
+ :func:`torch.promote_types`).
776
+ - ... not of the same ``layout``, they are converted to strided tensors.
777
+
778
+ Args:
779
+ actual (Tensor): Actual tensor.
780
+ expected (Tensor): Expected tensor.
781
+
782
+ Returns:
783
+ (Tuple[Tensor, Tensor]): Equalized tensors.
784
+ """
785
+ # The comparison logic uses operators currently not supported by the MPS backends.
786
+ # See https://github.com/pytorch/pytorch/issues/77144 for details.
787
+ # TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
788
+ if actual.is_mps or expected.is_mps: # type: ignore[attr-defined]
789
+ actual = actual.cpu()
790
+ expected = expected.cpu()
791
+
792
+ if actual.device != expected.device:
793
+ actual = actual.cpu()
794
+ expected = expected.cpu()
795
+
796
+ if actual.dtype != expected.dtype:
797
+ actual_dtype = actual.dtype
798
+ expected_dtype = expected.dtype
799
+ # For uint64, this is not sound in general, which is why promote_types doesn't
800
+ # allow it, but for easy testing, we're unlikely to get confused
801
+ # by large uint64 overflowing into negative int64
802
+ if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]:
803
+ actual_dtype = torch.int64
804
+ if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]:
805
+ expected_dtype = torch.int64
806
+ dtype = torch.promote_types(actual_dtype, expected_dtype)
807
+ actual = actual.to(dtype)
808
+ expected = expected.to(dtype)
809
+
810
+ if actual.layout != expected.layout:
811
+ # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
812
+ actual = actual.to_dense() if actual.layout != torch.strided else actual
813
+ expected = (
814
+ expected.to_dense() if expected.layout != torch.strided else expected
815
+ )
816
+
817
+ return actual, expected
818
+
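`_equalize_attributes` is what makes the `check_device` / `check_dtype` / `check_layout` switches usable: when a check is disabled, the tensors are first moved, promoted, or densified to a common form before the value comparison. A sketch of the user-visible effect (illustrative, based on the code above):

```python
import torch
from torch.testing import assert_close

int_t = torch.arange(3, dtype=torch.int32)
float_t = torch.arange(3, dtype=torch.float64)

# With the dtype check disabled, both sides are promoted via
# torch.promote_types (here to float64) before comparing values.
assert_close(int_t, float_t, check_dtype=False)

# With the layout check disabled, a sparse tensor is densified first.
dense = torch.eye(3)
assert_close(dense.to_sparse(), dense, check_layout=False)
```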
819
+ def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
820
+ if actual.is_quantized:
821
+ compare_fn = self._compare_quantized_values
822
+ elif actual.is_sparse:
823
+ compare_fn = self._compare_sparse_coo_values
824
+ elif actual.layout in {
825
+ torch.sparse_csr,
826
+ torch.sparse_csc,
827
+ torch.sparse_bsr,
828
+ torch.sparse_bsc,
829
+ }:
830
+ compare_fn = self._compare_sparse_compressed_values
831
+ else:
832
+ compare_fn = self._compare_regular_values_close
833
+
834
+ compare_fn(
835
+ actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan
836
+ )
837
+
838
+ def _compare_quantized_values(
839
+ self,
840
+ actual: torch.Tensor,
841
+ expected: torch.Tensor,
842
+ *,
843
+ rtol: float,
844
+ atol: float,
845
+ equal_nan: bool,
846
+ ) -> None:
847
+ """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.
848
+
849
+ .. note::
850
+
851
+ A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
852
+ the individual quantization parameters for closeness and the integer representation for equality can be
853
+ found in https://github.com/pytorch/pytorch/issues/68548.
854
+ """
855
+ return self._compare_regular_values_close(
856
+ actual.dequantize(),
857
+ expected.dequantize(),
858
+ rtol=rtol,
859
+ atol=atol,
860
+ equal_nan=equal_nan,
861
+ identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
862
+ )
863
+
864
+ def _compare_sparse_coo_values(
865
+ self,
866
+ actual: torch.Tensor,
867
+ expected: torch.Tensor,
868
+ *,
869
+ rtol: float,
870
+ atol: float,
871
+ equal_nan: bool,
872
+ ) -> None:
873
+ """Compares sparse COO tensors by comparing
874
+
875
+ - the number of sparse dimensions,
876
+ - the number of non-zero elements (nnz) for equality,
877
+ - the indices for equality, and
878
+ - the values for closeness.
879
+ """
880
+ if actual.sparse_dim() != expected.sparse_dim():
881
+ self._fail(
882
+ AssertionError,
883
+ (
884
+ f"The number of sparse dimensions in sparse COO tensors does not match: "
885
+ f"{actual.sparse_dim()} != {expected.sparse_dim()}"
886
+ ),
887
+ )
888
+
889
+ if actual._nnz() != expected._nnz():
890
+ self._fail(
891
+ AssertionError,
892
+ (
893
+ f"The number of specified values in sparse COO tensors does not match: "
894
+ f"{actual._nnz()} != {expected._nnz()}"
895
+ ),
896
+ )
897
+
898
+ self._compare_regular_values_equal(
899
+ actual._indices(),
900
+ expected._indices(),
901
+ identifier="Sparse COO indices",
902
+ )
903
+ self._compare_regular_values_close(
904
+ actual._values(),
905
+ expected._values(),
906
+ rtol=rtol,
907
+ atol=atol,
908
+ equal_nan=equal_nan,
909
+ identifier="Sparse COO values",
910
+ )
911
+
912
+ def _compare_sparse_compressed_values(
913
+ self,
914
+ actual: torch.Tensor,
915
+ expected: torch.Tensor,
916
+ *,
917
+ rtol: float,
918
+ atol: float,
919
+ equal_nan: bool,
920
+ ) -> None:
921
+ """Compares sparse compressed tensors by comparing
922
+
923
+ - the number of non-zero elements (nnz) for equality,
924
+ - the plain indices for equality,
925
+ - the compressed indices for equality, and
926
+ - the values for closeness.
927
+ """
928
+ format_name, compressed_indices_method, plain_indices_method = {
929
+ torch.sparse_csr: (
930
+ "CSR",
931
+ torch.Tensor.crow_indices,
932
+ torch.Tensor.col_indices,
933
+ ),
934
+ torch.sparse_csc: (
935
+ "CSC",
936
+ torch.Tensor.ccol_indices,
937
+ torch.Tensor.row_indices,
938
+ ),
939
+ torch.sparse_bsr: (
940
+ "BSR",
941
+ torch.Tensor.crow_indices,
942
+ torch.Tensor.col_indices,
943
+ ),
944
+ torch.sparse_bsc: (
945
+ "BSC",
946
+ torch.Tensor.ccol_indices,
947
+ torch.Tensor.row_indices,
948
+ ),
949
+ }[actual.layout]
950
+
951
+ if actual._nnz() != expected._nnz():
952
+ self._fail(
953
+ AssertionError,
954
+ (
955
+ f"The number of specified values in sparse {format_name} tensors does not match: "
956
+ f"{actual._nnz()} != {expected._nnz()}"
957
+ ),
958
+ )
959
+
960
+ # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_
961
+ # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it
962
+ # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will
963
+ # fail.
964
+ actual_compressed_indices = compressed_indices_method(actual)
965
+ expected_compressed_indices = compressed_indices_method(expected)
966
+ indices_dtype = torch.promote_types(
967
+ actual_compressed_indices.dtype, expected_compressed_indices.dtype
968
+ )
969
+
970
+ self._compare_regular_values_equal(
971
+ actual_compressed_indices.to(indices_dtype),
972
+ expected_compressed_indices.to(indices_dtype),
973
+ identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
974
+ )
975
+ self._compare_regular_values_equal(
976
+ plain_indices_method(actual).to(indices_dtype),
977
+ plain_indices_method(expected).to(indices_dtype),
978
+ identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
979
+ )
980
+ self._compare_regular_values_close(
981
+ actual.values(),
982
+ expected.values(),
983
+ rtol=rtol,
984
+ atol=atol,
985
+ equal_nan=equal_nan,
986
+ identifier=f"Sparse {format_name} values",
987
+ )
988
+
989
+ def _compare_regular_values_equal(
990
+ self,
991
+ actual: torch.Tensor,
992
+ expected: torch.Tensor,
993
+ *,
994
+ equal_nan: bool = False,
995
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
996
+ ) -> None:
997
+ """Checks if the values of two tensors are equal."""
998
+ self._compare_regular_values_close(
999
+ actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier
1000
+ )
1001
+
1002
+ def _compare_regular_values_close(
1003
+ self,
1004
+ actual: torch.Tensor,
1005
+ expected: torch.Tensor,
1006
+ *,
1007
+ rtol: float,
1008
+ atol: float,
1009
+ equal_nan: bool,
1010
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
1011
+ ) -> None:
1012
+ """Checks if the values of two tensors are close up to a desired tolerance."""
1013
+ matches = torch.isclose(
1014
+ actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan
1015
+ )
1016
+ if torch.all(matches):
1017
+ return
1018
+
1019
+ if actual.shape == torch.Size([]):
1020
+ msg = make_scalar_mismatch_msg(
1021
+ actual.item(),
1022
+ expected.item(),
1023
+ rtol=rtol,
1024
+ atol=atol,
1025
+ identifier=identifier,
1026
+ )
1027
+ else:
1028
+ msg = make_tensor_mismatch_msg(
1029
+ actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier
1030
+ )
1031
+ self._fail(AssertionError, msg)
1032
+
1033
+ def extra_repr(self) -> Sequence[str]:
1034
+ return (
1035
+ "rtol",
1036
+ "atol",
1037
+ "equal_nan",
1038
+ "check_device",
1039
+ "check_dtype",
1040
+ "check_layout",
1041
+ "check_stride",
1042
+ )
1043
+
1044
+
1045
+ def originate_pairs(
1046
+ actual: Any,
1047
+ expected: Any,
1048
+ *,
1049
+ pair_types: Sequence[Type[Pair]],
1050
+ sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
1051
+ mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
1052
+ id: Tuple[Any, ...] = (),
1053
+ **options: Any,
1054
+ ) -> List[Pair]:
1055
+ """Originates pairs from the individual inputs.
1056
+
1057
+ ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
1058
+ :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them.
1059
+
1060
+ Args:
1061
+ actual (Any): Actual input.
1062
+ expected (Any): Expected input.
1063
+ pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs.
1064
+ First successful pair will be used.
1065
+ sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
1066
+ mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
1067
+ id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message.
1068
+ **options (Any): Options passed to each pair during construction.
1069
+
1070
+ Raises:
1071
+ ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their
1072
+ length does not match.
1073
+ ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of
1074
+ keys do not match.
1075
+ ErrorMeta: With :class`TypeError`, if no pair is able to handle the inputs.
1076
+ ErrorMeta: With any expected exception that happens during the construction of a pair.
1077
+
1078
+ Returns:
1079
+ (List[Pair]): Originated pairs.
1080
+ """
1081
+ # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
1082
+ # "a" == "a"[0][0]...
1083
+ if (
1084
+ isinstance(actual, sequence_types)
1085
+ and not isinstance(actual, str)
1086
+ and isinstance(expected, sequence_types)
1087
+ and not isinstance(expected, str)
1088
+ ):
1089
+ actual_len = len(actual)
1090
+ expected_len = len(expected)
1091
+ if actual_len != expected_len:
1092
+ raise ErrorMeta(
1093
+ AssertionError,
1094
+ f"The length of the sequences mismatch: {actual_len} != {expected_len}",
1095
+ id=id,
1096
+ )
1097
+
1098
+ pairs = []
1099
+ for idx in range(actual_len):
1100
+ pairs.extend(
1101
+ originate_pairs(
1102
+ actual[idx],
1103
+ expected[idx],
1104
+ pair_types=pair_types,
1105
+ sequence_types=sequence_types,
1106
+ mapping_types=mapping_types,
1107
+ id=(*id, idx),
1108
+ **options,
1109
+ )
1110
+ )
1111
+ return pairs
1112
+
1113
+ elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
1114
+ actual_keys = set(actual.keys())
1115
+ expected_keys = set(expected.keys())
1116
+ if actual_keys != expected_keys:
1117
+ missing_keys = expected_keys - actual_keys
1118
+ additional_keys = actual_keys - expected_keys
1119
+ raise ErrorMeta(
1120
+ AssertionError,
1121
+ (
1122
+ f"The keys of the mappings do not match:\n"
1123
+ f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
1124
+ f"Additional keys in the actual mapping: {sorted(additional_keys)}"
1125
+ ),
1126
+ id=id,
1127
+ )
1128
+
1129
+ keys: Collection = actual_keys
1130
+ # Since the origination aborts after the first failure, we try to be deterministic
1131
+ with contextlib.suppress(Exception):
1132
+ keys = sorted(keys)
1133
+
1134
+ pairs = []
1135
+ for key in keys:
1136
+ pairs.extend(
1137
+ originate_pairs(
1138
+ actual[key],
1139
+ expected[key],
1140
+ pair_types=pair_types,
1141
+ sequence_types=sequence_types,
1142
+ mapping_types=mapping_types,
1143
+ id=(*id, key),
1144
+ **options,
1145
+ )
1146
+ )
1147
+ return pairs
1148
+
1149
+ else:
1150
+ for pair_type in pair_types:
1151
+ try:
1152
+ return [pair_type(actual, expected, id=id, **options)]
1153
+ # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the
1154
+ # inputs. Thus, we try the next pair type.
1155
+ except UnsupportedInputs:
1156
+ continue
1157
+ # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This
1158
+ # is only in a separate branch, because the one below would also except it.
1159
+ except ErrorMeta:
1160
+ raise
1161
+ # Raising any other exception during origination is unexpected and will give some extra information about
1162
+ # what happened. If applicable, the exception should be expected in the future.
1163
+ except Exception as error:
1164
+ raise RuntimeError(
1165
+ f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
1166
+ f"{type(actual).__name__}(): {actual}\n\n"
1167
+ f"and\n\n"
1168
+ f"{type(expected).__name__}(): {expected}\n\n"
1169
+ f"resulted in the unexpected exception above. "
1170
+ f"If you are a user and see this message during normal operation "
1171
+ "please file an issue at https://github.com/pytorch/pytorch/issues. "
1172
+ "If you are a developer and working on the comparison functions, "
1173
+ "please except the previous error and raise an expressive `ErrorMeta` instead."
1174
+ ) from error
1175
+ else:
1176
+ raise ErrorMeta(
1177
+ TypeError,
1178
+ f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
1179
+ id=id,
1180
+ )
1181
+
1182
+
1183
+ def not_close_error_metas(
1184
+ actual: Any,
1185
+ expected: Any,
1186
+ *,
1187
+ pair_types: Sequence[Type[Pair]] = (ObjectPair,),
1188
+ sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
1189
+ mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
1190
+ **options: Any,
1191
+ ) -> List[ErrorMeta]:
1192
+ """Asserts that inputs are equal.
1193
+
1194
+ ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
1195
+ :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them.
1196
+
1197
+ Args:
1198
+ actual (Any): Actual input.
1199
+ expected (Any): Expected input.
1200
+ pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the
1201
+ inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`.
1202
+ sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
1203
+ mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
1204
+ **options (Any): Options passed to each pair during construction.
1205
+ """
1206
+ # Hide this function from `pytest`'s traceback
1207
+ __tracebackhide__ = True
1208
+
1209
+ try:
1210
+ pairs = originate_pairs(
1211
+ actual,
1212
+ expected,
1213
+ pair_types=pair_types,
1214
+ sequence_types=sequence_types,
1215
+ mapping_types=mapping_types,
1216
+ **options,
1217
+ )
1218
+ except ErrorMeta as error_meta:
1219
+ # Explicitly raising from None to hide the internal traceback
1220
+ raise error_meta.to_error() from None
1221
+
1222
+ error_metas: List[ErrorMeta] = []
1223
+ for pair in pairs:
1224
+ try:
1225
+ pair.compare()
1226
+ except ErrorMeta as error_meta:
1227
+ error_metas.append(error_meta)
1228
+ # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information
1229
+ # about what happened. If applicable, the exception should be expected in the future.
1230
+ except Exception as error:
1231
+ raise RuntimeError(
1232
+ f"Comparing\n\n"
1233
+ f"{pair}\n\n"
1234
+ f"resulted in the unexpected exception above. "
1235
+ f"If you are a user and see this message during normal operation "
1236
+ "please file an issue at https://github.com/pytorch/pytorch/issues. "
1237
+ "If you are a developer and working on the comparison functions, "
1238
+ "please except the previous error and raise an expressive `ErrorMeta` instead."
1239
+ ) from error
1240
+
1241
+ # [ErrorMeta Cycles]
1242
+ # ErrorMeta objects in this list capture
1243
+ # tracebacks that refer to the frame of this function.
1244
+ # The local variable `error_metas` refers to the error meta
1245
+ # objects, creating a reference cycle. Frames in the traceback
1246
+ # would not get freed until cycle collection, leaking cuda memory in tests.
1247
+ # We break the cycle by removing the reference to the error_meta objects
1248
+ # from this frame as it returns.
1249
+ error_metas = [error_metas]
1250
+ return error_metas.pop()
1251
+
1252
+
1253
+ def assert_close(
1254
+ actual: Any,
1255
+ expected: Any,
1256
+ *,
1257
+ allow_subclasses: bool = True,
1258
+ rtol: Optional[float] = None,
1259
+ atol: Optional[float] = None,
1260
+ equal_nan: bool = False,
1261
+ check_device: bool = True,
1262
+ check_dtype: bool = True,
1263
+ check_layout: bool = True,
1264
+ check_stride: bool = False,
1265
+ msg: Optional[Union[str, Callable[[str], str]]] = None,
1266
+ ):
1267
+ r"""Asserts that ``actual`` and ``expected`` are close.
1268
+
1269
+ If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if
1270
+
1271
+ .. math::
1272
+
1273
+ \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert
1274
+
1275
+ Non-finite values (``-inf`` and ``inf``) are considered close if and only if they are equal. ``NaN``'s are
1276
+ only considered equal to each other if ``equal_nan`` is ``True``.
1277
+
1278
+ In addition, they are only considered close if they have the same
1279
+
1280
+ - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
1281
+ - ``dtype`` (if ``check_dtype`` is ``True``),
1282
+ - ``layout`` (if ``check_layout`` is ``True``), and
1283
+ - stride (if ``check_stride`` is ``True``).
1284
+
1285
+ If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.
1286
+
1287
+ If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
1288
+ checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
1289
+ or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
1290
+ are always checked for equality whereas the values are checked for closeness according to the definition above.
1291
+
1292
+ If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
1293
+ :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
1294
+ definition above.
1295
+
1296
+ ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
1297
+ :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
1298
+ have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
1299
+ or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
1300
+ their elements are considered close according to the above definition.
1301
+
1302
+ .. note::
1303
+
1304
+ Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
1305
+ :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
1306
+ Python scalars of different types can be checked, but require ``check_dtype=False``.
1307
+
1308
+ Args:
1309
+ actual (Any): Actual input.
1310
+ expected (Any): Expected input.
1311
+ allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
1312
+ are allowed. Otherwise type equality is required.
1313
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
1314
+ values based on the :attr:`~torch.Tensor.dtype` are selected from the table below.
1315
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
1316
+ values based on the :attr:`~torch.Tensor.dtype` are selected from the table below.
1317
+ equal_nan (bool): If ``True``, two ``NaN`` values will be considered equal.
1318
+ check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
1319
+ :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
1320
+ :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
1321
+ check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
1322
+ check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
1323
+ :func:`torch.promote_types`) before being compared.
1324
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
1325
+ check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
1326
+ compared.
1327
+ check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
1328
+ msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
1329
+ the comparison. Can also be passed as a callable, in which case it will be called with the generated message and
1330
+ should return the new message.
1331
+
1332
+ Raises:
1333
+ ValueError: If no :class:`torch.Tensor` can be constructed from an input.
1334
+ ValueError: If only ``rtol`` or ``atol`` is specified.
1335
+ AssertionError: If corresponding inputs are not Python scalars and are not directly related.
1336
+ AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
1337
+ different types.
1338
+ AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match.
1339
+ AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their sets of keys do not match.
1340
+ AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
1341
+ AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
1342
+ :attr:`~torch.Tensor.layout`.
1343
+ AssertionError: If only one of corresponding tensors is quantized.
1344
+ AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
1345
+ AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
1346
+ :attr:`~torch.Tensor.device`.
1347
+ AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
1348
+ AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
1349
+ AssertionError: If the values of corresponding tensors are not close according to the definition above.
1350
+
1351
+ The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
1352
+ ``dtype``'s, the maximum of both tolerances is used.
1353
+
1354
+ +---------------------------+------------+----------+
1355
+ | ``dtype`` | ``rtol`` | ``atol`` |
1356
+ +===========================+============+==========+
1357
+ | :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` |
1358
+ +---------------------------+------------+----------+
1359
+ | :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` |
1360
+ +---------------------------+------------+----------+
1361
+ | :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` |
1362
+ +---------------------------+------------+----------+
1363
+ | :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` |
1364
+ +---------------------------+------------+----------+
1365
+ | :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` |
1366
+ +---------------------------+------------+----------+
1367
+ | :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` |
1368
+ +---------------------------+------------+----------+
1369
+ | :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` |
1370
+ +---------------------------+------------+----------+
1371
+ | :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` |
1372
+ +---------------------------+------------+----------+
1373
+ | :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` |
1374
+ +---------------------------+------------+----------+
1375
+ | :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` |
1376
+ +---------------------------+------------+----------+
1377
+ | :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` |
1378
+ +---------------------------+------------+----------+
1379
+ | :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` |
1380
+ +---------------------------+------------+----------+
1381
+ | other | ``0.0`` | ``0.0`` |
1382
+ +---------------------------+------------+----------+
1383
+
1384
+ .. note::
1385
+
1386
+ :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
1387
+ to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
1388
+ define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:
1389
+
1390
+ >>> import functools
1391
+ >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
1392
+ >>> assert_equal(1e-9, 1e-10)
1393
+ Traceback (most recent call last):
1394
+ ...
1395
+ AssertionError: Scalars are not equal!
1396
+ <BLANKLINE>
1397
+ Expected 1e-10 but got 1e-09.
1398
+ Absolute difference: 9.000000000000001e-10
1399
+ Relative difference: 9.0
1400
+
1401
+ Examples:
1402
+ >>> # tensor to tensor comparison
1403
+ >>> expected = torch.tensor([1e0, 1e-1, 1e-2])
1404
+ >>> actual = torch.acos(torch.cos(expected))
1405
+ >>> torch.testing.assert_close(actual, expected)
1406
+
1407
+ >>> # scalar to scalar comparison
1408
+ >>> import math
1409
+ >>> expected = math.sqrt(2.0)
1410
+ >>> actual = 2.0 / math.sqrt(2.0)
1411
+ >>> torch.testing.assert_close(actual, expected)
1412
+
1413
+ >>> # numpy array to numpy array comparison
1414
+ >>> import numpy as np
1415
+ >>> expected = np.array([1e0, 1e-1, 1e-2])
1416
+ >>> actual = np.arccos(np.cos(expected))
1417
+ >>> torch.testing.assert_close(actual, expected)
1418
+
1419
+ >>> # sequence to sequence comparison
1420
+ >>> import numpy as np
1421
+ >>> # The types of the sequences do not have to match. They only have to have the same
1422
+ >>> # length and their elements have to match.
1423
+ >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
1424
+ >>> actual = tuple(expected)
1425
+ >>> torch.testing.assert_close(actual, expected)
1426
+
1427
+ >>> # mapping to mapping comparison
1428
+ >>> from collections import OrderedDict
1429
+ >>> import numpy as np
1430
+ >>> foo = torch.tensor(1.0)
1431
+ >>> bar = 2.0
1432
+ >>> baz = np.array(3.0)
1433
+ >>> # The types and a possible ordering of mappings do not have to match. They only
1434
+ >>> # have to have the same set of keys and their elements have to match.
1435
+ >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
1436
+ >>> actual = {"baz": baz, "bar": bar, "foo": foo}
1437
+ >>> torch.testing.assert_close(actual, expected)
1438
+
1439
+ >>> expected = torch.tensor([1.0, 2.0, 3.0])
1440
+ >>> actual = expected.clone()
1441
+ >>> # By default, directly related instances can be compared
1442
+ >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
1443
+ >>> # This check can be made more strict with allow_subclasses=False
1444
+ >>> torch.testing.assert_close(
1445
+ ... torch.nn.Parameter(actual), expected, allow_subclasses=False
1446
+ ... )
1447
+ Traceback (most recent call last):
1448
+ ...
1449
+ TypeError: No comparison pair was able to handle inputs of type
1450
+ <class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
1451
+ >>> # If the inputs are not directly related, they are never considered close
1452
+ >>> torch.testing.assert_close(actual.numpy(), expected)
1453
+ Traceback (most recent call last):
1454
+ ...
1455
+ TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'>
1456
+ and <class 'torch.Tensor'>.
1457
+ >>> # Exceptions to these rules are Python scalars. They can be checked regardless of
1458
+ >>> # their type if check_dtype=False.
1459
+ >>> torch.testing.assert_close(1.0, 1, check_dtype=False)
1460
+
1461
+ >>> # NaN != NaN by default.
1462
+ >>> expected = torch.tensor(float("Nan"))
1463
+ >>> actual = expected.clone()
1464
+ >>> torch.testing.assert_close(actual, expected)
1465
+ Traceback (most recent call last):
1466
+ ...
1467
+ AssertionError: Scalars are not close!
1468
+ <BLANKLINE>
1469
+ Expected nan but got nan.
1470
+ Absolute difference: nan (up to 1e-05 allowed)
1471
+ Relative difference: nan (up to 1.3e-06 allowed)
1472
+ >>> torch.testing.assert_close(actual, expected, equal_nan=True)
1473
+
1474
+ >>> expected = torch.tensor([1.0, 2.0, 3.0])
1475
+ >>> actual = torch.tensor([1.0, 4.0, 5.0])
1476
+ >>> # The default error message can be overwritten.
1477
+ >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!")
1478
+ Traceback (most recent call last):
1479
+ ...
1480
+ AssertionError: Argh, the tensors are not close!
1481
+ >>> # If msg is a callable, it can be used to augment the generated message with
1482
+ >>> # extra information
1483
+ >>> torch.testing.assert_close(
1484
+ ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
1485
+ ... )
1486
+ Traceback (most recent call last):
1487
+ ...
1488
+ AssertionError: Header
1489
+ <BLANKLINE>
1490
+ Tensor-likes are not close!
1491
+ <BLANKLINE>
1492
+ Mismatched elements: 2 / 3 (66.7%)
1493
+ Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
1494
+ Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
1495
+ <BLANKLINE>
1496
+ Footer
1497
+ """
1498
+ # Hide this function from `pytest`'s traceback
1499
+ __tracebackhide__ = True
1500
+
1501
+ error_metas = not_close_error_metas(
1502
+ actual,
1503
+ expected,
1504
+ pair_types=(
1505
+ NonePair,
1506
+ BooleanPair,
1507
+ NumberPair,
1508
+ TensorLikePair,
1509
+ ),
1510
+ allow_subclasses=allow_subclasses,
1511
+ rtol=rtol,
1512
+ atol=atol,
1513
+ equal_nan=equal_nan,
1514
+ check_device=check_device,
1515
+ check_dtype=check_dtype,
1516
+ check_layout=check_layout,
1517
+ check_stride=check_stride,
1518
+ msg=msg,
1519
+ )
1520
+
1521
+ if error_metas:
1522
+ # TODO: compose all metas into one AssertionError
1523
+ raise error_metas[0].to_error(msg)
1524
+
1525
+
1526
+ def assert_allclose(
1527
+ actual: Any,
1528
+ expected: Any,
1529
+ rtol: Optional[float] = None,
1530
+ atol: Optional[float] = None,
1531
+ equal_nan: bool = True,
1532
+ msg: str = "",
1533
+ ) -> None:
1534
+ """
1535
+ .. warning::
1536
+
1537
+ :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release.
1538
+ Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions
1539
+ `here <https://github.com/pytorch/pytorch/issues/61844>`_.
1540
+ """
1541
+ warnings.warn(
1542
+ "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
1543
+ "Please use `torch.testing.assert_close()` instead. "
1544
+ "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
1545
+ FutureWarning,
1546
+ stacklevel=2,
1547
+ )
1548
+
1549
+ if not isinstance(actual, torch.Tensor):
1550
+ actual = torch.tensor(actual)
1551
+ if not isinstance(expected, torch.Tensor):
1552
+ expected = torch.tensor(expected, dtype=actual.dtype)
1553
+
1554
+ if rtol is None and atol is None:
1555
+ rtol, atol = default_tolerances(
1556
+ actual,
1557
+ expected,
1558
+ dtype_precisions={
1559
+ torch.float16: (1e-3, 1e-3),
1560
+ torch.float32: (1e-4, 1e-5),
1561
+ torch.float64: (1e-5, 1e-8),
1562
+ },
1563
+ )
1564
+
1565
+ torch.testing.assert_close(
1566
+ actual,
1567
+ expected,
1568
+ rtol=rtol,
1569
+ atol=atol,
1570
+ equal_nan=equal_nan,
1571
+ check_device=True,
1572
+ check_dtype=False,
1573
+ check_stride=False,
1574
+ msg=msg or None,
1575
+ )
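As a hedged migration sketch for the deprecation above: the tolerances below are the legacy float32 values hard-coded in this wrapper's ``dtype_precisions`` table, and the keyword choices mirror how the wrapper forwards to ``assert_close``; adjust them for other dtypes per that table.

import torch

a = torch.randn(4, dtype=torch.float32)
b = a + 1e-6

# Deprecated spelling (emits the FutureWarning above):
#   torch.testing.assert_allclose(a, b)

# Equivalent explicit call using the legacy float32 tolerances from the wrapper above.
torch.testing.assert_close(
    a,
    b,
    rtol=1e-4,
    atol=1e-5,
    equal_nan=True,
    check_dtype=False,
)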
venv/lib/python3.10/site-packages/torch/testing/_creation.py ADDED
@@ -0,0 +1,267 @@
1
+ """
2
+ This module contains tensor creation utilities.
3
+ """
4
+
5
+ import collections.abc
6
+ import math
7
+ import warnings
8
+ from typing import cast, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+
12
+ _INTEGRAL_TYPES = [
13
+ torch.uint8,
14
+ torch.int8,
15
+ torch.int16,
16
+ torch.int32,
17
+ torch.int64,
18
+ torch.uint16,
19
+ torch.uint32,
20
+ torch.uint64,
21
+ ]
22
+ _FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
23
+ _FLOATING_8BIT_TYPES = [
24
+ torch.float8_e4m3fn,
25
+ torch.float8_e5m2,
26
+ torch.float8_e4m3fnuz,
27
+ torch.float8_e5m2fnuz,
28
+ ]
29
+ _COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128]
30
+ _BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES]
31
+ _FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES]
32
+
33
+
34
+ def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor:
35
+ # uniform_ requires to-from <= std::numeric_limits<scalar_t>::max()
36
+ # Work around this by scaling the range before and after the PRNG
37
+ if high - low >= torch.finfo(t.dtype).max:
38
+ return t.uniform_(low / 2, high / 2).mul_(2)
39
+ else:
40
+ return t.uniform_(low, high)
41
+
42
+
43
+ def make_tensor(
44
+ *shape: Union[int, torch.Size, List[int], Tuple[int, ...]],
45
+ dtype: torch.dtype,
46
+ device: Union[str, torch.device],
47
+ low: Optional[float] = None,
48
+ high: Optional[float] = None,
49
+ requires_grad: bool = False,
50
+ noncontiguous: bool = False,
51
+ exclude_zero: bool = False,
52
+ memory_format: Optional[torch.memory_format] = None,
53
+ ) -> torch.Tensor:
54
+ r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with
55
+ values uniformly drawn from ``[low, high)``.
56
+
57
+ If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable
58
+ finite values then they are clamped to the lowest or highest representable finite value, respectively.
59
+ If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`,
60
+ which depend on :attr:`dtype`.
61
+
62
+ +---------------------------+------------+----------+
63
+ | ``dtype`` | ``low`` | ``high`` |
64
+ +===========================+============+==========+
65
+ | boolean type | ``0`` | ``2`` |
66
+ +---------------------------+------------+----------+
67
+ | unsigned integral type | ``0`` | ``10`` |
68
+ +---------------------------+------------+----------+
69
+ | signed integral types | ``-9`` | ``10`` |
70
+ +---------------------------+------------+----------+
71
+ | floating types | ``-9`` | ``9`` |
72
+ +---------------------------+------------+----------+
73
+ | complex types | ``-9`` | ``9`` |
74
+ +---------------------------+------------+----------+
75
+
76
+ Args:
77
+ shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor.
78
+ dtype (:class:`torch.dtype`): The data type of the returned tensor.
79
+ device (Union[str, torch.device]): The device of the returned tensor.
80
+ low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is
81
+ clamped to the least representable finite value of the given dtype. When ``None`` (default),
82
+ this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
83
+ high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is
84
+ clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value
85
+ is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
86
+
87
+ .. deprecated:: 2.1
88
+
89
+ Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated
90
+ since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead.
91
+
92
+ requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``.
93
+ noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is
94
+ ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``.
95
+ exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value
96
+ depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating
97
+ point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the
98
+ :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number
99
+ whose real and imaginary parts are both the smallest positive normal number representable by the complex
100
+ type. Default ``False``.
101
+ memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive
102
+ with ``noncontiguous``.
103
+
104
+ Raises:
105
+ ValueError: If ``requires_grad=True`` is passed for a boolean or integral `dtype`
106
+ ValueError: If ``low >= high``.
107
+ ValueError: If either :attr:`low` or :attr:`high` is ``nan``.
108
+ ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed.
109
+ TypeError: If :attr:`dtype` isn't supported by this function.
110
+
111
+ Examples:
112
+ >>> # xdoctest: +SKIP
113
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
114
+ >>> from torch.testing import make_tensor
115
+ >>> # Creates a float tensor with values in [-1, 1)
116
+ >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1)
117
+ >>> # xdoctest: +SKIP
118
+ tensor([ 0.1205, 0.2282, -0.6380])
119
+ >>> # Creates a bool tensor on CUDA
120
+ >>> make_tensor((2, 2), device='cuda', dtype=torch.bool)
121
+ tensor([[False, False],
122
+ [False, True]], device='cuda:0')
123
+ """
124
+
125
+ def modify_low_high(
126
+ low: Optional[float],
127
+ high: Optional[float],
128
+ *,
129
+ lowest_inclusive: float,
130
+ highest_exclusive: float,
131
+ default_low: float,
132
+ default_high: float,
133
+ ) -> Tuple[float, float]:
134
+ """
135
+ Modifies (and raises ValueError when appropriate) the low and high values given by the user,
136
+ if required.
137
+ """
138
+
139
+ def clamp(a: float, l: float, h: float) -> float:
140
+ return min(max(a, l), h)
141
+
142
+ low = low if low is not None else default_low
143
+ high = high if high is not None else default_high
144
+
145
+ if any(isinstance(value, float) and math.isnan(value) for value in [low, high]):
146
+ raise ValueError(
147
+ f"`low` and `high` cannot be NaN, but got {low=} and {high=}"
148
+ )
149
+ elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES:
150
+ warnings.warn(
151
+ "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types "
152
+ "is deprecated since 2.1 and will be removed in 2.3. "
153
+ "Use torch.full(...) instead.",
154
+ FutureWarning,
155
+ )
156
+ elif low >= high:
157
+ raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}")
158
+ elif high < lowest_inclusive or low >= highest_exclusive:
159
+ raise ValueError(
160
+ f"The value interval specified by `low` and `high` is [{low}, {high}), "
161
+ f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})"
162
+ )
163
+
164
+ low = clamp(low, lowest_inclusive, highest_exclusive)
165
+ high = clamp(high, lowest_inclusive, highest_exclusive)
166
+
167
+ if dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
168
+ # 1. `low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval
169
+ # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of
170
+ # `torch.randint` is exclusive, and thus we need to ceil here as well.
171
+ return math.ceil(low), math.ceil(high)
172
+
173
+ return low, high
174
+
175
+ if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence):
176
+ shape = shape[0] # type: ignore[assignment]
177
+ shape = cast(Tuple[int, ...], tuple(shape))
178
+
179
+ if noncontiguous and memory_format is not None:
180
+ raise ValueError(
181
+ f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, "
182
+ f"but got {noncontiguous=} and {memory_format=}"
183
+ )
184
+
185
+ if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
186
+ raise ValueError(
187
+ f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}"
188
+ )
189
+
190
+ if dtype is torch.bool:
191
+ low, high = cast(
192
+ Tuple[int, int],
193
+ modify_low_high(
194
+ low,
195
+ high,
196
+ lowest_inclusive=0,
197
+ highest_exclusive=2,
198
+ default_low=0,
199
+ default_high=2,
200
+ ),
201
+ )
202
+ result = torch.randint(low, high, shape, device=device, dtype=dtype)
203
+ elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
204
+ low, high = cast(
205
+ Tuple[int, int],
206
+ modify_low_high(
207
+ low,
208
+ high,
209
+ lowest_inclusive=torch.iinfo(dtype).min,
210
+ highest_exclusive=torch.iinfo(dtype).max
211
+ # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint`
212
+ # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot
213
+ # sample 2**63 - 1, i.e. the maximum value of `torch.int64` and we need to account for that here.
214
+ + (1 if dtype is not torch.int64 else 0),
215
+ # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`,
216
+ # _after_ we use the default value, we don't need to special case it here
217
+ default_low=-9,
218
+ default_high=10,
219
+ ),
220
+ )
221
+ result = torch.randint(low, high, shape, device=device, dtype=dtype)
222
+ elif dtype in _FLOATING_OR_COMPLEX_TYPES:
223
+ low, high = modify_low_high(
224
+ low,
225
+ high,
226
+ lowest_inclusive=torch.finfo(dtype).min,
227
+ highest_exclusive=torch.finfo(dtype).max,
228
+ default_low=-9,
229
+ default_high=9,
230
+ )
231
+ result = torch.empty(shape, device=device, dtype=dtype)
232
+ _uniform_random_(
233
+ torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high
234
+ )
235
+ elif dtype in _FLOATING_8BIT_TYPES:
236
+ low, high = modify_low_high(
237
+ low,
238
+ high,
239
+ lowest_inclusive=torch.finfo(dtype).min,
240
+ highest_exclusive=torch.finfo(dtype).max,
241
+ default_low=-9,
242
+ default_high=9,
243
+ )
244
+ result = torch.empty(shape, device=device, dtype=torch.float32)
245
+ _uniform_random_(result, low, high)
246
+ result = result.to(dtype)
247
+ else:
248
+ raise TypeError(
249
+ f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()."
250
+ " To request support, file an issue at: https://github.com/pytorch/pytorch/issues"
251
+ )
252
+
253
+ if noncontiguous and result.numel() > 1:
254
+ result = torch.repeat_interleave(result, 2, dim=-1)
255
+ result = result[..., ::2]
256
+ elif memory_format is not None:
257
+ result = result.clone(memory_format=memory_format)
258
+
259
+ if exclude_zero:
260
+ result[result == 0] = (
261
+ 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny
262
+ )
263
+
264
+ if dtype in _FLOATING_OR_COMPLEX_TYPES:
265
+ result.requires_grad = requires_grad
266
+
267
+ return result
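A short usage sketch for make_tensor() as defined above; the fill values are random, so only the structural properties documented in the function are asserted.

import torch
from torch.testing import make_tensor

# Floating tensor drawn from the default [-9, 9) range, made noncontiguous
# via the repeat_interleave/slicing trick implemented above.
t = make_tensor((4, 4), dtype=torch.float32, device="cpu", noncontiguous=True)
assert not t.is_contiguous()

# Integral tensor in [0, 3) with zeros replaced by one, per exclude_zero.
i = make_tensor((8,), dtype=torch.int64, device="cpu", low=0, high=3, exclude_zero=True)
assert bool((i != 0).all())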
venv/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py ADDED
@@ -0,0 +1,270 @@
1
+ # mypy: ignore-errors
2
+
3
+ r"""This file is allowed to initialize CUDA context when imported."""
4
+
5
+ import functools
6
+ import torch
7
+ import torch.cuda
8
+ from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS
9
+ import inspect
10
+ import contextlib
11
+ import os
12
+
13
+
14
+ CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()
15
+
16
+
17
+ TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
18
+ CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None
19
+ # note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
20
+ if TEST_WITH_ROCM:
21
+ TEST_CUDNN = LazyVal(lambda: TEST_CUDA)
22
+ else:
23
+ TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE)))
24
+
25
+ TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0)
26
+
27
+ SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3))
28
+ SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0))
29
+ SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0))
30
+ SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5))
31
+ SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
32
+ SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))
33
+
34
+ def evaluate_gfx_arch_exact(matching_arch):
35
+ if not torch.cuda.is_available():
36
+ return False
37
+ gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName
38
+ arch = os.environ.get('PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', gcn_arch_name)
39
+ return arch == matching_arch
40
+
41
+ GFX90A_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-'))
42
+ GFX942_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-'))
43
+
44
+ def evaluate_platform_supports_flash_attention():
45
+ if TEST_WITH_ROCM:
46
+ return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')
47
+ if TEST_CUDA:
48
+ return not IS_WINDOWS and SM80OrLater
49
+ return False
50
+
51
+ PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention())
52
+ PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM)
53
+ # TODO(eqy): gate this against a cuDNN version
54
+ PLATFORM_SUPPORTS_CUDNN_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM and
55
+ torch.backends.cuda.cudnn_sdp_enabled())
56
+ # This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate
57
+ PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION)
58
+
59
+ PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM
60
+
61
+ if TEST_NUMBA:
62
+ try:
63
+ import numba.cuda
64
+ TEST_NUMBA_CUDA = numba.cuda.is_available()
65
+ except Exception as e:
66
+ TEST_NUMBA_CUDA = False
67
+ TEST_NUMBA = False
68
+ else:
69
+ TEST_NUMBA_CUDA = False
70
+
71
+ # Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
72
+ # RNG have been initialized.
73
+ __cuda_ctx_rng_initialized = False
74
+
75
+
76
+ # after this call, CUDA context and RNG must have been initialized on each GPU
77
+ def initialize_cuda_context_rng():
78
+ global __cuda_ctx_rng_initialized
79
+ assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
80
+ if not __cuda_ctx_rng_initialized:
81
+ # initialize cuda context and rng for memory tests
82
+ for i in range(torch.cuda.device_count()):
83
+ torch.randn(1, device=f"cuda:{i}")
84
+ __cuda_ctx_rng_initialized = True
85
+
86
+
87
+ # Test whether the hardware TF32 math mode is enabled. It is enabled only on:
88
+ # - CUDA >= 11
89
+ # - arch >= Ampere
90
+ def tf32_is_not_fp32():
91
+ if not torch.cuda.is_available() or torch.version.cuda is None:
92
+ return False
93
+ if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
94
+ return False
95
+ if int(torch.version.cuda.split('.')[0]) < 11:
96
+ return False
97
+ return True
98
+
99
+
100
+ @contextlib.contextmanager
101
+ def tf32_off():
102
+ old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
103
+ try:
104
+ torch.backends.cuda.matmul.allow_tf32 = False
105
+ with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
106
+ yield
107
+ finally:
108
+ torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
109
+
110
+
111
+ @contextlib.contextmanager
112
+ def tf32_on(self, tf32_precision=1e-5):
113
+ old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
114
+ old_precision = self.precision
115
+ try:
116
+ torch.backends.cuda.matmul.allow_tf32 = True
117
+ self.precision = tf32_precision
118
+ with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
119
+ yield
120
+ finally:
121
+ torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
122
+ self.precision = old_precision
123
+
124
+
125
+ # This decorator wraps a test so that it runs twice: once with
126
+ # allow_tf32=True and once with allow_tf32=False. When running with
127
+ # allow_tf32=True, it will use reduced precision as specified by the
128
+ # argument. For example:
129
+ # @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
130
+ # @tf32_on_and_off(0.005)
131
+ # def test_matmul(self, device, dtype):
132
+ # a = ...; b = ...;
133
+ # c = torch.matmul(a, b)
134
+ # self.assertEqual(c, expected)
135
+ # In the above example, when testing torch.float32 and torch.complex64 on CUDA
136
+ # on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be run both with
137
+ # TF32 mode on and with TF32 mode off; when TF32 mode is on, the assertEqual will use reduced
138
+ # precision to check values.
139
+ #
140
+ # This decorator can be used for function with or without device/dtype, such as
141
+ # @tf32_on_and_off(0.005)
142
+ # def test_my_op(self)
143
+ # @tf32_on_and_off(0.005)
144
+ # def test_my_op(self, device)
145
+ # @tf32_on_and_off(0.005)
146
+ # def test_my_op(self, device, dtype)
147
+ # @tf32_on_and_off(0.005)
148
+ # def test_my_op(self, dtype)
149
+ # if neither device nor dtype is specified, it will check if the system has ampere device
150
+ # if device is specified, it will check if device is cuda
151
+ # if dtype is specified, it will check if dtype is float32 or complex64
152
+ # tf32 and fp32 are different only when all the three checks pass
153
+ def tf32_on_and_off(tf32_precision=1e-5):
154
+ def with_tf32_disabled(self, function_call):
155
+ with tf32_off():
156
+ function_call()
157
+
158
+ def with_tf32_enabled(self, function_call):
159
+ with tf32_on(self, tf32_precision):
160
+ function_call()
161
+
162
+ def wrapper(f):
163
+ params = inspect.signature(f).parameters
164
+ arg_names = tuple(params.keys())
165
+
166
+ @functools.wraps(f)
167
+ def wrapped(*args, **kwargs):
168
+ for k, v in zip(arg_names, args):
169
+ kwargs[k] = v
170
+ cond = tf32_is_not_fp32()
171
+ if 'device' in kwargs:
172
+ cond = cond and (torch.device(kwargs['device']).type == 'cuda')
173
+ if 'dtype' in kwargs:
174
+ cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
175
+ if cond:
176
+ with_tf32_disabled(kwargs['self'], lambda: f(**kwargs))
177
+ with_tf32_enabled(kwargs['self'], lambda: f(**kwargs))
178
+ else:
179
+ f(**kwargs)
180
+
181
+ return wrapped
182
+ return wrapper
183
+
184
+
185
+ # This is a wrapper that wraps a test to run it with TF32 turned off.
186
+ # This wrapper is designed to be used when a test uses matmul or convolutions
187
+ # but the purpose of that test is not testing matmul or convolutions.
188
+ # Disabling TF32 will enforce torch.float tensors to be always computed
189
+ # at full precision.
190
+ def with_tf32_off(f):
191
+ @functools.wraps(f)
192
+ def wrapped(*args, **kwargs):
193
+ with tf32_off():
194
+ return f(*args, **kwargs)
195
+
196
+ return wrapped
197
+
198
+ def _get_magma_version():
199
+ if 'Magma' not in torch.__config__.show():
200
+ return (0, 0)
201
+ position = torch.__config__.show().find('Magma ')
202
+ version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0]
203
+ return tuple(int(x) for x in version_str.split("."))
204
+
205
+ def _get_torch_cuda_version():
206
+ if torch.version.cuda is None:
207
+ return (0, 0)
208
+ cuda_version = str(torch.version.cuda)
209
+ return tuple(int(x) for x in cuda_version.split("."))
210
+
211
+ def _get_torch_rocm_version():
212
+ if not TEST_WITH_ROCM:
213
+ return (0, 0)
214
+ rocm_version = str(torch.version.hip)
215
+ rocm_version = rocm_version.split("-")[0] # ignore git sha
216
+ return tuple(int(x) for x in rocm_version.split("."))
217
+
218
+ def _check_cusparse_generic_available():
219
+ return not TEST_WITH_ROCM
220
+
221
+ def _check_hipsparse_generic_available():
222
+ if not TEST_WITH_ROCM:
223
+ return False
224
+
225
+ rocm_version = str(torch.version.hip)
226
+ rocm_version = rocm_version.split("-")[0] # ignore git sha
227
+ rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
228
+ return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1))
229
+
230
+
231
+ TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available()
232
+ TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available()
233
+
234
+ # Shared by test_torch.py and test_multigpu.py
235
+ def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
236
+ # Create a module+optimizer that will use scaling, and a control module+optimizer
237
+ # that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
238
+ mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
239
+ mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
240
+ with torch.no_grad():
241
+ for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
242
+ s.copy_(c)
243
+
244
+ kwargs = {"lr": 1.0}
245
+ if optimizer_kwargs is not None:
246
+ kwargs.update(optimizer_kwargs)
247
+ opt_control = optimizer_ctor(mod_control.parameters(), **kwargs)
248
+ opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs)
249
+
250
+ return mod_control, mod_scaling, opt_control, opt_scaling
251
+
252
+ # Shared by test_torch.py, test_cuda.py and test_multigpu.py
253
+ def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
254
+ data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
255
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
256
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
257
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
258
+
259
+ loss_fn = torch.nn.MSELoss().to(device)
260
+
261
+ skip_iter = 2
262
+
263
+ return _create_scaling_models_optimizers(
264
+ device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs,
265
+ ) + (data, loss_fn, skip_iter)
266
+
267
+
268
+ # Importing this module should NOT eagerly initialize CUDA
269
+ if not CUDA_ALREADY_INITIALIZED_ON_IMPORT:
270
+ assert not torch.cuda.is_initialized()
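A minimal sketch of the standalone TF32 helpers above, assuming a CUDA device is present. The @tf32_on_and_off decorator additionally relies on the device-type test harness (a ``self`` with a ``precision`` attribute), so only the plain decorator and the context manager are exercised here.

import torch
from torch.testing._internal.common_cuda import tf32_off, with_tf32_off

@with_tf32_off
def reference_matmul(a, b):
    # TF32 is disabled for the duration of this call, so float32 matmuls
    # run at full precision.
    return a @ b

if torch.cuda.is_available():
    a = torch.randn(64, 64, device="cuda")
    b = torch.randn(64, 64, device="cuda")
    with tf32_off():
        full_precision = a @ b  # same effect via the context manager
    torch.testing.assert_close(full_precision, reference_matmul(a, b))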
venv/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py ADDED
@@ -0,0 +1,1525 @@
1
+ # mypy: ignore-errors
2
+
3
+ import copy
4
+ import gc
5
+ import inspect
6
+ import runpy
7
+ import sys
8
+ import threading
9
+ from collections import namedtuple
10
+ from enum import Enum
11
+ from functools import wraps, partial
12
+ from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set
13
+ import unittest
14
+ import os
15
+ import torch
16
+ from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \
17
+ skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \
18
+ IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, \
19
+ _TestParametrizer, compose_parametrize_fns, dtype_name, \
20
+ TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \
21
+ get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE, \
22
+ TEST_WITH_TORCHINDUCTOR
23
+ from torch.testing._internal.common_cuda import _get_torch_cuda_version, \
24
+ TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version
25
+ from torch.testing._internal.common_dtype import get_all_dtypes
26
+
27
+ try:
28
+ import psutil # type: ignore[import]
29
+ HAS_PSUTIL = True
30
+ except ImportError:
31
+ HAS_PSUTIL = False
32
+
33
+ # Note [Writing Test Templates]
34
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
35
+ #
36
+ # This note was written shortly after the PyTorch 1.9 release.
37
+ # If you notice it's out-of-date or think it could be improved then please
38
+ # file an issue.
39
+ #
40
+ # PyTorch has its own framework for instantiating test templates. That is, for
41
+ # taking test classes that look similar to unittest or pytest
42
+ # compatible test classes and optionally doing the following:
43
+ #
44
+ # - instantiating a version of the test class for each available device type
45
+ # (often the CPU, CUDA, and META device types)
46
+ # - further instantiating a version of each test that's always specialized
47
+ # on the test class's device type, and optionally specialized further
48
+ # on datatypes or operators
49
+ #
50
+ # This functionality is similar to pytest's parametrize functionality
51
+ # (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable
52
+ # additional logic that specializes the instantiated test classes for their
53
+ # device types (see CPUTestBase and CUDATestBase below), supports a variety
54
+ # of composable decorators that allow for test filtering and setting
55
+ # tolerances, and allows tests parametrized by operators to instantiate
56
+ # only the subset of device type x dtype that operator supports.
57
+ #
58
+ # This framework was built to make it easier to write tests that run on
59
+ # multiple device types, multiple datatypes (dtypes), and for multiple
60
+ # operators. It's also useful for controlling which tests are run. For example,
61
+ # only tests that use a CUDA device can be run on platforms with CUDA.
62
+ # Let's dive in with an example to get an idea for how it works:
63
+ #
64
+ # --------------------------------------------------------
65
+ # A template class (looks like a regular unittest TestCase)
66
+ # class TestClassFoo(TestCase):
67
+ #
68
+ # # A template test that can be specialized with a device
69
+ # # NOTE: this test case is not runnable by unittest or pytest because it
70
+ # # accepts an extra positional argument, "device", that they do not understand
71
+ # def test_bar(self, device):
72
+ # pass
73
+ #
74
+ # # Function that instantiates a template class and its tests
75
+ # instantiate_device_type_tests(TestCommon, globals())
76
+ # --------------------------------------------------------
77
+ #
78
+ # In the above code example we see a template class and a single test template
79
+ # that can be instantiated with a device. The function
80
+ # instantiate_device_type_tests(), called at file scope, instantiates
81
+ # new test classes, one per available device type, and new tests in those
82
+ # classes from these templates. It actually does this by removing
83
+ # the class TestClassFoo and replacing it with classes like TestClassFooCPU
84
+ # and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
85
+ # and CUDATestBase respectively. Additional device types, like XLA,
86
+ # (see https://github.com/pytorch/xla) can further extend the set of
87
+ # instantiated test classes to create classes like TestClassFooXLA.
88
+ #
89
+ # The test template, test_bar(), is also instantiated. In this case the template
90
+ # is only specialized on a device, so (depending on the available device
91
+ # types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
92
+ # in TestClassFooCUDA. We can think of the instantiated test classes as
93
+ # looking like this:
94
+ #
95
+ # --------------------------------------------------------
96
+ # # An instantiated test class for the CPU device type
97
+ # class TestClassFooCPU(CPUTestBase):
98
+ #
99
+ # # An instantiated test that calls the template with the string representation
100
+ # # of a device from the test class's device type
101
+ # def test_bar_cpu(self):
102
+ # test_bar(self, 'cpu')
103
+ #
104
+ # # An instantiated test class for the CUDA device type
105
+ # class TestClassFooCUDA(CUDATestBase):
106
+ #
107
+ # # An instantiated test that calls the template with the string representation
108
+ # # of a device from the test class's device type
109
+ # def test_bar_cuda(self):
110
+ # test_bar(self, 'cuda:0')
111
+ # --------------------------------------------------------
112
+ #
113
+ # These instantiated test classes ARE discoverable and runnable by both
114
+ # unittest and pytest. One thing that may be confusing, however, is that
115
+ # attempting to run "test_bar" will not work, despite it appearing in the
116
+ # original template code. This is because "test_bar" is no longer discoverable
117
+ # after instantiate_device_type_tests() runs, as the above snippet shows.
118
+ # Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both
119
+ # can be run with the option "-k test_bar".
120
+ #
121
+ # Removing the template class and adding the instantiated classes requires
122
+ # passing "globals()" to instantiate_device_type_tests(), because it
123
+ # edits the file's Python objects.
124
+ #
125
+ # As mentioned, tests can be additionally parametrized on dtypes or
126
+ # operators. Datatype parametrization uses the @dtypes decorator and
127
+ # requires a test template like this:
128
+ #
129
+ # --------------------------------------------------------
130
+ # # A template test that can be specialized with a device and a datatype (dtype)
131
+ # @dtypes(torch.float32, torch.int64)
132
+ # def test_car(self, device, dtype)
133
+ # pass
134
+ # --------------------------------------------------------
135
+ #
136
+ # If the CPU and CUDA device types are available this test would be
137
+ # instantiated as 4 tests that cover the cross-product of the two dtypes
138
+ # and two device types:
139
+ #
140
+ # - test_car_cpu_float32
141
+ # - test_car_cpu_int64
142
+ # - test_car_cuda_float32
143
+ # - test_car_cuda_int64
144
+ #
145
+ # The dtype is passed as a torch.dtype object.
146
+ #
147
+ # Tests parametrized on operators (actually on OpInfos, more on that in a
148
+ # moment...) use the @ops decorator and require a test template like this:
149
+ # --------------------------------------------------------
150
+ # # A template test that can be specialized with a device, dtype, and OpInfo
151
+ # @ops(op_db)
152
+ # def test_car(self, device, dtype, op)
153
+ # pass
154
+ # --------------------------------------------------------
155
+ #
156
+ # See the documentation for the @ops decorator below for additional details
157
+ # on how to use it and see the note [OpInfos] in
158
+ # common_methods_invocations.py for more details on OpInfos.
159
+ #
160
+ # A test parametrized over the entire "op_db", which contains hundreds of
161
+ # OpInfos, will likely have hundreds or thousands of instantiations. The
162
+ # test will be instantiated on the cross-product of device types, operators,
163
+ # and the dtypes the operator supports on that device type. The instantiated
164
+ # tests will have names like:
165
+ #
166
+ # - test_car_add_cpu_float32
167
+ # - test_car_sub_cuda_int64
168
+ #
169
+ # The first instantiated test calls the original test_car() with the OpInfo
170
+ # for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
171
+ # and the dtype torch.float32 for its "dtype" argument. The second instantiated
172
+ # test calls the test_car() with the OpInfo for torch.sub, a CUDA device string
173
+ # like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
174
+ # torch.int64 for its "dtype" argument.
175
+ #
176
+ # In addition to parametrizing over device, dtype, and ops via OpInfos, the
177
+ # @parametrize decorator is supported for arbitrary parametrizations:
178
+ # --------------------------------------------------------
179
+ # # A template test that can be specialized with a device, dtype, and value for x
180
+ # @parametrize("x", range(5))
181
+ # def test_car(self, device, dtype, x)
182
+ # pass
183
+ # --------------------------------------------------------
184
+ #
185
+ # See the documentation for @parametrize in common_utils.py for additional details
186
+ # on this. Note that the instantiate_device_type_tests() function will handle
187
+ # such parametrizations; there is no need to additionally call
188
+ # instantiate_parametrized_tests().
189
+ #
190
+ # Clever test filtering can be very useful when working with parametrized
191
+ # tests. "-k test_car" would run every instantiated variant of the test_car()
192
+ # test template, and "-k test_car_add" runs every variant instantiated with
193
+ # torch.add.
194
+ #
195
+ # It is important to use the passed device and dtype as appropriate. Use
196
+ # helper functions like make_tensor() that require explicitly specifying
197
+ # the device and dtype so they're not forgotten.
198
+ #
199
+ # Test templates can use a variety of composable decorators to specify
200
+ # additional options and requirements, some are listed here:
201
+ #
202
+ # - @deviceCountAtLeast(<minimum number of devices to run test with>)
203
+ # Passes a list of strings representing all available devices of
204
+ # the test class's device type as the test template's "device" argument.
205
+ # If there are fewer devices than the value passed to the decorator
206
+ # the test is skipped.
207
+ # - @dtypes(<list of tuples of dtypes>)
208
+ # In addition to accepting multiple dtypes, the @dtypes decorator
209
+ # can accept a sequence of tuple pairs of dtypes. The test template
210
+ # will be called with each tuple for its "dtype" argument.
211
+ # - @onlyNativeDeviceTypes
212
+ # Skips the test if the device is not a native device type (currently CPU, CUDA, Meta)
213
+ # - @onlyCPU
214
+ # Skips the test if the device is not a CPU device
215
+ # - @onlyCUDA
216
+ # Skips the test if the device is not a CUDA device
217
+ # - @onlyMPS
218
+ # Skips the test if the device is not an MPS device
219
+ # - @skipCPUIfNoLapack
220
+ # Skips the test if the device is a CPU device and LAPACK is not installed
221
+ # - @skipCPUIfNoMkl
222
+ # Skips the test if the device is a CPU device and MKL is not installed
223
+ # - @skipCUDAIfNoMagma
224
+ # Skips the test if the device is a CUDA device and MAGMA is not installed
225
+ # - @skipCUDAIfRocm
226
+ # Skips the test if the device is a CUDA device and ROCm is being used
227
+
228
+
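To make the note above concrete, here is a compact sketch of a test file that follows the template pattern it describes; the class and test names are illustrative, not part of the framework.

import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_device_type import (
    dtypes,
    instantiate_device_type_tests,
)

class TestTemplateExample(TestCase):
    # Instantiated as e.g. test_add_cpu_float32, test_add_cuda_int64, ...
    # depending on the available device types.
    @dtypes(torch.float32, torch.int64)
    def test_add(self, device, dtype):
        a = make_tensor((2, 3), device=device, dtype=dtype)
        b = make_tensor((2, 3), device=device, dtype=dtype)
        self.assertEqual(a + b, torch.add(a, b))

# Removes TestTemplateExample and adds TestTemplateExampleCPU, ...CUDA, etc.
instantiate_device_type_tests(TestTemplateExample, globals())

if __name__ == "__main__":
    run_tests()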
229
+ # Note [Adding a Device Type]
230
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
231
+ #
232
+ # To add a device type:
233
+ #
234
+ # (1) Create a new "TestBase" extending DeviceTypeTestBase.
235
+ # See CPUTestBase and CUDATestBase below.
236
+ # (2) Define the "device_type" attribute of the base to be the
237
+ # appropriate string.
238
+ # (3) Add logic to this file that appends your base class to
239
+ # device_type_test_bases when your device type is available.
240
+ # (4) (Optional) Write setUpClass/tearDownClass class methods that
241
+ # instantiate dependencies (see MAGMA in CUDATestBase).
242
+ # (5) (Optional) Override the "instantiate_test" method for total
243
+ # control over how your class creates tests.
244
+ #
245
+ # setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
246
+ # they are run. This makes it useful for initializing devices and dependencies.
247
+
248
+
249
+ # Note [Overriding methods in generic tests]
250
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
251
+ #
252
+ # Device generic tests look a lot like normal test classes, but they differ
253
+ # from ordinary classes in some important ways. In particular, overriding
254
+ # methods in generic tests doesn't work quite the way you expect.
255
+ #
256
+ # class TestFooDeviceType(TestCase):
257
+ # # Intention is to override
258
+ # def assertEqual(self, x, y):
259
+ # # This DOESN'T WORK!
260
+ # super().assertEqual(x, y)
261
+ #
262
+ # If you try to run this code, you'll get an error saying that TestFooDeviceType
263
+ # is not in scope. This is because after instantiating our classes, we delete
264
+ # it from the parent scope. Instead, you need to hardcode a direct invocation
265
+ # of the desired subclass call, e.g.,
266
+ #
267
+ # class TestFooDeviceType(TestCase):
268
+ # # Intention is to override
269
+ # def assertEqual(self, x, y):
270
+ # TestCase.assertEqual(self, x, y)
271
+ #
272
+ # However, a less error-prone way of customizing the behavior of TestCase
273
+ # is to either (1) add your functionality to TestCase and make it toggled
274
+ # by a class attribute, or (2) create your own subclass of TestCase, and
275
+ # then inherit from it for your generic test.
276
+
277
+
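Following option (2) from the note above, a small sketch of moving the customization into an intermediate TestCase subclass that the generic template then inherits from; the names and tolerances are illustrative only.

from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_device_type import instantiate_device_type_tests

class RelaxedTestCase(TestCase):
    # The customization lives here instead of being overridden inside the
    # generic test class, so it survives the class replacement described above.
    def assertEqual(self, x, y, *args, **kwargs):
        kwargs.setdefault("atol", 1e-4)
        kwargs.setdefault("rtol", 1e-4)
        return super().assertEqual(x, y, *args, **kwargs)

class TestFooDeviceType(RelaxedTestCase):
    def test_bar(self, device):
        self.assertEqual(1.0, 1.0 + 1e-5)

instantiate_device_type_tests(TestFooDeviceType, globals())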
278
+ def _dtype_test_suffix(dtypes):
279
+ """ Returns the test suffix for a dtype, sequence of dtypes, or None. """
280
+ if isinstance(dtypes, (list, tuple)):
281
+ if len(dtypes) == 0:
282
+ return ''
283
+ return '_' + '_'.join(dtype_name(d) for d in dtypes)
284
+ elif dtypes:
285
+ return f'_{dtype_name(dtypes)}'
286
+ else:
287
+ return ''
288
+
289
+
290
+ def _update_param_kwargs(param_kwargs, name, value):
291
+ """ Adds a kwarg with the specified name and value to the param_kwargs dict. """
292
+ # Make name plural (e.g. devices / dtypes) if the value is composite.
293
+ plural_name = f'{name}s'
294
+
295
+ # Clear out old entries of the arg if any.
296
+ if name in param_kwargs:
297
+ del param_kwargs[name]
298
+ if plural_name in param_kwargs:
299
+ del param_kwargs[plural_name]
300
+
301
+ if isinstance(value, (list, tuple)):
302
+ param_kwargs[plural_name] = value
303
+ elif value is not None:
304
+ param_kwargs[name] = value
305
+
306
+ # Leave param_kwargs as-is when value is None.
307
+
308
+
309
+ class DeviceTypeTestBase(TestCase):
310
+ device_type: str = 'generic_device_type'
311
+
312
+ # Flag to disable test suite early due to unrecoverable error such as CUDA error.
313
+ _stop_test_suite = False
314
+
315
+ # Precision is a thread-local setting since it may be overridden per test
316
+ _tls = threading.local()
317
+ _tls.precision = TestCase._precision
318
+ _tls.rel_tol = TestCase._rel_tol
319
+
320
+ @property
321
+ def precision(self):
322
+ return self._tls.precision
323
+
324
+ @precision.setter
325
+ def precision(self, prec):
326
+ self._tls.precision = prec
327
+
328
+ @property
329
+ def rel_tol(self):
330
+ return self._tls.rel_tol
331
+
332
+ @rel_tol.setter
333
+ def rel_tol(self, prec):
334
+ self._tls.rel_tol = prec
335
+
336
+ # Returns a string representing the device that single device tests should use.
337
+ # Note: single device tests use this device exclusively.
338
+ @classmethod
339
+ def get_primary_device(cls):
340
+ return cls.device_type
341
+
342
+ @classmethod
343
+ def _init_and_get_primary_device(cls):
344
+ try:
345
+ return cls.get_primary_device()
346
+ except Exception:
347
+ # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available
348
+ # until setUpClass() sets it. Call that manually here if needed.
349
+ if hasattr(cls, 'setUpClass'):
350
+ cls.setUpClass()
351
+ return cls.get_primary_device()
352
+
353
+ # Returns a list of strings representing all available devices of this
354
+ # device type. The primary device must be the first string in the list
355
+ # and the list must contain no duplicates.
356
+ # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
357
+ # mechanism of acquiring all available devices.
358
+ @classmethod
359
+ def get_all_devices(cls):
360
+ return [cls.get_primary_device()]
361
+
362
+ # Returns the dtypes the test has requested.
363
+ # Prefers device-specific dtype specifications over generic ones.
364
+ @classmethod
365
+ def _get_dtypes(cls, test):
366
+ if not hasattr(test, 'dtypes'):
367
+ return None
368
+
369
+ default_dtypes = test.dtypes.get('all')
370
+ msg = f"@dtypes is mandatory when using @dtypesIf; however, '{test.__name__}' didn't specify it"
371
+ assert default_dtypes is not None, msg
372
+
373
+ return test.dtypes.get(cls.device_type, default_dtypes)
374
+
375
+ def _get_precision_override(self, test, dtype):
376
+ if not hasattr(test, 'precision_overrides'):
377
+ return self.precision
378
+ return test.precision_overrides.get(dtype, self.precision)
379
+
380
+ def _get_tolerance_override(self, test, dtype):
381
+ if not hasattr(test, 'tolerance_overrides'):
382
+ return self.precision, self.rel_tol
383
+ return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))
384
+
385
+ def _apply_precision_override_for_test(self, test, param_kwargs):
386
+ dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None
387
+ dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype
388
+ if dtype:
389
+ self.precision = self._get_precision_override(test, dtype)
390
+ self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)
391
+
392
+ # Creates device-specific tests.
393
+ @classmethod
394
+ def instantiate_test(cls, name, test, *, generic_cls=None):
395
+
396
+ def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []):
397
+ # Add the device param kwarg if the test needs device or devices.
398
+ param_kwargs = {} if param_kwargs is None else param_kwargs
399
+ test_sig_params = inspect.signature(test).parameters
400
+ if 'device' in test_sig_params or 'devices' in test_sig_params:
401
+ device_arg: str = cls._init_and_get_primary_device()
402
+ if hasattr(test, 'num_required_devices'):
403
+ device_arg = cls.get_all_devices()
404
+ _update_param_kwargs(param_kwargs, 'device', device_arg)
405
+
406
+ # Apply decorators based on param kwargs.
407
+ for decorator in decorator_fn(param_kwargs):
408
+ test = decorator(test)
409
+
410
+ # Constructs the test
411
+ @wraps(test)
412
+ def instantiated_test(self, param_kwargs=param_kwargs):
413
+ # Sets precision and runs test
414
+ # Note: precision is reset after the test is run
415
+ guard_precision = self.precision
416
+ guard_rel_tol = self.rel_tol
417
+ try:
418
+ self._apply_precision_override_for_test(test, param_kwargs)
419
+ result = test(self, **param_kwargs)
420
+ except RuntimeError as rte:
421
+ # check if rte should stop entire test suite.
422
+ self._stop_test_suite = self._should_stop_test_suite()
423
+ # Check if test has been decorated with `@expectedFailure`
424
+ # Using `__unittest_expecting_failure__` attribute, see
425
+ # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
426
+ # In that case, make it fail with "unexpected success" by suppressing exception
427
+ if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite:
428
+ import sys
429
+ print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr)
430
+ return
431
+ # raise the runtime error as is for the test suite to record.
432
+ raise rte
433
+ finally:
434
+ self.precision = guard_precision
435
+ self.rel_tol = guard_rel_tol
436
+
437
+ return result
438
+
439
+ assert not hasattr(cls, name), f"Redefinition of test {name}"
440
+ setattr(cls, name, instantiated_test)
441
+
442
+ def default_parametrize_fn(test, generic_cls, device_cls):
443
+ # By default, no parametrization is needed.
444
+ yield (test, '', {}, lambda _: [])
445
+
446
+ # Parametrization decorators set the parametrize_fn attribute on the test.
447
+ parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)
448
+
449
+ # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
450
+ dtypes = cls._get_dtypes(test)
451
+ if dtypes is not None:
452
+
453
+ def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
454
+ for dtype in dtypes:
455
+ param_kwargs: Dict[str, Any] = {}
456
+ _update_param_kwargs(param_kwargs, "dtype", dtype)
457
+
458
+ # Note that an empty test suffix is set here so that the dtype can be appended
459
+ # later after the device.
460
+ yield (test, '', param_kwargs, lambda _: [])
461
+
462
+ parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)
463
+
464
+ # Instantiate the parametrized tests.
465
+ for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls): # noqa: B020
466
+ test_suffix = '' if test_suffix == '' else '_' + test_suffix
467
+ device_suffix = '_' + cls.device_type
468
+
469
+ # Note: device and dtype suffix placement
470
+ # Special handling here to place dtype(s) after device according to test name convention.
471
+ dtype_kwarg = None
472
+ if 'dtype' in param_kwargs or 'dtypes' in param_kwargs:
473
+ dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype']
474
+ test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}'
475
+
476
+ instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs,
477
+ decorator_fn=decorator_fn)
478
+
479
+ def run(self, result=None):
480
+ super().run(result=result)
481
+ # Early terminate test if _stop_test_suite is set.
482
+ if self._stop_test_suite:
483
+ result.stop()
484
+
485
+
486
+ class CPUTestBase(DeviceTypeTestBase):
487
+ device_type = 'cpu'
488
+
489
+ # No critical error should stop CPU test suite
490
+ def _should_stop_test_suite(self):
491
+ return False
492
+
493
+ class CUDATestBase(DeviceTypeTestBase):
494
+ device_type = 'cuda'
495
+ _do_cuda_memory_leak_check = True
496
+ _do_cuda_non_default_stream = True
497
+ primary_device: ClassVar[str]
498
+ cudnn_version: ClassVar[Any]
499
+ no_magma: ClassVar[bool]
500
+ no_cudnn: ClassVar[bool]
501
+
502
+ def has_cudnn(self):
503
+ return not self.no_cudnn
504
+
505
+ @classmethod
506
+ def get_primary_device(cls):
507
+ return cls.primary_device
508
+
509
+ @classmethod
510
+ def get_all_devices(cls):
511
+ primary_device_idx = int(cls.get_primary_device().split(':')[1])
512
+ num_devices = torch.cuda.device_count()
513
+
514
+ prim_device = cls.get_primary_device()
515
+ cuda_str = 'cuda:{0}'
516
+ non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
517
+ return [prim_device] + non_primary_devices
518
+
519
+ @classmethod
520
+ def setUpClass(cls):
521
+ # has_magma shows up after cuda is initialized
522
+ t = torch.ones(1).cuda()
523
+ cls.no_magma = not torch.cuda.has_magma
524
+
525
+ # Determines if cuDNN is available and its version
526
+ cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t)
527
+ cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()
528
+
529
+ # Acquires the current device as the primary (test) device
530
+ cls.primary_device = f'cuda:{torch.cuda.current_device()}'
531
+
532
+ # See Note [Lazy Tensor tests in device agnostic testing]
533
+ lazy_ts_backend_init = False
534
+ class LazyTestBase(DeviceTypeTestBase):
535
+ device_type = 'lazy'
536
+
537
+ def _should_stop_test_suite(self):
538
+ return False
539
+
540
+ @classmethod
541
+ def setUpClass(cls):
542
+ import torch._lazy
543
+ import torch._lazy.metrics
544
+ import torch._lazy.ts_backend
545
+ global lazy_ts_backend_init
546
+ if not lazy_ts_backend_init:
547
+ # Need to connect the TS backend to lazy key before running tests
548
+ torch._lazy.ts_backend.init()
549
+ lazy_ts_backend_init = True
550
+
551
+ class MPSTestBase(DeviceTypeTestBase):
552
+ device_type = 'mps'
553
+ primary_device: ClassVar[str]
554
+
555
+ @classmethod
556
+ def get_primary_device(cls):
557
+ return cls.primary_device
558
+
559
+ @classmethod
560
+ def get_all_devices(cls):
561
+ # currently only one device is supported on MPS backend
562
+ prim_device = cls.get_primary_device()
563
+ return [prim_device]
564
+
565
+ @classmethod
566
+ def setUpClass(cls):
567
+ cls.primary_device = 'mps:0'
568
+
569
+ def _should_stop_test_suite(self):
570
+ return False
571
+
572
+ class PrivateUse1TestBase(DeviceTypeTestBase):
573
+ primary_device: ClassVar[str]
574
+ device_mod = None
575
+ device_type = 'privateuse1'
576
+
577
+ @classmethod
578
+ def get_primary_device(cls):
579
+ return cls.primary_device
580
+
581
+ @classmethod
582
+ def get_all_devices(cls):
583
+ primary_device_idx = int(cls.get_primary_device().split(':')[1])
584
+ num_devices = cls.device_mod.device_count()
585
+ prim_device = cls.get_primary_device()
586
+ device_str = f'{cls.device_type}:{{0}}'
587
+ non_primary_devices = [device_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
588
+ return [prim_device] + non_primary_devices
589
+
590
+ @classmethod
591
+ def setUpClass(cls):
592
+ cls.device_type = torch._C._get_privateuse1_backend_name()
593
+ cls.device_mod = getattr(torch, cls.device_type, None)
594
+ assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register
595
+ a module with `torch._register_device_module`.'''
596
+ cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}'
597
+
598
+ # Adds available device-type-specific test base classes
599
+ def get_device_type_test_bases():
600
+ # set type to List[Any] due to mypy list-of-union issue:
601
+ # https://github.com/python/mypy/issues/3351
602
+ test_bases: List[Any] = list()
603
+
604
+ if IS_SANDCASTLE or IS_FBCODE:
605
+ if IS_REMOTE_GPU:
606
+ # Skip if sanitizer is enabled
607
+ if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN:
608
+ test_bases.append(CUDATestBase)
609
+ else:
610
+ test_bases.append(CPUTestBase)
611
+ else:
612
+ test_bases.append(CPUTestBase)
613
+ if torch.cuda.is_available():
614
+ test_bases.append(CUDATestBase)
615
+ device_type = torch._C._get_privateuse1_backend_name()
616
+ device_mod = getattr(torch, device_type, None)
617
+ if hasattr(device_mod, "is_available") and device_mod.is_available():
618
+ test_bases.append(PrivateUse1TestBase)
619
+ # Disable MPS testing in generic device testing temporarily while we're
620
+ # ramping up support.
621
+ # elif torch.backends.mps.is_available():
622
+ # test_bases.append(MPSTestBase)
623
+
624
+ return test_bases
625
+
626
+ device_type_test_bases = get_device_type_test_bases()
627
+
628
+
629
+ def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
630
+ # device type cannot appear in both except_for and only_for
631
+ intersect = set(except_for if except_for else []) & set(only_for if only_for else [])
632
+ assert not intersect, f"device ({intersect}) appeared in both except_for and only_for"
633
+
634
+ if except_for:
635
+ device_type_test_bases = filter(
636
+ lambda x: x.device_type not in except_for, device_type_test_bases)
637
+ if only_for:
638
+ device_type_test_bases = filter(
639
+ lambda x: x.device_type in only_for, device_type_test_bases)
640
+
641
+ return list(device_type_test_bases)
642
+
643
+
644
+ # Note [How to extend DeviceTypeTestBase to add new test device]
645
+ # The following logic optionally allows downstream projects like pytorch/xla to
646
+ # add more test devices.
647
+ # Instructions:
648
+ # - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
649
+ # - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
650
+ # a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
651
+ # `instantiate_test` method.
652
+ # - DO NOT import common_device_type inside the file.
653
+ # `runpy.run_path` with `globals()` already properly sets up the context so that
654
+ # `DeviceTypeTestBase` is already available.
655
+ # - Set a top-level variable `TEST_CLASS` equal to your new class.
656
+ # E.g. TEST_CLASS = XLATestBase
657
+ # - To run tests with the new device type, set the `TORCH_TEST_DEVICES` env variable to the path
658
+ # to this file. Multiple paths can be separated by `:`.
659
+ # See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
660
+ _TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
661
+ if _TORCH_TEST_DEVICES:
662
+ for path in _TORCH_TEST_DEVICES.split(':'):
663
+ # runpy (a stdlib module) lacks annotations
664
+ mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value]
665
+ device_type_test_bases.append(mod['TEST_CLASS'])
666
+
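+ # A minimal sketch of such a downstream file (illustrative only; a real base
+ # class would usually also override instantiate_test and related hooks):
+ #
+ #   # e.g. my_project/pytorch_test_base.py -- do NOT import common_device_type
+ #   # here; DeviceTypeTestBase is injected via runpy.run_path(..., globals()).
+ #   class MyAcceleratorTestBase(DeviceTypeTestBase):
+ #       device_type = 'my_accelerator'
+ #
+ #   TEST_CLASS = MyAcceleratorTestBase
+ #
+ # which would then be enabled with something like:
+ #   TORCH_TEST_DEVICES=/path/to/my_project/pytorch_test_base.py python test_foo.py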
667
+
668
+ PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'
669
+
670
+ PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR'
671
+ PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR'
672
+
673
+
674
+ def get_desired_device_type_test_bases(except_for=None, only_for=None, include_lazy=False, allow_mps=False):
675
+ # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy`
676
+ test_bases = device_type_test_bases.copy()
677
+ if allow_mps and TEST_MPS and MPSTestBase not in test_bases:
678
+ test_bases.append(MPSTestBase)
679
+ # Filter out the device types based on user inputs
680
+ desired_device_type_test_bases = filter_desired_device_types(test_bases, except_for, only_for)
681
+ if include_lazy:
682
+ # Note [Lazy Tensor tests in device agnostic testing]
683
+ # Right now, test_view_ops.py runs with LazyTensor.
684
+ # We don't want to opt every device-agnostic test into using the lazy device,
685
+ # because many of them will fail.
686
+ # So instead, the only way to opt a specific device-agnostic test file into
687
+ # lazy tensor testing is with include_lazy=True
688
+ if IS_FBCODE:
689
+ print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr)
690
+ else:
691
+ desired_device_type_test_bases.append(LazyTestBase)
692
+
693
+ def split_if_not_empty(x: str):
694
+ return x.split(",") if x else []
695
+
696
+ # Filter out the device types based on environment variables if available
697
+ # Usage:
698
+ # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
699
+ # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
700
+ env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, ''))
701
+ env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, ''))
702
+
703
+ return filter_desired_device_types(desired_device_type_test_bases, env_except_for, env_only_for)
704
+
705
+
706
+
707
+ # Adds 'instantiated' device-specific test cases to the given scope.
708
+ # The tests in these test cases are derived from the generic tests in
709
+ # generic_test_class. This function should be used instead of
710
+ # instantiate_parametrized_tests() if the test class contains
711
+ # device-specific tests (NB: this supports additional @parametrize usage).
712
+ #
713
+ # See note "Writing Test Templates"
714
+ def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False):
715
+ # Removes the generic test class from its enclosing scope so its tests
716
+ # are not discoverable.
717
+ del scope[generic_test_class.__name__]
718
+
719
+ # Creates an 'empty' version of the generic_test_class
720
+ # Note: we don't inherit from the generic_test_class directly because
721
+ # that would add its tests to our test classes and they would be
722
+ # discovered (despite not being runnable). Inherited methods also
723
+ # can't be removed later, and we can't rely on load_tests because
724
+ # pytest doesn't support it (as of this writing).
725
+ empty_name = generic_test_class.__name__ + "_base"
726
+ empty_class = type(empty_name, generic_test_class.__bases__, {})
727
+
728
+ # Acquires members names
729
+ # See Note [Overriding methods in generic tests]
730
+ generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
731
+ generic_tests = [x for x in generic_members if x.startswith('test')]
732
+
733
+ # Creates device-specific test cases
734
+ for base in get_desired_device_type_test_bases(except_for, only_for, include_lazy, allow_mps):
735
+ class_name = generic_test_class.__name__ + base.device_type.upper()
736
+
737
+ # type set to Any and suppressed due to unsupported runtime class:
738
+ # https://github.com/python/mypy/wiki/Unsupported-Python-Features
739
+ device_type_test_class: Any = type(class_name, (base, empty_class), {})
740
+
741
+ for name in generic_members:
742
+ if name in generic_tests: # Instantiates test member
743
+ test = getattr(generic_test_class, name)
744
+ # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
745
+ sig = inspect.signature(device_type_test_class.instantiate_test)
746
+ if len(sig.parameters) == 3:
747
+ # Instantiates the device-specific tests
748
+ device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
749
+ else:
750
+ device_type_test_class.instantiate_test(name, copy.deepcopy(test))
751
+ else: # Ports non-test member
752
+ assert name not in device_type_test_class.__dict__, f"Redefinition of directly defined member {name}"
753
+ nontest = getattr(generic_test_class, name)
754
+ setattr(device_type_test_class, name, nontest)
755
+
756
+ # Mimics defining the instantiated class in the caller's file
757
+ # by setting its module to the given class's and adding
758
+ # the module to the given scope.
759
+ # This lets the instantiated class be discovered by unittest.
760
+ device_type_test_class.__module__ = generic_test_class.__module__
761
+ scope[class_name] = device_type_test_class
762
+
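+ # Typical usage in a test file looks like the sketch below (TestFoo is a
+ # hypothetical generic test class; the module-scope call replaces it with
+ # per-device classes such as TestFooCPU and TestFooCUDA):
+ #
+ #   class TestFoo(TestCase):
+ #       @dtypes(torch.float32, torch.float64)
+ #       def test_add(self, device, dtype):
+ #           t = torch.ones(2, device=device, dtype=dtype)
+ #           self.assertEqual(t + t, torch.full((2,), 2., device=device, dtype=dtype))
+ #
+ #   instantiate_device_type_tests(TestFoo, globals())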
763
+
764
+ # Category of dtypes to run an OpInfo-based test for
765
+ # Example use: @ops(dtypes=OpDTypes.supported)
766
+ #
767
+ # There are seven categories:
768
+ # - supported: Every dtype supported by the operator. Use for exhaustive
769
+ # testing of all dtypes.
770
+ # - unsupported: Run tests on dtypes not supported by the operator. e.g. for
771
+ # testing the operator raises an error and doesn't crash.
772
+ # - supported_backward: Every dtype supported by the operator's backward pass.
773
+ # - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
774
+ # - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
775
+ # operator supports in both forward and backward.
776
+ # - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
777
+ # when this is selected.
+ # - any_common_cpu_cuda_one: Runs a test for one dtype the operator supports on both CPU and CUDA.
778
+ class OpDTypes(Enum):
779
+ supported = 0 # Test all supported dtypes (default)
780
+ unsupported = 1 # Test only unsupported dtypes
781
+ supported_backward = 2 # Test all supported backward dtypes
782
+ unsupported_backward = 3 # Test only unsupported backward dtypes
783
+ any_one = 4 # Test precisely one supported dtype
784
+ none = 5 # Instantiate no dtype variants (no dtype kwarg needed)
785
+ any_common_cpu_cuda_one = 6 # Test precisely one supported dtype that is common to both cuda and cpu
786
+
787
+
788
+ # Arbitrary order
789
+ ANY_DTYPE_ORDER = (
790
+ torch.float32,
791
+ torch.float64,
792
+ torch.complex64,
793
+ torch.complex128,
794
+ torch.float16,
795
+ torch.bfloat16,
796
+ torch.long,
797
+ torch.int32,
798
+ torch.int16,
799
+ torch.int8,
800
+ torch.uint8,
801
+ torch.bool
802
+ )
803
+
804
+ def _serialize_sample(sample_input):
805
+ # NB: For OpInfos, SampleInput.summary() prints in a cleaner way.
806
+ if getattr(sample_input, "summary", None) is not None:
807
+ return sample_input.summary()
808
+ return str(sample_input)
809
+
810
+ # Decorator that defines the OpInfos a test template should be instantiated for.
811
+ #
812
+ # Example usage:
813
+ #
814
+ # @ops(unary_ufuncs)
815
+ # def test_numerics(self, device, dtype, op):
816
+ # <test_code>
817
+ #
818
+ # This will instantiate variants of test_numerics for each given OpInfo,
819
+ # on each device the OpInfo's operator supports, and for every dtype supported by
820
+ # that operator. There are a few caveats to the dtype rule, explained below.
821
+ #
822
+ # The @ops decorator can accept two
823
+ # additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
824
+ # then the test variants are instantiated for those dtypes, regardless of
825
+ # what the operator supports. If given "allowed_dtypes" then test variants
826
+ # are instantiated only for the intersection of allowed_dtypes and the dtypes
827
+ # they would otherwise be instantiated with. That is, allowed_dtypes composes
828
+ # with the options listed above and below.
829
+ #
830
+ # The "dtypes" argument can also accept additional values (see OpDTypes above):
831
+ # OpDTypes.supported - the test is instantiated for all dtypes the operator
832
+ # supports
833
+ # OpDTypes.unsupported - the test is instantiated for all dtypes the operator
834
+ # doesn't support
835
+ # OpDTypes.supported_backward - the test is instantiated for all dtypes the
836
+ # operator's gradient formula supports
837
+ # OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
838
+ # operator's gradient formula doesn't support
839
+ # OpDTypes.any_one - the test is instantiated for one dtype the
840
+ # operator supports. The dtype supports forward and backward if possible.
841
+ # OpDTypes.none - the test is instantiated without any dtype. The test signature
842
+ # should not include a dtype kwarg in this case.
843
+ #
844
+ # These options allow tests to have considerable control over the dtypes
845
+ # they're instantiated for.
846
+
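+ # A short usage sketch (illustrative; op_db stands for a list of OpInfos, e.g.
+ # the one defined in common_methods_invocations, and the test body is made up):
+ #
+ #   @ops(op_db, allowed_dtypes=(torch.float32,))
+ #   def test_forward(self, device, dtype, op):
+ #       for sample in op.sample_inputs(device, dtype):
+ #           op(sample.input, *sample.args, **sample.kwargs)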
847
+ class ops(_TestParametrizer):
848
+ def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
849
+ allowed_dtypes: Optional[Sequence[torch.dtype]] = None, skip_if_dynamo=True):
850
+ self.op_list = list(op_list)
851
+ self.opinfo_dtypes = dtypes
852
+ self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
853
+ self.skip_if_dynamo = skip_if_dynamo
854
+
855
+ def _parametrize_test(self, test, generic_cls, device_cls):
856
+ """ Parameterizes the given test function across each op and its associated dtypes. """
857
+ if device_cls is None:
858
+ raise RuntimeError('The @ops decorator is only intended to be used in a device-specific '
859
+ 'context; use it with instantiate_device_type_tests() instead of '
860
+ 'instantiate_parametrized_tests()')
861
+
862
+ op = check_exhausted_iterator = object()
863
+ for op in self.op_list:
864
+ # Determine the set of dtypes to use.
865
+ dtypes: Union[Set[torch.dtype], Set[None]]
866
+ if isinstance(self.opinfo_dtypes, Sequence):
867
+ dtypes = set(self.opinfo_dtypes)
868
+ elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
869
+ dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type))
870
+ elif self.opinfo_dtypes == OpDTypes.supported_backward:
871
+ dtypes = op.supported_backward_dtypes(device_cls.device_type)
872
+ elif self.opinfo_dtypes == OpDTypes.unsupported:
873
+ dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
874
+ elif self.opinfo_dtypes == OpDTypes.supported:
875
+ dtypes = op.supported_dtypes(device_cls.device_type)
876
+ elif self.opinfo_dtypes == OpDTypes.any_one:
877
+ # Tries to pick a dtype that supports both forward and backward
878
+ supported = op.supported_dtypes(device_cls.device_type)
879
+ supported_backward = op.supported_backward_dtypes(device_cls.device_type)
880
+ supported_both = supported.intersection(supported_backward)
881
+ dtype_set = supported_both if len(supported_both) > 0 else supported
882
+ for dtype in ANY_DTYPE_ORDER:
883
+ if dtype in dtype_set:
884
+ dtypes = {dtype}
885
+ break
886
+ else:
887
+ dtypes = {}
888
+ elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
889
+ # Tries to pick a dtype that supports both CPU and CUDA
890
+ supported = op.dtypes.intersection(op.dtypesIfCUDA)
891
+ if supported:
892
+ dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)}
893
+ else:
894
+ dtypes = {}
895
+
896
+ elif self.opinfo_dtypes == OpDTypes.none:
897
+ dtypes = {None}
898
+ else:
899
+ raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")
900
+
901
+ if self.allowed_dtypes is not None:
902
+ dtypes = dtypes.intersection(self.allowed_dtypes)
903
+
904
+ # Construct the test name; device / dtype parts are handled outside.
905
+ # See [Note: device and dtype suffix placement]
906
+ test_name = op.formatted_name
907
+
908
+ for dtype in dtypes:
909
+ # Construct parameter kwargs to pass to the test.
910
+ param_kwargs = {'op': op}
911
+ _update_param_kwargs(param_kwargs, 'dtype', dtype)
912
+
913
+ # NOTE: test_wrapper exists because we don't want to apply
914
+ # op-specific decorators to the original test.
915
+ # Test-specific decorators are applied to the original test,
916
+ # however.
917
+ try:
918
+ @wraps(test)
919
+ def test_wrapper(*args, **kwargs):
920
+ try:
921
+ return test(*args, **kwargs)
922
+ except unittest.SkipTest as e:
923
+ raise e
924
+ except Exception as e:
925
+ tracked_input = get_tracked_input()
926
+ if PRINT_REPRO_ON_FAILURE and tracked_input is not None:
927
+ raise Exception(
928
+ f"Caused by {tracked_input.type_desc} "
929
+ f"at index {tracked_input.index}: "
930
+ f"{_serialize_sample(tracked_input.val)}") from e
931
+ raise e
932
+ finally:
933
+ clear_tracked_input()
934
+
935
+ if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR:
936
+ test_wrapper = skipIfTorchDynamo("Policy: we don't run OpInfo tests w/ Dynamo")(test_wrapper)
937
+
938
+ # Initialize info for the last input seen. This is useful for tracking
939
+ # down which inputs caused a test failure. Note that TrackedInputIter is
940
+ # responsible for managing this.
941
+ test.tracked_input = None
942
+
943
+ decorator_fn = partial(op.get_decorators, generic_cls.__name__,
944
+ test.__name__, device_cls.device_type, dtype)
945
+
946
+ yield (test_wrapper, test_name, param_kwargs, decorator_fn)
947
+ except Exception as ex:
948
+ # Provides an error message for debugging before rethrowing the exception
949
+ print(f"Failed to instantiate {test_name} for op {op.name}!")
950
+ raise ex
951
+ if op is check_exhausted_iterator:
952
+ raise ValueError('An empty op_list was passed to @ops. '
953
+ 'Note that this may result from reuse of a generator.')
954
+
955
+ # Decorator that skips a test if the given condition is true.
956
+ # Notes:
957
+ # (1) Skip conditions stack.
958
+ # (2) Skip conditions can be bools or strings. If a string the
959
+ # test base must have defined the corresponding attribute to be False
960
+ # for the test to run. If you want to use a string argument you should
961
+ # probably define a new decorator instead (see below).
962
+ # (3) Prefer the existing decorators to defining the 'device_type' kwarg.
963
+ class skipIf:
964
+
965
+ def __init__(self, dep, reason, device_type=None):
966
+ self.dep = dep
967
+ self.reason = reason
968
+ self.device_type = device_type
969
+
970
+ def __call__(self, fn):
971
+
972
+ @wraps(fn)
973
+ def dep_fn(slf, *args, **kwargs):
974
+ if self.device_type is None or self.device_type == slf.device_type:
975
+ if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep):
976
+ raise unittest.SkipTest(self.reason)
977
+
978
+ return fn(slf, *args, **kwargs)
979
+ return dep_fn
980
+
981
+
982
+ # Skips a test on CPU if the condition is true.
983
+ class skipCPUIf(skipIf):
984
+
985
+ def __init__(self, dep, reason):
986
+ super().__init__(dep, reason, device_type='cpu')
987
+
988
+
989
+ # Skips a test on CUDA if the condition is true.
990
+ class skipCUDAIf(skipIf):
991
+
992
+ def __init__(self, dep, reason):
993
+ super().__init__(dep, reason, device_type='cuda')
994
+
995
+ # Skips a test on Lazy if the condition is true.
996
+ class skipLazyIf(skipIf):
997
+
998
+ def __init__(self, dep, reason):
999
+ super().__init__(dep, reason, device_type='lazy')
1000
+
1001
+ # Skips a test on Meta if the condition is true.
1002
+ class skipMetaIf(skipIf):
1003
+
1004
+ def __init__(self, dep, reason):
1005
+ super().__init__(dep, reason, device_type='meta')
1006
+
1007
+ # Skips a test on MPS if the condition is true.
1008
+ class skipMPSIf(skipIf):
1009
+
1010
+ def __init__(self, dep, reason):
1011
+ super().__init__(dep, reason, device_type='mps')
1012
+
1013
+ # Skips a test on XLA if the condition is true.
1014
+ class skipXLAIf(skipIf):
1015
+
1016
+ def __init__(self, dep, reason):
1017
+ super().__init__(dep, reason, device_type='xla')
1018
+
1019
+ class skipPRIVATEUSE1If(skipIf):
1020
+
1021
+ def __init__(self, dep, reason):
1022
+ device_type = torch._C._get_privateuse1_backend_name()
1023
+ super().__init__(dep, reason, device_type=device_type)
1024
+
1025
+ def _has_sufficient_memory(device, size):
1026
+ if torch.device(device).type == 'cuda':
1027
+ if not torch.cuda.is_available():
1028
+ return False
1029
+ gc.collect()
1030
+ torch.cuda.empty_cache()
1031
+ # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
1032
+ if device == 'cuda':
1033
+ device = 'cuda:0'
1034
+ return torch.cuda.memory.mem_get_info(device)[0] >= size
1035
+
1036
+ if device == 'xla':
1037
+ raise unittest.SkipTest('TODO: Memory availability checks for XLA?')
1038
+
1039
+ if device != 'cpu':
1040
+ raise unittest.SkipTest('Unknown device type')
1041
+
1042
+ # CPU
1043
+ if not HAS_PSUTIL:
1044
+ raise unittest.SkipTest('Need psutil to determine if memory is sufficient')
1045
+
1046
+ # The sanitizers have significant memory overheads
1047
+ if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
1048
+ effective_size = size * 10
1049
+ else:
1050
+ effective_size = size
1051
+
1052
+ if psutil.virtual_memory().available < effective_size:
1053
+ gc.collect()
1054
+ return psutil.virtual_memory().available >= effective_size
1055
+
1056
+
1057
+ def largeTensorTest(size, device=None):
1058
+ """Skip test if the device has insufficient memory to run the test
1059
+
1060
+ size may be a number of bytes, a string of the form "N GB", or a callable
1061
+
1062
+ If the test is a device generic test, available memory on the primary device will be checked.
1063
+ It can also be overridden by the optional `device=` argument.
1064
+ In other tests, the `device=` argument needs to be specified.
1065
+ """
1066
+ if isinstance(size, str):
1067
+ assert size.endswith(('GB', 'gb')), "only bytes or GB supported"
1068
+ size = 1024 ** 3 * int(size[:-2])
1069
+
1070
+ def inner(fn):
1071
+ @wraps(fn)
1072
+ def dep_fn(self, *args, **kwargs):
1073
+ size_bytes = size(self, *args, **kwargs) if callable(size) else size
1074
+ _device = device if device is not None else self.get_primary_device()
1075
+ if not _has_sufficient_memory(_device, size_bytes):
1076
+ raise unittest.SkipTest(f'Insufficient {_device} memory')
1077
+
1078
+ return fn(self, *args, **kwargs)
1079
+ return dep_fn
1080
+ return inner
1081
+
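+ # Usage sketch (illustrative): skip the test when the primary CUDA device has
+ # less than roughly 12 GB free, or pass device='cpu' to check host memory:
+ #
+ #   @onlyCUDA
+ #   @largeTensorTest('12GB')
+ #   def test_huge_matmul(self, device):
+ #       a = torch.rand(30000, 30000, device=device)
+ #       a.matmul(a)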
1082
+
1083
+ class expectedFailure:
1084
+
1085
+ def __init__(self, device_type):
1086
+ self.device_type = device_type
1087
+
1088
+ def __call__(self, fn):
1089
+
1090
+ @wraps(fn)
1091
+ def efail_fn(slf, *args, **kwargs):
1092
+ if self.device_type is None or self.device_type == slf.device_type:
1093
+ try:
1094
+ fn(slf, *args, **kwargs)
1095
+ except Exception:
1096
+ return
1097
+ else:
1098
+ slf.fail('expected test to fail, but it passed')
1099
+
1100
+ return fn(slf, *args, **kwargs)
1101
+ return efail_fn
1102
+
1103
+
1104
+ class onlyOn:
1105
+
1106
+ def __init__(self, device_type):
1107
+ self.device_type = device_type
1108
+
1109
+ def __call__(self, fn):
1110
+
1111
+ @wraps(fn)
1112
+ def only_fn(slf, *args, **kwargs):
1113
+ if self.device_type != slf.device_type:
1114
+ reason = f"Only runs on {self.device_type}"
1115
+ raise unittest.SkipTest(reason)
1116
+
1117
+ return fn(slf, *args, **kwargs)
1118
+
1119
+ return only_fn
1120
+
1121
+
1122
+ # Decorator that provides all available devices of the device type to the test
1123
+ # as a list of strings instead of providing a single device string.
1124
+ # Skips the test if the number of available devices of the variant's device
1125
+ # type is less than the 'num_required_devices' arg.
1126
+ class deviceCountAtLeast:
1127
+
1128
+ def __init__(self, num_required_devices):
1129
+ self.num_required_devices = num_required_devices
1130
+
1131
+ def __call__(self, fn):
1132
+ assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}"
1133
+ fn.num_required_devices = self.num_required_devices
1134
+
1135
+ @wraps(fn)
1136
+ def multi_fn(slf, devices, *args, **kwargs):
1137
+ if len(devices) < self.num_required_devices:
1138
+ reason = f"fewer than {self.num_required_devices} devices detected"
1139
+ raise unittest.SkipTest(reason)
1140
+
1141
+ return fn(slf, devices, *args, **kwargs)
1142
+
1143
+ return multi_fn
1144
+
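+ # Usage sketch (illustrative): the test receives a list of device strings and
+ # is skipped when fewer than the requested number of devices are visible:
+ #
+ #   @deviceCountAtLeast(2)
+ #   def test_cross_device_copy(self, devices):
+ #       src = torch.ones(4, device=devices[0])
+ #       self.assertEqual(src.to(devices[1]).cpu(), torch.ones(4))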
1145
+ # Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
1146
+ def onlyNativeDeviceTypes(fn):
1147
+ @wraps(fn)
1148
+ def only_fn(self, *args, **kwargs):
1149
+ if self.device_type not in NATIVE_DEVICES:
1150
+ reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}"
1151
+ raise unittest.SkipTest(reason)
1152
+
1153
+ return fn(self, *args, **kwargs)
1154
+
1155
+ return only_fn
1156
+
1157
+ # Specifies per-dtype precision overrides.
1158
+ # Ex.
1159
+ #
1160
+ # @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
1161
+ # @dtypes(torch.half, torch.float, torch.double)
1162
+ # def test_X(self, device, dtype):
1163
+ # ...
1164
+ #
1165
+ # When the test is instantiated its class's precision will be set to the
1166
+ # corresponding override, if it exists.
1167
+ # self.precision can be accessed directly, and it also controls the behavior of
1168
+ # functions like self.assertEqual().
1169
+ #
1170
+ # Note that self.precision is a scalar value, so if you require multiple
1171
+ # precisions (or are working with multiple dtypes) they should be specified
1172
+ # explicitly and computed using self.precision (e.g.
1173
+ # self.precision * 2, max(1, self.precision)).
1174
+ class precisionOverride:
1175
+
1176
+ def __init__(self, d):
1177
+ assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
1178
+ for dtype in d.keys():
1179
+ assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}"
1180
+
1181
+ self.d = d
1182
+
1183
+ def __call__(self, fn):
1184
+ fn.precision_overrides = self.d
1185
+ return fn
1186
+
1187
+ # Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
1188
+ # precisionOverride.
1189
+ # Ex.
1190
+ #
1191
+ # @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3),
1192
+ #                     torch.double : tol(atol=1e-4, rtol=0)})
1193
+ # @dtypes(torch.half, torch.float, torch.double)
1194
+ # def test_X(self, device, dtype):
1195
+ # ...
1196
+ #
1197
+ # When the test is instantiated its class's tolerance will be set to the
1198
+ # corresponding override, if it exists.
1199
+ # self.rtol and self.precision can be accessed directly, and they also control
1200
+ # the behavior of functions like self.assertEqual().
1201
+ #
1202
+ # The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
1203
+ # atol = 1e-4 and rtol = 0 for torch.double.
1204
+ tol = namedtuple('tol', ['atol', 'rtol'])
1205
+
1206
+ class toleranceOverride:
1207
+ def __init__(self, d):
1208
+ assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
1209
+ for dtype, prec in d.items():
1210
+ assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}"
1211
+ assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"
1212
+
1213
+ self.d = d
1214
+
1215
+ def __call__(self, fn):
1216
+ fn.tolerance_overrides = self.d
1217
+ return fn
1218
+
1219
+ # Decorator that instantiates a variant of the test for each given dtype.
1220
+ # Notes:
1221
+ # (1) Tests that accept the dtype argument MUST use this decorator.
1222
+ # (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
1223
+ # or dtypesIfCUDA.
1224
+ # (3) Can accept an iterable of dtypes or an iterable of tuples
1225
+ # of dtypes.
1226
+ # Examples:
1227
+ # @dtypes(torch.float32, torch.float64)
1228
+ # @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
1229
+ class dtypes:
1230
+
1231
+ def __init__(self, *args, device_type="all"):
1232
+ if len(args) > 0 and isinstance(args[0], (list, tuple)):
1233
+ for arg in args:
1234
+ assert isinstance(arg, (list, tuple)), \
1235
+ "When one dtype variant is a tuple or list, " \
1236
+ "all dtype variants must be. " \
1237
+ f"Received non-list non-tuple dtype {str(arg)}"
1238
+ assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}"
1239
+ else:
1240
+ assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}"
1241
+
1242
+ self.args = args
1243
+ self.device_type = device_type
1244
+
1245
+ def __call__(self, fn):
1246
+ d = getattr(fn, 'dtypes', {})
1247
+ assert self.device_type not in d, f"dtypes redefinition for {self.device_type}"
1248
+ d[self.device_type] = self.args
1249
+ fn.dtypes = d
1250
+ return fn
1251
+
1252
+
1253
+ # Overrides specified dtypes on the CPU.
1254
+ class dtypesIfCPU(dtypes):
1255
+
1256
+ def __init__(self, *args):
1257
+ super().__init__(*args, device_type='cpu')
1258
+
1259
+
1260
+ # Overrides specified dtypes on CUDA.
1261
+ class dtypesIfCUDA(dtypes):
1262
+
1263
+ def __init__(self, *args):
1264
+ super().__init__(*args, device_type='cuda')
1265
+
1266
+ class dtypesIfMPS(dtypes):
1267
+
1268
+ def __init__(self, *args):
1269
+ super().__init__(*args, device_type='mps')
1270
+
1271
+ class dtypesIfPRIVATEUSE1(dtypes):
1272
+
1273
+ def __init__(self, *args):
1274
+ super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name())
1275
+
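+ # Combined usage sketch (illustrative): the generic @dtypes list applies to any
+ # device type without a more specific override, while @dtypesIfCUDA replaces it
+ # for the CUDA variants only:
+ #
+ #   @dtypes(torch.float32, torch.float64)
+ #   @dtypesIfCUDA(torch.float16, torch.float32)
+ #   def test_op(self, device, dtype):
+ #       ...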
1276
+ def onlyCPU(fn):
1277
+ return onlyOn('cpu')(fn)
1278
+
1279
+
1280
+ def onlyCUDA(fn):
1281
+ return onlyOn('cuda')(fn)
1282
+
1283
+
1284
+ def onlyMPS(fn):
1285
+ return onlyOn('mps')(fn)
1286
+
1287
+ def onlyPRIVATEUSE1(fn):
1288
+ device_type = torch._C._get_privateuse1_backend_name()
1289
+ device_mod = getattr(torch, device_type, None)
1290
+ if device_mod is None:
1291
+ reason = f"Skip as torch has no module of {device_type}"
1292
+ return unittest.skip(reason)(fn)
1293
+ return onlyOn(device_type)(fn)
1294
+
1295
+ def onlyCUDAAndPRIVATEUSE1(fn):
1296
+ @wraps(fn)
1297
+ def only_fn(self, *args, **kwargs):
1298
+ if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()):
1299
+ reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}"
1300
+ raise unittest.SkipTest(reason)
1301
+
1302
+ return fn(self, *args, **kwargs)
1303
+
1304
+ return only_fn
1305
+
1306
+ def disablecuDNN(fn):
1307
+
1308
+ @wraps(fn)
1309
+ def disable_cudnn(self, *args, **kwargs):
1310
+ if self.device_type == 'cuda' and self.has_cudnn():
1311
+ with torch.backends.cudnn.flags(enabled=False):
1312
+ return fn(self, *args, **kwargs)
1313
+ return fn(self, *args, **kwargs)
1314
+
1315
+ return disable_cudnn
1316
+
1317
+ def disableMkldnn(fn):
1318
+
1319
+ @wraps(fn)
1320
+ def disable_mkldnn(self, *args, **kwargs):
1321
+ if torch.backends.mkldnn.is_available():
1322
+ with torch.backends.mkldnn.flags(enabled=False):
1323
+ return fn(self, *args, **kwargs)
1324
+ return fn(self, *args, **kwargs)
1325
+
1326
+ return disable_mkldnn
1327
+
1328
+
1329
+ def expectedFailureCPU(fn):
1330
+ return expectedFailure('cpu')(fn)
1331
+
1332
+
1333
+ def expectedFailureCUDA(fn):
1334
+ return expectedFailure('cuda')(fn)
1335
+
1336
+ def expectedFailureMeta(fn):
1337
+ return skipIfTorchDynamo()(expectedFailure('meta')(fn))
1338
+
1339
+ def expectedFailureXLA(fn):
1340
+ return expectedFailure('xla')(fn)
1341
+
1342
+ # Skips a test on CPU if LAPACK is not available.
1343
+ def skipCPUIfNoLapack(fn):
1344
+ return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)
1345
+
1346
+
1347
+ # Skips a test on CPU if FFT is not available.
1348
+ def skipCPUIfNoFFT(fn):
1349
+ return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn)
1350
+
1351
+
1352
+ # Skips a test on CPU if MKL is not available.
1353
+ def skipCPUIfNoMkl(fn):
1354
+ return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)
1355
+
1356
+
1357
+ # Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows).
1358
+ def skipCPUIfNoMklSparse(fn):
1359
+ return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn)
1360
+
1361
+
1362
+ # Skips a test on CPU if mkldnn is not available.
1363
+ def skipCPUIfNoMkldnn(fn):
1364
+ return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn)
1365
+
1366
+
1367
+ # Skips a test on CUDA if MAGMA is not available.
1368
+ def skipCUDAIfNoMagma(fn):
1369
+ return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn))
1370
+
1371
+ def has_cusolver():
1372
+ return not TEST_WITH_ROCM
1373
+
1374
+ def has_hipsolver():
1375
+ rocm_version = _get_torch_rocm_version()
1376
+ # hipSOLVER is disabled on ROCM < 5.3
1377
+ return rocm_version >= (5, 3)
1378
+
1379
+ # Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available
1380
+ def skipCUDAIfNoCusolver(fn):
1381
+ return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn)
1382
+
1383
+
1384
+ # Skips a test if both cuSOLVER and MAGMA are not available
1385
+ def skipCUDAIfNoMagmaAndNoCusolver(fn):
1386
+ if has_cusolver():
1387
+ return fn
1388
+ else:
1389
+ # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
1390
+ return skipCUDAIfNoMagma(fn)
1391
+
1392
+ # Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available
1393
+ def skipCUDAIfNoMagmaAndNoLinalgsolver(fn):
1394
+ if has_cusolver() or has_hipsolver():
1395
+ return fn
1396
+ else:
1397
+ # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
1398
+ return skipCUDAIfNoMagma(fn)
1399
+
1400
+ # Skips a test on CUDA when using ROCm.
1401
+ def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"):
1402
+ def dec_fn(fn):
1403
+ reason = f"skipCUDAIfRocm: {msg}"
1404
+ return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn)
1405
+ if func:
1406
+ return dec_fn(func)
1407
+ return dec_fn
1408
+
1409
+ # Skips a test on CUDA when not using ROCm.
1410
+ def skipCUDAIfNotRocm(fn):
1411
+ return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn)
1412
+
1413
+ # Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
1414
+ def skipCUDAIfRocmVersionLessThan(version=None):
1415
+
1416
+ def dec_fn(fn):
1417
+ @wraps(fn)
1418
+ def wrap_fn(self, *args, **kwargs):
1419
+ if self.device_type == 'cuda':
1420
+ if not TEST_WITH_ROCM:
1421
+ reason = "ROCm not available"
1422
+ raise unittest.SkipTest(reason)
1423
+ rocm_version_tuple = _get_torch_rocm_version()
1424
+ if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
1425
+ reason = f"ROCm {rocm_version_tuple} is available but {version} required"
1426
+ raise unittest.SkipTest(reason)
1427
+
1428
+ return fn(self, *args, **kwargs)
1429
+
1430
+ return wrap_fn
1431
+ return dec_fn
1432
+
1433
+ # Skips a test on CUDA when using ROCm.
1434
+ def skipCUDAIfNotMiopenSuggestNHWC(fn):
1435
+ return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn)
1436
+
1437
+ # Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s.
1438
+ def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None):
1439
+ def dec_fn(fn):
1440
+ @wraps(fn)
1441
+ def wrap_fn(self, *args, **kwargs):
1442
+ version = _get_torch_cuda_version()
1443
+ if version == (0, 0): # cpu or rocm
1444
+ return fn(self, *args, **kwargs)
1445
+ if version in (versions or []):
1446
+ reason = f"test skipped for CUDA version {version}"
1447
+ raise unittest.SkipTest(reason)
1448
+ return fn(self, *args, **kwargs)
1449
+
1450
+ return wrap_fn
1451
+ return dec_fn
1452
+
1453
+ # Skips a test for CUDA versions less than specified, given in the form of [major, minor].
1454
+ def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None):
1455
+ def dec_fn(fn):
1456
+ @wraps(fn)
1457
+ def wrap_fn(self, *args, **kwargs):
1458
+ version = _get_torch_cuda_version()
1459
+ if version == (0, 0): # cpu or rocm
1460
+ return fn(self, *args, **kwargs)
1461
+ if version < versions:
1462
+ reason = f"test skipped for CUDA versions < {versions}, found {version}"
1463
+ raise unittest.SkipTest(reason)
1464
+ return fn(self, *args, **kwargs)
1465
+
1466
+ return wrap_fn
1467
+ return dec_fn
1468
+
1469
+ # Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
1470
+ def skipCUDAIfCudnnVersionLessThan(version=0):
1471
+
1472
+ def dec_fn(fn):
1473
+ @wraps(fn)
1474
+ def wrap_fn(self, *args, **kwargs):
1475
+ if self.device_type == 'cuda':
1476
+ if self.no_cudnn:
1477
+ reason = "cuDNN not available"
1478
+ raise unittest.SkipTest(reason)
1479
+ if self.cudnn_version is None or self.cudnn_version < version:
1480
+ reason = f"cuDNN version {self.cudnn_version} is available but {version} required"
1481
+ raise unittest.SkipTest(reason)
1482
+
1483
+ return fn(self, *args, **kwargs)
1484
+
1485
+ return wrap_fn
1486
+ return dec_fn
1487
+
1488
+ # Skips a test on CUDA if cuSparse generic API is not available
1489
+ def skipCUDAIfNoCusparseGeneric(fn):
1490
+ return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn)
1491
+
1492
+ def skipCUDAIfNoHipsparseGeneric(fn):
1493
+ return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn)
1494
+
1495
+ def skipCUDAIfNoSparseGeneric(fn):
1496
+ return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn)
1497
+
1498
+ def skipCUDAIfNoCudnn(fn):
1499
+ return skipCUDAIfCudnnVersionLessThan(0)(fn)
1500
+
1501
+ def skipCUDAIfMiopen(fn):
1502
+ return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)
1503
+
1504
+ def skipCUDAIfNoMiopen(fn):
1505
+ return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn))
1506
+
1507
+ def skipLazy(fn):
1508
+ return skipLazyIf(True, "test doesn't work with lazy tensors")(fn)
1509
+
1510
+ def skipMeta(fn):
1511
+ return skipMetaIf(True, "test doesn't work with meta tensors")(fn)
1512
+
1513
+ def skipXLA(fn):
1514
+ return skipXLAIf(True, "Marked as skipped for XLA")(fn)
1515
+
1516
+ def skipMPS(fn):
1517
+ return skipMPSIf(True, "test doesn't work on MPS backend")(fn)
1518
+
1519
+ def skipPRIVATEUSE1(fn):
1520
+ return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn)
1521
+
1522
+ # TODO: the "all" in the name hasn't been accurate for quite some time, as we also have XLA and MPS now, for example.
1523
+ # This should probably enumerate all available device type test base classes.
1524
+ def get_all_device_types() -> List[str]:
1525
+ return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
venv/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py ADDED
@@ -0,0 +1,111 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Owner(s): ["oncall: distributed"]
4
+
5
+ from typing import Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+
11
+ class UnitModule(nn.Module):
12
+ def __init__(self, device: torch.device):
13
+ super().__init__()
14
+ self.l1 = nn.Linear(100, 100, device=device)
15
+ self.seq = nn.Sequential(
16
+ nn.ReLU(),
17
+ nn.Linear(100, 100, device=device),
18
+ nn.ReLU(),
19
+ )
20
+ self.l2 = nn.Linear(100, 100, device=device)
21
+
22
+ def forward(self, x):
23
+ return self.l2(self.seq(self.l1(x)))
24
+
25
+
26
+ class CompositeModel(nn.Module):
27
+ def __init__(self, device: torch.device):
28
+ super().__init__()
29
+ self.l1 = nn.Linear(100, 100, device=device)
30
+ self.u1 = UnitModule(device)
31
+ self.u2 = UnitModule(device)
32
+ self.l2 = nn.Linear(100, 100, device=device)
33
+
34
+ def forward(self, x):
35
+ return self.l2(self.u2(self.u1(self.l1(x))))
36
+
37
+
38
+ class UnitParamModule(nn.Module):
39
+ def __init__(self, device: torch.device):
40
+ super().__init__()
41
+ self.l = nn.Linear(100, 100, device=device)
42
+ self.seq = nn.Sequential(
43
+ nn.ReLU(),
44
+ nn.Linear(100, 100, device=device),
45
+ nn.ReLU(),
46
+ )
47
+ self.p = nn.Parameter(torch.randn((100, 100), device=device))
48
+
49
+ def forward(self, x):
50
+ return torch.mm(self.seq(self.l(x)), self.p)
51
+
52
+
53
+ class CompositeParamModel(nn.Module):
54
+ def __init__(self, device: torch.device):
55
+ super().__init__()
56
+ self.l = nn.Linear(100, 100, device=device)
57
+ self.u1 = UnitModule(device)
58
+ self.u2 = UnitModule(device)
59
+ self.p = nn.Parameter(torch.randn((100, 100), device=device))
60
+ self.register_buffer(
61
+ "buffer", torch.randn((100, 100), device=device), persistent=True
62
+ )
63
+
64
+ def forward(self, x):
65
+ a = self.u2(self.u1(self.l(x)))
66
+ b = self.p
67
+ return torch.mm(a, b)
68
+
69
+
70
+ class FakeSequential(nn.Module):
71
+ # Define this class to achieve a desired nested wrapping using the module
72
+ # wrap policy with `nn.Sequential`
73
+ def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
74
+ super().__init__()
75
+ self._module_sequence = list(modules)
76
+
77
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
78
+ for module in self._module_sequence:
79
+ x = module(x)
80
+ return x
81
+
82
+
83
+ class NestedSequentialModel(nn.Module):
84
+ def __init__(self, device: torch.device) -> None:
85
+ super().__init__()
86
+ # This nested structure exercises traversal order to catch differences
87
+ # between valid traversals (e.g. BFS and DFS variations).
88
+ self.seq1 = nn.Sequential(
89
+ nn.Linear(1, 1, device=device),
90
+ FakeSequential(
91
+ nn.Linear(1, 1, device=device),
92
+ nn.ReLU(),
93
+ FakeSequential(
94
+ nn.Linear(1, 1, device=device),
95
+ ),
96
+ nn.ReLU(),
97
+ ),
98
+ nn.Linear(1, 2, device=device),
99
+ )
100
+ self.lin = nn.Linear(2, 2, device=device)
101
+ self.seq2 = nn.Sequential(
102
+ nn.ReLU(),
103
+ nn.Linear(2, 3, device=device),
104
+ FakeSequential(
105
+ nn.Linear(3, 2, bias=False, device=device),
106
+ nn.Linear(2, 4, bias=False, device=device),
107
+ ),
108
+ )
109
+
110
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
111
+ return self.seq2(self.lin(self.seq1(x)))
venv/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py ADDED
@@ -0,0 +1,134 @@
1
+ # mypy: ignore-errors
2
+
3
+ from typing import List
4
+
5
+ import torch
6
+
7
+
8
+ # Functions and classes for describing the dtypes a function supports
9
+ # NOTE: these helpers should correspond to PyTorch's C++ dispatch macros
10
+
11
+ # Verifies each given dtype is a torch.dtype
12
+ def _validate_dtypes(*dtypes):
13
+ for dtype in dtypes:
14
+ assert isinstance(dtype, torch.dtype)
15
+ return dtypes
16
+
17
+ # class for tuples corresponding to a PyTorch dispatch macro
18
+ class _dispatch_dtypes(tuple):
19
+ def __add__(self, other):
20
+ assert isinstance(other, tuple)
21
+ return _dispatch_dtypes(tuple.__add__(self, other))
22
+
23
+ _empty_types = _dispatch_dtypes(())
24
+ def empty_types():
25
+ return _empty_types
26
+
27
+ _floating_types = _dispatch_dtypes((torch.float32, torch.float64))
28
+ def floating_types():
29
+ return _floating_types
30
+
31
+ _floating_types_and_half = _floating_types + (torch.half,)
32
+ def floating_types_and_half():
33
+ return _floating_types_and_half
34
+
35
+ def floating_types_and(*dtypes):
36
+ return _floating_types + _validate_dtypes(*dtypes)
37
+
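+ # For reference (illustrative): floating_types_and(torch.half, torch.bfloat16)
+ # evaluates to (torch.float32, torch.float64, torch.half, torch.bfloat16).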
38
+ _floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble)
39
+ def floating_and_complex_types():
40
+ return _floating_and_complex_types
41
+
42
+ def floating_and_complex_types_and(*dtypes):
43
+ return _floating_and_complex_types + _validate_dtypes(*dtypes)
44
+
45
+ _double_types = _dispatch_dtypes((torch.float64, torch.complex128))
46
+ def double_types():
47
+ return _double_types
48
+
49
+ # NB: Does not contain uint16/uint32/uint64 for BC reasons
50
+ _integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64))
51
+ def integral_types():
52
+ return _integral_types
53
+
54
+ def integral_types_and(*dtypes):
55
+ return _integral_types + _validate_dtypes(*dtypes)
56
+
57
+ _all_types = _floating_types + _integral_types
58
+ def all_types():
59
+ return _all_types
60
+
61
+ def all_types_and(*dtypes):
62
+ return _all_types + _validate_dtypes(*dtypes)
63
+
64
+ _complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble))
65
+ def complex_types():
66
+ return _complex_types
67
+
68
+ def complex_types_and(*dtypes):
69
+ return _complex_types + _validate_dtypes(*dtypes)
70
+
71
+ _all_types_and_complex = _all_types + _complex_types
72
+ def all_types_and_complex():
73
+ return _all_types_and_complex
74
+
75
+ def all_types_and_complex_and(*dtypes):
76
+ return _all_types_and_complex + _validate_dtypes(*dtypes)
77
+
78
+ _all_types_and_half = _all_types + (torch.half,)
79
+ def all_types_and_half():
80
+ return _all_types_and_half
81
+
82
+ def custom_types(*dtypes):
83
+ """Create a list of arbitrary dtypes"""
84
+ return _empty_types + _validate_dtypes(*dtypes)
85
+
86
+ # The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro
87
+
88
+ # See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
89
+ def get_all_dtypes(include_half=True,
90
+ include_bfloat16=True,
91
+ include_bool=True,
92
+ include_complex=True,
93
+ include_complex32=False,
94
+ include_qint=False,
95
+ ) -> List[torch.dtype]:
96
+ dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16)
97
+ if include_bool:
98
+ dtypes.append(torch.bool)
99
+ if include_complex:
100
+ dtypes += get_all_complex_dtypes(include_complex32)
101
+ if include_qint:
102
+ dtypes += get_all_qint_dtypes()
103
+ return dtypes
104
+
105
+ def get_all_math_dtypes(device) -> List[torch.dtype]:
106
+ return get_all_int_dtypes() + get_all_fp_dtypes(include_half=device.startswith('cuda'),
107
+ include_bfloat16=False) + get_all_complex_dtypes()
108
+
109
+ def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]:
110
+ return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128]
111
+
112
+
113
+ def get_all_int_dtypes() -> List[torch.dtype]:
114
+ return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
115
+
116
+
117
+ def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
118
+ dtypes = [torch.float32, torch.float64]
119
+ if include_half:
120
+ dtypes.append(torch.float16)
121
+ if include_bfloat16:
122
+ dtypes.append(torch.bfloat16)
123
+ return dtypes
124
+
125
+
126
+ def get_all_qint_dtypes() -> List[torch.dtype]:
127
+ return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]
128
+
129
+
130
+ float_to_corresponding_complex_type_map = {
131
+ torch.float16: torch.complex32,
132
+ torch.float32: torch.complex64,
133
+ torch.float64: torch.complex128,
134
+ }
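For orientation, a minimal usage sketch of the dtype helpers above (illustrative only; it simply composes the public functions exposed by torch.testing._internal.common_dtype):

import torch
from torch.testing._internal.common_dtype import (
    all_types_and_complex_and,
    floating_types_and,
    get_all_dtypes,
)

# The *_and variants validate the extra dtypes and append them to the base dispatch tuple.
print(floating_types_and(torch.half, torch.bfloat16))     # float32, float64, float16, bfloat16
print(all_types_and_complex_and(torch.bool, torch.half))  # ints + floats + complex + bool + half
# get_all_dtypes is the list-based convenience helper used directly by tests.
print(get_all_dtypes(include_qint=False))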
venv/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py ADDED
@@ -0,0 +1,2033 @@
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import itertools
5
+ import unittest
6
+ from copy import deepcopy
7
+ from enum import Enum
8
+ from typing import Any, Dict, List, Tuple, Union
9
+
10
+ import torch
11
+ from torch import Tensor
12
+ from torch.nn import Parameter
13
+ from torch.optim import (
14
+ Adadelta,
15
+ Adagrad,
16
+ Adam,
17
+ Adamax,
18
+ AdamW,
19
+ ASGD,
20
+ LBFGS,
21
+ NAdam,
22
+ Optimizer,
23
+ RAdam,
24
+ RMSprop,
25
+ Rprop,
26
+ SGD,
27
+ SparseAdam,
28
+ )
29
+ from torch.testing._internal.common_device_type import tol, toleranceOverride
30
+ from torch.testing._internal.common_methods_invocations import DecorateInfo
31
+ from torch.testing._internal.common_utils import (
32
+ _TestParametrizer,
33
+ set_single_threaded_if_parallel_tbb,
34
+ skipIfMps,
35
+ skipIfTorchDynamo,
36
+ TEST_WITH_TORCHDYNAMO,
37
+ )
38
+ from torch.utils._foreach_utils import (
39
+ _get_foreach_kernels_supported_devices,
40
+ _get_fused_kernels_supported_devices,
41
+ )
42
+
43
+
44
+ class OptimizerInput:
45
+ """Contains args / kwargs to be passed to an optimizer constructor."""
46
+
47
+ __slots__ = ["params", "kwargs", "desc"]
48
+
49
+ def __init__(
50
+ self,
51
+ params: Union[List[Parameter], List[Tensor], Dict[Any, Any]],
52
+ kwargs: Dict[str, Any],
53
+ desc: str = "",
54
+ ):
55
+ # params can be a list of Tensors OR param_groups OR None
56
+ self.params = params
57
+ self.kwargs = kwargs
58
+ self.desc = desc
59
+
60
+ def __repr__(self):
61
+ return f"params={self.params}, kwargs={self.kwargs}, desc={self.desc}"
62
+
63
+
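A hedged sketch of how an OptimizerInput is typically consumed by a test (the parameters here are made up; the sampling functions later in this file deliberately leave params=None so the test can supply its own):

import torch
from torch.nn import Parameter

# Hypothetical test-side usage: substitute concrete parameters, then unpack the sampled kwargs.
params = [Parameter(torch.randn(2, 3))]
optim_input = OptimizerInput(params=None, kwargs={"lr": 0.01, "weight_decay": 0.1}, desc="non-default lr")
optimizer = torch.optim.SGD(params, **optim_input.kwargs)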
64
+ class OptimizerErrorEnum(Enum):
65
+ """Enumerates when an error is raised when testing optimizers."""
66
+
67
+ CONSTRUCTION_ERROR = 0
68
+ STEP_ERROR = 1
69
+
70
+
71
+ class ErrorOptimizerInput:
72
+ """
73
+ An OptimizerInput that will cause the optimizer to throw an error when constructed.
74
+ Includes the type and string of the resulting error.
75
+ """
76
+
77
+ __slots__ = ["optimizer_error_input", "error_on", "error_type", "error_regex"]
78
+
79
+ def __init__(
80
+ self,
81
+ optimizer_error_input,
82
+ *,
83
+ error_on=OptimizerErrorEnum.CONSTRUCTION_ERROR,
84
+ error_type=RuntimeError,
85
+ error_regex="",
86
+ ):
87
+ self.optimizer_error_input = optimizer_error_input
88
+ self.error_on = error_on
89
+ self.error_type = error_type
90
+ self.error_regex = error_regex
91
+
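A rough sketch (the helper name is hypothetical, not defined in this file) of how ErrorOptimizerInput entries are exercised, distinguishing expected warnings from expected exceptions:

def check_construction_errors(test_case, optim_info, device, dtype):
    # Hypothetical helper: run each input expected to fail at construction time.
    for err in optim_info.optim_error_inputs_func(device, dtype):
        if err.error_on != OptimizerErrorEnum.CONSTRUCTION_ERROR:
            continue
        ei = err.optimizer_error_input
        check = (test_case.assertWarnsRegex if issubclass(err.error_type, Warning)
                 else test_case.assertRaisesRegex)
        with check(err.error_type, err.error_regex):
            optim_info.optim_cls(ei.params, **ei.kwargs)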
92
+
93
+ class OptimizerInfo:
94
+ """Optimizer information to be used in testing."""
95
+
96
+ def __init__(
97
+ self,
98
+ optim_cls: Optimizer, # Class object for the Optimizer under test
99
+ *,
100
+ # Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility
101
+ # to the test using the OptimizerInfo. OptimizerInput.params is likely None.
102
+ # Can optionally take in device to filter out certain unsupported configs
103
+ optim_inputs_func,
104
+ # A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer
105
+ # supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means.
106
+ supported_impls: Tuple[str] = ("foreach", "differentiable"),
107
+ # the devices on which the optim supports sparse tensors for params and grads, see SGD
108
+ supports_sparse_on: Tuple[str] = (),
109
+ # the optim only supports one config: sparse grads w/ dense params, see SparseAdam
110
+ only_supports_sparse_grads: bool = False,
111
+ # the optim supports complex parameters
112
+ supports_complex: bool = True,
113
+ # whether the optimizer.step() function requires a closure to be passed
114
+ step_requires_closure: bool = False,
115
+ # whether the optimizer supports per-param options with parameter groups
116
+ supports_param_groups: bool = True,
117
+ # whether the optimizer supports parameters on multiple devices
118
+ supports_multiple_devices: bool = True,
119
+ skips=(), # Indicates which tests to skip
120
+ decorators=None, # Additional decorators to apply to generated tests
121
+ optim_error_inputs_func=None, # Function to generate optim inputs that error
122
+ ):
123
+ self.optim_cls = optim_cls
124
+ self.optim_inputs_func = optim_inputs_func
125
+ self.supported_impls = supported_impls
126
+ self.supports_sparse_on = supports_sparse_on
127
+ self.only_supports_sparse_grads = only_supports_sparse_grads
128
+ self.supports_complex = supports_complex
129
+ self.step_requires_closure = step_requires_closure
130
+ self.supports_param_groups = supports_param_groups
131
+ self.supports_multiple_devices = supports_multiple_devices
132
+ self.decorators = (
133
+ *(decorators if decorators else []),
134
+ *(skips if skips else []),
135
+ )
136
+ self.optim_error_inputs_func = optim_error_inputs_func
137
+
138
+ def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
139
+ result = [set_single_threaded_if_parallel_tbb]
140
+ for decorator in self.decorators:
141
+ if isinstance(decorator, DecorateInfo):
142
+ if decorator.is_active(
143
+ test_class, test_name, device, dtype, param_kwargs
144
+ ):
145
+ result.extend(decorator.decorators)
146
+ else:
147
+ result.append(decorator)
148
+ return result
149
+
150
+ @property
151
+ def name(self):
152
+ return self.optim_cls.__name__
153
+
154
+
155
+ class optims(_TestParametrizer):
156
+ """Decorator for specifying a list of optimizers over which to run a test."""
157
+
158
+ def __init__(self, optim_info_iterable, dtypes=None):
159
+ self.optim_info_list = list(optim_info_iterable)
160
+
161
+ # optimizers aren't limited to be one dtype as parameters can have different dtypes
162
+ # We default to torch.float32, but dtypes should be specified through passed in
163
+ # parameters.
164
+ self.dtypes = dtypes if dtypes is not None else [torch.float32]
165
+
166
+ def _parametrize_test(self, test, generic_cls, device_cls):
167
+ if device_cls is None:
168
+ raise RuntimeError(
169
+ "The @optims decorator is only intended to be used in a device-specific "
170
+ "context; use it with instantiate_device_type_tests() instead of "
171
+ "instantiate_parametrized_tests()"
172
+ )
173
+
174
+ for optim_info, dtype in itertools.product(self.optim_info_list, self.dtypes):
175
+ # Construct the test name; device / dtype parts are handled outside.
176
+ # See [Note: device and dtype suffix placement]
177
+ test_name = optim_info.name
178
+
179
+ # Construct parameter kwargs to pass to the test.
180
+ param_kwargs = {"optim_info": optim_info, "dtype": dtype}
181
+
182
+ try:
183
+
184
+ @functools.wraps(test)
185
+ def test_wrapper(*args, **kwargs):
186
+ return test(*args, **kwargs)
187
+
188
+ decorator_fn = functools.partial(
189
+ optim_info.get_decorators,
190
+ generic_cls.__name__,
191
+ test.__name__,
192
+ device_cls.device_type,
193
+ dtype,
194
+ )
195
+
196
+ yield (test_wrapper, test_name, param_kwargs, decorator_fn)
197
+ except Exception as ex:
198
+ # Provides an error message for debugging before rethrowing the exception
199
+ print(
200
+ f"Failed to instantiate {test_name} for module {optim_info.name}!"
201
+ )
202
+ raise ex
203
+
204
+
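An illustrative sketch of how the @optims decorator above is used together with instantiate_device_type_tests; the test class and method names are hypothetical, and optim_db is the database defined at the bottom of this file:

import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import TestCase

class TestOptimExample(TestCase):
    @optims(optim_db, dtypes=[torch.float32])
    def test_constructs(self, device, dtype, optim_info):
        params = [torch.nn.Parameter(torch.randn(2, 2, device=device, dtype=dtype))]
        for optim_input in optim_info.optim_inputs_func(device):
            optim_info.optim_cls(params, **optim_input.kwargs)

instantiate_device_type_tests(TestOptimExample, globals())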
205
+ # Helper function for generating error inputs for all optimizers, used below.
206
+ def get_error_inputs_for_all_optims(device, dtype):
207
+ if str(device) == "cpu":
208
+ sample_param = Parameter(torch.randn(1, device=device, dtype=dtype))
209
+ return [
210
+ ErrorOptimizerInput(
211
+ OptimizerInput(
212
+ params=sample_param,
213
+ kwargs={},
214
+ desc="invalid param type",
215
+ ),
216
+ error_type=TypeError,
217
+ error_regex="params argument given to the optimizer should be an iterable of Tensors or dicts",
218
+ ),
219
+ ErrorOptimizerInput(
220
+ OptimizerInput(
221
+ params=[sample_param, sample_param],
222
+ kwargs={},
223
+ desc="a param group cannot have duplicate parameters",
224
+ ),
225
+ error_type=UserWarning,
226
+ error_regex=".*a parameter group with duplicate parameters.*",
227
+ ),
228
+ ErrorOptimizerInput(
229
+ OptimizerInput(
230
+ params=[{"params": sample_param}, {"params": sample_param}],
231
+ kwargs={},
232
+ desc="duplicate parameters should not occur across param groups either",
233
+ ),
234
+ error_type=ValueError,
235
+ error_regex="some parameters appear in more than one parameter group",
236
+ ),
237
+ ]
238
+ else:
239
+ return []
240
+
241
+
242
+ # ------------------------------------------------------------------------------------------
243
+ # NOTE: [optimizer kwarg categories]
244
+ # We categorize optimizer kwargs as 3 types:
245
+ # 1. optimizer-specific flags are like amsgrad or rho or beta, flags that are specific to
246
+ # algorithms and thus only show up for certain optimizers. There are many of these, so I
247
+ # do not bother gathering them all and listing them here. The converse to these would be
248
+ # global flags that every optimizer ideally _should_ support. We break global flags into
249
+ # 2 further categories and list them all below.
250
+ # 2. global-friendly = ["lr", "weight_decay", "maximize", "capturable"]
251
+ # global-friendly flags are global flags who play nicely with all other global flags,
252
+ # i.e., are mutually exclusive in function. This means that any pair of the following
253
+ # flags can be toggled at once (e.g., maximize and weight_decay). Furthermore, any of the
254
+ # following flags theoretically can be enabled with ANY other global flag, including the
255
+ # cliquey ones (e.g, capturable and foreach).
256
+ # 3. global-cliquey = ["foreach", "fused", "differentiable"]
257
+ # global-cliquey flags are global flags that do NOT coexist with other cliquey flags,
258
+ # usually because they contradict each other in function. For example, one should not flip
259
+ # both foreach AND fused to True, because they are two differing performance optimizations
260
+ # in which you can only opt into one.
261
+ #
262
+ # The following optim_inputs_func_* sampling functions only return constructor combinations of
263
+ # optimizer-specific and global-friendly flags. This is because we are confident they would mesh
264
+ # well with additional kwargs. On the flip side of the same coin, we reserve setting the
265
+ # global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs.
266
+
267
+
268
+ def optim_inputs_func_adadelta(device):
269
+ return [
270
+ OptimizerInput(params=None, kwargs={}, desc="default"),
271
+ OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
272
+ OptimizerInput(
273
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
274
+ ),
275
+ OptimizerInput(
276
+ params=None,
277
+ kwargs={"weight_decay": 0.1, "maximize": True},
278
+ desc="maximize",
279
+ ),
280
+ OptimizerInput(
281
+ params=None, kwargs={"rho": 0.95, "weight_decay": 0.9}, desc="rho"
282
+ ),
283
+ ]
284
+
285
+
286
+ def optim_error_inputs_func_adadelta(device, dtype):
287
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
288
+ if str(device) == "cpu":
289
+ error_inputs += [
290
+ ErrorOptimizerInput(
291
+ OptimizerInput(
292
+ params=None,
293
+ kwargs=dict(lr=1e-2, rho=1.1),
294
+ desc="rho should be between 0 and 1",
295
+ ),
296
+ error_type=ValueError,
297
+ error_regex="Invalid rho value: 1.1",
298
+ ),
299
+ ]
300
+ return error_inputs
301
+
302
+
303
+ def optim_inputs_func_adagrad(device):
304
+ return [
305
+ OptimizerInput(params=None, kwargs={}, desc="default"),
306
+ OptimizerInput(
307
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
308
+ ),
309
+ OptimizerInput(
310
+ params=None,
311
+ kwargs={"weight_decay": 0.1, "maximize": True},
312
+ desc="maximize",
313
+ ),
314
+ OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"),
315
+ OptimizerInput(
316
+ params=None,
317
+ kwargs={"initial_accumulator_value": 0.1, "weight_decay": 0.1},
318
+ desc="initial_accumulator_value",
319
+ ),
320
+ OptimizerInput(
321
+ params=None,
322
+ kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1},
323
+ desc="lr_decay",
324
+ ), # TODO: Move out to testing in param_group?
325
+ ]
326
+
327
+
328
+ def optim_error_inputs_func_adagrad(device, dtype):
329
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
330
+ if str(device) == "cpu":
331
+ error_inputs += [
332
+ ErrorOptimizerInput(
333
+ OptimizerInput(
334
+ params=None,
335
+ kwargs=dict(lr=1e-2, lr_decay=-0.5),
336
+ desc="lr_decay must be bigger than 0",
337
+ ),
338
+ error_type=ValueError,
339
+ error_regex="Invalid lr_decay value: -0.5",
340
+ ),
341
+ ]
342
+ return error_inputs
343
+
344
+
345
+ # TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work
346
+ # with all implementation code paths...
347
+ def optim_inputs_func_adam(device):
348
+ cuda_supported_configs = [
349
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
350
+ OptimizerInput(
351
+ params=None,
352
+ kwargs={"weight_decay": 0.1, "amsgrad": True, "capturable": True},
353
+ desc="capturable, amsgrad",
354
+ ),
355
+ OptimizerInput(
356
+ params=None,
357
+ kwargs={"lr": torch.tensor(0.001), "amsgrad": True, "capturable": True},
358
+ desc="Tensor lr with capturable and amsgrad",
359
+ ),
360
+ ]
361
+
362
+ return [
363
+ OptimizerInput(params=None, kwargs={}, desc="default"),
364
+ OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
365
+ OptimizerInput(
366
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
367
+ ),
368
+ OptimizerInput(
369
+ params=None,
370
+ kwargs={"weight_decay": 0.1, "maximize": True},
371
+ desc="maximize",
372
+ ),
373
+ OptimizerInput(
374
+ params=None, kwargs={"weight_decay": 0.1, "amsgrad": True}, desc="amsgrad"
375
+ ),
376
+ ] + (cuda_supported_configs if "cuda" in str(device) else [])
377
+
378
+
379
+ def optim_error_inputs_func_adam(device, dtype):
380
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
381
+ if str(device) == "cpu":
382
+ error_inputs += [
383
+ ErrorOptimizerInput(
384
+ OptimizerInput(
385
+ params=None,
386
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
387
+ desc="beta1 should be between 0 and 1",
388
+ ),
389
+ error_type=ValueError,
390
+ error_regex="Invalid beta parameter at index 0: 1.0",
391
+ ),
392
+ ErrorOptimizerInput(
393
+ OptimizerInput(
394
+ params=None,
395
+ kwargs=dict(lr=1e-2, weight_decay=-1),
396
+ desc="weight_decay should > 0",
397
+ ),
398
+ error_type=ValueError,
399
+ error_regex="Invalid weight_decay value: -1",
400
+ ),
401
+ ErrorOptimizerInput(
402
+ OptimizerInput(
403
+ params=None,
404
+ kwargs=dict(lr=torch.tensor(0.001), foreach=True),
405
+ desc="lr as Tensor doesn't work with foreach & not capturable",
406
+ ),
407
+ error_type=ValueError,
408
+ error_regex="lr as a Tensor is not supported for capturable=False and foreach=True",
409
+ ),
410
+ ]
411
+ if "cuda" in str(device):
412
+ sample_tensor = torch.empty((), device=device, dtype=dtype)
413
+ error_inputs += [
414
+ ErrorOptimizerInput(
415
+ OptimizerInput(
416
+ params=[sample_tensor],
417
+ kwargs={"foreach": True, "fused": True},
418
+ desc="`fused` and `foreach` cannot be `True` together",
419
+ ),
420
+ error_type=RuntimeError,
421
+ error_regex="`fused` and `foreach` cannot be `True` together",
422
+ ),
423
+ ErrorOptimizerInput(
424
+ OptimizerInput(
425
+ params=[sample_tensor],
426
+ kwargs={"fused": True, "differentiable": True},
427
+ desc="`fused` does not support `differentiable`",
428
+ ),
429
+ error_type=RuntimeError,
430
+ error_regex="`fused` does not support `differentiable`",
431
+ ),
432
+ ]
433
+ return error_inputs
434
+
435
+
436
+ def optim_inputs_func_adamax(device):
437
+ cuda_supported_configs = [
438
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
439
+ OptimizerInput(
440
+ params=None,
441
+ kwargs={"weight_decay": 0.9, "maximize": True, "capturable": True},
442
+ desc="capturable, maximize, weight_decay",
443
+ ),
444
+ OptimizerInput(
445
+ params=None,
446
+ kwargs={"weight_decay": 0, "maximize": True, "capturable": True},
447
+ desc="capturable, maximize",
448
+ ),
449
+ OptimizerInput(
450
+ params=None,
451
+ kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True},
452
+ desc="capturable, weight_decay",
453
+ ),
454
+ ]
455
+
456
+ return [
457
+ OptimizerInput(params=None, kwargs={}, desc="default"),
458
+ OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"),
459
+ OptimizerInput(
460
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
461
+ ),
462
+ OptimizerInput(
463
+ params=None,
464
+ kwargs={"weight_decay": 0.1, "maximize": True},
465
+ desc="maximize",
466
+ ),
467
+ ] + (cuda_supported_configs if "cuda" in str(device) else [])
468
+
469
+
470
+ def optim_error_inputs_func_adamax(device, dtype):
471
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
472
+ if str(device) == "cpu":
473
+ error_inputs += [
474
+ ErrorOptimizerInput(
475
+ OptimizerInput(
476
+ params=None,
477
+ kwargs=dict(lr=1e-2, betas=(0.0, 1.0)),
478
+ desc="beta2 should be between 0 and 1",
479
+ ),
480
+ error_type=ValueError,
481
+ error_regex="Invalid beta parameter at index 1: 1.0",
482
+ ),
483
+ ]
484
+ return error_inputs
485
+
486
+
487
+ def optim_inputs_func_adamw(device):
488
+ return optim_inputs_func_adam(device)
489
+
490
+
491
+ def optim_error_inputs_func_adamw(device, dtype):
492
+ return optim_error_inputs_func_adam(device, dtype)
493
+
494
+
495
+ def optim_inputs_func_asgd(device):
496
+ cuda_supported_configs = [
497
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
498
+ OptimizerInput(
499
+ params=None,
500
+ kwargs={"maximize": True, "capturable": True},
501
+ desc="maximize, capturable",
502
+ ),
503
+ OptimizerInput(
504
+ params=None,
505
+ kwargs={"weight_decay": 0.1, "capturable": True},
506
+ desc="weight_decay, capturable",
507
+ ),
508
+ OptimizerInput(
509
+ params=None,
510
+ kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True},
511
+ desc="maximize, weight_decay, capturable",
512
+ ),
513
+ ]
514
+ return [
515
+ OptimizerInput(params=None, kwargs={}, desc="default"),
516
+ OptimizerInput(params=None, kwargs={"lr": 0.02}, desc="non-default lr"),
517
+ OptimizerInput(params=None, kwargs={"t0": 100}, desc="t0"),
518
+ OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
519
+ OptimizerInput(
520
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
521
+ ),
522
+ OptimizerInput(
523
+ params=None,
524
+ kwargs={"weight_decay": 0.1, "maximize": True},
525
+ desc="maximize, nonzero weight_decay",
526
+ ),
527
+ ] + (cuda_supported_configs if "cuda" in str(device) else [])
528
+
529
+
530
+ def optim_error_inputs_func_asgd(device, dtype):
531
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
532
+ if str(device) == "cpu":
533
+ error_inputs += [
534
+ ErrorOptimizerInput(
535
+ OptimizerInput(
536
+ params=None,
537
+ kwargs=dict(lr=1e-2, weight_decay=-0.5),
538
+ desc="weight_decay should > 0",
539
+ ),
540
+ error_type=ValueError,
541
+ error_regex="Invalid weight_decay value: -0.5",
542
+ ),
543
+ ]
544
+ return error_inputs
545
+
546
+
547
+ def optim_inputs_func_lbfgs(device):
548
+ return [
549
+ OptimizerInput(params=None, kwargs={}, desc="default"),
550
+ OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
551
+ OptimizerInput(
552
+ params=None, kwargs={"tolerance_grad": 1e-6}, desc="tolerance_grad"
553
+ ),
554
+ OptimizerInput(
555
+ params=None,
556
+ kwargs={"line_search_fn": "strong_wolfe"},
557
+ desc="strong_wolfe",
558
+ ),
559
+ ]
560
+
561
+
562
+ def optim_error_inputs_func_lbfgs(device, dtype):
563
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
564
+ return error_inputs
565
+
566
+
567
+ # Weird story bro, NAdam and RAdam do not have maximize.
568
+ def optim_inputs_func_nadam(device):
569
+ cuda_supported_configs = [
570
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
571
+ OptimizerInput(
572
+ params=None,
573
+ kwargs={"weight_decay": 0.9, "momentum_decay": 6e-3, "capturable": True},
574
+ desc="weight_decay, capturable",
575
+ ),
576
+ OptimizerInput(
577
+ params=None,
578
+ kwargs={
579
+ "weight_decay": 0.9,
580
+ "momentum_decay": 6e-3,
581
+ "decoupled_weight_decay": True,
582
+ "capturable": True,
583
+ },
584
+ desc="decoupled_weight_decay, capturable",
585
+ ),
586
+ ]
587
+ return [
588
+ OptimizerInput(params=None, kwargs={}, desc="default"),
589
+ OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"),
590
+ OptimizerInput(
591
+ params=None,
592
+ kwargs={"momentum_decay": 6e-3},
593
+ desc="non-zero momentum_decay",
594
+ ),
595
+ OptimizerInput(
596
+ params=None,
597
+ kwargs={"weight_decay": 0.1, "momentum_decay": 6e-3},
598
+ desc="weight_decay",
599
+ ),
600
+ OptimizerInput(
601
+ params=None,
602
+ kwargs={
603
+ "weight_decay": 0.1,
604
+ "momentum_decay": 6e-3,
605
+ "decoupled_weight_decay": True,
606
+ },
607
+ desc="decoupled_weight_decay",
608
+ ),
609
+ ] + (cuda_supported_configs if "cuda" in str(device) else [])
610
+
611
+
612
+ def optim_error_inputs_func_nadam(device, dtype):
613
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
614
+ if str(device) == "cpu":
615
+ error_inputs += [
616
+ ErrorOptimizerInput(
617
+ OptimizerInput(
618
+ params=None,
619
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
620
+ desc="beta1 should be between 0 and 1",
621
+ ),
622
+ error_type=ValueError,
623
+ error_regex="Invalid beta parameter at index 0: 1.0",
624
+ ),
625
+ ErrorOptimizerInput(
626
+ OptimizerInput(
627
+ params=None,
628
+ kwargs=dict(lr=1e-2, momentum_decay=-0.2),
629
+ desc="momentum_decay should > 0",
630
+ ),
631
+ error_type=ValueError,
632
+ error_regex="Invalid momentum_decay value: -0.2",
633
+ ),
634
+ ]
635
+ return error_inputs
636
+
637
+
638
+ # Weird story bro, NAdam and RAdam do not have maximize.
639
+ def optim_inputs_func_radam(device=None):
640
+ cuda_supported_configs = [
641
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
642
+ OptimizerInput(
643
+ params=None,
644
+ kwargs={
645
+ "capturable": True,
646
+ "weight_decay": 0.1,
647
+ },
648
+ desc="capturable, weight_decay",
649
+ ),
650
+ OptimizerInput(
651
+ params=None,
652
+ kwargs={
653
+ "capturable": True,
654
+ "weight_decay": 0.1,
655
+ "decoupled_weight_decay": True,
656
+ },
657
+ desc="capturable, weight_decay, decoupled_weight_decay",
658
+ ),
659
+ ]
660
+ return [
661
+ OptimizerInput(params=None, kwargs={}, desc="default"),
662
+ OptimizerInput(params=None, kwargs={"lr": 2e-3}, desc="non-default lr"),
663
+ OptimizerInput(params=None, kwargs={"eps": 1e-6}, desc="non-default eps"),
664
+ OptimizerInput(
665
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
666
+ ),
667
+ OptimizerInput(
668
+ params=None,
669
+ kwargs={"weight_decay": 0.1, "decoupled_weight_decay": True},
670
+ desc="decoupled_weight_decay",
671
+ ),
672
+ ] + (cuda_supported_configs if "cuda" in str(device) else [])
673
+
674
+
675
+ def optim_error_inputs_func_radam(device, dtype):
676
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
677
+ if str(device) == "cpu":
678
+ error_inputs += [
679
+ ErrorOptimizerInput(
680
+ OptimizerInput(
681
+ params=None,
682
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
683
+ desc="beta1 should be between 0 and 1",
684
+ ),
685
+ error_type=ValueError,
686
+ error_regex="Invalid beta parameter at index 0: 1.0",
687
+ ),
688
+ ErrorOptimizerInput(
689
+ OptimizerInput(
690
+ params=None,
691
+ kwargs=dict(lr=1e-2, weight_decay=-1),
692
+ desc="weight_decay should > 0",
693
+ ),
694
+ error_type=ValueError,
695
+ error_regex="Invalid weight_decay value: -1",
696
+ ),
697
+ ]
698
+ return error_inputs
699
+
700
+
701
+ def optim_inputs_func_rmsprop(device):
702
+ return [
703
+ OptimizerInput(params=None, kwargs={}, desc="default"),
704
+ OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"),
705
+ OptimizerInput(
706
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
707
+ ),
708
+ OptimizerInput(
709
+ params=None,
710
+ kwargs={"weight_decay": 0.1, "centered": True},
711
+ desc="centered",
712
+ ),
713
+ OptimizerInput(
714
+ params=None,
715
+ kwargs={"weight_decay": 0.1, "centered": True, "momentum": 0.1},
716
+ desc="momentum",
717
+ ),
718
+ OptimizerInput(
719
+ params=None,
720
+ kwargs={
721
+ "weight_decay": 0.1,
722
+ "centered": True,
723
+ "momentum": 0.1,
724
+ "maximize": True,
725
+ },
726
+ desc="maximize",
727
+ ),
728
+ ]
729
+
730
+
731
+ def optim_error_inputs_func_rmsprop(device, dtype):
732
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
733
+ if str(device) == "cpu":
734
+ error_inputs += [
735
+ ErrorOptimizerInput(
736
+ OptimizerInput(
737
+ params=None,
738
+ kwargs=dict(lr=1e-2, momentum=-1.0),
739
+ desc="momentum should be between 0 and 1",
740
+ ),
741
+ error_type=ValueError,
742
+ error_regex="Invalid momentum value: -1.0",
743
+ ),
744
+ ]
745
+ return error_inputs
746
+
747
+
748
+ def optim_inputs_func_rprop(device):
749
+ return [
750
+ OptimizerInput(params=None, kwargs={}, desc="default"),
751
+ OptimizerInput(params=None, kwargs={"lr": 2e-4}, desc="non-default lr"),
752
+ OptimizerInput(
753
+ params=None, kwargs={"etas": (0.5, 1.5)}, desc="non-default etas"
754
+ ),
755
+ OptimizerInput(
756
+ params=None,
757
+ kwargs={"step_sizes": (2e-6, 100)},
758
+ desc="non-default step_sizes",
759
+ ),
760
+ OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
761
+ ]
762
+
763
+
764
+ def optim_error_inputs_func_rprop(device, dtype):
765
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
766
+ if str(device) == "cpu":
767
+ error_inputs += [
768
+ ErrorOptimizerInput(
769
+ OptimizerInput(
770
+ params=None,
771
+ kwargs=dict(lr=1e-2, etas=(1.0, 0.5)),
772
+ desc="0 < eta1 < 1 < eta2",
773
+ ),
774
+ error_type=ValueError,
775
+ error_regex="Invalid eta values: 1.0, 0.5",
776
+ ),
777
+ ]
778
+ return error_inputs
779
+
780
+
781
+ def optim_inputs_func_sgd(device):
782
+ return [
783
+ OptimizerInput(params=None, kwargs={}, desc="default"),
784
+ OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"),
785
+ OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"),
786
+ OptimizerInput(
787
+ params=None,
788
+ kwargs={"momentum": 0.9, "dampening": 0.5},
789
+ desc="dampening",
790
+ ),
791
+ OptimizerInput(
792
+ params=None,
793
+ kwargs={"momentum": 0.9, "weight_decay": 0.1},
794
+ desc="non-zero weight_decay",
795
+ ),
796
+ OptimizerInput(
797
+ params=None,
798
+ kwargs={"momentum": 0.9, "nesterov": True, "weight_decay": 0.1},
799
+ desc="nesterov",
800
+ ),
801
+ OptimizerInput(
802
+ params=None,
803
+ kwargs={"weight_decay": 0.1, "maximize": True},
804
+ desc="maximize",
805
+ ),
806
+ ]
807
+
808
+
809
+ def optim_error_inputs_func_sgd(device, dtype):
810
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
811
+ if str(device) == "cpu":
812
+ error_inputs += [
813
+ ErrorOptimizerInput(
814
+ OptimizerInput(
815
+ params=None,
816
+ kwargs=dict(lr=1e-2, momentum=-0.5),
817
+ desc="momentum should be between 0 and 1",
818
+ ),
819
+ error_type=ValueError,
820
+ error_regex="Invalid momentum value: -0.5",
821
+ ),
822
+ ]
823
+ return error_inputs
824
+
825
+
826
+ def optim_inputs_func_sparseadam(device):
827
+ return [
828
+ OptimizerInput(params=None, kwargs={}, desc="default"),
829
+ OptimizerInput(
830
+ params=None, kwargs={"lr": 0.01}, desc="non-default lr"
831
+ ), # TODO: Move out to testing in param_group?
832
+ OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
833
+ ]
834
+
835
+
836
+ def optim_error_inputs_func_sparseadam(device, dtype):
837
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
838
+
839
+ if str(device) == "cpu":
840
+ # SparseAdam raises a warning and not an error for the first entry. We
841
+ # update it here:
842
+ error_inputs[0].error_type = FutureWarning
843
+ error_inputs[
844
+ 0
845
+ ].error_regex = "Passing in a raw Tensor as ``params`` to SparseAdam"
846
+
847
+ error_inputs += [
848
+ ErrorOptimizerInput(
849
+ OptimizerInput(
850
+ params=None,
851
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
852
+ desc="beta1 should be between 0 and 1",
853
+ ),
854
+ error_type=ValueError,
855
+ error_regex="Invalid beta parameter at index 0: 1.0",
856
+ ),
857
+ ErrorOptimizerInput(
858
+ OptimizerInput(
859
+ params=[
860
+ torch.zeros(
861
+ 3, layout=torch.sparse_coo, device=device, dtype=dtype
862
+ )
863
+ ],
864
+ kwargs={},
865
+ desc="dense params required",
866
+ ),
867
+ error_type=ValueError,
868
+ error_regex="SparseAdam requires dense parameter tensors",
869
+ ),
870
+ ErrorOptimizerInput(
871
+ OptimizerInput(
872
+ params=[
873
+ {
874
+ "params": [
875
+ torch.zeros(
876
+ 3,
877
+ layout=torch.sparse_coo,
878
+ device=device,
879
+ dtype=dtype,
880
+ )
881
+ ]
882
+ }
883
+ ],
884
+ kwargs={},
885
+ desc="dense params required in param_groups",
886
+ ),
887
+ error_type=ValueError,
888
+ error_regex="SparseAdam requires dense parameter tensors",
889
+ ),
890
+ ErrorOptimizerInput(
891
+ OptimizerInput(
892
+ params=[torch.rand(2, 3, device=device, dtype=torch.complex64)],
893
+ kwargs=dict(),
894
+ desc="complex not supported",
895
+ ),
896
+ error_type=ValueError,
897
+ error_regex="SparseAdam does not support complex parameters",
898
+ ),
899
+ ]
900
+ return error_inputs
901
+
902
+
903
+ def _get_device_type(device: Union[str, torch.device]) -> str:
904
+ # Returns the device type as a string, e.g., "cpu" or "cuda"
905
+ if isinstance(device, torch.device):
906
+ device = str(device.type)
907
+ assert isinstance(device, str)
908
+ return device.split(":")[0]
909
+
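A quick illustrative check of the helper above:

import torch
assert _get_device_type("cuda:1") == "cuda"
assert _get_device_type(torch.device("cpu")) == "cpu"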
910
+
911
+ def _get_optim_inputs_including_global_cliquey_kwargs(
912
+ device, dtype, optim_info, skip=()
913
+ ) -> List[OptimizerInput]:
914
+ """
915
+ Return a list of all configs for a given optimizer as a list of OptimizerInputs,
916
+ including configs that have supported global cliquey kwargs (foreach, fused,
917
+ differentiable) based on optim_info.supported_impls.
918
+
919
+ The configs (optim_inputs) returned by optim_info.optim_inputs_func(...)
920
+ intentionally do NOT include global cliquey kwargs to give flexibility to tests.
921
+ For example, testing correctness between toggling foreach on and off is now
922
+ trivial. That said, we sometimes want to test for all possible configs on an
923
+ optimizer including all supported flags, so this helper returns all optim inputs.
924
+ """
925
+ assert all(
926
+ x in ["foreach", "fused", "differentiable"] for x in skip
927
+ ), "skip must be a subset of ['foreach', 'fused', 'differentiable']"
928
+
929
+ optim_inputs = optim_info.optim_inputs_func(device)
930
+
931
+ supported_impls = tuple(
932
+ x
933
+ for x in optim_info.supported_impls
934
+ if x not in skip
935
+ and (
936
+ _get_device_type(device) in _get_fused_kernels_supported_devices()
937
+ or x != "fused"
938
+ )
939
+ and (
940
+ _get_device_type(device) in _get_foreach_kernels_supported_devices()
941
+ or x != "foreach"
942
+ )
943
+ )
944
+
945
+ all_optim_inputs = []
946
+ for optim_input in optim_inputs:
947
+ # Add the base config where all the flags are False
948
+ base_kwargs = deepcopy(optim_input.kwargs)
949
+ if len(supported_impls) != 0:
950
+ for flag in supported_impls:
951
+ base_kwargs[flag] = False
952
+ all_optim_inputs.append(
953
+ OptimizerInput(params=None, kwargs=base_kwargs, desc=optim_input.desc)
954
+ )
955
+ else:
956
+ all_optim_inputs.append(optim_input)
957
+ # Add a config for when each of the global cliquey kwargs is True
958
+ # Note that in [optimizer kwarg categories], these kwargs are mutually
959
+ # exclusive, so we do not need to product them together.
960
+ for flag in supported_impls:
961
+ new_kwargs = deepcopy(base_kwargs)
962
+ new_kwargs[flag] = True
963
+ all_optim_inputs.append(
964
+ OptimizerInput(
965
+ params=None, kwargs=new_kwargs, desc=f"{optim_input.desc} & {flag}"
966
+ )
967
+ )
968
+ return all_optim_inputs
969
+
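For intuition, a hedged example of the expansion performed above (the exact configs depend on the device's foreach/fused kernel support):

from torch.optim import Adam
adam_info = next(info for info in optim_db if info.optim_cls is Adam)
inputs = _get_optim_inputs_including_global_cliquey_kwargs("cpu", torch.float32, adam_info)
# Each base desc such as "default" now also appears as "default & foreach" and
# "default & differentiable"; "default & fused" is added only where fused kernels are supported.
print([i.desc for i in inputs])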
970
+
971
+ # Database of OptimizerInfo entries in alphabetical order.
972
+ optim_db: List[OptimizerInfo] = [
973
+ OptimizerInfo(
974
+ Adadelta,
975
+ optim_inputs_func=optim_inputs_func_adadelta,
976
+ optim_error_inputs_func=optim_error_inputs_func_adadelta,
977
+ supported_impls=("foreach", "differentiable"),
978
+ skips=(
979
+ DecorateInfo(
980
+ skipIfTorchDynamo(
981
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
982
+ ),
983
+ "TestOptimRenewed",
984
+ "test_forloop_goes_right_direction",
985
+ ),
986
+ DecorateInfo(
987
+ skipIfTorchDynamo(
988
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
989
+ ),
990
+ "TestOptimRenewed",
991
+ "test_forloop_goes_right_direction_multigpu",
992
+ ),
993
+ DecorateInfo(
994
+ skipIfTorchDynamo(
995
+ "See https://github.com/pytorch/pytorch/issues/115679"
996
+ ),
997
+ "TestOptimRenewed",
998
+ "test_foreach_matches_forloop",
999
+ ),
1000
+ DecorateInfo(
1001
+ skipIfTorchDynamo(
1002
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1003
+ ),
1004
+ "TestOptimRenewed",
1005
+ "test_peak_memory_foreach",
1006
+ ),
1007
+ DecorateInfo(
1008
+ skipIfTorchDynamo(
1009
+ "See https://github.com/pytorch/pytorch/issues/115679 and #116028"
1010
+ ),
1011
+ "TestOptimRenewed",
1012
+ "test_set_default_dtype_works_with_foreach",
1013
+ ),
1014
+ DecorateInfo(
1015
+ skipIfTorchDynamo(
1016
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1017
+ ),
1018
+ "TestOptimRenewed",
1019
+ "test_complex_2d",
1020
+ ),
1021
+ DecorateInfo(
1022
+ skipIfTorchDynamo(
1023
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1024
+ ),
1025
+ "TestOptimRenewed",
1026
+ "test_state_dict_deterministic",
1027
+ ),
1028
+ DecorateInfo(
1029
+ skipIfTorchDynamo(
1030
+ "See https://github.com/pytorch/pytorch/issues/115679"
1031
+ ),
1032
+ "TestOptimRenewed",
1033
+ "test_state_dict_with_cuda_params",
1034
+ ),
1035
+ DecorateInfo(
1036
+ skipIfTorchDynamo(
1037
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1038
+ ),
1039
+ "TestOptimRenewed",
1040
+ "test_deepcopy_copies_all_public_attrs",
1041
+ ),
1042
+ # Note on tolerances:
1043
+ # test_correctness_Adadelta_cuda_float32
1044
+ # Mismatched elements: 10 / 100 (10.0%)
1045
+ # Greatest absolute difference: 4.838220775127411e-05 at index (7, 4) (up to 1e-05 allowed)
1046
+ # Greatest relative difference: 0.007270356640219688 at index (7, 2) (up to 1e-05 allowed)
1047
+ # This is due to floating point ordering error + usage of sqrt
1048
+ DecorateInfo(
1049
+ toleranceOverride(
1050
+ {
1051
+ torch.float32: tol(
1052
+ rtol=5.5e-4,
1053
+ atol=5e-5,
1054
+ )
1055
+ }
1056
+ ),
1057
+ "CompiledOptimizerParityTests",
1058
+ "test_correctness",
1059
+ ),
1060
+ ),
1061
+ ),
1062
+ OptimizerInfo(
1063
+ Adagrad,
1064
+ optim_inputs_func=optim_inputs_func_adagrad,
1065
+ optim_error_inputs_func=optim_error_inputs_func_adagrad,
1066
+ supported_impls=("foreach", "differentiable"),
1067
+ supports_sparse_on=("cpu"),
1068
+ skips=(
1069
+ DecorateInfo(
1070
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1071
+ "TestOptimRenewed",
1072
+ "test_forloop_goes_right_direction",
1073
+ active_if=lambda kwargs: not kwargs["contiguous"],
1074
+ ),
1075
+ DecorateInfo(
1076
+ skipIfTorchDynamo(
1077
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1078
+ ),
1079
+ "TestOptimRenewed",
1080
+ "test_forloop_goes_right_direction",
1081
+ ),
1082
+ DecorateInfo(
1083
+ skipIfTorchDynamo(
1084
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1085
+ ),
1086
+ "TestOptimRenewed",
1087
+ "test_forloop_goes_right_direction_multigpu",
1088
+ ),
1089
+ DecorateInfo(
1090
+ skipIfTorchDynamo(
1091
+ "See https://github.com/pytorch/pytorch/issues/115607"
1092
+ ),
1093
+ "TestOptimRenewed",
1094
+ "test_foreach_matches_forloop",
1095
+ ),
1096
+ DecorateInfo(
1097
+ skipIfTorchDynamo(
1098
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1099
+ ),
1100
+ "TestOptimRenewed",
1101
+ "test_peak_memory_foreach",
1102
+ ),
1103
+ DecorateInfo(
1104
+ skipIfTorchDynamo(
1105
+ "See https://github.com/pytorch/pytorch/issues/115607 and #116028"
1106
+ ),
1107
+ "TestOptimRenewed",
1108
+ "test_set_default_dtype_works_with_foreach",
1109
+ ),
1110
+ DecorateInfo(
1111
+ skipIfTorchDynamo(
1112
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1113
+ ),
1114
+ "TestOptimRenewed",
1115
+ "test_complex_2d",
1116
+ ),
1117
+ DecorateInfo(
1118
+ skipIfTorchDynamo(
1119
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1120
+ ),
1121
+ "TestOptimRenewed",
1122
+ "test_state_dict_deterministic",
1123
+ ),
1124
+ DecorateInfo(
1125
+ skipIfTorchDynamo(
1126
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1127
+ ),
1128
+ "TestOptimRenewed",
1129
+ "test_deepcopy_copies_all_public_attrs",
1130
+ ),
1131
+ ),
1132
+ ),
1133
+ OptimizerInfo(
1134
+ Adam,
1135
+ optim_inputs_func=optim_inputs_func_adam,
1136
+ optim_error_inputs_func=optim_error_inputs_func_adam,
1137
+ supported_impls=("foreach", "differentiable", "fused"),
1138
+ skips=(
1139
+ DecorateInfo(
1140
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1141
+ "TestOptimRenewed",
1142
+ "test_forloop_goes_right_direction",
1143
+ active_if=lambda kwargs: not kwargs["contiguous"],
1144
+ ),
1145
+ DecorateInfo(
1146
+ skipIfTorchDynamo(
1147
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1148
+ ),
1149
+ "TestOptimRenewed",
1150
+ "test_forloop_goes_right_direction",
1151
+ ),
1152
+ DecorateInfo(
1153
+ skipIfTorchDynamo(
1154
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1155
+ ),
1156
+ "TestOptimRenewed",
1157
+ "test_forloop_goes_right_direction_multigpu",
1158
+ ),
1159
+ DecorateInfo(
1160
+ skipIfTorchDynamo(
1161
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1162
+ ),
1163
+ "TestOptimRenewed",
1164
+ "test_set_default_dtype_works_with_foreach",
1165
+ ),
1166
+ DecorateInfo(
1167
+ skipIfTorchDynamo(
1168
+ "Fixing #115607 should fix this test. fused is correct, but forloop is not."
1169
+ ),
1170
+ "TestOptimRenewed",
1171
+ "test_fused_matches_forloop",
1172
+ ),
1173
+ DecorateInfo(
1174
+ skipIfTorchDynamo(
1175
+ "See https://github.com/pytorch/pytorch/issues/116046"
1176
+ ),
1177
+ "TestOptimRenewed",
1178
+ "test_peak_memory_foreach",
1179
+ ),
1180
+ DecorateInfo(
1181
+ skipIfTorchDynamo(
1182
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1183
+ ),
1184
+ "TestOptimRenewed",
1185
+ "test_complex_2d",
1186
+ ),
1187
+ DecorateInfo(
1188
+ skipIfTorchDynamo(
1189
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1190
+ ),
1191
+ "TestOptimRenewed",
1192
+ "test_state_dict_deterministic",
1193
+ ),
1194
+ DecorateInfo(
1195
+ skipIfTorchDynamo(
1196
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1197
+ ),
1198
+ "TestOptimRenewed",
1199
+ "test_deepcopy_copies_all_public_attrs",
1200
+ ),
1201
+ ),
1202
+ ),
1203
+ OptimizerInfo(
1204
+ Adamax,
1205
+ optim_inputs_func=optim_inputs_func_adamax,
1206
+ optim_error_inputs_func=optim_error_inputs_func_adamax,
1207
+ supported_impls=("foreach", "differentiable"),
1208
+ skips=(
1209
+ DecorateInfo(
1210
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1211
+ "TestOptimRenewed",
1212
+ "test_forloop_goes_right_direction",
1213
+ active_if=lambda kwargs: not kwargs["contiguous"],
1214
+ ),
1215
+ DecorateInfo(
1216
+ skipIfTorchDynamo(
1217
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1218
+ ),
1219
+ "TestOptimRenewed",
1220
+ "test_forloop_goes_right_direction",
1221
+ ),
1222
+ DecorateInfo(
1223
+ skipIfTorchDynamo(
1224
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1225
+ ),
1226
+ "TestOptimRenewed",
1227
+ "test_forloop_goes_right_direction_multigpu",
1228
+ ),
1229
+ DecorateInfo(
1230
+ skipIfTorchDynamo("Mismatched _foreach_addcdiv_ types, see #118159"),
1231
+ "TestOptimRenewed",
1232
+ "test_complex",
1233
+ ),
1234
+ DecorateInfo(
1235
+ skipIfTorchDynamo(
1236
+ "See https://github.com/pytorch/pytorch/issues/115607"
1237
+ ),
1238
+ "TestOptimRenewed",
1239
+ "test_foreach_matches_forloop",
1240
+ ),
1241
+ DecorateInfo(
1242
+ skipIfTorchDynamo(
1243
+ "See https://github.com/pytorch/pytorch/issues/115607 and #116028"
1244
+ ),
1245
+ "TestOptimRenewed",
1246
+ "test_set_default_dtype_works_with_foreach",
1247
+ ),
1248
+ DecorateInfo(
1249
+ skipIfTorchDynamo(
1250
+ "See https://github.com/pytorch/pytorch/issues/116046"
1251
+ ),
1252
+ "TestOptimRenewed",
1253
+ "test_peak_memory_foreach",
1254
+ ),
1255
+ DecorateInfo(
1256
+ skipIfTorchDynamo(
1257
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1258
+ ),
1259
+ "TestOptimRenewed",
1260
+ "test_complex_2d",
1261
+ ),
1262
+ DecorateInfo(
1263
+ unittest.skip("Uses too much memory, even for H100, surprisingly."),
1264
+ "TestOptimRenewed",
1265
+ "test_foreach_large_tensor",
1266
+ ),
1267
+ DecorateInfo(
1268
+ skipIfTorchDynamo(
1269
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1270
+ ),
1271
+ "TestOptimRenewed",
1272
+ "test_state_dict_deterministic",
1273
+ ),
1274
+ DecorateInfo(
1275
+ skipIfTorchDynamo(
1276
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1277
+ ),
1278
+ "TestOptimRenewed",
1279
+ "test_deepcopy_copies_all_public_attrs",
1280
+ ),
1281
+ DecorateInfo(
1282
+ skipIfTorchDynamo("cpu fails due to #115607"),
1283
+ "TestOptimRenewed",
1284
+ "test_can_load_older_state_dict",
1285
+ device_type="cpu",
1286
+ ),
1287
+ DecorateInfo(
1288
+ skipIfTorchDynamo(
1289
+ "capturable path no longer called after hitting cache limit, see #121178"
1290
+ ),
1291
+ "TestOptimRenewed",
1292
+ "test_save_load_equality_with_weights_only",
1293
+ ),
1294
+ DecorateInfo(
1295
+ skipIfTorchDynamo(
1296
+ "capturable path no longer called after hitting cache limit, see #121178"
1297
+ ),
1298
+ "TestOptimRenewed",
1299
+ "test_load_nontensor_step",
1300
+ ),
1301
+ DecorateInfo(
1302
+ skipIfTorchDynamo(
1303
+ "capturable path no longer called after hitting cache limit, see #121178"
1304
+ ),
1305
+ "TestOptimRenewed",
1306
+ "test_param_groups_lr",
1307
+ ),
1308
+ ),
1309
+ ),
1310
+ OptimizerInfo(
1311
+ AdamW,
1312
+ optim_inputs_func=optim_inputs_func_adamw,
1313
+ optim_error_inputs_func=optim_error_inputs_func_adamw,
1314
+ supported_impls=("foreach", "differentiable", "fused"),
1315
+ skips=(
1316
+ DecorateInfo(
1317
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1318
+ "TestOptimRenewed",
1319
+ "test_forloop_goes_right_direction",
1320
+ active_if=lambda kwargs: not kwargs["contiguous"],
1321
+ ),
1322
+ DecorateInfo(
1323
+ skipIfTorchDynamo(
1324
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1325
+ ),
1326
+ "TestOptimRenewed",
1327
+ "test_forloop_goes_right_direction",
1328
+ ),
1329
+ DecorateInfo(
1330
+ skipIfTorchDynamo(
1331
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1332
+ ),
1333
+ "TestOptimRenewed",
1334
+ "test_forloop_goes_right_direction_multigpu",
1335
+ ),
1336
+ DecorateInfo(
1337
+ skipIfTorchDynamo(
1338
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1339
+ ),
1340
+ "TestOptimRenewed",
1341
+ "test_set_default_dtype_works_with_foreach",
1342
+ ),
1343
+ DecorateInfo(
1344
+ skipIfTorchDynamo(
1345
+ "Fixing #115607 should fix this test. fused is correct, but forloop is not."
1346
+ ),
1347
+ "TestOptimRenewed",
1348
+ "test_fused_matches_forloop",
1349
+ ),
1350
+ DecorateInfo(
1351
+ skipIfTorchDynamo(
1352
+ "See https://github.com/pytorch/pytorch/issues/116046"
1353
+ ),
1354
+ "TestOptimRenewed",
1355
+ "test_peak_memory_foreach",
1356
+ ),
1357
+ DecorateInfo(
1358
+ skipIfTorchDynamo(
1359
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1360
+ ),
1361
+ "TestOptimRenewed",
1362
+ "test_complex_2d",
1363
+ ),
1364
+ DecorateInfo(
1365
+ skipIfTorchDynamo(
1366
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1367
+ ),
1368
+ "TestOptimRenewed",
1369
+ "test_state_dict_deterministic",
1370
+ ),
1371
+ DecorateInfo(
1372
+ skipIfTorchDynamo(
1373
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1374
+ ),
1375
+ "TestOptimRenewed",
1376
+ "test_deepcopy_copies_all_public_attrs",
1377
+ ),
1378
+ ),
1379
+ ),
1380
+ OptimizerInfo(
1381
+ ASGD,
1382
+ optim_inputs_func=optim_inputs_func_asgd,
1383
+ optim_error_inputs_func=optim_error_inputs_func_asgd,
1384
+ supported_impls=("foreach", "differentiable"),
1385
+ skips=(
1386
+ DecorateInfo(
1387
+ skipIfTorchDynamo(
1388
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1389
+ ),
1390
+ "TestOptimRenewed",
1391
+ "test_forloop_goes_right_direction",
1392
+ ),
1393
+ DecorateInfo(
1394
+ skipIfTorchDynamo(
1395
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1396
+ ),
1397
+ "TestOptimRenewed",
1398
+ "test_forloop_goes_right_direction_multigpu",
1399
+ ),
1400
+ DecorateInfo(
1401
+ skipIfTorchDynamo(
1402
+ "See discrepancy in https://github.com/pytorch/pytorch/issues/115607"
1403
+ ),
1404
+ "TestOptimRenewed",
1405
+ "test_foreach_matches_forloop",
1406
+ ),
1407
+ DecorateInfo(
1408
+ skipIfTorchDynamo(
1409
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1410
+ ),
1411
+ "TestOptimRenewed",
1412
+ "test_peak_memory_foreach",
1413
+ ),
1414
+ DecorateInfo(
1415
+ skipIfTorchDynamo(
1416
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1417
+ ),
1418
+ "TestOptimRenewed",
1419
+ "test_set_default_dtype_works_with_foreach",
1420
+ ),
1421
+ DecorateInfo(
1422
+ skipIfTorchDynamo(
1423
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1424
+ ),
1425
+ "TestOptimRenewed",
1426
+ "test_complex_2d",
1427
+ ),
1428
+ DecorateInfo(
1429
+ toleranceOverride(
1430
+ {
1431
+ torch.float32: tol(atol=1.5e-5, rtol=1e-5),
1432
+ }
1433
+ ),
1434
+ "TestOptimRenewed",
1435
+ "test_step_is_noop_for_zero_grads",
1436
+ ),
1437
+ DecorateInfo(
1438
+ skipIfTorchDynamo(
1439
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1440
+ ),
1441
+ "TestOptimRenewed",
1442
+ "test_state_dict_deterministic",
1443
+ ),
1444
+ DecorateInfo(
1445
+ skipIfTorchDynamo(
1446
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1447
+ ),
1448
+ "TestOptimRenewed",
1449
+ "test_deepcopy_copies_all_public_attrs",
1450
+ ),
1451
+ ),
1452
+ ),
1453
+ OptimizerInfo(
1454
+ LBFGS,
1455
+ optim_inputs_func=optim_inputs_func_lbfgs,
1456
+ optim_error_inputs_func=optim_error_inputs_func_lbfgs,
1457
+ supported_impls=(),
1458
+ step_requires_closure=True,
1459
+ supports_param_groups=False,
1460
+ supports_multiple_devices=False,
1461
+ skips=(
1462
+ # Fails on MacOS 13.2.1 in CI https://github.com/pytorch/pytorch/issues/117094
1463
+ DecorateInfo(
1464
+ skipIfMps, "TestOptimRenewed", "test_can_load_older_state_dict"
1465
+ ),
1466
+ DecorateInfo(
1467
+ skipIfTorchDynamo(
1468
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1469
+ ),
1470
+ "TestOptimRenewed",
1471
+ "test_deepcopy_copies_all_public_attrs",
1472
+ ),
1473
+ DecorateInfo(
1474
+ unittest.skip("Does not support param groups"),
1475
+ "TestOptimRenewed",
1476
+ "test_param_groups_lr",
1477
+ ),
1478
+ DecorateInfo(
1479
+ unittest.skip("Does not support param groups"),
1480
+ "TestOptimRenewed",
1481
+ "test_param_groups_weight_decay",
1482
+ ),
1483
+ DecorateInfo(
1484
+ skipIfTorchDynamo(
1485
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1486
+ ),
1487
+ "TestOptimRenewed",
1488
+ "test_forloop_goes_right_direction",
1489
+ ),
1490
+ DecorateInfo(
1491
+ unittest.skip("LBFGS doesn't support multidevice"),
1492
+ "TestOptimRenewed",
1493
+ "test_forloop_goes_right_direction_multigpu",
1494
+ ),
1495
+ ),
1496
+ ),
1497
+ OptimizerInfo(
1498
+ NAdam,
1499
+ optim_inputs_func=optim_inputs_func_nadam,
1500
+ optim_error_inputs_func=optim_error_inputs_func_nadam,
1501
+ supported_impls=("foreach", "differentiable"),
1502
+ skips=(
1503
+ DecorateInfo(
1504
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1505
+ "TestOptimRenewed",
1506
+ "test_forloop_goes_right_direction",
1507
+ active_if=lambda kwargs: not kwargs["contiguous"],
1508
+ ),
1509
+ DecorateInfo(
1510
+ skipIfTorchDynamo(
1511
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1512
+ ),
1513
+ "TestOptimRenewed",
1514
+ "test_forloop_goes_right_direction",
1515
+ ),
1516
+ DecorateInfo(
1517
+ skipIfTorchDynamo(
1518
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1519
+ ),
1520
+ "TestOptimRenewed",
1521
+ "test_forloop_goes_right_direction_multigpu",
1522
+ ),
1523
+ DecorateInfo(
1524
+ skipIfTorchDynamo(
1525
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1526
+ ),
1527
+ "TestOptimRenewed",
1528
+ "test_set_default_dtype_works_with_foreach",
1529
+ ),
1530
+ DecorateInfo(
1531
+ skipIfTorchDynamo(
1532
+ "See https://github.com/pytorch/pytorch/issues/116046"
1533
+ ),
1534
+ "TestOptimRenewed",
1535
+ "test_peak_memory_foreach",
1536
+ ),
1537
+ DecorateInfo(
1538
+ skipIfTorchDynamo(
1539
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1540
+ ),
1541
+ "TestOptimRenewed",
1542
+ "test_complex_2d",
1543
+ ),
1544
+ DecorateInfo(
1545
+ skipIfTorchDynamo(
1546
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1547
+ ),
1548
+ "TestOptimRenewed",
1549
+ "test_state_dict_deterministic",
1550
+ ),
1551
+ DecorateInfo(
1552
+ skipIfTorchDynamo(
1553
+ "See https://github.com/pytorch/pytorch/issues/116499"
1554
+ ),
1555
+ "TestOptimRenewed",
1556
+ "test_can_load_older_state_dict",
1557
+ device_type="cuda",
1558
+ ),
1559
+ DecorateInfo(
1560
+ skipIfTorchDynamo(
1561
+ "Errors, https://github.com/pytorch/pytorch/issues/117150"
1562
+ ),
1563
+ "TestOptimRenewed",
1564
+ "test_load_nontensor_step",
1565
+ ),
1566
+ DecorateInfo(
1567
+ skipIfTorchDynamo(
1568
+ "Errors, see https://github.com/pytorch/pytorch/issues/117150"
1569
+ ),
1570
+ "TestOptimRenewed",
1571
+ "test_state_dict_with_cuda_params",
1572
+ ),
1573
+ DecorateInfo(
1574
+ skipIfTorchDynamo(
1575
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1576
+ ),
1577
+ "TestOptimRenewed",
1578
+ "test_deepcopy_copies_all_public_attrs",
1579
+ ),
1580
+ ),
1581
+ ),
1582
+ OptimizerInfo(
1583
+ RAdam,
1584
+ optim_inputs_func=optim_inputs_func_radam,
1585
+ optim_error_inputs_func=optim_error_inputs_func_radam,
1586
+ supported_impls=("foreach", "differentiable"),
1587
+ skips=(
1588
+ DecorateInfo(
1589
+ skipIfTorchDynamo(
1590
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1591
+ ),
1592
+ "TestOptimRenewed",
1593
+ "test_forloop_goes_right_direction",
1594
+ ),
1595
+ DecorateInfo(
1596
+ skipIfTorchDynamo(
1597
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1598
+ ),
1599
+ "TestOptimRenewed",
1600
+ "test_forloop_goes_right_direction_multigpu",
1601
+ ),
1602
+ DecorateInfo(
1603
+ skipIfTorchDynamo(
1604
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1605
+ ),
1606
+ "TestOptimRenewed",
1607
+ "test_peak_memory_foreach",
1608
+ ),
1609
+ DecorateInfo(
1610
+ skipIfTorchDynamo(
1611
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1612
+ ),
1613
+ "TestOptimRenewed",
1614
+ "test_set_default_dtype_works_with_foreach",
1615
+ ),
1616
+ DecorateInfo(
1617
+ skipIfTorchDynamo(
1618
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1619
+ ),
1620
+ "TestOptimRenewed",
1621
+ "test_complex_2d",
1622
+ ),
1623
+ DecorateInfo(
1624
+ skipIfTorchDynamo(
1625
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1626
+ ),
1627
+ "TestOptimRenewed",
1628
+ "test_deepcopy_copies_all_public_attrs",
1629
+ ),
1630
+ DecorateInfo(
1631
+ skipIfTorchDynamo(
1632
+ "See https://github.com/pytorch/pytorch/issues/115607"
1633
+ ),
1634
+ "TestOptimRenewed",
1635
+ "test_foreach_matches_forloop",
1636
+ ),
1637
+ DecorateInfo(
1638
+ toleranceOverride(
1639
+ {
1640
+ # previously atol=1e-7, rtol=1e-7
1641
+ torch.float64: tol(atol=1.5e-7, rtol=1.1e-7)
1642
+ }
1643
+ ),
1644
+ "TestOptimRenewed",
1645
+ "test_foreach_matches_forloop",
1646
+ ),
1647
+ DecorateInfo(
1648
+ skipIfTorchDynamo(
1649
+ "See https://github.com/pytorch/pytorch/issues/116494"
1650
+ ),
1651
+ "TestOptimRenewed",
1652
+ "test_state_dict_deterministic",
1653
+ ),
1654
+ DecorateInfo(
1655
+ skipIfTorchDynamo(
1656
+ "Should be fixed by https://github.com/pytorch/pytorch/issues/115607"
1657
+ ),
1658
+ "TestOptimRenewed",
1659
+ "test_can_load_older_state_dict",
1660
+ device_type="cpu",
1661
+ ),
1662
+ ),
1663
+ ),
1664
+ OptimizerInfo(
1665
+ RMSprop,
1666
+ optim_inputs_func=optim_inputs_func_rmsprop,
1667
+ optim_error_inputs_func=optim_error_inputs_func_rmsprop,
1668
+ supported_impls=("foreach", "differentiable"),
1669
+ skips=(
1670
+ DecorateInfo(
1671
+ skipIfMps, # addcdiv doesn't work for non-contiguous, see #118115
1672
+ "TestOptimRenewed",
1673
+ "test_forloop_goes_right_direction",
1674
+ active_if=lambda kwargs: not kwargs["contiguous"],
1675
+ ),
1676
+ DecorateInfo(
1677
+ skipIfTorchDynamo(
1678
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1679
+ ),
1680
+ "TestOptimRenewed",
1681
+ "test_forloop_goes_right_direction",
1682
+ ),
1683
+ DecorateInfo(
1684
+ skipIfTorchDynamo(
1685
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1686
+ ),
1687
+ "TestOptimRenewed",
1688
+ "test_forloop_goes_right_direction_multigpu",
1689
+ ),
1690
+ DecorateInfo(
1691
+ skipIfTorchDynamo(
1692
+ "See https://github.com/pytorch/pytorch/issues/115679"
1693
+ ),
1694
+ "TestOptimRenewed",
1695
+ "test_foreach_matches_forloop",
1696
+ ),
1697
+ DecorateInfo(
1698
+ skipIfTorchDynamo(
1699
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1700
+ ),
1701
+ "TestOptimRenewed",
1702
+ "test_peak_memory_foreach",
1703
+ ),
1704
+ DecorateInfo(
1705
+ skipIfTorchDynamo(
1706
+ "See https://github.com/pytorch/pytorch/issues/115679 and #116028"
1707
+ ),
1708
+ "TestOptimRenewed",
1709
+ "test_set_default_dtype_works_with_foreach",
1710
+ ),
1711
+ DecorateInfo(
1712
+ skipIfTorchDynamo(
1713
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1714
+ ),
1715
+ "TestOptimRenewed",
1716
+ "test_complex_2d",
1717
+ ),
1718
+ DecorateInfo(
1719
+ toleranceOverride(
1720
+ { # previously atol=5e-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202
1721
+ torch.float32: tol(atol=5e-04, rtol=0.01),
1722
+ }
1723
+ ),
1724
+ "TestOptimRenewed",
1725
+ "test_mixed_device_dtype",
1726
+ active_if=TEST_WITH_TORCHDYNAMO,
1727
+ ),
1728
+ DecorateInfo(
1729
+ skipIfTorchDynamo(
1730
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1731
+ ),
1732
+ "TestOptimRenewed",
1733
+ "test_state_dict_deterministic",
1734
+ ),
1735
+ DecorateInfo(
1736
+ skipIfTorchDynamo(
1737
+ "See https://github.com/pytorch/pytorch/issues/115679"
1738
+ ),
1739
+ "TestOptimRenewed",
1740
+ "test_state_dict_with_cuda_params",
1741
+ ),
1742
+ DecorateInfo(
1743
+ skipIfTorchDynamo(
1744
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1745
+ ),
1746
+ "TestOptimRenewed",
1747
+ "test_deepcopy_copies_all_public_attrs",
1748
+ ),
1749
+ ),
1750
+ ),
1751
+ OptimizerInfo(
1752
+ Rprop,
1753
+ optim_inputs_func=optim_inputs_func_rprop,
1754
+ optim_error_inputs_func=optim_error_inputs_func_rprop,
1755
+ supported_impls=("foreach", "differentiable"),
1756
+ skips=(
1757
+ DecorateInfo(
1758
+ skipIfMps, # Rprop doesn't update for non-contiguous, see #118117
1759
+ "TestOptimRenewed",
1760
+ "test_forloop_goes_right_direction",
1761
+ active_if=lambda kwargs: not kwargs["contiguous"],
1762
+ ),
1763
+ DecorateInfo(
1764
+ skipIfTorchDynamo(
1765
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1766
+ ),
1767
+ "TestOptimRenewed",
1768
+ "test_forloop_goes_right_direction",
1769
+ ),
1770
+ DecorateInfo(
1771
+ skipIfTorchDynamo(
1772
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1773
+ ),
1774
+ "TestOptimRenewed",
1775
+ "test_forloop_goes_right_direction_multigpu",
1776
+ ),
1777
+ DecorateInfo(
1778
+ skipIfTorchDynamo(
1779
+ "See https://github.com/pytorch/pytorch/issues/115679"
1780
+ ),
1781
+ "TestOptimRenewed",
1782
+ "test_foreach_matches_forloop",
1783
+ ),
1784
+ DecorateInfo(
1785
+ skipIfTorchDynamo(
1786
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1787
+ ),
1788
+ "TestOptimRenewed",
1789
+ "test_peak_memory_foreach",
1790
+ ),
1791
+ DecorateInfo(
1792
+ skipIfTorchDynamo(
1793
+ "See https://github.com/pytorch/pytorch/issues/115679 and #116028"
1794
+ ),
1795
+ "TestOptimRenewed",
1796
+ "test_set_default_dtype_works_with_foreach",
1797
+ ),
1798
+ DecorateInfo(
1799
+ skipIfTorchDynamo(
1800
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1801
+ ),
1802
+ "TestOptimRenewed",
1803
+ "test_complex_2d",
1804
+ ),
1805
+ DecorateInfo(
1806
+ skipIfTorchDynamo(
1807
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1808
+ ),
1809
+ "TestOptimRenewed",
1810
+ "test_state_dict_deterministic",
1811
+ ),
1812
+ DecorateInfo(
1813
+ skipIfTorchDynamo(
1814
+ "See https://github.com/pytorch/pytorch/issues/115679"
1815
+ ),
1816
+ "TestOptimRenewed",
1817
+ "test_state_dict_with_cuda_params",
1818
+ ),
1819
+ DecorateInfo(
1820
+ skipIfTorchDynamo(
1821
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1822
+ ),
1823
+ "TestOptimRenewed",
1824
+ "test_deepcopy_copies_all_public_attrs",
1825
+ ),
1826
+ ),
1827
+ ),
1828
+ OptimizerInfo(
1829
+ SGD,
1830
+ optim_inputs_func=optim_inputs_func_sgd,
1831
+ optim_error_inputs_func=optim_error_inputs_func_sgd,
1832
+ supported_impls=("foreach", "differentiable", "fused"),
1833
+ supports_sparse_on=("cpu", "cuda"),
1834
+ skips=(
1835
+ DecorateInfo(
1836
+ skipIfTorchDynamo(
1837
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1838
+ ),
1839
+ "TestOptimRenewed",
1840
+ "test_forloop_goes_right_direction",
1841
+ ),
1842
+ DecorateInfo(
1843
+ skipIfTorchDynamo(
1844
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1845
+ ),
1846
+ "TestOptimRenewed",
1847
+ "test_forloop_goes_right_direction_multigpu",
1848
+ ),
1849
+ DecorateInfo(
1850
+ skipIfTorchDynamo(
1851
+ "Dynamo memory usage is flaky, see https://github.com/pytorch/pytorch/issues/116046"
1852
+ ),
1853
+ "TestOptimRenewed",
1854
+ "test_peak_memory_foreach",
1855
+ ),
1856
+ DecorateInfo(
1857
+ skipIfTorchDynamo(
1858
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1859
+ ),
1860
+ "TestOptimRenewed",
1861
+ "test_set_default_dtype_works_with_foreach",
1862
+ ),
1863
+ DecorateInfo(
1864
+ skipIfTorchDynamo(
1865
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1866
+ ),
1867
+ "TestOptimRenewed",
1868
+ "test_complex_2d",
1869
+ ),
1870
+ DecorateInfo(
1871
+ toleranceOverride(
1872
+ { # previously atol=5e-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202
1873
+ torch.float32: tol(atol=5e-04, rtol=0.007),
1874
+ }
1875
+ ),
1876
+ "TestOptimRenewed",
1877
+ "test_mixed_device_dtype",
1878
+ active_if=TEST_WITH_TORCHDYNAMO,
1879
+ ),
1880
+ DecorateInfo(
1881
+ skipIfTorchDynamo(
1882
+ "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061"
1883
+ ),
1884
+ "TestOptimRenewed",
1885
+ "test_step_is_noop_for_zero_grads",
1886
+ device_type="cpu",
1887
+ ),
1888
+ DecorateInfo(
1889
+ skipIfTorchDynamo(
1890
+ "No closure handling, https://github.com/pytorch/pytorch/issues/116494"
1891
+ ),
1892
+ "TestOptimRenewed",
1893
+ "test_state_dict_deterministic",
1894
+ ),
1895
+ DecorateInfo(
1896
+ skipIfTorchDynamo(
1897
+ "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061"
1898
+ ),
1899
+ "TestOptimRenewed",
1900
+ "test_param_groups_weight_decay",
1901
+ device_type="cpu",
1902
+ ),
1903
+ DecorateInfo(
1904
+ skipIfTorchDynamo(
1905
+ "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061"
1906
+ ),
1907
+ "TestOptimRenewed",
1908
+ "test_param_groups_lr",
1909
+ device_type="cpu",
1910
+ ),
1911
+ DecorateInfo(
1912
+ skipIfTorchDynamo(
1913
+ "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061"
1914
+ ),
1915
+ "TestOptimRenewed",
1916
+ "test_load_nontensor_step",
1917
+ device_type="cpu",
1918
+ ),
1919
+ DecorateInfo(
1920
+ skipIfTorchDynamo(
1921
+ "momentum_buffer inconsistency, https://github.com/pytorch/pytorch/issues/117147"
1922
+ ),
1923
+ "TestOptimRenewed",
1924
+ "test_state_dict_with_cuda_params",
1925
+ ),
1926
+ DecorateInfo(
1927
+ skipIfTorchDynamo(
1928
+ "fails, https://github.com/pytorch/pytorch/issues/117165"
1929
+ ),
1930
+ "TestOptimRenewed",
1931
+ "test_deepcopy_copies_all_public_attrs",
1932
+ ),
1933
+ ),
1934
+ ),
1935
+ OptimizerInfo(
1936
+ SparseAdam,
1937
+ optim_inputs_func=optim_inputs_func_sparseadam,
1938
+ optim_error_inputs_func=optim_error_inputs_func_sparseadam,
1939
+ supported_impls=(),
1940
+ only_supports_sparse_grads=True,
1941
+ supports_complex=False, # Missing complex support, see #118153
1942
+ skips=(
1943
+ DecorateInfo(
1944
+ skipIfMps, # SparseAdam does not support MPS
1945
+ "TestOptimRenewed",
1946
+ ),
1947
+ DecorateInfo(
1948
+ unittest.skip(
1949
+ "SparseAdam does not support dense gradients, see #116507"
1950
+ ),
1951
+ "TestOptimRenewed",
1952
+ "test_state_dict_deterministic",
1953
+ ),
1954
+ DecorateInfo(
1955
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1956
+ "TestOptimRenewed",
1957
+ "test_param_groups_lr",
1958
+ ),
1959
+ DecorateInfo(
1960
+ unittest.skip(
1961
+ "SparseAdam does not support dense gradients, see #116507"
1962
+ ),
1963
+ "TestOptimRenewed",
1964
+ "test_can_load_older_state_dict",
1965
+ ),
1966
+ DecorateInfo(
1967
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1968
+ "TestOptimRenewed",
1969
+ "test_load_nontensor_step",
1970
+ ),
1971
+ DecorateInfo(
1972
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1973
+ "TestOptimRenewed",
1974
+ "test_forloop_goes_right_direction",
1975
+ ),
1976
+ DecorateInfo(
1977
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1978
+ "TestOptimRenewed",
1979
+ "test_forloop_goes_right_direction_multigpu",
1980
+ ),
1981
+ DecorateInfo(
1982
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1983
+ "TestOptimRenewed",
1984
+ "test_state_dict_with_cuda_params",
1985
+ ),
1986
+ DecorateInfo(
1987
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
1988
+ "TestOptimRenewed",
1989
+ "test_deepcopy_copies_all_public_attrs",
1990
+ ),
1991
+ ),
1992
+ ),
1993
+ ]
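
For readers scanning the entries above: each DecorateInfo in a skips tuple pairs a decorator with the test class and test name it applies to, optionally narrowed by device_type or an active_if predicate. A purely illustrative entry follows (the reason string and test name are placeholders, not real skips from this file):

DecorateInfo(
    skipIfTorchDynamo("placeholder reason, hypothetical issue link"),  # decorator to apply
    "TestOptimRenewed",          # test class the decorator targets
    "test_some_behavior",        # specific test name (omit it to target the whole class)
    device_type="cuda",          # optional: restrict to one device type
    active_if=lambda kwargs: not kwargs["contiguous"],  # optional: apply only when this holds
)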
1994
+
1995
+
1996
+ class TensorTracker:
1997
+ """
1998
+ A utility to track tensor clones in a list, with the expectation of popping them later (in
1999
+ order) to make fair comparisons between two multi-step computations. The intended use case is
2000
+ usually comparing two supposedly equal computations, such as optimizer steps that each
2001
+ individually consist of multiple sub-steps, where numerical deviation could compound.
2002
+
2003
+ The goal is to be able to compare and align numbers at every milestone so as to minimize
2004
+ numerical discrepancies, so that when the test fails, it is likely a real problem.
2005
+ """
2006
+
2007
+ def __init__(self):
2008
+ self.tensors = []
2009
+
2010
+ def add(self, tensor):
2011
+ """
2012
+ Add a clone().detach()'d version of the tensor
2013
+ """
2014
+ self.tensors.append(tensor.clone().detach())
2015
+
2016
+ # pops from beginning, like a queue and not a stack!
2017
+ def pop_check_set(self, tensor_to_set, testcase):
2018
+ """
2019
+ Pop the first element in the tensor tracker, assert equality between the popped tensor and
2020
+ the input tensor, and then set the input tensor to have the same values as the popped tensor
2021
+ (with copy_).
2022
+ """
2023
+ testcase.assertGreater(len(self.tensors), 0, "no tensors to pop")
2024
+ ref = self.tensors.pop(0)
2025
+
2026
+ testcase.assertTrue(isinstance(ref, Tensor), f"{type(ref)=}")
2027
+ testcase.assertEqual(tensor_to_set, ref)
2028
+
2029
+ with torch.no_grad():
2030
+ tensor_to_set.copy_(ref)
2031
+
2032
+ def all_popped(self):
2033
+ return len(self.tensors) == 0
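
A minimal usage sketch of TensorTracker (hypothetical tensors; it assumes the code runs inside a unittest.TestCase so an assertion object is available, and that TensorTracker from this module is in scope):

import unittest
import torch

class _TrackerDemo(unittest.TestCase):
    def test_align(self):
        tracker = TensorTracker()
        reference = [torch.randn(3), torch.randn(3)]
        for t in reference:                # first (reference) computation records its milestones
            tracker.add(t)
        candidate = [t.clone() for t in reference]  # second, supposedly equal computation
        for c in candidate:
            tracker.pop_check_set(c, self)  # assert equality, then realign c to the reference value
        self.assertTrue(tracker.all_popped())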
venv/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py ADDED
@@ -0,0 +1,581 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ import itertools
6
+
7
+ from torch.utils._python_dispatch import TorchDispatchMode
8
+ from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
9
+ from torch.utils import _pytree as pytree
10
+ from functools import partial
11
+ from torch.utils._mode_utils import no_dispatch, all_same_mode
12
+ import torch.autograd.forward_ad as fwAD
13
+ from typing import Callable
14
+ import re
15
+
16
+
17
+ def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor):
18
+ elem = wrapper_tensor.elem
19
+ metadata_wrapper_tensor = metadata_accessor(wrapper_tensor)
20
+ metadata_elem = metadata_accessor(elem)
21
+ if metadata_wrapper_tensor == metadata_elem:
22
+ return
23
+ raise RuntimeError(
24
+ f"This operator is not Composite Compliant: the "
25
+ f"{metadata_name} of the tensor was modified directly without "
26
+ f"going through the PyTorch dispatcher.")
27
+
28
+ def check_metadata_consistency(wrapper_tensor, CCT):
29
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct
30
+ if not isinstance(wrapper_tensor, CCT):
31
+ return
32
+ things_to_check = {
33
+ 'shape': Tensor.size,
34
+ 'dtype': lambda x: x.dtype,
35
+ 'device': lambda x: x.device,
36
+ 'numel': Tensor.numel,
37
+ 'stride': Tensor.stride,
38
+ 'storage_offset': Tensor.storage_offset,
39
+ }
40
+ for metadata_name, metadata_accessor in things_to_check.items():
41
+ check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor)
42
+
43
+ def is_view_fn(func):
44
+ return func.overloadpacket.__name__ in {
45
+ 'as_strided',
46
+ 'detach',
47
+ 'diagonal',
48
+ 'expand',
49
+ 'expand_as',
50
+ 'movedim',
51
+ 'narrow',
52
+ 'permute',
53
+ 'select',
54
+ 'squeeze',
55
+ 'transpose',
56
+ 't',
57
+ 'real',
58
+ 'imag',
59
+ 'view_as_real',
60
+ 'view_as_complex',
61
+ 'unflatten',
62
+ 'unfold',
63
+ 'unsqueeze',
64
+ 'view',
65
+ 'view_as',
66
+ 'unbind',
67
+ 'split',
68
+ 'split_with_sizes',
69
+ 'vsplit',
70
+ 'hsplit',
71
+ 'tensor_split',
72
+ 'chunk',
73
+ 'swapaxes',
74
+ 'slice',
75
+ '_reshape_alias',
76
+ '_unsafe_view',
77
+ '_conj',
78
+ 'alias',
79
+ }
80
+
81
+ # manually populated from native_functions that have inplace_view: True.
82
+ # In the future we will probably be able to grab that list directly
83
+ def is_inplace_view_fn(func):
84
+ return func.overloadpacket.__name__ in {
85
+ 'as_strided_',
86
+ 'detach_',
87
+ 'squeeze_',
88
+ 'swapaxes_',
89
+ 'swapdims_',
90
+ 't_',
91
+ 'transpose_',
92
+ 'unsqueeze_',
93
+ }
94
+
95
+
96
+ # Introspection please save us
97
+ def is_inplace(func):
98
+ name = func.overloadpacket.__name__
99
+ if re.match('__i.+__', name):
100
+ return True
101
+ if re.match('__.+__', name):
102
+ return False
103
+ return name[-1] == '_'
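
To make the heuristic concrete, here is a small self-contained restatement applied to bare operator names (illustrative only; the real function above takes an overload packet):

import re

def _is_inplace_name(name: str) -> bool:
    # mirrors is_inplace above: __i*__ dunders are in-place, other dunders are not,
    # and otherwise a trailing underscore marks an in-place op
    if re.match('__i.+__', name):
        return True
    if re.match('__.+__', name):
        return False
    return name[-1] == '_'

assert _is_inplace_name('add_') and _is_inplace_name('__iadd__')
assert not _is_inplace_name('add') and not _is_inplace_name('__add__')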
104
+
105
+
106
+ def generate_cct_and_mode(autograd_view_consistency=True):
107
+ # This function returns a new class CompositeCompliantTensor
108
+ # The two arguments control the behaviour described below.
109
+
110
+ # autograd_view_consistency:
111
+ # If True, alias result using `set_` if func returns a view
112
+ # (See Note [Alias Result]).
113
+ # Since Forward AD doesn't work with `set_`
114
+ # we disable it by setting alias to False.
115
+
116
+ class CompositeCompliantTensor(torch.Tensor):
117
+ elem: torch.Tensor
118
+
119
+ __slots__ = ['elem']
120
+
121
+ @staticmethod
122
+ def __new__(cls, elem, mode, *args, **kwargs):
123
+ assert type(elem) is not cls, \
124
+ "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported"
125
+
126
+ # The storage of CompositeCompliantTensor should never be used directly
127
+ # by a Composite operation; if the Composite
128
+ # operator attempts to read from the storage without dispatching then it'll
129
+ # raise a RuntimeError due to it being a meta storage.
130
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
131
+ cls, elem.size(),
132
+ dtype=elem.dtype, layout=elem.layout,
133
+ device=elem.device, requires_grad=elem.requires_grad,
134
+ strides=elem.stride(), storage_offset=elem.storage_offset())
135
+
136
+ if elem.requires_grad:
137
+ # CompositeCompliantTensor steals the "requires_grad"-ness.
138
+ # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests...
139
+ tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype,
140
+ device=elem.device, layout=elem.layout,
141
+ requires_grad=False)
142
+ tmp.copy_(elem.detach())
143
+ r.elem = tmp
144
+ else:
145
+ r.elem = elem
146
+
147
+ assert r.stride() == r.elem.stride()
148
+
149
+ # Propagate conjugate bits to the wrapper tensor
150
+ # Ref: https://github.com/albanD/subclass_zoo/issues/24
151
+ # Ref: https://github.com/albanD/subclass_zoo/issues/21
152
+ torch._C._set_conj(r, r.elem.is_conj())
153
+ torch._C._set_neg(r, r.elem.is_neg())
154
+
155
+ r.mode = mode
156
+ return r
157
+
158
+ def __repr__(self):
159
+ return f"CompositeCompliantTensor({self.elem})"
160
+
161
+ @classmethod
162
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
163
+ all_args = pytree.arg_tree_leaves(*args, **(kwargs or {}))
164
+ modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor))
165
+ if not all_same_mode(modes):
166
+ raise RuntimeError("Multiple CompositeCompliantTensorModes NYI")
167
+ with modes[0]:
168
+ return func(*args, **kwargs)
169
+
170
+ class CompositeCompliantTensorMode(TorchDispatchMode):
171
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
172
+ def unwrap(e):
173
+ return e.elem if isinstance(e, CompositeCompliantTensor) else e
174
+
175
+ def wrap(e):
176
+ return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e
177
+
178
+ if func == torch.ops.aten._local_scalar_dense.default:
179
+ raise RuntimeError(
180
+ ".item() is not allowed to be called inside of composite "
181
+ "functions in the PyTorch library because not all backends "
182
+ "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.")
183
+
184
+ if func.overloadpacket.__name__ in ('set_', 'resize_'):
185
+ raise RuntimeError(
186
+ f"{func.__name__} is not allowed to be called inside of "
187
+ f"Composite operators.")
188
+
189
+ if is_inplace(func):
190
+ # NB: We are making an assumption that if the function is in-place,
191
+ # then the first argument is being written to. Introspection please save us!
192
+ mutated_argument = args[0]
193
+ if not isinstance(mutated_argument, CompositeCompliantTensor) and \
194
+ any(isinstance(a, CompositeCompliantTensor) for a in args[1:]):
195
+ raise RuntimeError(
196
+ 'Not composite compliant: performing in-place operation '
197
+ f'{func.__name__} where the Tensor being written to is '
198
+ 'regular Tensor but the other tensors are Tensor Subclasses. '
199
+ 'Please try to avoid this in-place operation.')
200
+
201
+ unwrapped_args = tree_map(unwrap, args)
202
+ unwrapped_kwargs = tree_map(unwrap, kwargs)
203
+ unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs)
204
+ rs = tree_map(wrap, unwrapped_rs)
205
+
206
+ if is_view_fn(func) and autograd_view_consistency:
207
+ # Note [Alias Result]
208
+ # Autograd asserts that for B = A.view_fn(...), B and A's storages
209
+ # are the same. Here we try to make B alias A to avoid those asserts.
210
+ # See https://github.com/pytorch/pytorch/issues/65339 for more information
211
+ # about the issue.
212
+ with no_dispatch():
213
+ # Idea: this is a weird way of getting a storage that aliases the input.
214
+ # This is a workaround for #65339.
215
+ # 1. under no_dispatch, all of the wrapper tensors look like regular
216
+ # tensors with special storage (the storage is nullptr and
217
+ # advertises a CPU/CUDA device).
218
+ # 2. we run func, which ends up running the view operation
219
+ # 3. All view operations reuse the input's storage and return
220
+ # result Tensor(s) with new sizes/strides/offset that alias
221
+ # the input.
222
+ # 4. we set the storage (and sizes/strides/offset) of the wrapper
223
+ # tensor results to be that of the tensors that alias the input
224
+ result = func(*args, **kwargs)
225
+ if isinstance(result, (tuple, list)):
226
+ for a, b in zip(rs, result):
227
+ a.set_(b)
228
+ else:
229
+ rs.set_(result)
230
+
231
+ # Some operations are allowed to in-place modify the metadata of the
232
+ # inputs. The only ones are the "inplace view functions"; when we
233
+ # run into these, we manually modify the metadata of the input.
234
+ with no_dispatch():
235
+ if is_inplace_view_fn(func):
236
+ func(*args, **kwargs)
237
+
238
+ # For each CompositeCompliantTensor t, we check that t and t.elem
239
+ # have consistent metadata. If they don't have consistent metadata,
240
+ # that means the operator did something fishy.
241
+ check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor)
242
+ pytree.tree_map_(check, args)
243
+ pytree.tree_map_(check, kwargs)
244
+ pytree.tree_map_(check, rs)
245
+ return rs
246
+
247
+ return CompositeCompliantTensor, CompositeCompliantTensorMode()
248
+
249
+ def is_tensorlist(lst):
250
+ if not isinstance(lst, list) and not isinstance(lst, tuple):
251
+ return False
252
+ if len(lst) == 0:
253
+ return False
254
+ all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst)
255
+ if all_tensors:
256
+ return True
257
+ exists_one_tensor = any(isinstance(elt, torch.Tensor) for elt in lst)
258
+ if exists_one_tensor:
259
+ raise RuntimeError('This test assumes that PyTorch APIs cannot take '
260
+ 'mixed lists of Tensor and other things')
261
+ return False
262
+
263
+
264
+ def maybe_map(fn, should_map, arg):
265
+ return fn(arg) if should_map else arg
266
+
267
+
268
+ def wrap(arg, CCT, cct_mode):
269
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
270
+ if isinstance(arg, torch.Tensor):
271
+ return CCT(arg, cct_mode)
272
+ if is_tensorlist(arg):
273
+ return [CCT(a, cct_mode) for a in arg]
274
+ raise RuntimeError("wrap assumes that the input can be wrapped")
275
+
276
+
277
+ # Given a list of flat arguments, some of which may be Tensors, return all
278
+ # possible ways some of the arguments could be CompositeCompliantTensors (CCT).
279
+ # For example, given Tensors A, B, C and flat_args = [A, 1, B],
280
+ # We would return the following 4 options:
281
+ # [CCT(A), 1, CCT(B)]
282
+ # [CCT(A), 1, B]
283
+ # [A, 1, CCT(B)]
284
+ # [A, 1, B]
285
+ # NB: Yes, this is exponential. No, we don't care too much because PyTorch ops
286
+ # don't accept that many input Tensors.
287
+ def generate_subclass_choices(flat_args, CCT, cct_mode):
288
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
289
+ is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args]
290
+ subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes]
291
+
292
+ for which_args_are_wrapped in itertools.product(*subclass_options):
293
+
294
+ result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg)
295
+ for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)]
296
+ yield result, which_args_are_wrapped
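
The wrapping choices enumerated here follow a simple product over per-argument options; a standalone sketch of just that enumeration (no CCT wrapping, placeholder arguments):

import itertools
import torch

flat_args = [torch.randn(2), 1, torch.randn(2)]            # the A, 1, B example from the comment above
wrappable = [isinstance(a, torch.Tensor) for a in flat_args]
options = [[False, True] if w else [False] for w in wrappable]
for choice in itertools.product(*options):
    # (False, False, False), (False, False, True), (True, False, False), (True, False, True)
    print(choice)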
297
+
298
+
299
+ # For an operation f(*args, **kwargs), each Tensor argument may either be
300
+ # a regular Tensor or a Tensor Subclass. This iterator iterates through
301
+ # all of those options.
302
+ def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
303
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
304
+ flat_kwargs, spec = tree_flatten(kwargs)
305
+ flat_args_kwargs = list(args) + list(flat_kwargs)
306
+ for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode):
307
+ new_args = choice[:len(args)]
308
+ new_kwargs = tree_unflatten(choice[len(args):], spec)
309
+ which_args_are_wrapped = debug_metadata[:len(args)]
310
+ which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec)
311
+ yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped
312
+
313
+
314
+ def raise_composite_compliance_error(err, additional_info=''):
315
+ raise RuntimeError(
316
+ "Composite compliance check failed with "
317
+ "the above error.\n"
318
+ f"{additional_info}"
319
+ "If you are adding an OpInfo of an "
320
+ "existing operator, please feel free to skip this test "
321
+ "because the problem was pre-existing and file an issue. "
322
+ "Otherwise, if you added a new operator, please read "
323
+ "through the Composite Compliance section in "
324
+ "aten/src/ATen/native/README.md for how to resolve this. "
325
+ ) from err
326
+
327
+
328
+ # This test checks ALL possible permutations of calling `op` with arguments
329
+ # that are individually either a regular Tensor or a Tensor subclass.
330
+ #
331
+ # The general strategy is to wrap some Tensor args and kwargs in
332
+ # CompositeCompliantTensor wrappers and call the operation.
333
+
334
+ # If some composite operation does any non-compliant behavior,
335
+ # CompositeCompliantTensor will raise an error.
336
+ def check_all_permutations(op, args, kwargs, assert_equal_fn):
337
+ CCT, cct_mode = generate_cct_and_mode()
338
+ expected = op(*args, **kwargs)
339
+ for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
340
+ new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
341
+
342
+ try:
343
+ actual = op(*new_args, **new_kwargs)
344
+ # NOTE: [What errors are Composite Compliance trying to catch?]
345
+ #
346
+ # There are two things we want to catch:
347
+ # - errors that would raise within the torch_dispatch impl
348
+ # - data_ptr accesses
349
+ # The first is easy to filter for (we could make the error a different
350
+ # error class), the second is always going to be a RuntimeError due to
351
+ # how it is implemented (if you try to access the data_ptr of the
352
+ # wrapper Tensor, it raises an internal RuntimeError).
353
+ #
354
+ # So the most general thing to catch here was RuntimeError. If you
355
+ # are here and debugging why your test failed, it's plausible that
356
+ # the operator itself is broken and that there are other tests failing.
357
+ except RuntimeError as err:
358
+ raise_composite_compliance_error(
359
+ err,
360
+ f"- wrapped_args: {which_args_are_wrapped}\n"
361
+ f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
362
+ )
363
+
364
+ def unwrap(e):
365
+ return e.elem if isinstance(e, CCT) else e
366
+
367
+ assert_equal_fn(tree_map(unwrap, actual), expected)
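
An illustrative call (hypothetical inputs; torch.testing.assert_close is assumed as the comparison function):

import torch

a = torch.randn(3)
b = torch.randn(3)
# exercises torch.add with every combination of a/b wrapped as CompositeCompliantTensor
check_all_permutations(torch.add, (a, b), {}, torch.testing.assert_close)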
368
+
369
+ # Checks via the usage of torch dispatch mode certain anti-patterns that
370
+ # are not composite compliant.
371
+ #
372
+ # In particular, the anti-pattern we are trying to prevent is a user
373
+ # creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps
374
+ # here because all factory functions will create tensors that are
375
+ # CompositeCompliantTensor.
376
+ #
377
+ # The general strategy is to wrap all Tensor args and kwargs in
378
+ # CompositeCompliantTensor wrappers. If an operator that is
379
+ # Composite does any non-compliant behavior,
380
+ # CompositeCompliantTensor will raise an error.
381
+ def check_with_mode(op, args, kwargs, assert_equal_fn):
382
+ CCT, cct_mode = generate_cct_and_mode()
383
+
384
+ def wrap(e):
385
+ return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e
386
+
387
+ expected = op(*args, **kwargs)
388
+
389
+ args = tree_map(wrap, args)
390
+ kwargs = tree_map(wrap, kwargs)
391
+ try:
392
+ with cct_mode:
393
+ actual = op(*args, **kwargs)
394
+ # see NOTE: [What errors are Composite Compliance trying to catch?]
395
+ except RuntimeError as err:
396
+ raise_composite_compliance_error(err)
397
+
398
+ def unwrap(e):
399
+ return e.elem if isinstance(e, CCT) else e
400
+
401
+ assert_equal_fn(tree_map(unwrap, actual), expected)
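
A hedged sketch of the kind of composite implementation this mode-based check is designed to flag (a hypothetical function, not an existing operator):

import torch

def bad_composite(x):
    # materializes the output via a factory op and then resize_-es it;
    # resize_ is rejected by CompositeCompliantTensorMode above
    out = torch.empty(0, device=x.device, dtype=x.dtype)
    out.resize_(x.shape)
    out.copy_(x.sin())
    return out

# check_with_mode(bad_composite, (torch.randn(3),), {}, torch.testing.assert_close)
# would surface the composite compliance error for the resize_ call.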
402
+
403
+ def gather_leaf_tensors(args, kwargs):
404
+ leaf_tensors = []
405
+ args, args_spec = tree_flatten(args)
406
+ kwargs, kwargs_spec = tree_flatten(kwargs)
407
+ args = args + kwargs
408
+ for arg in args:
409
+ if not isinstance(arg, torch.Tensor):
410
+ continue
411
+ if arg.requires_grad:
412
+ leaf_tensors.append(arg)
413
+ return leaf_tensors
414
+
415
+
416
+ def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None):
417
+ if gradcheck_wrapper is None:
418
+ results = op(*args, **kwargs)
419
+ else:
420
+ results = gradcheck_wrapper(op, *args, **kwargs)
421
+
422
+ if output_process_fn_grad is not None:
423
+ results = output_process_fn_grad(results)
424
+
425
+ flat_results = pytree.tree_leaves(results)
426
+ flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
427
+ flat_diff_results = [r for r in flat_results if r.requires_grad]
428
+ assert len(flat_diff_results) > 0
429
+
430
+ grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results]
431
+ leaf_tensors = gather_leaf_tensors(args, kwargs)
432
+ assert len(leaf_tensors) > 0
433
+ return torch.autograd.grad(flat_diff_results, leaf_tensors,
434
+ grads, allow_unused=True, retain_graph=True)
435
+
436
+
437
+ # Checks if the backward formula is composite compliant by testing
438
+ # all possible permutations of {inputs, grad_outputs} being
439
+ # CompositeCompliantTensor or regular Tensors.
440
+ #
441
+ # NB: it is important that op is accepted as a Callable and not an OpInfo,
442
+ # this means we can apply check_backward_formula to things that aren't OpInfos
443
+ # while debugging.
444
+ def check_backward_formula(op: Callable, args, kwargs,
445
+ output_process_fn_grad=None,
446
+ gradcheck_wrapper=None, assert_equal_fn=None):
447
+ CCT, cct_mode = generate_cct_and_mode()
448
+
449
+ expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper)
450
+
451
+ for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
452
+ new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
453
+ leaf_tensors = gather_leaf_tensors(new_args, new_kwargs)
454
+ assert len(leaf_tensors) > 0
455
+
456
+ try:
457
+ if gradcheck_wrapper is None:
458
+ results = op(*new_args, **new_kwargs)
459
+ else:
460
+ results = gradcheck_wrapper(op, *new_args, **new_kwargs)
461
+ if output_process_fn_grad is not None:
462
+ results = output_process_fn_grad(results)
463
+ # see NOTE: [What errors are Composite Compliance trying to catch?]
464
+ except RuntimeError as err:
465
+ raise_composite_compliance_error(
466
+ err,
467
+ f"- wrapped_args: {which_args_are_wrapped}\n"
468
+ f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
469
+ )
470
+
471
+ flat_results = pytree.tree_leaves(results)
472
+ flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
473
+ flat_diff_results = [r for r in flat_results if r.requires_grad]
474
+ assert len(flat_diff_results) > 0
475
+
476
+ # NB: ones, not ones_like, so we get a regular Tensor here
477
+ grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype)
478
+ for r in flat_diff_results]
479
+ for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode):
480
+ try:
481
+ actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads,
482
+ allow_unused=True, retain_graph=True)
483
+ # see NOTE: [What errors are Composite Compliance trying to catch?]
484
+ except RuntimeError as err:
485
+ raise_composite_compliance_error(
486
+ err,
487
+ f"- wrapped_args: {which_args_are_wrapped}\n"
488
+ f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
489
+ f"- wrapped_grads: {which_grad_is_batched}\n"
490
+ )
491
+
492
+ def unwrap(e):
493
+ return e.elem if isinstance(e, CCT) else e
494
+
495
+ assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True)
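
An illustrative invocation (hypothetical inputs; torch.testing.assert_close is assumed as the comparison function, which accepts the equal_nan keyword used above):

import torch

x = torch.randn(3, requires_grad=True)
y = torch.randn(3, requires_grad=True)
check_backward_formula(torch.mul, (x, y), {},
                       assert_equal_fn=torch.testing.assert_close)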
496
+
497
+ # Checks if the forward AD formula is composite compliant by testing
498
+ # all possible permutations of {primals, tangents} being
499
+ # CompositeCompliantTensor or regular Tensors.
500
+ #
501
+ # NB: it is important that op is accepted as a Callable and not an OpInfo,
502
+ # this means we can apply check_forward_ad_formula to things that aren't OpInfos
503
+ # while debugging.
504
+ def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None):
505
+ CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False)
506
+
507
+ def maybe_tangent(t):
508
+ assert type(t) is not CCT
509
+ # Generate `tangent` tensor
510
+ # if given object is a Tensor and requires grad is set.
511
+ if isinstance(t, torch.Tensor) and t.requires_grad:
512
+ return torch.randn_like(t)
513
+ elif is_tensorlist(t):
514
+ return [torch.randn_like(e) if e.requires_grad else None for e in t]
515
+ return None
516
+
517
+ tangent_args = tuple(maybe_tangent(arg) for arg in args)
518
+ flat_kwargs, spec = tree_flatten(kwargs)
519
+ flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs)
520
+ tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec)
521
+
522
+ with fwAD.dual_level():
523
+ def maybe_make_dual(dual):
524
+ # Returns dual tensor if primal is a tensor/tensor subclass
525
+ # with requires_grad set.
526
+ primal, tangent = dual
527
+ if isinstance(primal, torch.Tensor) and primal.requires_grad:
528
+ return fwAD.make_dual(primal.detach(), tangent)
529
+ elif is_tensorlist(primal):
530
+ return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri
531
+ for pri, tang in zip(primal, tangent))
532
+ return primal
533
+
534
+ def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs):
535
+ op_args = tuple(map(maybe_make_dual, zip(args, tangent_args)))
536
+ op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()}
537
+
538
+ if gradcheck_wrapper is None:
539
+ return op(*op_args, **op_kwargs)
540
+ return gradcheck_wrapper(op, *op_args, **op_kwargs)
541
+
542
+ expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs)
543
+ expected = tree_map(fwAD.unpack_dual, expected)
544
+ expected_primals = tree_map(lambda x: x.primal, expected)
545
+ expected_tangents = tree_map(lambda x: x.tangent, expected)
546
+
547
+ # Permutations of arg and kwargs in CCT.
548
+ for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
549
+ new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
550
+
551
+ # Permutations tangent arg and tangent kwargs in CCT.
552
+ for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode):
553
+ new_tang_args, new_tang_kwargs, \
554
+ which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice
555
+
556
+ op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args)))
557
+ op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()}
558
+
559
+ try:
560
+ if gradcheck_wrapper is None:
561
+ actual = op(*op_args, **op_kwargs)
562
+ else:
563
+ actual = gradcheck_wrapper(op, *op_args, **op_kwargs)
564
+ # see NOTE: [What errors are Composite Compliance trying to catch?]
565
+ except RuntimeError as err:
566
+ raise_composite_compliance_error(
567
+ err,
568
+ f"- wrapped_args: {which_args_are_wrapped}\n"
569
+ f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
570
+ f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n"
571
+ f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n"
572
+ )
573
+
574
+ def unwrap(e):
575
+ return e.elem if isinstance(e, CCT) else e
576
+
577
+ actual = tree_map(fwAD.unpack_dual, actual)
578
+ actual_primals = tree_map(lambda x: unwrap(x.primal), actual)
579
+ actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual)
580
+ assert_equal_fn(actual_primals, expected_primals, equal_nan=True)
581
+ assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True)
venv/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py ADDED
@@ -0,0 +1,77 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import functools
5
+ from torch.testing import make_tensor
6
+ from functorch.experimental.control_flow import map
7
+ from torch.testing._internal.opinfo.core import (
8
+ OpInfo,
9
+ SampleInput,
10
+ )
11
+ from torch.testing._internal.common_dtype import all_types_and
12
+
13
+ def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs):
14
+ make_arg = functools.partial(
15
+ make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
16
+ yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)],
17
+ args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2)))
18
+
19
+ def inner_f(x, y0, y1):
20
+ return [x[0].cos().add_(1.) * y0, (x[1] + y1.sin()).cos_().view(x[1].size())]
21
+
22
+ def simple_map(xs, y0, y1):
23
+ def f(x, y0, y1):
24
+ return inner_f(x, y0, y1)
25
+ return map(f, xs, y0, y1)
26
+
27
+ def nested_map(xs, y0, y1):
28
+ def f1(xx, y0, y1):
29
+ def f2(x, y0, y1):
30
+ return inner_f(x, y0, y1)
31
+ return map(f2, xx, y0, y1)
32
+ return map(f1, xs, y0, y1)
33
+
34
+ def triple_nested_map(xs, y0, y1):
35
+ def f0(xs, y0, y1):
36
+ def f1(xx, y0, y1):
37
+ def f2(x, y0, y1):
38
+ return inner_f(x, y0, y1)
39
+ return map(f2, xx, y0, y1)
40
+ return map(f1, xs, y0, y1)
41
+ return map(f0, xs, y0, y1)
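
For orientation, control_flow.map applies the body over the leading dimension of its mapped operand(s). For a single-tensor input and single-tensor output it behaves roughly like the sketch below (a semantic sketch only, ignoring tracing, pytree handling, and autograd details):

import torch

def reference_map(f, xs, *extra):
    # per-slice application along dim 0, then re-stacked
    return torch.stack([f(x, *extra) for x in xs])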
42
+
43
+ control_flow_opinfo_db = [
44
+ OpInfo(
45
+ "MapControlflowOp",
46
+ op=simple_map,
47
+ sample_inputs_func=sample_inputs_map,
48
+ dtypes=all_types_and(torch.bool, torch.half),
49
+ supports_out=False,
50
+ check_batched_grad=False,
51
+ check_batched_gradgrad=False,
52
+ check_batched_forward_grad=False,
53
+ check_inplace_batched_forward_grad=False,
54
+ ),
55
+ OpInfo(
56
+ "NestedMapControlflowOp",
57
+ op=nested_map,
58
+ sample_inputs_func=sample_inputs_map,
59
+ dtypes=all_types_and(torch.bool, torch.half),
60
+ supports_out=False,
61
+ check_batched_grad=False,
62
+ check_batched_gradgrad=False,
63
+ check_batched_forward_grad=False,
64
+ check_inplace_batched_forward_grad=False,
65
+ ),
66
+ OpInfo(
67
+ "TripleNestedMapControlflowOp",
68
+ op=triple_nested_map,
69
+ sample_inputs_func=sample_inputs_map,
70
+ dtypes=all_types_and(torch.bool, torch.half),
71
+ supports_out=False,
72
+ check_batched_grad=False,
73
+ check_batched_gradgrad=False,
74
+ check_batched_forward_grad=False,
75
+ check_inplace_batched_forward_grad=False,
76
+ )
77
+ ]
venv/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py ADDED
@@ -0,0 +1,456 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import functools
5
+ from torch.testing import make_tensor
6
+ from torch.testing._internal.opinfo.core import (
7
+ OpInfo,
8
+ SampleInput,
9
+ )
10
+ from torch.testing._internal.common_dtype import all_types_and
11
+ import numpy as np
12
+ from torch.testing._internal.autograd_function_db import (
13
+ sample_inputs_numpy_cube,
14
+ sample_inputs_numpy_mul,
15
+ sample_inputs_numpy_sort,
16
+ sample_inputs_numpy_take,
17
+ )
18
+ from torch import Tensor
19
+ from torch.types import Number
20
+ from typing import * # noqa: F403
21
+ import torch._custom_ops as custom_ops
22
+
23
+ # Note: [custom op db]
24
+ #
25
+ # This is a collection of custom operator test cases written as OpInfos
26
+ # so they can easily be consumed by OpInfo-based tests to check if subsystems
27
+ # support them correctly.
28
+
29
+ def to_numpy(tensor):
30
+ return tensor.cpu().numpy()
31
+
32
+ @custom_ops.custom_op('_torch_testing::numpy_cube')
33
+ def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
34
+ raise NotImplementedError()
35
+
36
+ @custom_ops.impl('_torch_testing::numpy_cube')
37
+ def numpy_cube_impl(x):
38
+ x_np = to_numpy(x)
39
+ dx = torch.tensor(3 * x_np ** 2, device=x.device)
40
+ return torch.tensor(x_np ** 3, device=x.device), dx
41
+
42
+ @custom_ops.impl_abstract('_torch_testing::numpy_cube')
43
+ def numpy_cube_abstract(x):
44
+ return x.clone(), x.clone()
45
+
46
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_cube')
47
+ def numpy_cube_save_for_backward(inputs, output):
48
+ return (inputs.x, output[1])
49
+
50
+ @custom_ops.impl_backward('_torch_testing::numpy_cube')
51
+ def numpy_cube_backward(ctx, saved, grad_out, grad_dx):
52
+ x, dx = saved
53
+ grad_x = torch.ops._torch_testing.numpy_mul(grad_out, dx) + 6 * torch.ops._torch_testing.numpy_mul(grad_dx, x)
54
+ return {'x': grad_x}
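
An illustrative call of the op registered above (it assumes this module has been imported so all custom_ops registrations, including numpy_mul used in the backward, have run):

import torch

x = torch.randn(3, requires_grad=True)
cube, dcube = torch.ops._torch_testing.numpy_cube(x)
(cube.sum() + dcube.sum()).backward()   # routes through numpy_cube_backward above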
55
+
56
+ @custom_ops.custom_op('_torch_testing::numpy_mul')
57
+ def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
58
+ raise NotImplementedError()
59
+
60
+ @custom_ops.impl('_torch_testing::numpy_mul')
61
+ def numpy_mul_impl(x, y):
62
+ return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
63
+
64
+ @custom_ops.impl_abstract('_torch_testing::numpy_mul')
65
+ def numpy_mul_abstract(x, y):
66
+ assert x.device == y.device
67
+ return (x * y).contiguous()
68
+
69
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_mul')
70
+ def numpy_mul_save_for_backward(inputs, output):
71
+ saved = {}
72
+ saved['x_requires_grad'] = inputs.x.requires_grad
73
+ saved['y_requires_grad'] = inputs.y.requires_grad
74
+ # Optimization: only save what is necessary
75
+ saved['y'] = inputs.y if inputs.x.requires_grad else None
76
+ saved['x'] = inputs.x if inputs.y.requires_grad else None
77
+ return saved
78
+
79
+ @custom_ops.impl_backward('_torch_testing::numpy_mul')
80
+ def numpy_mul_backward(ctx, saved, grad_out):
81
+ grad_x = grad_out * saved['y'] if saved['x_requires_grad'] else None
82
+ grad_y = grad_out * saved['x'] if saved['y_requires_grad'] else None
83
+ return {'y': grad_y, 'x': grad_x}
84
+
85
+ @custom_ops.custom_op('_torch_testing::numpy_sort')
86
+ def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]:
87
+ raise NotImplementedError()
88
+
89
+ @custom_ops.impl("_torch_testing::numpy_sort")
90
+ def numpy_sort_impl(x, dim):
91
+ device = x.device
92
+ x = to_numpy(x)
93
+ ind = np.argsort(x, axis=dim)
94
+ ind_inv = np.argsort(ind, axis=dim)
95
+ result = np.take_along_axis(x, ind, axis=dim)
96
+ return (
97
+ torch.tensor(result, device=device),
98
+ torch.tensor(ind, device=device),
99
+ torch.tensor(ind_inv, device=device),
100
+ )
101
+
102
+ @custom_ops.impl_abstract('_torch_testing::numpy_sort')
103
+ def numpy_sort_abstract(x, dim):
104
+ return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long)
105
+
106
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_sort')
107
+ def numpy_sort_save_for_backward(inputs, output):
108
+ out, ind, ind_inv = output
109
+ return [inputs.dim, ind, ind_inv]
110
+
111
+ @custom_ops.impl_backward('_torch_testing::numpy_sort', output_differentiability=[True, False, False])
112
+ def numpy_sort_backward(ctx, saved, grad_out, grad_ind, grad_ind_inv):
113
+ dim, ind, ind_inv = saved
114
+ return {'x': torch.ops._torch_testing.numpy_take(grad_out, ind_inv, ind, dim)}
115
+
116
+ @custom_ops.custom_op('_torch_testing::numpy_take')
117
+ def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor:
118
+ raise NotImplementedError()
119
+
120
+ @custom_ops.impl("_torch_testing::numpy_take")
121
+ def numpy_take_impl(x, ind, ind_inv, dim):
122
+ device = x.device
123
+ x = to_numpy(x)
124
+ ind = to_numpy(ind)
125
+ return torch.tensor(np.take_along_axis(x, ind, dim), device=device)
126
+
127
+ @custom_ops.impl_abstract('_torch_testing::numpy_take')
128
+ def numpy_take_abstract(x, ind, ind_inv, dim):
129
+ assert x.device == ind.device
130
+ assert x.device == ind_inv.device
131
+ assert ind.dtype == torch.long
132
+ assert ind_inv.dtype == torch.long
133
+ return torch.empty_like(x)
134
+
135
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_take')
136
+ def numpy_take_save_for_backward(inputs, output):
137
+ return {
138
+ 'dim': inputs.dim,
139
+ 'ind': inputs.ind,
140
+ 'ind_inv': inputs.ind_inv,
141
+ }
142
+
143
+ @custom_ops.impl_backward('_torch_testing::numpy_take')
144
+ def numpy_take_backward(ctx, saved, grad_out):
145
+ return {
146
+ 'x': torch.ops._torch_testing.numpy_take(grad_out, saved['ind_inv'], saved['ind'], saved['dim']),
147
+ 'ind': None,
148
+ 'ind_inv': None,
149
+ }
150
+
151
+ @custom_ops.custom_op('_torch_testing::numpy_nonzero')
152
+ def numpy_nonzero(x: Tensor) -> Tensor:
153
+ raise NotImplementedError()
154
+
155
+ @custom_ops.impl('_torch_testing::numpy_nonzero')
156
+ def numpy_nonzero_impl(x):
157
+ x_np = to_numpy(x)
158
+ res = np.stack(np.nonzero(x_np), axis=1)
159
+ if res.shape[0] <= 1:
160
+ raise RuntimeError("not supported")
161
+ return torch.tensor(res, device=x.device)
162
+
163
+ @custom_ops.impl_abstract('_torch_testing::numpy_nonzero')
164
+ def numpy_nonzero_abstract(x):
165
+ ctx = torch._custom_op.impl.get_ctx()
166
+ i0 = ctx.create_unbacked_symint()
167
+ shape = [i0, x.dim()]
168
+ result = x.new_empty(shape, dtype=torch.long)
169
+ return result
170
+
171
+ def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs):
172
+ make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
173
+ shape = 10
174
+ result = make_arg(shape, low=0.9, high=2)
175
+ mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long)
176
+ with torch.no_grad():
177
+ result *= mask
178
+
179
+ yield SampleInput(result, args=())
180
+
181
+ @custom_ops.custom_op('_torch_testing::numpy_view_copy')
182
+ def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor:
183
+ raise NotImplementedError()
184
+
185
+ @custom_ops.impl('_torch_testing::numpy_view_copy')
186
+ def numpy_view_copy_impl(x, shape) -> Tensor:
187
+ return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device)
188
+
189
+ @custom_ops.impl_abstract('_torch_testing::numpy_view_copy')
190
+ def numpy_view_copy_abstract(x, shape) -> Tensor:
191
+ return x.clone().view(shape).clone()
192
+
193
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_view_copy')
194
+ def numpy_view_copy_save_for_backward(inputs, output) -> Tensor:
195
+ return inputs.x.shape
196
+
197
+ @custom_ops.impl_backward('_torch_testing::numpy_view_copy')
198
+ def numpy_view_copy_backward(ctx, x_shape, grad_out) -> Tensor:
199
+ return {'x': torch.ops._torch_testing.numpy_view_copy(grad_out, x_shape)}
200
+
201
+ def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs):
202
+ make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
203
+ result = make_arg(2, 3, 4, low=0.9, high=2)
204
+ yield SampleInput(result, args=([2, 12],))
205
+
206
+ @custom_ops.custom_op('_torch_testing::numpy_cat')
207
+ def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor:
208
+ raise NotImplementedError()
209
+
210
+ @custom_ops.impl('_torch_testing::numpy_cat')
211
+ def numpy_cat_impl(xs, dim):
212
+ assert len(xs) > 0
213
+ assert all(x.device == xs[0].device for x in xs)
214
+ assert all(x.dtype == xs[0].dtype for x in xs)
215
+ np_xs = [to_numpy(x) for x in xs]
216
+ np_out = np.concatenate(np_xs, axis=dim)
217
+ return torch.tensor(np_out, device=xs[0].device)
218
+
219
+ @custom_ops.impl_abstract('_torch_testing::numpy_cat')
220
+ def numpy_cat_abstract(xs, dim):
221
+ assert len(xs) > 0
222
+ assert all(x.device == xs[0].device for x in xs)
223
+ assert all(x.dtype == xs[0].dtype for x in xs)
224
+ return torch.cat(xs, dim=dim)
225
+
226
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_cat')
227
+ def numpy_cat_save_for_backward(inputs, output):
228
+ dim_sizes = [x.shape[inputs.dim] for x in inputs.xs]
229
+ return dim_sizes, inputs.dim
230
+
231
+ @custom_ops.impl_backward('_torch_testing::numpy_cat')
232
+ def numpy_cat_backward(ctx, saved, grad_out):
233
+ dim_sizes, dim = saved
234
+ splits = list(np.cumsum(dim_sizes)[:-1])
235
+ grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim)
236
+ return {'xs': grad_xs}
237
+
238
+ def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs):
239
+ make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
240
+ r0 = make_arg(2, 3, 4, low=0.9, high=2)
241
+ r1 = make_arg(4, 3, 4, low=0.9, high=2)
242
+ r2 = make_arg(5, 3, 4, low=0.9, high=2)
243
+ yield SampleInput([r0, r1, r2], args=(0,))
244
+
245
+ @custom_ops.custom_op('_torch_testing::numpy_split_copy')
246
+ def numpy_split_copy(x: Tensor, sections: Sequence[int], dim: int) -> List[Tensor]:
247
+ raise NotImplementedError()
248
+
249
+ @custom_ops.impl('_torch_testing::numpy_split_copy')
250
+ def numpy_split_copy_impl(x, splits, dim):
251
+ x_np = to_numpy(x)
252
+ arrs = np.split(x_np, splits, axis=dim)
253
+ return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs]
254
+
255
+ @custom_ops.impl_abstract('_torch_testing::numpy_split_copy')
256
+ def numpy_split_copy_abstract(x, splits, dim):
257
+ return [xi.clone() for xi in torch.tensor_split(x, splits, dim)]
258
+
259
+ @custom_ops.impl_save_for_backward('_torch_testing::numpy_split_copy')
260
+ def numpy_split_copy_save_for_backward(inputs, output):
261
+ return inputs.dim
262
+
263
+ @custom_ops.impl_backward('_torch_testing::numpy_split_copy')
264
+ def numpy_split_copy_backward(ctx, saved, grad_out):
265
+ dim = saved
266
+ return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)}
267
+
268
+ def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs):
269
+ make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
270
+ x = make_arg(2, 9, low=0.9, high=2)
271
+ yield SampleInput(x, args=([1, 3, 6], 1))
272
+
273
+ @custom_ops.custom_op('_torch_testing::numpy_split_copy_with_int')
274
+ def numpy_split_copy_with_int(x: Tensor, sections: Sequence[int], dim: int) -> Tuple[List[Tensor], int]:
275
+ raise NotImplementedError()
276
+
277
+ @custom_ops.impl('_torch_testing::numpy_split_copy_with_int')
278
+ def numpy_split_copy_with_int_impl(x, splits, dim):
279
+ x_np = to_numpy(x)
280
+ arrs = np.split(x_np, splits, axis=dim)
281
+ return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits)
282
+
283
+ @custom_ops.impl_abstract('_torch_testing::numpy_split_copy_with_int')
284
+ def numpy_split_copy_with_int_abstract(x, splits, dim):
285
+ return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits)
286
+
287
+ @custom_ops.impl_save_for_backward(
288
+ '_torch_testing::numpy_split_copy_with_int')
289
+ def numpy_split_copy_with_int_save_for_backward(inputs, output):
290
+ return inputs.dim
291
+
292
+ @custom_ops.impl_backward(
293
+ '_torch_testing::numpy_split_copy_with_int',
294
+ output_differentiability=[True, False])
295
+ def numpy_split_copy_with_int_backward(ctx, saved, grad_out, _):
296
+ dim = saved
297
+ return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)}
298
+
299
+ @custom_ops.custom_op('_torch_testing::numpy_nms')
300
+ def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor:
301
+ raise NotImplementedError()
302
+
303
+ @custom_ops.impl('_torch_testing::numpy_nms')
304
+ def numpy_nms_impl(boxes, scores, iou_threshold):
305
+ # Adapted from Ross Girshick's fast-rcnn implementation at
306
+ # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
307
+ assert boxes.device == scores.device
308
+ device = boxes.device
309
+
310
+ boxes = to_numpy(boxes)
311
+ scores = to_numpy(scores)
312
+
313
+ N = boxes.shape[0]
314
+ assert boxes.shape == (N, 4)
315
+ assert scores.shape == (N,)
316
+
317
+ x1 = boxes[:, 0]
318
+ y1 = boxes[:, 1]
319
+ x2 = boxes[:, 2]
320
+ y2 = boxes[:, 3]
321
+
322
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
323
+ order = scores.argsort()[::-1]
324
+
325
+ keep = []
326
+ while order.size > 0:
327
+ i = order[0]
328
+ keep.append(i)
329
+ xx1 = np.maximum(x1[i], x1[order[1:]])
330
+ yy1 = np.maximum(y1[i], y1[order[1:]])
331
+ xx2 = np.minimum(x2[i], x2[order[1:]])
332
+ yy2 = np.minimum(y2[i], y2[order[1:]])
333
+
334
+ w = np.maximum(0.0, xx2 - xx1 + 1)
335
+ h = np.maximum(0.0, yy2 - yy1 + 1)
336
+ inter = w * h
337
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
338
+
339
+ inds = np.where(ovr <= iou_threshold)[0]
340
+ order = order[inds + 1]
341
+
342
+ result = torch.tensor(np.stack(keep), device=device)
343
+ # Needed for data-dependent condition :(
344
+ assert result.size(0) >= 2
345
+ return result
346
+
347
+ @custom_ops.impl_abstract('_torch_testing::numpy_nms')
348
+ def numpy_nms_abstract(boxes, scores, iou_threshold):
349
+ assert boxes.device == scores.device
350
+ N = boxes.shape[0]
351
+ assert boxes.shape == (N, 4)
352
+ assert scores.shape == (N,)
353
+
354
+ ctx = torch._custom_op.impl.get_ctx()
355
+ i0 = ctx.create_unbacked_symint()
356
+ result = boxes.new_empty([i0], dtype=torch.int64)
357
+ return result
358
+
359
+ def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs):
360
+ make_arg = functools.partial(make_tensor, device=device, dtype=dtype)
361
+ N = 64
362
+ xs = make_arg([N], low=0, high=28)
363
+ dx = make_arg([N], low=0, high=4)
364
+ ys = make_arg([N], low=0, high=28)
365
+ dy = make_arg([N], low=0, high=4)
366
+ boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad)
367
+ scores = make_arg([N], low=0, high=1, requires_grad=requires_grad)
368
+ iou_threshold = make_arg([], low=0, high=1).item()
369
+
370
+ yield SampleInput(boxes, args=(scores, iou_threshold))
371
+
372
+ custom_op_db = [
373
+ OpInfo(
374
+ 'NumpyCubeCustomOp',
375
+ op=torch.ops._torch_testing.numpy_cube,
376
+ sample_inputs_func=sample_inputs_numpy_cube,
377
+ dtypes=all_types_and(torch.bool, torch.half),
378
+ supports_out=False,
379
+ ),
380
+ OpInfo(
381
+ 'NumpyMulCustomOp',
382
+ op=torch.ops._torch_testing.numpy_mul,
383
+ sample_inputs_func=sample_inputs_numpy_mul,
384
+ dtypes=all_types_and(torch.bool, torch.half),
385
+ supports_out=False,
386
+ ),
387
+ OpInfo(
388
+ 'NumpySortCustomOp',
389
+ op=torch.ops._torch_testing.numpy_sort,
390
+ sample_inputs_func=sample_inputs_numpy_sort,
391
+ dtypes=all_types_and(torch.bool, torch.half),
392
+ supports_out=False,
393
+ ),
394
+ OpInfo(
395
+ 'NumpyTakeCustomOp',
396
+ op=torch.ops._torch_testing.numpy_take,
397
+ sample_inputs_func=sample_inputs_numpy_take,
398
+ dtypes=all_types_and(torch.bool, torch.half),
399
+ supports_out=False,
400
+ ),
401
+ OpInfo(
402
+ 'NumpyNonzeroCustomOp',
403
+ op=torch.ops._torch_testing.numpy_nonzero,
404
+ sample_inputs_func=sample_inputs_numpy_nonzero,
405
+ dtypes=all_types_and(torch.bool, torch.half),
406
+ supports_autograd=False,
407
+ supports_out=False,
408
+ ),
409
+ OpInfo(
410
+ 'NumpyNMSCustomOp',
411
+ op=torch.ops._torch_testing.numpy_nms,
412
+ sample_inputs_func=sample_inputs_numpy_nms,
413
+ dtypes=all_types_and(torch.bool, torch.half),
414
+ supports_autograd=False,
415
+ supports_out=False,
416
+ ),
417
+ OpInfo(
418
+ 'NumpyViewCopyCustomOp',
419
+ op=torch.ops._torch_testing.numpy_view_copy,
420
+ sample_inputs_func=sample_inputs_numpy_view_copy,
421
+ dtypes=all_types_and(torch.bool, torch.half),
422
+ supports_autograd=True,
423
+ supports_out=False,
424
+ ),
425
+ OpInfo(
426
+ 'NumpyCatCustomOp',
427
+ op=torch.ops._torch_testing.numpy_cat,
428
+ sample_inputs_func=sample_inputs_numpy_cat,
429
+ dtypes=all_types_and(torch.bool, torch.half),
430
+ supports_autograd=True,
431
+ check_batched_grad=False,
432
+ check_batched_gradgrad=False,
433
+ supports_out=False,
434
+ ),
435
+ OpInfo(
436
+ 'NumpySplitCopyCustomOp',
437
+ op=torch.ops._torch_testing.numpy_split_copy,
438
+ sample_inputs_func=sample_inputs_numpy_split_copy,
439
+ dtypes=all_types_and(torch.bool, torch.half),
440
+ supports_autograd=True,
441
+ check_batched_grad=False,
442
+ check_batched_gradgrad=False,
443
+ supports_out=False,
444
+ ),
445
+ OpInfo(
446
+ 'NumpySplitCopyWithIntCustomOp',
447
+ op=torch.ops._torch_testing.numpy_split_copy_with_int,
448
+ sample_inputs_func=sample_inputs_numpy_split_copy,
449
+ dtypes=all_types_and(torch.bool, torch.half),
450
+ gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0],
451
+ supports_autograd=True,
452
+ check_batched_grad=False,
453
+ check_batched_gradgrad=False,
454
+ supports_out=False,
455
+ ),
456
+ ]
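A minimal usage sketch (not part of the diff above): once this module has been imported so the registrations run, the entries in custom_op_db can be exercised directly; the boxes, scores and threshold below are illustrative values only.

import torch
from torch.testing._internal.custom_op_db import custom_op_db

# Look up the NMS entry registered above and call its op on hand-made inputs.
nms_info = next(op for op in custom_op_db if op.name == 'NumpyNMSCustomOp')
boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms_info.op(boxes, scores, 0.5)  # indices of the boxes surviving NMS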
venv/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py ADDED
@@ -0,0 +1,206 @@
1
+ # mypy: ignore-errors
2
+
3
+ import re
4
+ import sys
5
+ import time
6
+ from functools import partial, wraps
7
+ from typing import Tuple
8
+
9
+ import torch.distributed as dist
10
+ import torch.distributed.rpc as rpc
11
+ from torch.distributed.rpc import _rref_context_get_debug_info
12
+ from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN
13
+
14
+
15
+ if not dist.is_available():
16
+ print("c10d not available, skipping tests", file=sys.stderr)
17
+ sys.exit(0)
18
+
19
+
20
+ INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}"
21
+
22
+ def dist_init(
23
+ old_test_method=None,
24
+ setup_rpc: bool = True,
25
+ clean_shutdown: bool = True,
26
+ faulty_messages=None,
27
+ messages_to_delay=None,
28
+ ):
29
+ """
30
+ We use this decorator for setting up and tearing down state since
31
+ MultiProcessTestCase runs each `test*` method in a separate process and
32
+ each process just runs the `test*` method without actually calling
33
+ 'setUp' and 'tearDown' methods of unittest.
34
+
35
+ Note: pass the string representation of MessageTypes that should be used
36
+ with the faulty agent's send function. By default, all retriable messages
37
+ ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE",
38
+ "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is
39
+ set from faulty_rpc_agent_test_fixture.py).
40
+ """
41
+ # If we use dist_init without arguments (ex: @dist_init), old_test_method is
42
+ # appropriately set and we return the wrapper appropriately. On the other
43
+ # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
44
+ # old_test_method is None and we return a functools.partial which is the real
45
+ # decorator that is used and as a result we recursively call dist_init with
46
+ # old_test_method and the rest of the arguments appropriately set.
47
+ if old_test_method is None:
48
+ return partial(
49
+ dist_init,
50
+ setup_rpc=setup_rpc,
51
+ clean_shutdown=clean_shutdown,
52
+ faulty_messages=faulty_messages,
53
+ messages_to_delay=messages_to_delay,
54
+ )
55
+
56
+ @wraps(old_test_method)
57
+ def new_test_method(self, *arg, **kwargs):
58
+ # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted
59
+ # in tests.
60
+ import torch.distributed.rpc.api as api
61
+
62
+ api._ignore_rref_leak = False
63
+ self.worker_id = self.rank
64
+ self.setup_fault_injection(faulty_messages, messages_to_delay)
65
+
66
+ rpc_backend_options = self.rpc_backend_options
67
+ if setup_rpc:
68
+ if TEST_WITH_TSAN:
69
+ # TSAN runs much slower.
70
+ rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5
71
+ rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60
72
+
73
+ rpc.init_rpc(
74
+ name="worker%d" % self.rank,
75
+ backend=self.rpc_backend,
76
+ rank=self.rank,
77
+ world_size=self.world_size,
78
+ rpc_backend_options=rpc_backend_options,
79
+ )
80
+
81
+ return_value = old_test_method(self, *arg, **kwargs)
82
+
83
+ if setup_rpc:
84
+ rpc.shutdown(graceful=clean_shutdown)
85
+
86
+ return return_value
87
+
88
+ return new_test_method
89
+
90
+
91
+ def noop() -> None:
92
+ pass
93
+
94
+
95
+ def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str:
96
+ """
97
+ Loops until an RPC to the given rank fails. This is used to
98
+ indicate that the node has failed in unit tests.
99
+ Args:
100
+ rank (int): Rank of the node expected to fail
101
+ expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure
102
+ occurs, not just any.
103
+ """
104
+ while True:
105
+ try:
106
+ rpc.rpc_sync(f"worker{rank}", noop, args=())
107
+ time.sleep(0.1)
108
+ except Exception as e:
109
+ if re.search(pattern=expected_error_regex, string=str(e)):
110
+ return str(e)
111
+
112
+
113
+ def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None:
114
+ """
115
+ The RRef protocol holds forkIds of rrefs in a map until those forks are
116
+ confirmed by the owner. The message confirming the fork may arrive after
117
+ our tests check whether this map is empty, which leads to failures and
118
+ flaky tests. to_here also does not guarantee that we have finished
119
+ processing the owner's confirmation message for the RRef. This function
120
+ loops until the map is empty, which means the messages have been received
121
+ and processed. Call this function before asserting the map returned by
122
+ _get_debug_info is empty.
123
+ """
124
+ start = time.time()
125
+ while True:
126
+ debug_info = _rref_context_get_debug_info()
127
+ num_pending_futures = int(debug_info["num_pending_futures"])
128
+ num_pending_users = int(debug_info["num_pending_users"])
129
+ if num_pending_futures == 0 and num_pending_users == 0:
130
+ break
131
+ time.sleep(0.1)
132
+ if time.time() - start > timeout:
133
+ raise ValueError(
134
+ "Timed out waiting to flush pending futures and users, had {} pending futures and {} pending users".format(
135
+ num_pending_futures, num_pending_users
136
+ )
137
+ )
138
+
139
+
140
+ def get_num_owners_and_forks() -> Tuple[str, str]:
141
+ """
142
+ Retrieves number of OwnerRRefs and forks on this node from
143
+ _rref_context_get_debug_info.
144
+ """
145
+ rref_dbg_info = _rref_context_get_debug_info()
146
+ num_owners = rref_dbg_info["num_owner_rrefs"]
147
+ num_forks = rref_dbg_info["num_forks"]
148
+ return num_owners, num_forks
149
+
150
+
151
+ def wait_until_owners_and_forks_on_rank(
152
+ num_owners: int, num_forks: int, rank: int, timeout: int = 20
153
+ ) -> None:
154
+ """
155
+ Waits until timeout for num_forks and num_owners to exist on the rank. Used
156
+ to ensure proper deletion of RRefs in tests.
157
+ """
158
+ start = time.time()
159
+ while True:
160
+ num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync(
161
+ worker_name(rank), get_num_owners_and_forks, args=(), timeout=5
162
+ )
163
+ num_owners_on_rank = int(num_owners_on_rank)
164
+ num_forks_on_rank = int(num_forks_on_rank)
165
+ if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks:
166
+ return
167
+ time.sleep(1)
168
+ if time.time() - start > timeout:
169
+ raise ValueError(
170
+ "Timed out waiting {} sec for {} owners and {} forks on rank, had {} owners and {} forks".format(
171
+ timeout,
172
+ num_owners,
173
+ num_forks,
174
+ num_owners_on_rank,
175
+ num_forks_on_rank,
176
+ )
177
+ )
178
+
179
+
180
+ def initialize_pg(init_method, rank: int, world_size: int) -> None:
181
+ # This is for tests using `dist.barrier`.
182
+ if not dist.is_initialized():
183
+ dist.init_process_group(
184
+ backend="gloo",
185
+ init_method=init_method,
186
+ rank=rank,
187
+ world_size=world_size,
188
+ )
189
+
190
+
191
+ def worker_name(rank: int) -> str:
192
+ return f"worker{rank}"
193
+
194
+
195
+ def get_function_event(function_events, partial_event_name):
196
+ """
197
+ Returns the first event that matches partial_event_name in the provided
198
+ function_events. These function_events should be the output of
199
+ torch.autograd.profiler.function_events().
200
+
201
+ Args:
202
+ function_events: function_events returned by the profiler.
203
+ partial_event_name (str): partial key that the event was profiled with.
204
+ """
205
+ event = [event for event in function_events if partial_event_name in event.name][0] # noqa: RUF015
206
+ return event
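A minimal sketch of how a test might consume the dist_init decorator above; the test class and body are hypothetical, and it assumes an RpcAgentTestFixture-style fixture that supplies rank, world_size and the RPC backend options.

import torch
import torch.distributed.rpc as rpc
from torch.testing._internal.dist_utils import dist_init, worker_name
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture

class EchoRpcTest(RpcAgentTestFixture):  # hypothetical test class
    @dist_init
    def test_remote_add(self):
        # RPC is already initialized by the decorator; call into the next worker.
        dst = worker_name((self.rank + 1) % self.world_size)
        ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
        self.assertEqual(ret, torch.ones(2) + 1)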
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # mypy: ignore-errors
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py ADDED
@@ -0,0 +1,66 @@
1
+ # mypy: ignore-errors
2
+
3
+ import copy
4
+ import random
5
+ import torch
6
+ from torch.distributed._shard import sharded_tensor
7
+
8
+ from torch.distributed._shard.sharding_spec import (
9
+ ChunkShardingSpec,
10
+ )
11
+
12
+ PLACEMENTS = [
13
+ "rank:0/cuda:0",
14
+ "rank:1/cuda:1",
15
+ "rank:2/cuda:2",
16
+ "rank:3/cuda:3",
17
+ ]
18
+
19
+ DEFAULT_GPU_NUM = 4
20
+
21
+
22
+ def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
23
+ spec_list = []
24
+ for i in range(len(sharding_dims)):
25
+ random.Random(seed + i).shuffle(PLACEMENTS)
26
+ spec_list.append(
27
+ ChunkShardingSpec(
28
+ dim=sharding_dims[i],
29
+ placements=copy.deepcopy(PLACEMENTS),
30
+ )
31
+ )
32
+ return spec_list
33
+
34
+ class MyShardedModel2(torch.nn.Module):
35
+ def __init__(
36
+ self,
37
+ spec=None,
38
+ group=None,
39
+ init_rrefs=True
40
+ ) -> None:
41
+ super().__init__()
42
+ if spec is not None:
43
+ self.sharded_tensor2 = sharded_tensor.rand(
44
+ spec, 10, 20, process_group=group, init_rrefs=init_rrefs
45
+ )
46
+ else:
47
+ self.sharded_tensor2 = None
48
+ self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
49
+
50
+
51
+ class MyShardedModel1(torch.nn.Module):
52
+ def __init__(
53
+ self,
54
+ spec=None,
55
+ group=None,
56
+ init_rrefs=True
57
+ ) -> None:
58
+ super().__init__()
59
+ if spec is not None:
60
+ self.sharded_tensor1 = sharded_tensor.rand(
61
+ spec, 10, 20, process_group=group, init_rrefs=init_rrefs
62
+ )
63
+ else:
64
+ self.sharded_tensor1 = None
65
+ self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
66
+ self.submodule = MyShardedModel2(spec, group, init_rrefs)
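A hedged sketch of the helpers above in a sharded-tensor test: it assumes a 4-rank process group (and the RPC framework, if init_rrefs is left True) is already initialized, matching the cuda:0..3 placements.

specs = _chunk_sharding_specs_list_for_test([0, 0], seed=0)
for spec in specs:
    # Each spec chunks dim 0 across the four shuffled cuda placements;
    # model.sharded_tensor1 is a ShardedTensor with global shape (10, 20).
    model = MyShardedModel1(spec=spec, init_rrefs=False)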
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py ADDED
@@ -0,0 +1,42 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
7
+
8
+
9
+ class SimpleMegatronLM(nn.Module):
10
+ def __init__(self, linear_size, rank=None, dtype=torch.float32):
11
+ super().__init__()
12
+ self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
13
+ self.gelu = nn.GELU()
14
+ self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
15
+ if rank is not None:
16
+ self.fc1.cuda(rank)
17
+ self.fc2.cuda(rank)
18
+
19
+ def forward(self, inp):
20
+ return self.fc2(self.gelu(self.fc1(inp)))
21
+
22
+ def get_weights(self):
23
+ if isinstance(self.fc1.weight, ShardedTensor):
24
+ weight1 = self.fc1.weight.local_tensor()
25
+ else:
26
+ weight1 = self.fc1.weight
27
+
28
+ if isinstance(self.fc2.weight, ShardedTensor):
29
+ weight2 = self.fc2.weight.local_tensor()
30
+ else:
31
+ weight2 = self.fc2.weight
32
+
33
+ return (weight1, weight2)
34
+
35
+ def get_biases(self):
36
+ return (self.fc1.bias, self.fc2.bias)
37
+
38
+ def get_weight_grads(self):
39
+ return (self.fc1.weight.grad, self.fc2.weight.grad)
40
+
41
+ def get_bias_grads(self):
42
+ return (self.fc1.bias.grad, self.fc2.bias.grad)
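A minimal local (CPU, non-sharded) sketch of SimpleMegatronLM; the layer sizes are illustrative.

import torch

model = SimpleMegatronLM([(10, 17), (17, 12)])  # fc1: 10 -> 17, fc2: 17 -> 12
out = model(torch.rand(8, 10))                  # output shape (8, 12)
out.sum().backward()
w1_grad, w2_grad = model.get_weight_grads()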
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py ADDED
@@ -0,0 +1,51 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates
4
+
5
+ import os
6
+ import shutil
7
+ import tempfile
8
+ from functools import wraps
9
+ from typing import Any, Callable, Dict, Optional, Tuple
10
+
11
+ import torch.distributed as dist
12
+
13
+
14
+ def with_temp_dir(
15
+ func: Optional[Callable] = None,
16
+ ) -> Optional[Callable]:
17
+ """
18
+ Wrapper to initialize temp directory for distributed checkpoint.
19
+ """
20
+ assert func is not None
21
+
22
+ @wraps(func)
23
+ def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None:
24
+ if dist.is_initialized():
25
+ # Only create temp_dir when rank is 0
26
+ if dist.get_rank() == 0:
27
+ temp_dir = tempfile.mkdtemp()
28
+ print(f"Using temp directory: {temp_dir}")
29
+ else:
30
+ temp_dir = ""
31
+ object_list = [temp_dir]
32
+
33
+ # Broadcast temp_dir to all the other ranks
34
+ os.sync()
35
+ dist.broadcast_object_list(object_list)
36
+ self.temp_dir = object_list[0]
37
+ os.sync()
38
+ else:
39
+ temp_dir = tempfile.mkdtemp()
40
+ print(f"No process group initialized, using temp directory: {temp_dir}")
41
+ self.temp_dir = temp_dir
42
+
43
+ try:
44
+ func(self, *args, **kwargs)
45
+ finally:
46
+ if dist.is_initialized() and dist.get_rank() == 0:
47
+ shutil.rmtree(self.temp_dir, ignore_errors=True)
48
+ else:
49
+ shutil.rmtree(self.temp_dir, ignore_errors=True)
50
+
51
+ return wrapper
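A minimal sketch of with_temp_dir in a test; the test class and body are hypothetical. The decorator fills in self.temp_dir (broadcast from rank 0 when a process group is initialized) before the wrapped test runs and removes it afterwards.

import os
import unittest
import torch
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir

class CheckpointDirTest(unittest.TestCase):  # hypothetical
    @with_temp_dir
    def test_writes_into_temp_dir(self):
        # self.temp_dir was created by the decorator and is cleaned up after the test.
        torch.save({"x": torch.ones(3)}, os.path.join(self.temp_dir, "ckpt.pt"))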
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py ADDED
@@ -0,0 +1,113 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Owner(s): ["oncall: distributed"]
4
+
5
+ import copy
6
+ from itertools import chain
7
+ from typing import Any, Dict
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+
12
+ from torch.distributed._sharded_tensor import ShardedTensor
13
+ from torch.distributed._state_dict_utils import _gather_state_dict
14
+ from torch.distributed._tensor import DTensor
15
+ from torch.distributed.checkpoint.state_dict import (
16
+ PG,
17
+ set_state_dict,
18
+ STATE,
19
+ StateDictOptions,
20
+ )
21
+
22
+
23
+ class VerifyStateDictMixin:
24
+ def _compare_tensor(self, orig_tensor, dist_tensor):
25
+ if isinstance(dist_tensor, (DTensor, ShardedTensor)):
26
+ dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
27
+ self.assertTrue(isinstance(dist_tensor, torch.Tensor))
28
+ self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
29
+
30
+ def _verify_msd(
31
+ self,
32
+ msd: Dict[str, Any],
33
+ dist_msd: Dict[str, Any],
34
+ options: StateDictOptions = StateDictOptions(),
35
+ ) -> None:
36
+ if not options.ignore_frozen_params:
37
+ self.assertEqual(len(msd), len(dist_msd))
38
+ for fqn, param in msd.items():
39
+ dist_param = dist_msd.get(fqn, None)
40
+ if not options.ignore_frozen_params:
41
+ self.assertIsNotNone(dist_param, f"{fqn=}")
42
+ self._compare_tensor(param, dist_param)
43
+ elif dist_param is None:
44
+ self.assertFalse(param.requires_grad, f"{fqn=}")
45
+
46
+ def _verify_osd(
47
+ self,
48
+ model: nn.Module,
49
+ optim: torch.optim.Optimizer,
50
+ osd: Dict[str, Any],
51
+ dist_osd: Dict[str, Any],
52
+ ) -> None:
53
+ params = list(chain.from_iterable(g["params"] for g in optim.param_groups))
54
+ param_pid_mapping = dict(zip(params, range(len(params))))
55
+ fqn_pid_mapping = {}
56
+ for fqn, param in model.named_parameters():
57
+ pid = param_pid_mapping[param]
58
+ fqn_pid_mapping[fqn] = pid
59
+ fqn_pid_mapping[pid] = fqn
60
+ # Check optimizer_state_dict state
61
+
62
+ self.assertEqual(len(osd[STATE]), len(dist_osd[STATE]))
63
+ for pid, states in osd[STATE].items():
64
+ fqn = fqn_pid_mapping[pid]
65
+ dist_states = dist_osd[STATE].get(fqn, None)
66
+ self.assertIsNotNone(dist_states, fqn)
67
+ self.assertEqual(len(states), len(dist_states))
68
+ for key, state in states.items():
69
+ dist_state = dist_states.get(key, None)
70
+ self.assertIsNotNone(dist_state)
71
+ self._compare_tensor(state, dist_state)
72
+
73
+ # Check optimizer_state_dict param_group
74
+ old_dist_osd_pg = dist_osd[PG]
75
+ if len(osd[PG]) != len(dist_osd[PG]):
76
+ self.assertTrue(len(dist_osd[PG]) > len(osd[PG]))
77
+ new_pg = copy.deepcopy(dist_osd[PG][0])
78
+ new_pg["params"] = []
79
+ for dist_group in dist_osd[PG]:
80
+ new_pg["params"].extend(dist_group["params"])
81
+ dist_osd[PG] = [new_pg]
82
+
83
+ self.assertEqual(len(osd[PG]), len(dist_osd[PG]))
84
+ for group, dist_group in zip(osd[PG], dist_osd[PG]):
85
+ self.assertEqual(len(group), len(dist_group))
86
+ for key, value in group.items():
87
+ # Below doesn't work because param_groups can have None
88
+ # values.
89
+ # dist_value = dist_group.get(key, None)
90
+ # self.assertIsNotNone(dist_value, (dist_group, group))
91
+ dist_value = dist_group[key]
92
+ if key == "params":
93
+ fqns = [fqn_pid_mapping[pid] for pid in value]
94
+ self.assertEqual(sorted(fqns), sorted(dist_value))
95
+ else:
96
+ self.assertEqual(value, dist_value)
97
+ dist_osd[PG] = old_dist_osd_pg
98
+
99
+ def _verify_osd_by_load(
100
+ self,
101
+ model: nn.Module,
102
+ optim: torch.optim.Optimizer,
103
+ new_optim: torch.optim.Optimizer,
104
+ dist_osd: Dict[str, Any],
105
+ ) -> None:
106
+ new_dist_osd = _gather_state_dict(dist_osd)
107
+ set_state_dict(
108
+ model,
109
+ optimizers=new_optim,
110
+ model_state_dict={},
111
+ optim_state_dict=new_dist_osd,
112
+ )
113
+ self.assertEqual(optim.state_dict(), new_optim.state_dict())
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py ADDED
@@ -0,0 +1,733 @@
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import enum
5
+ import logging
6
+ import os
7
+ import threading
8
+ from typing import NamedTuple
9
+
10
+ import torch
11
+ import torch.distributed as dist
12
+ import torch.distributed.autograd as dist_autograd
13
+ import torch.nn as nn
14
+ from torch.distributed import rpc
15
+ from torch.distributed.nn import RemoteModule
16
+ from torch.nn.parallel import DistributedDataParallel
17
+ from torch.testing._internal.common_distributed import (
18
+ requires_gloo,
19
+ requires_nccl,
20
+ skip_if_lt_x_gpu,
21
+ skip_if_rocm,
22
+ )
23
+ from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
24
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
25
+ RpcAgentTestFixture,
26
+ )
27
+
28
+
29
+ NUM_EM_ROW = 2
30
+ D_SPARSE = 3
31
+ D_DENSE = 2
32
+ D_HID = 3
33
+ D_OUT = 1
34
+ NUM_TRAINERS = 4
35
+ # Trainers + the master + the remote worker
36
+ WORLD_SIZE = NUM_TRAINERS + 2
37
+ TRAINER_RANKS = list(range(NUM_TRAINERS))
38
+ REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1
39
+ MASTER_RANK = REMOTE_WORKER_RANK + 1
40
+
41
+
42
+ class DdpMode(enum.Enum):
43
+ # Don't apply DDP
44
+ NONE = enum.auto()
45
+ # Apply DDP to the top level nn.Module
46
+ OUTSIDE = enum.auto()
47
+ # Embed DDP inside the top level nn.Module
48
+ INSIDE = enum.auto()
49
+
50
+
51
+ def init_logger():
52
+ logger = logging.getLogger(__name__)
53
+ level = logging.DEBUG if "debug" in os.environ else logging.INFO
54
+ logger.setLevel(level)
55
+ console = logging.StreamHandler()
56
+ formatter = logging.Formatter(
57
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
58
+ )
59
+ console.setFormatter(formatter)
60
+ console.setLevel(level)
61
+ # add the handlers to the logger
62
+ logger.addHandler(console)
63
+ logger.propagate = False
64
+ return logger
65
+
66
+
67
+ gLogger = init_logger()
68
+
69
+
70
+ class FeatureSet(NamedTuple):
71
+ """ A feature set has 2 types of features"""
72
+
73
+ dense_features: torch.Tensor
74
+ sparse_features: torch.LongTensor
75
+ values: torch.Tensor
76
+
77
+
78
+ def _call_method(method, rref, *args, **kwargs):
79
+ return method(rref.local_value(), *args, **kwargs)
80
+
81
+
82
+ def _remote_method(method, rref, *args, **kwargs):
83
+ args_tup = tuple([method, rref] + list(args))
84
+ return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
85
+
86
+
87
+ def _remote_method_async(method, rref, *args, **kwargs):
88
+ args_tup = tuple([method, rref] + list(args))
89
+ return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
90
+
91
+
92
+ class RemoteEM(nn.Module):
93
+ def __init__(self, num_embeddings: int, embedding_dim: int):
94
+ gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim)
95
+ super().__init__()
96
+ init_em = [0.5] * embedding_dim
97
+ self.em = nn.EmbeddingBag(
98
+ num_embeddings,
99
+ embedding_dim,
100
+ _weight=torch.tensor([init_em] * num_embeddings),
101
+ )
102
+
103
+ def forward(self, input: torch.Tensor):
104
+ gLogger.debug("Running RemoteEM.forward() on: %s", input)
105
+ return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
106
+
107
+
108
+ # Return a linear module with predefined parameters.
109
+ def getLinear(d_in, d_out):
110
+ l = nn.Linear(d_in, d_out, bias=False)
111
+ w = torch.ones((d_out, d_in))
112
+ w[0][0] = -1
113
+ w.requires_grad_()
114
+ l.weight.data = w
115
+ return l
116
+
117
+
118
+ class RemoteNet(nn.Module):
119
+ def __init__(self, d_in: int, d_out: int):
120
+ gLogger.info("Initing RemoteNet with %s %s", d_in, d_out)
121
+ super().__init__()
122
+ self.fc = getLinear(d_in, d_out)
123
+ self.relu = nn.ReLU()
124
+
125
+ def forward(self, input: torch.Tensor):
126
+ gLogger.debug("Running RemoteNet.forward() on: %s", input)
127
+ return self.relu(self.fc(input))
128
+
129
+
130
+ class HybridModel(nn.Module):
131
+ def __init__(
132
+ self,
133
+ remote_em_rref: rpc.RRef,
134
+ remote_net_rref: rpc.RRef,
135
+ process_group_for_ddp: dist.ProcessGroup = None,
136
+ ):
137
+ super().__init__()
138
+ self.remote_em_rref = remote_em_rref
139
+ self.remote_net_rref = remote_net_rref
140
+ self.fc1 = getLinear(D_DENSE, D_DENSE)
141
+ self.fc2 = getLinear(D_HID, D_OUT)
142
+
143
+ self.non_ddp_params = tuple(self.fc1.parameters()) + tuple(
144
+ self.fc2.parameters()
145
+ )
146
+ self.ddp_params = ()
147
+
148
+ if process_group_for_ddp is not None:
149
+ self.non_ddp_params, self.ddp_params = (
150
+ tuple(self.fc1.parameters()),
151
+ tuple(self.fc2.parameters()),
152
+ )
153
+ gLogger.info("Use DDP for the second local net.")
154
+ self.fc2 = DistributedDataParallel(
155
+ self.fc2, check_reduction=True, process_group=process_group_for_ddp
156
+ )
157
+
158
+ gLogger.info(
159
+ "HybridModel has %s groups of parameters.", len(list(self.parameters()))
160
+ )
161
+
162
+ def forward(self, input: FeatureSet):
163
+ gLogger.debug("Running HybridModel.forward on %s", input)
164
+ sparse = _remote_method(
165
+ RemoteEM.forward, self.remote_em_rref, input.sparse_features
166
+ )
167
+ # The same size of mini batch.
168
+ assert sparse.shape[0] == input.dense_features.shape[0]
169
+ dense = self.fc1(input.dense_features)
170
+ x = torch.cat((dense, sparse), 1)
171
+ gLogger.debug("Concatenated feature: %s", x)
172
+ x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
173
+ return self.fc2(x)
174
+
175
+
176
+ class Trainer:
177
+ def __init__(
178
+ self,
179
+ remote_em_rref: rpc.RRef,
180
+ remote_net_rref: rpc.RRef,
181
+ ddp_mode: DdpMode,
182
+ rank: int,
183
+ ):
184
+ self.rank = rank
185
+ self.trainer_group = (
186
+ dist.new_group(TRAINER_RANKS)
187
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE)
188
+ else None
189
+ )
190
+ self.remote_em_rref = remote_em_rref
191
+ self.remote_net_rref = remote_net_rref
192
+ self.hybrid_module = HybridModel(
193
+ self.remote_em_rref,
194
+ self.remote_net_rref,
195
+ self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None,
196
+ )
197
+ self.ddp_params, self.non_ddp_params = (
198
+ self.hybrid_module.ddp_params,
199
+ self.hybrid_module.non_ddp_params,
200
+ )
201
+ if ddp_mode == DdpMode.OUTSIDE:
202
+ gLogger.info("Wrapping the whole hybrid module into DDP.")
203
+ self.ddp_params += self.non_ddp_params
204
+ self.non_ddp_params = ()
205
+ self.hybrid_module = DistributedDataParallel(
206
+ self.hybrid_module,
207
+ check_reduction=True,
208
+ process_group=self.trainer_group,
209
+ )
210
+ gLogger.info(
211
+ "Succeeded in creating a HybridModel instance with "
212
+ "%s ddp params and %s other local params.",
213
+ len(self.ddp_params), len(self.non_ddp_params)
214
+ )
215
+
216
+ def destroy_pg(self):
217
+ if self.trainer_group:
218
+ dist.destroy_process_group(self.trainer_group)
219
+
220
+ def train_batch(
221
+ self,
222
+ mini_batch: FeatureSet,
223
+ trainer_has_less_inputs: bool,
224
+ simulate_uneven_inputs: bool,
225
+ ):
226
+ grads_dict = None
227
+
228
+ if not simulate_uneven_inputs:
229
+ input_batches = [mini_batch]
230
+ else:
231
+ # Split into microbatches, and trim to simulate uneven inputs.
232
+ dense_features = mini_batch.dense_features
233
+ sparse_features = mini_batch.sparse_features
234
+ values = mini_batch.values
235
+
236
+ dense_microbatch = torch.split(dense_features, 2)
237
+ sparse_microbatch = torch.split(sparse_features, 2)
238
+ values_microbatch = torch.split(values, 2)
239
+ batches = []
240
+ for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch):
241
+ feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v)
242
+ batches.append(feature_set)
243
+
244
+ if trainer_has_less_inputs:
245
+ input_batches = batches[: len(batches) // 2]
246
+ gLogger.info(
247
+ "Trainer reduced input patches from %s "
248
+ "to %s to simulate uneven inputs.",
249
+ len(batches), len(input_batches)
250
+ )
251
+ else:
252
+ input_batches = batches
253
+
254
+ with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext():
255
+ for b in input_batches:
256
+ with dist_autograd.context() as context_id:
257
+ output = self.hybrid_module.forward(b)
258
+ loss = (output * mini_batch.values).sum()
259
+ dist_autograd.backward(context_id, [loss])
260
+ grads_dict = dist_autograd.get_gradients(context_id)
261
+ gLogger.info(
262
+ "Loss is %s for mini batch: %s. "
263
+ "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict
264
+ )
265
+ return (
266
+ tuple(grads_dict[param] for param in self.ddp_params),
267
+ tuple(grads_dict[param] for param in self.non_ddp_params),
268
+ )
269
+
270
+
271
+ def get_training_examples():
272
+ n = 16
273
+ training_examples = FeatureSet(
274
+ dense_features=torch.zeros((n, D_DENSE)),
275
+ sparse_features=torch.zeros(n, dtype=torch.long),
276
+ values=torch.zeros(n),
277
+ )
278
+ idx = 0
279
+ # Every example has another one that has exactly the same features but an
280
+ # opposite value. Therefore, their grads cancel each other in all-reduce.
281
+ for value in (-1, 1):
282
+ for x in (-1.0 * value, 1.0 * value):
283
+ for y in (1.0 * value, -1.0 * value):
284
+ for z in (0, 1):
285
+ training_examples.dense_features[idx, :] = torch.tensor((x, y))
286
+ training_examples.sparse_features[idx] = z
287
+ training_examples.values[idx] = value
288
+ idx += 1
289
+
290
+ # Split the examples among NUM_TRAINERS trainers
291
+ assert 0 == (n % NUM_TRAINERS)
292
+ examples_per_trainer = int(n / NUM_TRAINERS)
293
+ return [
294
+ FeatureSet(
295
+ dense_features=training_examples.dense_features[
296
+ start : start + examples_per_trainer, :
297
+ ],
298
+ sparse_features=training_examples.sparse_features[
299
+ start : start + examples_per_trainer
300
+ ],
301
+ values=training_examples.values[start : start + examples_per_trainer],
302
+ )
303
+ for start in range(0, n, examples_per_trainer)
304
+ ]
305
+
306
+
307
+ shutdown_signal = threading.Condition()
308
+
309
+
310
+ def set_shutdown_signal():
311
+ global shutdown_signal
312
+ with shutdown_signal:
313
+ shutdown_signal.notify()
314
+
315
+
316
+ class DdpUnderDistAutogradTest(RpcAgentTestFixture):
317
+ @property
318
+ def world_size(self) -> int:
319
+ return WORLD_SIZE
320
+
321
+ def remote_worker_name(self) -> str:
322
+ # The name has to be consistent with that in 'dist_init' decorator.
323
+ return f"worker{REMOTE_WORKER_RANK}"
324
+
325
+ def trainer_name(self, rank):
326
+ # The name has to be consistent with that in 'dist_init' decorator.
327
+ return f"worker{rank}"
328
+
329
+ def _remote_worker_process(self, ddp_mode):
330
+ gLogger.info("The remote worker is running.")
331
+ dist.init_process_group(
332
+ backend="gloo",
333
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
334
+ world_size=self.world_size,
335
+ rank=self.rank,
336
+ )
337
+
338
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
339
+ # new_group needs to be called on ranks.
340
+ dist.new_group(TRAINER_RANKS)
341
+
342
+ global shutdown_signal
343
+ with shutdown_signal:
344
+ shutdown_signal.wait()
345
+ gLogger.info("Exiting remote worker.")
346
+ dist.destroy_process_group()
347
+
348
+ def _trainer_process(self, rank: int):
349
+ gLogger.info("Running the trainer #%s...", rank)
350
+ gLogger.info(
351
+ "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS
352
+ )
353
+ dist.init_process_group(
354
+ backend="gloo",
355
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
356
+ world_size=self.world_size,
357
+ rank=self.rank,
358
+ )
359
+
360
+ gLogger.info("Waiting for shutdown signal on trainer #%s...", rank)
361
+
362
+ global shutdown_signal
363
+ with shutdown_signal:
364
+ shutdown_signal.wait()
365
+ gLogger.info("Exiting the trainer #%s...", rank)
366
+ dist.destroy_process_group()
367
+
368
+ def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
369
+ gLogger.info("Running the master process...")
370
+ dist.init_process_group(
371
+ backend="gloo",
372
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
373
+ world_size=self.world_size,
374
+ rank=self.rank,
375
+ )
376
+
377
+ remote_em_rref = rpc.remote(
378
+ self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
379
+ )
380
+ remote_net_rref = rpc.remote(
381
+ self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID)
382
+ )
383
+ gLogger.info("Created remote rrefs on master")
384
+ self.do_test_on_master(
385
+ ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref
386
+ )
387
+
388
+ def do_test_on_master(
389
+ self,
390
+ ddp_mode: DdpMode,
391
+ simulate_uneven_inputs: bool,
392
+ remote_em_rref: rpc.RRef,
393
+ remote_net_rref: rpc.RRef,
394
+ ):
395
+ if simulate_uneven_inputs:
396
+ gLogger.info(
397
+ "Running DDP + RPC test with simulating uneven inputs across trainers."
398
+ )
399
+
400
+ trainer_rrefs = []
401
+ for rank in TRAINER_RANKS:
402
+ trainer = self.trainer_name(rank)
403
+ trainer_rrefs.append(
404
+ rpc.remote(
405
+ trainer,
406
+ Trainer,
407
+ args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
408
+ )
409
+ )
410
+
411
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
412
+ # new_group needs to be called on ranks.
413
+ dist.new_group(TRAINER_RANKS)
414
+
415
+ training_examples = get_training_examples()
416
+ for _ in range(3):
417
+ futures = []
418
+ num_trainers = len(trainer_rrefs)
419
+ for idx, trainer_rref in enumerate(trainer_rrefs):
420
+ # Half the trainers will deplete inputs earlier than the rest.
421
+ trainer_has_less_inputs = (
422
+ simulate_uneven_inputs and idx < num_trainers // 2
423
+ )
424
+ futures.append(
425
+ _remote_method_async(
426
+ Trainer.train_batch,
427
+ trainer_rref,
428
+ training_examples[idx],
429
+ trainer_has_less_inputs,
430
+ simulate_uneven_inputs,
431
+ )
432
+ )
433
+
434
+ for future in futures:
435
+ ddp_grads, non_ddp_grads = future.wait()
436
+ # When there are uneven inputs, it is not necessary that grads
437
+ # cancel each other out, since some trainers contribute 0 grad.
438
+ if not simulate_uneven_inputs:
439
+ for grad in ddp_grads:
440
+ self.assertEqual(
441
+ grad,
442
+ torch.zeros_like(grad),
443
+ msg=f"The grad for any ddp parameter should be zeros, because "
444
+ "the training examples' grads cancel each other. Received "
445
+ f"gradient {grad}",
446
+ )
447
+ for grad in non_ddp_grads:
448
+ self.assertNotEqual(
449
+ grad,
450
+ torch.zeros_like(grad),
451
+ msg="The grad for any non-ddp parameter shouldn't be zeros",
452
+ )
453
+
454
+ # Destroy process groups
455
+ for idx, trainer_rref in enumerate(trainer_rrefs):
456
+ _remote_method_async(Trainer.destroy_pg, trainer_rref).wait()
457
+
458
+ # Send shutdown signals.
459
+ for rank in TRAINER_RANKS:
460
+ trainer = self.trainer_name(rank)
461
+ rpc.rpc_sync(trainer, set_shutdown_signal, args=())
462
+
463
+ rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
464
+
465
+ def _do_test(self, ddp_mode, simulate_uneven_inputs=False):
466
+ if self.rank == MASTER_RANK:
467
+ self._master_process(ddp_mode, simulate_uneven_inputs)
468
+ elif self.rank == REMOTE_WORKER_RANK:
469
+ self._remote_worker_process(ddp_mode)
470
+ elif self.rank in TRAINER_RANKS:
471
+ self._trainer_process(self.rank)
472
+ else:
473
+ raise RuntimeError(f"Unknown process rank: {self.rank}")
474
+
475
+ @requires_gloo()
476
+ @dist_init
477
+ def test_backward_no_ddp(self):
478
+ self._do_test(DdpMode.NONE)
479
+
480
+ @requires_gloo()
481
+ @dist_init
482
+ def test_backward_ddp_outside(self):
483
+ self._do_test(DdpMode.OUTSIDE)
484
+
485
+ @requires_gloo()
486
+ @dist_init
487
+ def test_backward_ddp_outside_uneven_inputs(self):
488
+ self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True)
489
+
490
+ @requires_gloo()
491
+ @dist_init
492
+ def test_backward_ddp_inside(self):
493
+ self._do_test(DdpMode.INSIDE)
494
+
495
+
496
+ # Common utils for both CPU and CUDA test suites
497
+ class CommonDdpComparisonTest(RpcAgentTestFixture):
498
+ @property
499
+ def world_size(self) -> int:
500
+ return NUM_TRAINERS
501
+
502
+ def trainer_name(self, rank):
503
+ # The name has to be consistent with that in 'dist_init' decorator.
504
+ return f"worker{rank}"
505
+
506
+ @staticmethod
507
+ def get_remote_grads(rref, context_id):
508
+ return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
509
+
510
+
511
+ class DdpComparisonTest(CommonDdpComparisonTest):
512
+ def _run_test_ddp_comparision(self, simulate_uneven_inputs=False):
513
+ gLogger.info("Running trainer rank: %s", self.rank)
514
+ # Each trainer uses a different random seed. Otherwise, they are going
515
+ # to have exactly the same initial model parameters, input, and
516
+ # therefore grads. That means the grads will be the same before and
517
+ # after DDP's all-reduce.
518
+ torch.manual_seed(self.rank)
519
+ dist.init_process_group(
520
+ backend="gloo",
521
+ # Postfix file_name with "pg" since file_name is also used by RPC agent
522
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"),
523
+ world_size=self.world_size,
524
+ rank=self.rank,
525
+ )
526
+ net = nn.Linear(2, 3)
527
+ ddp_net = DistributedDataParallel(net)
528
+
529
+ # Odd ranks join early if simulate_uneven_inputs.
530
+ num_inputs = 1
531
+ if simulate_uneven_inputs:
532
+ if self.rank % 2 == 0:
533
+ num_inputs += 2
534
+ inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)]
535
+
536
+ if simulate_uneven_inputs:
537
+ gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list))
538
+
539
+ # Use distributed autograd. The gradients will be in RPC context map.
540
+ grads_dict = {}
541
+ with ddp_net.join(simulate_uneven_inputs):
542
+ for i, inputs in enumerate(inputs_list):
543
+ with dist_autograd.context() as context_id:
544
+ loss = ddp_net(inputs).norm()
545
+ dist_autograd.backward(context_id, [loss])
546
+ grads_dict = dist_autograd.get_gradients(context_id)
547
+ gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict)
548
+
549
+ # Use local autograd. The gradients will be in each variable's '.grad'.
550
+ ddp_net.zero_grad()
551
+ loss = ddp_net(inputs).norm()
552
+ loss.backward()
553
+
554
+ # The gradients should be the same
555
+ for param in net.parameters():
556
+ self.assertTrue(
557
+ param in grads_dict,
558
+ msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}",
559
+ )
560
+ self.assertEqual(
561
+ grads_dict[param],
562
+ param.grad,
563
+ msg=f"The grads for param {param} are different under local "
564
+ f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}",
565
+ )
566
+ dist.destroy_process_group()
567
+
568
+ @requires_gloo()
569
+ @dist_init
570
+ def test_ddp_comparison(self):
571
+ self._run_test_ddp_comparision()
572
+
573
+ @requires_gloo()
574
+ @dist_init
575
+ def test_ddp_comparison_uneven_inputs(self):
576
+ # test with simulating uneven inputs in DDP
577
+ self._run_test_ddp_comparision(simulate_uneven_inputs=True)
578
+
579
+ @requires_gloo()
580
+ @dist_init
581
+ def test_ddp_dist_autograd_sparse_grads(self):
582
+ # Each trainer uses a different random seed. Otherwise, they are going
583
+ # to have exactly the same initial model parameters, input, and
584
+ # therefore grads. That means the grads will be the same before and
585
+ # after DDP's all-reduce.
586
+ torch.manual_seed(self.rank)
587
+ dist.init_process_group(
588
+ backend="gloo",
589
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
590
+ world_size=self.world_size,
591
+ rank=self.rank,
592
+ )
593
+
594
+ model = nn.EmbeddingBag(10, 3, sparse=True)
595
+ ddp_model = DistributedDataParallel(model)
596
+
597
+ # Different inputs for each
598
+ input = torch.LongTensor(10).random_(0, 10)
599
+ offsets = torch.LongTensor([0, 4])
600
+
601
+ # Run local.
602
+ loss = ddp_model(input, offsets).sum()
603
+ loss.backward()
604
+
605
+ with dist_autograd.context() as context_id:
606
+ loss = ddp_model(input, offsets).sum()
607
+ dist_autograd.backward(context_id, [loss])
608
+ grads_dict = dist_autograd.get_gradients(context_id)
609
+ self.assertEqual(1, len(grads_dict))
610
+ self.assertEqual(model.weight.grad, grads_dict[model.weight])
611
+
612
+ @requires_gloo()
613
+ @dist_init
614
+ def test_ddp_dist_autograd_local_vs_remote(self):
615
+ # Each trainer uses a different random seed. Otherwise, they are going
616
+ # to have exactly the same initial model parameters, input, and
617
+ # therefore grads. That means the grads will be the same before and
618
+ # after DDP's all-reduce.
619
+ torch.manual_seed(self.rank)
620
+ dist.init_process_group(
621
+ backend="gloo",
622
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
623
+ world_size=self.world_size,
624
+ rank=self.rank,
625
+ )
626
+
627
+ # Use two different remote device input strings, w/ and w/o the default
628
+ # device string "cpu", respectively.
629
+ for remote_device in ["worker0/cpu", "worker0"]:
630
+ remote_layer1 = RemoteModule(
631
+ remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False)
632
+ )
633
+ layer1 = nn.Linear(10, 5, False)
634
+ # Start with the same parameters for remote and local
635
+ layer1.weight = remote_layer1.module_rref.to_here().weight
636
+
637
+ # Run local case.
638
+ layer2 = nn.Linear(5, 1)
639
+ inputs = torch.rand((10, 10))
640
+ ddp_model = DistributedDataParallel(layer2)
641
+ loss = ddp_model(layer1(inputs)).sum()
642
+ loss.backward()
643
+
644
+ # Run remote case.
645
+ with dist_autograd.context() as context_id:
646
+ loss = ddp_model(remote_layer1(inputs)).sum()
647
+ dist_autograd.backward(context_id, [loss])
648
+ grads_dict = dist_autograd.get_gradients(context_id)
649
+ dist.barrier()
650
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
651
+ self.assertEqual(
652
+ layer1.weight.grad,
653
+ rpc.rpc_sync(
654
+ "worker0",
655
+ CommonDdpComparisonTest.get_remote_grads,
656
+ args=(remote_layer1.module_rref, context_id),
657
+ ),
658
+ )
659
+
660
+
661
+ class CudaDdpComparisonTest(CommonDdpComparisonTest):
662
+ @skip_if_lt_x_gpu(NUM_TRAINERS)
663
+ @requires_nccl()
664
+ @dist_init
665
+ @skip_if_rocm
666
+ def test_ddp_dist_autograd_local_vs_remote_gpu(self):
667
+ # Each trainer uses a different random seed. Otherwise, they are going
668
+ # to have exactly the same initial model parameters, input, and
669
+ # therefore grads. That means the grads will be the same before and
670
+ # after DDP's all-reduce.
671
+ torch.manual_seed(self.rank)
672
+ dist.init_process_group(
673
+ backend="gloo",
674
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
675
+ world_size=self.world_size,
676
+ rank=self.rank,
677
+ )
678
+
679
+ remote_layer1 = RemoteModule(
680
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False)
681
+ )
682
+ layer1 = nn.Linear(10, 7, False)
683
+ # Start with the same parameters for remote and local
684
+ layer1.weight = remote_layer1.module_rref.to_here().weight
685
+
686
+ layer2 = nn.Linear(7, 5).cuda(self.rank)
687
+ ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
688
+
689
+ remote_layer3 = RemoteModule(
690
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False)
691
+ )
692
+ layer3 = nn.Linear(5, 3, False)
693
+ # Start with the same parameters for remote and local
694
+ layer3.weight = remote_layer3.module_rref.to_here().weight
695
+
696
+ layer4 = nn.Linear(3, 1).cuda(self.rank)
697
+ ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
698
+
699
+ # Run local case.
700
+ inputs = torch.rand((10, 10))
701
+ loss = ddp_layer4(
702
+ layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank)
703
+ ).sum()
704
+ loss.backward()
705
+
706
+ # Run remote case.
707
+ with dist_autograd.context() as context_id:
708
+ loss = ddp_layer4(
709
+ remote_layer3(
710
+ ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu()
711
+ ).cuda(self.rank)
712
+ ).sum()
713
+ dist_autograd.backward(context_id, [loss])
714
+ grads_dict = dist_autograd.get_gradients(context_id)
715
+ dist.barrier()
716
+ self.assertEqual(
717
+ layer1.weight.grad,
718
+ rpc.rpc_sync(
719
+ "worker0",
720
+ CommonDdpComparisonTest.get_remote_grads,
721
+ args=(remote_layer1.module_rref, context_id),
722
+ ),
723
+ )
724
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
725
+ self.assertEqual(
726
+ layer3.weight.grad,
727
+ rpc.rpc_sync(
728
+ "worker0",
729
+ CommonDdpComparisonTest.get_remote_grads,
730
+ args=(remote_layer3.module_rref, context_id),
731
+ ),
732
+ )
733
+ self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight])
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py ADDED
@@ -0,0 +1,66 @@
1
+ # mypy: ignore-errors
2
+
3
+ from contextlib import contextmanager
4
+ from datetime import timedelta
5
+ from functools import (
6
+ partial,
7
+ wraps,
8
+ )
9
+
10
+ import torch.distributed as dist
11
+ import torch.distributed.distributed_c10d as c10d
12
+
13
+ class MockProcessGroup(dist.ProcessGroup):
14
+
15
+ def __init__(self, rank, world):
16
+ super().__init__(rank, world)
17
+
18
+ def getBackendName(self):
19
+ return "mock_process_group"
20
+
21
+ def create_mock_pg(prefix_store, rank, world_size, timeout):
22
+ return MockProcessGroup(rank, world_size)
23
+
24
+ dist.Backend.register_backend('mock_process_group', create_mock_pg)
25
+
26
+ def mock_init_dist(rank, world_size):
27
+ # !!! WARNING !!!
28
+ # Kids don't try this at home, this is a cute pile of hacks that
29
+ # depends on a small mountain of c10d internals
30
+ assert not dist.is_initialized()
31
+ store = dist.HashStore()
32
+ # Trick _store_based_barrier into believing everyone else already checked-in
33
+ # Zero is the group index
34
+ store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
35
+ dist.init_process_group(
36
+ backend="mock_process_group",
37
+ rank=rank,
38
+ world_size=world_size,
39
+ store=store,
40
+ group_name="fake",
41
+ timeout=timedelta(seconds=1))
42
+
43
+ @contextmanager
44
+ def with_dist(rank=0, world_size=2):
45
+ """
46
+ Context manager that initializes c10d with a mock process group.
47
+ """
48
+ mock_init_dist(rank=rank, world_size=world_size)
49
+ try:
50
+ yield
51
+ finally:
52
+ dist.destroy_process_group()
53
+
54
+ def with_fake_comms(func=None, rank=0, world_size=2):
55
+ """
56
+ Function wrapper that initializes a fake process group designed for testing.
57
+ Right now, only querying the world size is supported.
58
+ """
59
+ if func is None:
60
+ return partial(with_fake_comms, rank=rank, world_size=world_size)
61
+
62
+ @wraps(func)
63
+ def wrapper(self, *args, **kwargs):
64
+ with with_dist(rank, world_size):
65
+ func(self, *args, **kwargs)
66
+ return wrapper
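A minimal sketch of with_fake_comms; the test class is hypothetical, and only basic queries such as rank and world size are expected to work on the mock group.

import unittest
import torch.distributed as dist
from torch.testing._internal.distributed.distributed_utils import with_fake_comms

class WorldSizeTest(unittest.TestCase):  # hypothetical
    @with_fake_comms(rank=1, world_size=4)
    def test_world_size_and_rank(self):
        self.assertEqual(dist.get_world_size(), 4)
        self.assertEqual(dist.get_rank(), 1)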
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py ADDED
@@ -0,0 +1,32 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch.distributed as dist
4
+
5
+ from torch._C._distributed_c10d import (
6
+ FakeProcessGroup,
7
+ )
8
+
9
+
10
+ class FakeStore(dist.Store):
11
+ """
12
+ A fake store is a fake Key-Value store used simply for initializing
13
+ the fake process group; one can use either FakeStore or HashStore.
14
+ """
15
+ pass
16
+
17
+
18
+ def _create_fake_pg(prefix_store, rank, world_size, timeout):
19
+ """
20
+ A fake process group (not related to FakeTensor) is a process group which
21
+ doesn't actually do any communication, it just hallucinates some
22
+ communication. You can run a single rank with a fake process group
23
+ without needing multiple processes (simulates per-rank behavior)
24
+
25
+ NOTE: This is not a real process group, and it would produce wrong results
26
+ for every collective. It should be used as a convenient tool when playing
27
+ with distributed code without caring about the actual data.
28
+ """
29
+ return FakeProcessGroup(rank, world_size)
30
+
31
+
32
+ dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
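A minimal single-process sketch of the "fake" backend registered above; collectives complete but, by design, do not produce meaningful values.

import torch
import torch.distributed as dist
from torch.testing._internal.distributed.fake_pg import FakeStore

dist.init_process_group(backend="fake", rank=0, world_size=2, store=FakeStore())
t = torch.ones(3)
dist.all_reduce(t)  # returns without real communication; contents are not meaningful
dist.destroy_process_group()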
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py ADDED
@@ -0,0 +1,494 @@
1
+ # mypy: ignore-errors
2
+
3
+ import sys
4
+ import threading
5
+ from dataclasses import dataclass
6
+ from typing import Dict, List, Optional, Tuple, Union
7
+ from functools import partial, reduce
8
+
9
+ import torch
10
+ import torch.distributed as dist
11
+ import weakref
12
+ from torch._C._distributed_c10d import (
13
+ _create_work_from_future,
14
+ AllgatherOptions,
15
+ AllreduceOptions,
16
+ AllToAllOptions,
17
+ BarrierOptions,
18
+ BroadcastOptions,
19
+ ReduceScatterOptions,
20
+ ScatterOptions,
21
+ Store,
22
+ ReduceOp,
23
+ )
24
+ from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp
25
+ from torch.futures import Future
26
+ from torch.utils import _pytree as pytree
27
+
28
+ """
29
+ TODO:
30
+ Lots of missing collectives.
31
+ Collectives validation.
32
+ Make timeout robust by making collectives respect the test deadline.
33
+ Make tests robust by making collectives interruptible.
34
+ We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures.
35
+
36
+ """
37
+
38
+
39
+ def flatten_list(lst):
40
+ return pytree.tree_leaves(lst)
41
+
42
+
43
+ def ret_work(ret):
44
+ fut = Future()
45
+ fut.set_result(ret)
46
+ return _create_work_from_future(fut)
47
+
48
+ def binop_reduce(tensors, op):
49
+ res = op(torch.stack(tensors), dim=0)
50
+ if isinstance(res, torch.Tensor):
51
+ return res
52
+ # min/max return a namedtuple
53
+ return res.values
54
+
55
+ def bitwise_reduce(tensors, op):
56
+ return reduce(op, tensors)
57
+
58
+ _reduce_ops = {
59
+ ReduceOp.SUM: partial(binop_reduce, op=torch.sum),
60
+ ReduceOp.AVG: partial(binop_reduce, op=torch.mean),
61
+ ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod),
62
+ ReduceOp.MIN: partial(binop_reduce, op=torch.min),
63
+ ReduceOp.MAX: partial(binop_reduce, op=torch.max),
64
+ ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and),
65
+ ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or),
66
+ ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor),
67
+ }
68
+
69
+ class AllToAll:
70
+ @torch.no_grad()
71
+ def work(self, data):
72
+ world_size = len(data)
73
+ for dest_rank in range(world_size):
74
+ output_tensor_list, _ = data[dest_rank]
75
+ for src_rank in range(world_size):
76
+ _, input_tensor_list = data[src_rank]
77
+ output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
78
+
79
+ class AllReduce:
80
+ def __init__(self, op):
81
+ if op.op not in _reduce_ops:
82
+ raise NotImplementedError(
83
+ f"AllReduce op {op.op} not supported on multithreaded pg for now."
84
+ )
85
+ self.op = op.op
86
+
87
+ @torch.no_grad()
88
+ def work(self, data):
89
+ for i in range(len(data[0])):
90
+ tensors = []
91
+ # use rank0 as the device for sum
92
+ rank_0_device = data[0][i].device
93
+ # collect all data to the list and make them
94
+ # all on rank 0 device
95
+ for src_rank in range(0, len(data)):
96
+ tensors.append(data[src_rank][i].to(rank_0_device))
97
+
98
+ # now mimic reduce across all ranks
99
+ res = _reduce_ops[self.op](tensors)
100
+
101
+ # copy all the reduced value to each rank
102
+ for src_rank in range(len(data)):
103
+ data[src_rank][i].copy_(res.to(data[src_rank][i].device))
104
+
105
+
106
+ class AllGather:
107
+ @torch.no_grad()
108
+ def work(self, data):
109
+ for src_rank in range(len(data)):
110
+ in_tensor_list = data[src_rank][1]
111
+ # Can't handle all_gather with multiple tensors
112
+ assert len(in_tensor_list) == 1
113
+ src_tensor = in_tensor_list[0]
114
+
115
+ for dest in data:
116
+ dest_tensor = dest[0][0][src_rank]
117
+ dest_tensor.copy_(src_tensor)
118
+
119
+
120
+ class Scatter:
121
+ def __init__(self, src):
122
+ self.src = src
123
+
124
+ @torch.no_grad()
125
+ def work(self, data):
126
+ src_in_tensor_list = data[self.src][1]
127
+ # Can't handle scatter with multiple input tensor list
128
+ assert len(src_in_tensor_list) == 1
129
+ src_in_tensors = src_in_tensor_list[0]
130
+
131
+ for rank, each_rank_data in enumerate(data):
132
+ out_tensor_list = each_rank_data[0]
133
+ # Can't handle scatter with multiple output tensor
134
+ assert len(out_tensor_list) == 1
135
+ dest_tensor = out_tensor_list[0]
136
+ dest_tensor.copy_(src_in_tensors[rank])
137
+
138
+
139
+ class Gather:
140
+ def __init__(self, dst):
141
+ self.dst = dst
142
+
143
+ @torch.no_grad()
144
+ def work(self, data):
145
+ # Can't handle gather with multiple tensor lists
146
+ assert len(data[self.dst][0]) == 1
147
+ out_tensor_list = data[self.dst][0][0]
148
+ for rank, each_rank_data in enumerate(data):
149
+ src_in_tensor_list = each_rank_data[1]
150
+ # Can't handle gather with multiple tensor lists
151
+ assert len(src_in_tensor_list) == 1
152
+ dest_tensor = out_tensor_list[rank]
153
+ dest_tensor.copy_(src_in_tensor_list[0])
154
+
155
+ class ReduceScatter:
156
+ def __init__(self, op):
157
+ if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG:
158
+ raise NotImplementedError(f"ReduceScatter does not support {op}")
159
+ self.op = op
160
+
161
+ @torch.no_grad()
162
+ def work(self, data):
163
+ start_reduction = [False for _ in range(len(data))]
164
+ for each_rank_data in data:
165
+ # Can't handle reduce_scatter with multiple scatter list
166
+ assert len(each_rank_data[1]) == 1
167
+ to_scatter = each_rank_data[1][0]
168
+ for i in range(len(to_scatter)):
169
+ dest_tensor_on_rank_i = data[i][0]
170
+ # Can't handle reduce_scatter with multiple output tensor
171
+ assert len(dest_tensor_on_rank_i) == 1
172
+ dst_tensor_device = dest_tensor_on_rank_i[0].device
173
+ if not start_reduction[i]:
174
+ dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device))
175
+ start_reduction[i] = True
176
+ else:
177
+ dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device))
178
+ if self.op == dist.ReduceOp.AVG:
179
+ num_ranks = len(data)
180
+ for each_rank_data in data:
181
+ each_rank_data[0][0] /= num_ranks
182
+
183
+
184
+ class Broadcast:
185
+ def __init__(self, src):
186
+ self.src = src
187
+
188
+ @torch.no_grad()
189
+ def work(self, data):
190
+ in_tensor_list = flatten_list(data[self.src])
191
+ for i in range(len(data)):
192
+ out_tensor_list = flatten_list(data[i])
193
+ for j in range(len(in_tensor_list)):
194
+ out_tensor_list[j].copy_(in_tensor_list[j])
195
+
196
+
197
+ class Collective:
198
+ def __init__(self, world_size, collective, pg):
199
+ self._world_size = world_size
200
+ self._collective = collective
201
+
202
+ self._start_cond = threading.Condition()
203
+ self._done_cond = threading.Condition()
204
+
205
+ self._data = [None] * world_size
206
+ self._count = 0
207
+ self._done = False
208
+
209
+ self._pg = pg
210
+
211
+ def join(self, rank, data):
212
+ with self._start_cond:
213
+ self._data[rank] = data
214
+ self._count += 1
215
+
216
+ # notify rank 0
217
+ if self._count == self._world_size:
218
+ if rank > 0:
219
+ self._start_cond.notify()
220
+
221
+ if rank == 0:
222
+ self._start_cond.wait_for(
223
+ lambda: self._count == self._world_size or self._pg._terminate.is_set()
224
+ )
225
+ # SystemExit is a subclass of BaseException but not of Exception,
226
+ # so it can be distinguished from normal exceptions raised by program errors
227
+ # and hidden from the exception queue
228
+ if self._pg._terminate.is_set():
229
+ sys.exit("Test termination event occurs.")
230
+
231
+ with self._done_cond:
232
+ # wait for rank 0 to finish
233
+ if rank > 0:
234
+ self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set())
235
+ if self._pg._terminate.is_set():
236
+ sys.exit("Test termination event occurs.")
237
+ else:
238
+ # copy data around
239
+ self._collective.work(self._data)
240
+ self._done = True
241
+ self._done_cond.notify_all()
242
+ return ret_work(data)
243
+
244
+
245
+ class ProcessLocalGroup(dist.ProcessGroup):
246
+ _coll_lock = threading.Lock()
247
+ _cur_coll_on_pgs = {}
248
+
249
+ _terminate = threading.Event()
250
+
251
+ @classmethod
252
+ def _start_coll(cls, collective, pg):
253
+ with cls._coll_lock:
254
+ # pg_name is unique, we use that to record the mapping between pg and collective
255
+ if pg.pg_name not in cls._cur_coll_on_pgs:
256
+ cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls)
257
+ return cls._cur_coll_on_pgs[pg.pg_name]
258
+
259
+ @classmethod
260
+ def _end_coll(cls, collective, pg):
261
+ # This is racily called by all ranks, so only one will work
262
+ with cls._coll_lock:
263
+ if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective:
264
+ cls._cur_coll_on_pgs.pop(pg.pg_name)
265
+
266
+ @classmethod
267
+ def exception_handle(cls, exc):
268
+ cls._terminate.set()
269
+ for coll in cls._cur_coll_on_pgs.values():
270
+ with coll._start_cond:
271
+ coll._start_cond.notify()
272
+ with coll._done_cond:
273
+ coll._done_cond.notify_all()
274
+
275
+ @classmethod
276
+ def reset(cls):
277
+ with cls._coll_lock:
278
+ cls._cur_coll_on_pgs = {}
279
+ cls._terminate.clear()
280
+
281
+ def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
282
+ coll = ProcessLocalGroup._start_coll(AllToAll(), self)
283
+ res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
284
+ ProcessLocalGroup._end_coll(coll, self)
285
+ return res
286
+
287
+ def allreduce(self, tensor_list, opts=AllreduceOptions()):
288
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
289
+ res = coll.join(self._rank, tensor_list)
290
+ ProcessLocalGroup._end_coll(coll, self)
291
+ return res
292
+
293
+ def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
294
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
295
+ res = coll.join(self._rank, tensor_list)
296
+ ProcessLocalGroup._end_coll(coll, self)
297
+ return res
298
+
299
+ def barrier(self, opts=BarrierOptions()):
300
+ return self.allreduce(tensor_list=[torch.ones(1)])
301
+
302
+ def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
303
+ coll = ProcessLocalGroup._start_coll(AllGather(), self)
304
+ res = coll.join(self._rank, (output_tensors, input_tensor))
305
+ ProcessLocalGroup._end_coll(coll, self)
306
+ return res
307
+
308
+ def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
309
+ tensor_list = list(torch.chunk(output_tensor, self._world_size))
310
+ return self.allgather([tensor_list], [input_tensor], opts)
311
+
312
+ def broadcast(self, tensor_list, opts=BroadcastOptions()):
313
+ coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self)
314
+ res = coll.join(self._rank, tensor_list)
315
+ ProcessLocalGroup._end_coll(coll, self)
316
+ return res
317
+
318
+ def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()):
319
+ coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self)
320
+ res = coll.join(self._rank, (output_tensors, input_tensors))
321
+ ProcessLocalGroup._end_coll(coll, self)
322
+ return res
323
+
324
+ def gather(self, output_tensors, input_tensors, opts=ScatterOptions()):
325
+ coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self)
326
+ res = coll.join(self._rank, (output_tensors, input_tensors))
327
+ ProcessLocalGroup._end_coll(coll, self)
328
+ return res
329
+
330
+ def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
331
+ coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self)
332
+ res = coll.join(self._rank, (output_tensor, scatter_list))
333
+ ProcessLocalGroup._end_coll(coll, self)
334
+ return res
335
+
336
+ def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()):
337
+ tensor_list = list(torch.chunk(input_tensor, self._world_size))
338
+ return self.reduce_scatter([output_tensor], [tensor_list], opts)
339
+
340
+ def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()):
341
+ works = [
342
+ self._reduce_scatter_base(output_tensor, input_tensor, opts)
343
+ for output_tensor, input_tensor
344
+ in zip(output_tensors, input_tensors)
345
+ ]
346
+ for work in works[:-1]:
347
+ work.wait()
348
+ return works[-1]
349
+
350
+ def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()):
351
+ res = None
352
+ for o_t, i_t in zip(output_tensor_list, input_tensor_list):
353
+ res = self._allgather_base(o_t, i_t)
354
+ return res
355
+
356
+ def __init__(self, rank, world_size):
357
+ super().__init__(rank, world_size)
358
+ self._rank = rank
359
+ self._world_size = world_size
360
+ world = dist.distributed_c10d._world
361
+ if isinstance(world, ThreadLocalWorld):
362
+ world = world._get_world()
363
+ self._world = weakref.ref(world)
364
+ self._ctx = torch.autograd.set_multithreading_enabled(False)
365
+
366
+ def size(self):
367
+ return self._world_size
368
+
369
+ @property
370
+ def pg_name(self):
371
+ """
372
+ return the global registered name of the current pg in the world
373
+ """
374
+ return self._world().pg_names[self]
375
+
376
+ @property
377
+ def group_name(self):
378
+ return self.pg_name
379
+
380
+ def getBackendName(self):
381
+ return "threaded"
382
+
383
+ def __repr__(self):
384
+ return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}"
385
+
386
+
387
+ def _create_threaded_pg(prefix_store, rank, world_size, timeout):
388
+ pg = ProcessLocalGroup(rank, world_size)
389
+ # https://github.com/pytorch/pytorch/pull/103033 made the store-based barrier optional.
390
+ # When a device mesh involves sub-groups and the store-based barrier is not enabled in c10d,
391
+ # different threads may end up initializing different groups concurrently
392
+ # (even though the threaded pg's actual collectives are assumed to be single threaded),
393
+ # which leads to race conditions.
394
+ # For example, with a mesh of [[0, 1], [2, 3]], the sub-groups
395
+ # along dim 0 and dim 1 are initialized independently in different threads.
396
+ # In that case we can no longer rely on class or global variables;
397
+ # we have to rely on the store-based barrier to make sure each group
398
+ # is ready before we can invoke collectives in any of the groups.
399
+
400
+ # the prefix store is already per group so we pass an empty name here
401
+ _store_based_barrier(rank, prefix_store, "", world_size, timeout)
402
+ return pg
403
+
404
+
405
+ dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"])
406
+
407
+
408
+ @dataclass
409
+ class WorldData:
410
+ default_pg: dist.ProcessGroup
411
+ pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]]
412
+ pg_names: Dict[dist.ProcessGroup, str]
413
+ pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]]
414
+ pg_backend_config: Dict[dist.ProcessGroup, str]
415
+ group_count: int
416
+ tags_to_pg: Dict[str, List[dist.ProcessGroup]]
417
+ pg_to_tag: Dict[dist.ProcessGroup, str]
418
+ pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]
419
+ pg_default_device: Dict[dist.ProcessGroup, torch.device]
420
+
421
+
422
+ class ThreadLocalWorld:
423
+ _world = threading.local()
424
+
425
+ def _get_world(self) -> WorldData:
426
+ if not hasattr(ThreadLocalWorld._world, "world"):
427
+ ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {})
428
+ return ThreadLocalWorld._world.world
429
+
430
+ @property
431
+ def default_pg(self):
432
+ return self._get_world().default_pg
433
+
434
+ @default_pg.setter
435
+ def default_pg(self, value):
436
+ self._get_world().default_pg = value
437
+
438
+ @property
439
+ def pg_map(self):
440
+ return self._get_world().pg_map
441
+
442
+ @property
443
+ def pg_names(self):
444
+ return self._get_world().pg_names
445
+
446
+ @property
447
+ def pg_group_ranks(self):
448
+ return self._get_world().pg_group_ranks
449
+
450
+ @property
451
+ def pg_backend_config(self):
452
+ return self._get_world().pg_backend_config
453
+
454
+ @property
455
+ def group_count(self) -> int:
456
+ return self._get_world().group_count
457
+
458
+ @group_count.setter
459
+ def group_count(self, value):
460
+ self._get_world().group_count = value
461
+
462
+ @property
463
+ def tags_to_pg(self):
464
+ return self._get_world().tags_to_pg
465
+
466
+ @property
467
+ def pg_to_tag(self):
468
+ return self._get_world().pg_to_tag
469
+
470
+ @property
471
+ def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]:
472
+ return self._get_world().pg_coalesce_state
473
+
474
+ @property
475
+ def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]:
476
+ return self._get_world().pg_default_device
477
+
478
+
479
+ _old_pg_world = None
480
+ _ctx_manager = None
481
+
482
+
483
+ def _install_threaded_pg():
484
+ global _old_pg_world
485
+ global _ctx_manager
486
+ _old_pg_world = dist.distributed_c10d._world
487
+ dist.distributed_c10d._world = ThreadLocalWorld()
488
+ _ctx_manager = torch.autograd.set_multithreading_enabled(False)
489
+
490
+ return dist.distributed_c10d._world
491
+
492
+
493
+ def _uninstall_threaded_pg():
494
+ dist.distributed_c10d._world = _old_pg_world
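
For orientation, here is a minimal sketch of how the "threaded" backend above can be exercised, assuming the standard c10d init path and a single in-memory HashStore shared by all threads; the thread wiring is illustrative and not taken from this file:

import threading

import torch
import torch.distributed as dist
from torch.testing._internal.distributed.multi_threaded_pg import _install_threaded_pg

_install_threaded_pg()      # swap in the thread-local world defined above
store = dist.HashStore()    # assumption: one in-memory store shared by all threads
world_size = 2

def worker(rank):
    # each thread plays the role of one "rank" of the threaded process group
    dist.init_process_group("threaded", rank=rank, world_size=world_size, store=store)
    t = torch.full((1,), float(rank))
    dist.all_reduce(t)      # defaults to ReduceOp.SUM across both threads
    assert t.item() == sum(range(world_size))

threads = [threading.Thread(target=worker, args=(r,)) for r in range(world_size)]
for th in threads:
    th.start()
for th in threads:
    th.join()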
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/pipe_with_ddp_test.py ADDED
@@ -0,0 +1,149 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import torch.distributed as dist
5
+
6
+ from torch import nn
7
+ from torch.nn.parallel import DistributedDataParallel
8
+ from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
9
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
10
+ RpcAgentTestFixture,
11
+ )
12
+ from torch.testing._internal.common_distributed import (
13
+ requires_gloo,
14
+ requires_nccl,
15
+ skip_if_lt_x_gpu,
16
+ skip_if_rocm,
17
+ )
18
+ from torch.distributed.pipeline.sync import Pipe
19
+
20
+ class PipeWithDDPTest(RpcAgentTestFixture):
21
+ @property
22
+ def world_size(self) -> int:
23
+ return 2
24
+
25
+ @skip_if_lt_x_gpu(4)
26
+ @requires_nccl()
27
+ @dist_init
28
+ @skip_if_rocm
29
+ def test_basic_nccl_ckpt_never(self):
30
+ self._run_basic_test("nccl", "never")
31
+
32
+ @skip_if_lt_x_gpu(4)
33
+ @requires_nccl()
34
+ @dist_init
35
+ @skip_if_rocm
36
+ def test_basic_nccl_ckpt_never_find_unused(self):
37
+ self._run_basic_test("nccl", "never", find_unused_parameters=True)
38
+
39
+ @skip_if_lt_x_gpu(4)
40
+ @requires_nccl()
41
+ @dist_init
42
+ @skip_if_rocm
43
+ def test_basic_nccl_ckpt_always(self):
44
+ self._run_basic_test("nccl", "always", static_graph=True)
45
+
46
+ @skip_if_lt_x_gpu(4)
47
+ @requires_nccl()
48
+ @dist_init
49
+ @skip_if_rocm
50
+ def test_basic_nccl_ckpt_except_last(self):
51
+ self._run_basic_test("nccl", "except_last", static_graph=True)
52
+
53
+ @skip_if_lt_x_gpu(4)
54
+ @requires_gloo()
55
+ @dist_init
56
+ @skip_if_rocm
57
+ def test_basic_gloo_ckpt_never(self):
58
+ self._run_basic_test("gloo", "never")
59
+
60
+ @skip_if_lt_x_gpu(4)
61
+ @requires_gloo()
62
+ @dist_init
63
+ @skip_if_rocm
64
+ def test_basic_gloo_ckpt_never_find_unused(self):
65
+ self._run_basic_test("gloo", "never", find_unused_parameters=True)
66
+
67
+ @skip_if_lt_x_gpu(4)
68
+ @requires_gloo()
69
+ @dist_init
70
+ @skip_if_rocm
71
+ def test_basic_gloo_ckpt_always(self):
72
+ self._run_basic_test("gloo", "always", static_graph=True)
73
+
74
+ @skip_if_lt_x_gpu(4)
75
+ @requires_gloo()
76
+ @dist_init
77
+ @skip_if_rocm
78
+ def test_basic_gloo_ckpt_except_last(self):
79
+ self._run_basic_test("gloo", "except_last", static_graph=True)
80
+
81
+ def _run_basic_test(self, backend, checkpoint, find_unused_parameters=False, static_graph=False):
82
+ dist.init_process_group(
83
+ backend=backend,
84
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
85
+ world_size=self.world_size,
86
+ rank=self.rank,
87
+ )
88
+
89
+ # Use 4 GPUs, two replicas of a pipe across GPU 0 and 1 and another
90
+ # pipe between GPU 2 and 3. Both replicas are replicated via DDP.
91
+ fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank)
92
+
93
+ class MyModule(nn.Module):
94
+ def __init__(self, device):
95
+ super().__init__()
96
+ self.fc2 = nn.Linear(8, 4, bias=False).cuda(device)
97
+ self.fc3 = nn.Linear(4, 2, bias=False).cuda(device)
98
+
99
+ def forward(self, inp):
100
+ if find_unused_parameters:
101
+ return self.fc2(inp)
102
+ else:
103
+ return self.fc3(self.fc2(inp))
104
+
105
+ layer2 = MyModule(2 * self.rank + 1)
106
+ model = nn.Sequential(
107
+ fc1,
108
+ layer2
109
+ )
110
+ model = Pipe(model, chunks=2, checkpoint=checkpoint)
111
+ model = DistributedDataParallel(
112
+ model,
113
+ find_unused_parameters=find_unused_parameters,
114
+ static_graph=static_graph,
115
+ )
116
+
117
+ # Ensure inputs are different across ranks to verify that gradient
118
+ # sync indeed occurs.
119
+ model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
120
+ out = model(model_input).local_value()
121
+ out.sum().backward()
122
+
123
+ # Run forward again for find_unused_parameters to trigger any potential errors.
124
+ if find_unused_parameters:
125
+ # Ensure inputs are different across ranks to verify that gradient
126
+ # sync indeed occurs.
127
+ unused_param_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
128
+ model(unused_param_input).local_value().sum().backward()
129
+
130
+ # Run a few more iterations of fwd + bwd to ensure gradient synchronization
131
+ # occurs properly across iterations via delay_all_reduce/bucketized allreduce.
132
+ for _ in range(3):
133
+ model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
134
+ out = model(model_input).local_value()
135
+ out.sum().backward()
136
+
137
+ # Check grads
138
+ output = [torch.empty_like(fc1.weight.grad), torch.empty_like(fc1.weight.grad)]
139
+ dist.all_gather(output, fc1.weight.grad)
140
+ self.assertEqual(output[0], output[1])
141
+
142
+ output = [torch.empty_like(layer2.fc2.weight.grad), torch.empty_like(layer2.fc2.weight.grad)]
143
+ dist.all_gather(output, layer2.fc2.weight.grad)
144
+ self.assertEqual(output[0], output[1])
145
+
146
+ if not find_unused_parameters:
147
+ output = [torch.empty_like(layer2.fc3.weight.grad), torch.empty_like(layer2.fc3.weight.grad)]
148
+ dist.all_gather(output, layer2.fc3.weight.grad)
149
+ self.assertEqual(output[0], output[1])
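
For reference, the device layout that _run_basic_test relies on gives each DDP rank its own pair of GPUs; a small illustrative sketch of that mapping (the helper name below is invented):

# With world_size == 2 under @skip_if_lt_x_gpu(4): rank r places fc1 on GPU 2*r and
# MyModule (fc2/fc3) on GPU 2*r + 1, so the two pipe replicas never share a device.
def pipe_devices(rank):  # hypothetical helper mirroring the 2 * self.rank arithmetic above
    return 2 * rank, 2 * rank + 1

assert pipe_devices(0) == (0, 1)
assert pipe_devices(1) == (2, 3)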
venv/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py ADDED
@@ -0,0 +1,185 @@
1
+ # mypy: ignore-errors
2
+
3
+ import os
4
+ import sys
5
+ import unittest
6
+ from typing import Dict, List, Type
7
+
8
+ from torch.testing._internal.common_distributed import MultiProcessTestCase
9
+ from torch.testing._internal.common_utils import (
10
+ TEST_WITH_DEV_DBG_ASAN,
11
+ find_free_port,
12
+ IS_SANDCASTLE,
13
+ )
14
+ from torch.testing._internal.distributed.ddp_under_dist_autograd_test import (
15
+ CudaDdpComparisonTest,
16
+ DdpComparisonTest,
17
+ DdpUnderDistAutogradTest,
18
+ )
19
+ from torch.testing._internal.distributed.pipe_with_ddp_test import (
20
+ PipeWithDDPTest,
21
+ )
22
+ from torch.testing._internal.distributed.nn.api.remote_module_test import (
23
+ CudaRemoteModuleTest,
24
+ RemoteModuleTest,
25
+ ThreeWorkersRemoteModuleTest,
26
+ )
27
+ from torch.testing._internal.distributed.rpc.dist_autograd_test import (
28
+ DistAutogradTest,
29
+ CudaDistAutogradTest,
30
+ FaultyAgentDistAutogradTest,
31
+ TensorPipeAgentDistAutogradTest,
32
+ TensorPipeCudaDistAutogradTest
33
+ )
34
+ from torch.testing._internal.distributed.rpc.dist_optimizer_test import (
35
+ DistOptimizerTest,
36
+ )
37
+ from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import (
38
+ JitDistAutogradTest,
39
+ )
40
+ from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest
41
+ from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import (
42
+ JitFaultyAgentRpcTest,
43
+ )
44
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
45
+ RpcAgentTestFixture,
46
+ )
47
+ from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import (
48
+ FaultyAgentRpcTest,
49
+ )
50
+ from torch.testing._internal.distributed.rpc.rpc_test import (
51
+ CudaRpcTest,
52
+ RpcTest,
53
+ TensorPipeAgentRpcTest,
54
+ TensorPipeAgentCudaRpcTest,
55
+ )
56
+ from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest
57
+ from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import (
58
+ ReinforcementLearningRpcTest,
59
+ )
60
+
61
+
62
+ def _check_and_set_tcp_init():
63
+ # if we are running with TCP init, set the master address and port
64
+ # before spawning subprocesses, since different processes could find
65
+ # different ports.
66
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
67
+ if use_tcp_init == "1":
68
+ os.environ["MASTER_ADDR"] = '127.0.0.1'
69
+ os.environ["MASTER_PORT"] = str(find_free_port())
70
+
71
+ def _check_and_unset_tcp_init():
72
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
73
+ if use_tcp_init == "1":
74
+ del os.environ["MASTER_ADDR"]
75
+ del os.environ["MASTER_PORT"]
76
+
77
+ # The tests for the RPC module need to cover multiple possible combinations:
78
+ # - different aspects of the API, each one having its own suite of tests;
79
+ # - different agents (ProcessGroup, TensorPipe, ...);
80
+ # To avoid a combinatorial explosion in code size, and to prevent forgetting to
81
+ # add a combination, these are generated automatically by the code in this file.
82
+ # Here, we collect all the test suites that we need to cover.
83
+ # We then have one separate file for each agent, from which
84
+ # we call the generate_tests function of this file, passing to it a fixture for
85
+ # the agent, which then gets mixed-in with each test suite.
86
+
87
+ @unittest.skipIf(
88
+ TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues"
89
+ )
90
+ class SpawnHelper(MultiProcessTestCase):
91
+ def setUp(self):
92
+ super().setUp()
93
+ _check_and_set_tcp_init()
94
+ self._spawn_processes()
95
+
96
+ def tearDown(self):
97
+ _check_and_unset_tcp_init()
98
+ super().tearDown()
99
+
100
+
101
+ # This list contains test suites that are agent-agnostic and that only verify
102
+ # compliance with the generic RPC interface specification. These tests should
103
+ # *not* make use of implementation details of a specific agent (options,
104
+ # attributes, ...). These test suites will be instantiated multiple times, once
105
+ # for each agent (except the faulty agent, which is special).
106
+ GENERIC_TESTS = [
107
+ RpcTest,
108
+ ParameterServerTest,
109
+ DistAutogradTest,
110
+ DistOptimizerTest,
111
+ JitRpcTest,
112
+ JitDistAutogradTest,
113
+ RemoteModuleTest,
114
+ ThreeWorkersRemoteModuleTest,
115
+ DdpUnderDistAutogradTest,
116
+ DdpComparisonTest,
117
+ ReinforcementLearningRpcTest,
118
+ ]
119
+ GENERIC_CUDA_TESTS = [
120
+ CudaRpcTest,
121
+ CudaDistAutogradTest,
122
+ CudaRemoteModuleTest,
123
+ CudaDdpComparisonTest,
124
+ PipeWithDDPTest,
125
+ ]
126
+
127
+
128
+ # This list contains test suites that will only be run on the TensorPipeAgent.
129
+ # These suites should be standalone, and separate from the ones in the generic
130
+ # list (not subclasses of those!).
131
+ TENSORPIPE_TESTS = [
132
+ TensorPipeAgentRpcTest,
133
+ TensorPipeAgentDistAutogradTest,
134
+ ]
135
+ TENSORPIPE_CUDA_TESTS = [
136
+ TensorPipeAgentCudaRpcTest,
137
+ TensorPipeCudaDistAutogradTest,
138
+ ]
139
+
140
+
141
+ # This list contains test suites that will only be run on the faulty RPC agent.
142
+ # That agent is special as it's only used to perform fault injection in order to
143
+ # verify the error handling behavior. Thus the faulty agent will only run the
144
+ # suites in this list, which were designed to test such behaviors, and not the
145
+ # ones in the generic list.
146
+ FAULTY_AGENT_TESTS = [
147
+ FaultyAgentRpcTest,
148
+ FaultyAgentDistAutogradTest,
149
+ JitFaultyAgentRpcTest,
150
+ ]
151
+
152
+
153
+ def generate_tests(
154
+ prefix: str,
155
+ mixin: Type[RpcAgentTestFixture],
156
+ tests: List[Type[RpcAgentTestFixture]],
157
+ module_name: str,
158
+ ) -> Dict[str, Type[RpcAgentTestFixture]]:
159
+ """Mix in the classes needed to autogenerate the tests based on the params.
160
+
161
+ Takes a series of test suites, each written against a "generic" agent (i.e.,
162
+ derived from the abstract RpcAgentTestFixture class), as the `tests` args.
163
+ Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a
164
+ certain agent, as the `mixin` arg. Produces all combinations of them.
165
+ Returns a dictionary of class names to class type
166
+ objects which can be inserted into the global namespace of the calling
167
+ module. The name of each test will be a concatenation of the `prefix` arg
168
+ and the original name of the test suite.
169
+ The `module_name` should be the name of the calling module so
170
+ that the classes can be fixed to make it look like they belong to it, which
171
+ is necessary for pickling to work on them.
172
+ """
173
+ ret: Dict[str, Type[RpcAgentTestFixture]] = {}
174
+ for test_class in tests:
175
+ if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN:
176
+ print(
177
+ f'Skipping test {test_class} on sandcastle for the following reason: '
178
+ 'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr)
179
+ continue
180
+
181
+ name = f"{prefix}{test_class.__name__}"
182
+ class_ = type(name, (test_class, mixin, SpawnHelper), {})
183
+ class_.__module__ = module_name
184
+ ret[name] = class_
185
+ return ret
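
As a hedged illustration of the intended call pattern, an agent-specific test file mixes a concrete fixture into every generic suite and publishes the generated classes in its own namespace; the fixture import below is an assumption made for illustration:

# hypothetical per-agent test module
from torch.testing._internal.distributed.rpc_utils import GENERIC_TESTS, generate_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
    TensorPipeRpcAgentTestFixture,  # assumption: a concrete RpcAgentTestFixture subclass
)

# Each generated class is named "TensorPipe<SuiteName>" and combines the suite, the
# agent fixture, and SpawnHelper; updating globals() lets unittest discovery find them.
globals().update(
    generate_tests("TensorPipe", TensorPipeRpcAgentTestFixture, GENERIC_TESTS, __name__)
)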
venv/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py ADDED
@@ -0,0 +1,87 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import re
5
+ import unittest
6
+ import functools
7
+ from subprocess import CalledProcessError
8
+
9
+ from torch._inductor.codecache import CppCodeCache
10
+ from torch.utils._triton import has_triton
11
+ from torch.testing._internal.common_utils import (
12
+ LazyVal,
13
+ IS_FBCODE,
14
+ )
15
+ from torch._dynamo.backends.registry import register_backend
16
+ from torch._inductor.compile_fx import compile_fx, count_bytes_inner
17
+ from torch.testing._internal.common_utils import TestCase
18
+
19
+ def test_cpu():
20
+ try:
21
+ CppCodeCache.load("")
22
+ return not IS_FBCODE
23
+ except (
24
+ CalledProcessError,
25
+ OSError,
26
+ torch._inductor.exc.InvalidCxxCompiler,
27
+ torch._inductor.exc.CppCompileError,
28
+ ):
29
+ return False
30
+
31
+ HAS_CPU = LazyVal(test_cpu)
32
+
33
+ HAS_CUDA = torch.cuda.is_available() and has_triton()
34
+
35
+ HAS_GPU = HAS_CUDA
36
+
37
+ GPUS = ["cuda"]
38
+
39
+ HAS_MULTIGPU = any(
40
+ getattr(torch, gpu).is_available() and getattr(torch, gpu).device_count() >= 2
41
+ for gpu in GPUS
42
+ )
43
+
44
+ tmp_gpus = [x for x in GPUS if getattr(torch, x).is_available()]
45
+ assert len(tmp_gpus) <= 1
46
+ GPU_TYPE = "cuda" if len(tmp_gpus) == 0 else tmp_gpus.pop()
47
+ del tmp_gpus
48
+
49
+ @register_backend
50
+ def count_bytes_inductor(gm, example_inputs):
51
+ return compile_fx(gm, example_inputs, inner_compile=count_bytes_inner)
52
+
53
+ def _check_has_dynamic_shape(
54
+ self: TestCase,
55
+ code,
56
+ ):
57
+ for_loop_found = False
58
+ has_dynamic = False
59
+ lines = code.split("\n")
60
+ for line in lines:
61
+ if "for(" in line:
62
+ for_loop_found = True
63
+ if re.search(r";.*ks.*;", line) is not None:
64
+ has_dynamic = True
65
+ break
66
+ self.assertTrue(
67
+ has_dynamic, msg=f"Failed to find dynamic for loop variable\n{code}"
68
+ )
69
+ self.assertTrue(for_loop_found, f"Failed to find for loop\n{code}")
70
+
71
+
72
+ def skipDeviceIf(cond, msg, *, device):
73
+ if cond:
74
+ def decorate_fn(fn):
75
+ def inner(self, *args, **kwargs):
76
+ if self.device == device:
77
+ raise unittest.SkipTest(msg)
78
+ return fn(self, *args, **kwargs)
79
+ return inner
80
+ else:
81
+ def decorate_fn(fn):
82
+ return fn
83
+
84
+ return decorate_fn
85
+
86
+ skipCUDAIf = functools.partial(skipDeviceIf, device="cuda")
87
+ skipCPUIf = functools.partial(skipDeviceIf, device="cpu")
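
A minimal sketch of how these helpers are typically consumed by an inductor test; the test class and test name below are invented for illustration:

import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, skipCUDAIf

class ExampleInductorTest(TestCase):        # hypothetical test class
    device = GPU_TYPE                       # skipDeviceIf compares against this attribute

    @skipCUDAIf(not HAS_CUDA, "requires CUDA and triton")
    def test_compile_add(self):
        fn = torch.compile(lambda x: x + 1)
        x = torch.ones(4, device=self.device)
        self.assertEqual(fn(x), x + 1)

if __name__ == "__main__":
    run_tests()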
venv/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py ADDED
@@ -0,0 +1,722 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
5
+ import torch.nn.functional as F
6
+ import torch
7
+ import torch.cuda
8
+ import torch.jit
9
+ import torch.jit._logging
10
+ import torch.jit.frontend
11
+ from torch.testing._internal.common_nn import module_tests, new_module_tests
12
+ from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like
13
+
14
+ import collections
15
+ from copy import deepcopy
16
+ from typing import Any, Dict, List, Union
17
+ import math # noqa: F401
18
+
19
+ # Testing utils
20
+ from torch import inf
21
+
22
+ assert torch.get_default_dtype() == torch.float32
23
+
24
+ L = 20
25
+ M = 10
26
+ S = 5
27
+
28
+
29
+ def unpack_variables(args):
30
+ if isinstance(args, tuple):
31
+ return tuple(unpack_variables(elem) for elem in args)
32
+ else:
33
+ return args
34
+
35
+ class dont_convert(tuple):
36
+ pass
37
+
38
+ non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
39
+
40
+ def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None):
41
+ if not isinstance(call_args, tuple):
42
+ call_args = (call_args,)
43
+
44
+ def map_arg(arg):
45
+ def maybe_non_contig(tensor):
46
+ if not non_contiguous or tensor.numel() < 2:
47
+ return tensor.clone()
48
+
49
+ return noncontiguous_like(tensor)
50
+
51
+ def conjugate(tensor):
52
+ return tensor.conj()
53
+
54
+ if isinstance(arg, (torch.Size, dont_convert)):
55
+ return arg
56
+ elif isinstance(arg, tuple) and len(arg) == 0:
57
+ var = conjugate(torch.randn((), dtype=dtype, device=device))
58
+ var.requires_grad = requires_grad
59
+ return var
60
+ elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
61
+ return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
62
+ # double check casting
63
+ elif isinstance(arg, non_differentiable):
64
+ if isinstance(arg.tensor, torch.Tensor):
65
+ return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
66
+ return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
67
+ elif isinstance(arg, torch.Tensor):
68
+ if arg.is_complex() != dtype.is_complex:
69
+ raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
70
+ "which is not supported for now")
71
+ # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
72
+ v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
73
+ v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
74
+ return v
75
+ elif callable(arg):
76
+ return map_arg(arg(dtype=dtype, device=device))
77
+ else:
78
+ return arg
79
+ args_out = tuple(map_arg(arg) for arg in call_args)
80
+ kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
81
+ return args_out, kwargs_out
82
+
83
+ # NB: JIT script tests for all nn functional interfaces, script mode does
84
+ # not support in_place operations yet, so no inplace operation tests added.
85
+ # removed all the deprecated functions
86
+ #
87
+ # (
88
+ # method name,
89
+ # input size/constructing fn,
90
+ # args (tuple represents shape of a tensor arg),
91
+ # test variant name(will be used at test name suffix,
92
+ # 'inplace' skips grad tests), // optional
93
+ # (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
94
+ # fn to determine if test should be skipped, // optional
95
+ # fn mapping output to part that should be gradcheck'ed, // optional
96
+ # kwargs for function, // optional
97
+ # )
98
+ nn_functional_tests = [
99
+ ('conv1d', (S, S, S), ((S, S, S),)),
100
+ ('conv2d', (S, S, S, S), ((S, S, S, S),)),
101
+ ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
102
+ ('conv_transpose1d', (S, S, S), ((S, S, S),)),
103
+ ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
104
+ ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
105
+ ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
106
+ ('avg_pool1d', (S, S, S), (3,)),
107
+ ('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
108
+ ('avg_pool3d', (S, S, S, S, S), (3,)),
109
+ ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
110
+ ('max_pool1d', (S, S, S), (2, 1)),
111
+ ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
112
+ ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
113
+ ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
114
+ ('max_pool3d', (S, S, S, S, S), (2, 1)),
115
+ ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
116
+ ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
117
+ ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
118
+ ('lp_pool1d', (S, S, S), (2., 3, 2,)),
119
+ ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
120
+ ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)),
121
+ ('adaptive_max_pool1d', (S, S, S), (5,)),
122
+ ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
123
+ ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
124
+ ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
125
+ ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
126
+ ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
127
+ ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
128
+ ('alpha_dropout', (S, S, S), (0.5,)),
129
+ ('dropout2d', (S, S, S), (0.5,)),
130
+ ('dropout2d', (S, S, S, S), (0.5,), 'batched'),
131
+ ('dropout3d', (S, S, S, S), (0.5,)),
132
+ ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
133
+ ('feature_alpha_dropout', (S, S, S), (0.5,)),
134
+ ('threshold', (S, S, S), (0.1, 2.), '', (True,)),
135
+ ('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
136
+ ('relu', (S, S, S), (), '', (True,)),
137
+ ('relu', (S, S, S), (), 'inplace'),
138
+ ('glu', (S - 1, S - 1, S - 1), (),),
139
+ ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
140
+ ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
141
+ ('relu6', (S, S, S), (), '', (True,)),
142
+ ('relu6', (S, S, S), (True), 'inplace'),
143
+ ('elu', (S, S, S), (0.9,),),
144
+ ('elu', (S, S, S), (0.9, True), 'inplace'),
145
+ ('selu', (S, S, S), (),),
146
+ ('selu', (S, S, S), (True), 'inplace'),
147
+ ('celu', (S, S, S), (0.9,),),
148
+ ('celu', (S, S, S), (0.9, True), 'inplace'),
149
+ ('leaky_relu', (S, S, S), (0.02,), '', (True,)),
150
+ ('leaky_relu', (S, S, S), (0.02,), 'inplace'),
151
+ ('rrelu', (S, S), (0.1, 0.3, False),),
152
+ ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
153
+ ('hardshrink', (S, S, S), (0.4,), '', (True,)),
154
+ ('tanhshrink', (S, S, S), (),),
155
+ ('softsign', (S, S, S), (),),
156
+ ('softplus', (S, S, S), (), '', (True,)),
157
+ ('softmin', (S, S, S), (0,),),
158
+ ('softmax', (S, S, S), (0,), '', (True,)),
159
+ ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
160
+ ('tanh', (S, S, S), (), '', (True,)),
161
+ ('sigmoid', (S, S, S), (), '', (True,)),
162
+ ('silu', (S, S, S), (), '', (True,)),
163
+ ('log_softmax', (S, S, S), (0,), '', (True,)),
164
+ ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
165
+ ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
166
+ ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
167
+ ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
168
+ ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
169
+ ('batch_norm', (S, S),
170
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
171
+ 'training', (True, 'aten::_batch_norm_impl_index')),
172
+ ('batch_norm', (0, S, S, S),
173
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
174
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
175
+ 'size_zero', (True, 'aten::_batch_norm_impl_index')),
176
+ ('batch_norm', (0, S, S, S),
177
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
178
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
179
+ 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
180
+ ('batch_norm', (S, S),
181
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
182
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
183
+ 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
184
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
185
+ None, non_differentiable(torch.ones(S)), True, ),
186
+ 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
187
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
188
+ non_differentiable(torch.randn(S)), None, True, ),
189
+ 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
190
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
191
+ None, None, False, ),
192
+ 'inference', (True, 'aten::_batch_norm_impl_index')),
193
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
194
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
195
+ 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
196
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
197
+ None, non_differentiable(torch.ones(S)), False, ),
198
+ 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
199
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
200
+ non_differentiable(torch.randn(S)), None, False, ),
201
+ 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
202
+ ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
203
+ ('layer_norm', (S, S, S, S), ([5],), '',
204
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
205
+ ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
206
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
207
+ ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
208
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
209
+ ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
210
+ non_differentiable(torch.rand(S))), 'with_weight_and_bias',
211
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
212
+ ('group_norm', (S, S, S), (1, torch.rand(5),),),
213
+ ('local_response_norm', (S, S, S), (2, ),),
214
+ ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
215
+ ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
216
+ ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
217
+ ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
218
+ ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
219
+ ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
220
+ ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
221
+ ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
222
+ ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
223
+ ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
224
+ ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
225
+ ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
226
+ ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
227
+ ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
228
+ ('margin_ranking_loss', (S,), ((S,), (S,)),),
229
+ ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
230
+ ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
231
+ ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
232
+ ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
233
+ ('pixel_shuffle', (1, 9, 4, 4), (3,),),
234
+ ('pixel_unshuffle', (1, 1, 12, 12), (3,),),
235
+ ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
236
+ ('pad', (3, 3, 4, 2), ([1, 1],),),
237
+ ('pairwise_distance', (S, S), ((S, S),),),
238
+ ('pdist', (S, S), (),),
239
+ ('cosine_similarity', (S, S), ((S, S),),),
240
+ ('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
241
+ ('normalize', (S, S, S), (),),
242
+ ('unfold', (S, S, S, S), ([2, 3]),),
243
+ ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
244
+ ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
245
+ ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
246
+ ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
247
+ ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
248
+ ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
249
+ 1, 1., non_differentiable(torch.randn(S))),),
250
+ ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
251
+ non_differentiable(torch.randn(3, 2))),),
252
+ ('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
253
+ (non_differentiable(torch.rand(3, 2)),
254
+ non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
255
+ ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
256
+ (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
257
+ torch.randint(1, S, (S,), dtype=torch.long))),
258
+ ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
259
+ ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
260
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
261
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
262
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
263
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
264
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
265
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
266
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
267
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
268
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
269
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
270
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
271
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
272
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
273
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
274
+ ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
275
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
276
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
277
+ ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
278
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
279
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
280
+ ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
281
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
282
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
283
+ ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
284
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
285
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
286
+ ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
287
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
288
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
289
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
290
+ 'nearest_4d_not_recompute_scale_factor'),
291
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
292
+ 'nearest_4d_with_size_not_recompute_scale_factor'),
293
+ ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
294
+ 'bilinear_4d_with_scale_not_recompute_scale_factor'),
295
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
296
+ 'bilinear_4d_with_size_not_recompute_scale_factor'),
297
+ ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
298
+ 'bicubic_4d_with_scale_not_recompute_scale_factor'),
299
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
300
+ 'bicubic_4d_with_size_not_recompute_scale_factor'),
301
+ ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
302
+ 'nearest_3d_with_scale_not_recompute_scale_factor'),
303
+ ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
304
+ 'nearest_3d_with_size_not_recompute_scale_factor'),
305
+ ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
306
+ 'linear_3d_with_scale_not_recompute_scale_factor'),
307
+ ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
308
+ 'linear_3d_with_size_not_recompute_scale_factor'),
309
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
310
+ 'nearest_5d_with_scale_not_recompute_scale_factor'),
311
+ ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
312
+ 'nearest_5d_with_size_not_recompute_scale_factor'),
313
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
314
+ 'trilinear_5d_with_scale_not_recompute_scale_factor'),
315
+ ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
316
+ 'trilinear_5d_with_size_not_recompute_scale_factor'),
317
+ ]
318
+
319
+ script_template = '''
320
+ def the_method({}):
321
+ return {}
322
+ '''
323
+
324
+ def value_to_literal(value):
325
+ if isinstance(value, str):
326
+ # Quotes string and escapes special characters
327
+ return ascii(value)
328
+ if isinstance(value, torch.Tensor):
329
+ return 'torch.' + str(value)
330
+ else:
331
+ return str(value)
332
+
333
+ def get_call(method_name, func_type, args, kwargs):
334
+ kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()])
335
+ self_arg = args[0]
336
+ if func_type == 'method':
337
+ args = args[1:]
338
+
339
+ argument_str = ', '.join(args)
340
+ argument_str += ', ' if len(args) and len(kwargs) else ''
341
+ argument_str += kwargs_str
342
+
343
+ if func_type == 'functional' or func_type == 'function':
344
+ call = f'torch.{method_name}({argument_str})'
345
+ elif func_type == 'method':
346
+ call = f'{self_arg}.{method_name}({argument_str})'
347
+ elif func_type == 'nn_functional':
348
+ call = f'torch.nn.functional.{method_name}({argument_str})'
349
+ else:
350
+ raise TypeError('Unsupported function type')
351
+
352
+ return call
353
+
354
+ def get_constant(x):
355
+ if x == inf:
356
+ return 'math.inf'
357
+ if x == -inf:
358
+ return '-math.inf'
359
+ return x
360
+
361
+ def get_script_args(args):
362
+ formals: List[str] = []
363
+ tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = []
364
+ actuals: List[str] = []
365
+ for arg in args:
366
+ if isinstance(arg, torch.Tensor):
367
+ name = f'i{len(formals)}'
368
+ formals.append(name)
369
+ actuals.append(name)
370
+ tensors.append(arg)
371
+ elif is_iterable_of_tensors(arg):
372
+ name = f'i{len(formals)}'
373
+ formals.append(name + ': List[torch.Tensor]')
374
+ actuals.append(name)
375
+ tensors.append(list(arg))
376
+ elif isinstance(arg, str):
377
+ actuals.append(f"'{arg}'")
378
+ else:
379
+ actuals.append(str(get_constant(arg)))
380
+ return (formals, tensors, actuals)
381
+
382
+ # create a script function from (name, func_type, output_process_fn),
383
+ # and returns the compiled function and example inputs
384
+ def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
385
+ formals, tensors, actuals = get_script_args(args)
386
+ call = get_call(method_name, func_type, actuals, kwargs)
387
+ script = script_template.format(', '.join(formals), call)
388
+ CU = torch.jit.CompilationUnit(script)
389
+ return CU.the_method, tensors
390
+
391
+ # create a script function from (name, func_type),
392
+ # returns a function takes in (args, kwargs) and runs the compiled function
393
+ def create_script_fn(self, method_name, func_type):
394
+ # function returns tuple containing original output and
395
+ # filtered output to be used in checking gradients
396
+ def script_fn(*args, **kwargs):
397
+ fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
398
+ self.assertExportImport(fn.graph, tensors)
399
+ output = fn(*tensors)
400
+ # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
401
+ script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined]
402
+ return output
403
+ return script_fn
404
+
405
+ class SplitInputs:
406
+ all_tensors: List[Any]
407
+ tensor_args: List[Any]
408
+ nontensor_args: List[Any]
409
+ arg_types: List[str]
410
+ tensor_kwargs: Dict[str, Any]
411
+ kwarg_order: List[str]
412
+ nontensor_kwargs: Dict[str, Any]
413
+ kwarg_types: Dict[str, Any]
414
+
415
+ @staticmethod
416
+ def _is_tensor_input(arg):
417
+ return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)
418
+
419
+ def __init__(self, args, kwargs):
420
+ self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args]
421
+ self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()}
422
+ self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)]
423
+ self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)]
424
+ self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)}
425
+ self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)}
426
+ self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]]
427
+ self.kwarg_order = [k for k, v in kwargs.items()]
428
+
429
+ def nontensors_match(self, other: 'SplitInputs'):
430
+ if self.arg_types != other.arg_types:
431
+ return False
432
+ if self.kwarg_types != other.kwarg_types:
433
+ return False
434
+ if self.kwarg_order != other.kwarg_order:
435
+ return False
436
+ if self.nontensor_args != other.nontensor_args:
437
+ return False
438
+ if self.nontensor_kwargs != other.nontensor_kwargs:
439
+ return False
440
+ return True
441
+
442
+ # make a new function where all non-tensor arguments in 'args' have been partially
443
+ # applied, and all tensor arguments remain.
444
+ # used to trace functions when some arguments are not tensors
445
+ def partial_apply_nontensors(fn, args, kwargs):
446
+ inputs = SplitInputs(args, kwargs)
447
+
448
+ def new_fn(*tensors_):
449
+ tensors = iter(tensors_)
450
+ full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)]
451
+ full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()}
452
+ return fn(*full_args, **full_kwargs)
453
+
454
+ return new_fn, inputs
455
+
456
+ # create a trace function from input fn
457
+ def create_traced_fn(self, fn, cache_traced_fn=False):
458
+ def traced_fn(*inputs, **kwargs):
459
+ # `check_trace` is set to False because check_trace is run with @no_grad
460
+ # Also, `check_against_reference` already does all the checks
461
+ # against python function
462
+ fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
463
+ if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
464
+ traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
465
+ self.assertExportImport(traced.graph, split_inputs.all_tensors)
466
+ output = traced(*split_inputs.all_tensors)
467
+ if cache_traced_fn:
468
+ traced_fn.traced = traced
469
+ traced_fn.split_inputs = split_inputs
470
+ else:
471
+ # Guard to check that nontensor inputs are the same as during tracing
472
+ self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
473
+ output = traced_fn.traced(*split_inputs.all_tensors)
474
+ traced = traced_fn.traced
475
+ # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
476
+ traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined]
477
+ traced_fn.graph = traced.graph # type: ignore[attr-defined]
478
+ return output
479
+ return traced_fn
480
+
481
+ # known to be failing in script
482
+ EXCLUDE_SCRIPT = {
483
+ 'test_norm_fro_default',
484
+ 'test_norm_fro_cpu',
485
+ 'test_norm_nuc',
486
+ 'test_norm_fro',
487
+ 'test_norm_nuc_batched',
488
+
489
+ # aten op has additional cudnn argument
490
+ 'test_nn_unfold',
491
+
492
+ # flaky test - TODO fix
493
+ 'test_nn_ctc_loss',
494
+
495
+ # unknown builtin op
496
+ 'test_nn_fold',
497
+
498
+ # jit doesn't support sparse tensors.
499
+ 'test_to_sparse',
500
+ 'test_to_sparse_dim',
501
+ }
502
+
503
+ # generates a script function and set of example inputs
504
+ # from a specified test in the format of nn_functional_tests
505
+ def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
506
+ test_name = 'test_nn_' + name
507
+
508
+ if variant_name != '':
509
+ test_name = test_name + '_' + variant_name
510
+
511
+ no_grad = variant_name == 'inplace'
512
+
513
+ self_variable = create_input((self_size,))[0][0]
514
+ kwargs = None
515
+
516
+ # need to record this because methods can change the size (e.g. unsqueeze)
517
+ args_variable, kwargs_variable = create_input(args)
518
+
519
+ self_tensor = deepcopy(self_variable.data)
520
+ args_tensor = deepcopy(unpack_variables(args_variable))
521
+
522
+ f_args_variable = (self_variable,) + args_variable
523
+ f_args_tensor = (self_tensor,) + args_tensor
524
+ with torch._jit_internal._disable_emit_hooks():
525
+ script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
526
+ return script_fn, inputs
527
+
528
+
529
+ # additional modules test
530
+ # TODO: delete this list once we make all nn_tests work
531
+ additional_module_tests = [
532
+ {
533
+ 'module_name': 'Bilinear',
534
+ 'constructor_args': (S, S, M),
535
+ 'input_size': (S, S),
536
+ 'extra_args': ((S, S),)
537
+ },
538
+ {
539
+ 'module_name': 'RNNCell',
540
+ 'constructor_args': (S, S),
541
+ 'input_size': (S, S),
542
+ },
543
+ {
544
+ 'module_name': 'LSTMCell',
545
+ 'constructor_args': (S, S),
546
+ 'input_size': (S, S),
547
+ },
548
+ {
549
+ 'module_name': 'GRUCell',
550
+ 'constructor_args': (S, S),
551
+ 'input_size': (S, S),
552
+ },
553
+ {
554
+ 'module_name': 'MultiheadAttention',
555
+ 'constructor_args': (128, 8),
556
+ 'input_size': (10, 8, 128),
557
+ 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
558
+ 'slowTest': True
559
+ },
560
+ {
561
+ 'module_name': 'Transformer',
562
+ 'constructor_args': (1, 1, 1, 1, 2),
563
+ 'input_size': (3, 1, 1),
564
+ 'extra_args': (torch.randn(1, 1, 1),),
565
+ 'slowTest': True
566
+ }
567
+ ]
568
+
569
+ EXCLUDE_SCRIPT_MODULES = {
570
+ 'test_nn_AdaptiveAvgPool2d_tuple_none',
571
+ 'test_nn_AdaptiveAvgPool3d_tuple_none',
572
+ 'test_nn_AdaptiveMaxPool2d_tuple_none',
573
+ 'test_nn_AdaptiveMaxPool3d_tuple_none',
574
+
575
+ # Doesn't use future division, so this is not supported
576
+ 'test_nn_CrossMapLRN2d',
577
+ # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented
578
+ 'test_nn_TransformerDecoderLayer_gelu_activation',
579
+ 'test_nn_TransformerDecoderLayer_relu_activation',
580
+ 'test_nn_TransformerEncoderLayer_gelu_activation',
581
+ 'test_nn_TransformerEncoderLayer_relu_activation',
582
+ 'test_nn_Transformer_multilayer_coder',
583
+ }
584
+
585
+ script_method_template = '''
586
+ def forward({}):
587
+ return {}
588
+ '''
589
+
590
+ def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
591
+ def script_module(*args, **kwargs):
592
+ formals, tensors, actuals = get_script_args(args)
593
+
594
+ method_args = ', '.join(['self'] + actuals)
595
+ call_args_str = ', '.join(actuals)
596
+ call = f"self.submodule({call_args_str})"
597
+ script = script_method_template.format(method_args, call)
598
+
599
+ submodule_constants = []
600
+ if kwargs.get('is_constant'):
601
+ submodule_constants = ['submodule']
602
+
603
+ # Create module to use the script method
604
+ class TheModule(torch.jit.ScriptModule):
605
+ __constants__ = submodule_constants
606
+
607
+ def __init__(self):
608
+ super().__init__()
609
+ self.submodule = nn_module(*constructor_args)
610
+
611
+ def make_module(script):
612
+ module = TheModule()
613
+ # check __repr__
614
+ str(module)
615
+ module.define(script)
616
+ return module
617
+
618
+ module = make_module(script)
619
+ if self:
620
+ self.assertExportImportModule(module, tensors)
621
+ module(*args)
622
+ # skip type-annotating function attributes for now, see: https://github.com/python/mypy/issues/2087
623
+ create_script_module.last_graph = module.graph # type: ignore[attr-defined]
624
+ return module
625
+ return script_module
626
+
627
+ def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
628
+ formals, tensors, actuals = get_script_args(args)
629
+ call = get_call(method_name, func_type, actuals, kwargs)
630
+ script = script_template.format(', '.join(formals), call)
631
+ CU = torch.jit.CompilationUnit(script)
632
+ # to clean up IR
633
+ torch._C._jit_pass_inline(CU.the_method.graph)
634
+ torch._C._jit_pass_constant_propagation(CU.the_method.graph)
635
+ torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
636
+
637
+ def get_nn_module_name_from_kwargs(**kwargs):
638
+ if 'module_name' in kwargs:
639
+ return kwargs['module_name']
640
+ elif 'fullname' in kwargs:
641
+ return kwargs['fullname']
642
+ elif 'constructor' in kwargs:
643
+ return kwargs['constructor'].__name__
644
+
645
+ def get_nn_mod_test_name(**kwargs):
646
+ if 'fullname' in kwargs:
647
+ test_name = kwargs['fullname']
648
+ else:
649
+ test_name = get_nn_module_name_from_kwargs(**kwargs)
650
+ if 'desc' in kwargs:
651
+ test_name = f"{test_name}_{kwargs['desc']}"
652
+ return f'test_nn_{test_name}'
653
+
654
+ def get_nn_module_class_from_kwargs(**kwargs):
655
+ name = get_nn_module_name_from_kwargs(**kwargs)
656
+ index = name.find("_")
657
+ if index == -1:
658
+ return name
659
+ else:
660
+ return name[0:index]
661
+
662
+ def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
663
+ name = get_nn_module_name_from_kwargs(**kwargs)
664
+
665
+ if 'desc' in kwargs and 'eval' in kwargs['desc']:
666
+ # eval() is not supported, so skip these tests
667
+ return
668
+
669
+ test_name = name
670
+ if 'desc' in kwargs:
671
+ test_name = f"{test_name}_{kwargs['desc']}"
672
+ test_name = get_nn_mod_test_name(**kwargs)
673
+
674
+ if test_name in EXCLUDE_SCRIPT_MODULES:
675
+ return
676
+ if 'constructor' in kwargs:
677
+ nn_module = kwargs['constructor']
678
+ else:
679
+ nn_module = getattr(torch.nn, name)
680
+
681
+ if "FunctionalModule" in str(nn_module):
682
+ return
683
+
684
+ if 'constructor_args_fn' in kwargs:
685
+ constructor_args = kwargs['constructor_args_fn']()
686
+ else:
687
+ constructor_args = kwargs.get('constructor_args', ())
688
+
689
+ # Set up inputs from tuple of sizes or constructor fn
690
+ input_dtype = torch.double
691
+ if 'input_fn' in kwargs:
692
+ input = kwargs['input_fn']()
693
+ if isinstance(input, torch.Tensor):
694
+ input = (input,)
695
+
696
+ if all(tensor.is_complex() for tensor in input):
697
+ input_dtype = torch.cdouble
698
+ else:
699
+ input = (kwargs['input_size'],)
700
+
701
+ # Extra parameters to forward()
702
+ if 'extra_args' in kwargs:
703
+ input = input + kwargs['extra_args']
704
+
705
+ if 'target_size' in kwargs:
706
+ input = input + (kwargs['target_size'],)
707
+ elif 'target_fn' in kwargs:
708
+ if torch.is_tensor(input):
709
+ input = (input,)
710
+ input = input + (kwargs['target_fn'](),)
711
+
712
+ args_variable, kwargs_variable = create_input(input, dtype=input_dtype)
713
+ f_args_variable = deepcopy(unpack_variables(args_variable))
714
+ out_var = deepcopy(f_args_variable)
715
+
716
+ args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)
717
+
718
+ return mod, out_var
719
+
720
+
721
+ def get_all_nn_module_tests():
722
+ return module_tests + new_module_tests + additional_module_tests
venv/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py ADDED
@@ -0,0 +1,893 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ from torch.autograd import Variable
5
+ from torch.autograd.function import _nested_map
6
+ from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
7
+
8
+ from torch.onnx import OperatorExportTypes
9
+ import torch
10
+ import torch.cuda
11
+ import torch.jit
12
+ import torch.jit._logging
13
+ import torch.jit.frontend
14
+ import torch.jit.quantized
15
+ import zipfile
16
+ import functools
17
+
18
+ # Testing utils
19
+ from torch.testing import FileCheck
20
+ from torch.testing._internal.common_utils import IS_WINDOWS, \
21
+ freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \
22
+ is_iterable_of_tensors
23
+ from torch.testing._internal.common_jit import JitCommonTestCase
24
+ from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
25
+
26
+ # Standard library
27
+ from contextlib import contextmanager
28
+ from functools import reduce
29
+ from io import StringIO
30
+ from collections import defaultdict
31
+
32
+ import importlib.util
33
+ import inspect
34
+ import io
35
+ import math
36
+ import os
37
+ import pickle
38
+ import sys
39
+ import tempfile
40
+ import textwrap
41
+ from importlib.abc import Loader
42
+ from typing import Any, Dict, List, Tuple, Union
43
+
44
+ RUN_CUDA = torch.cuda.is_available()
45
+ RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
46
+ RUN_CUDA_HALF = RUN_CUDA
47
+ # HIP supports half, no version check necessary
48
+ if torch.cuda.is_available() and not torch.version.hip:
49
+ CUDA_VERSION = torch._C._cuda_getCompiledVersion()
50
+ for d in range(torch.cuda.device_count()):
51
+ major = torch.cuda.get_device_capability(d)[0]
52
+ if (major < 6):
53
+ RUN_CUDA_HALF = False
54
+
55
+ def execWrapper(code, glob, loc):
56
+ exec(code, glob, loc)
57
+
58
+ def do_input_map(fn, input):
59
+ return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
60
+
61
+ def clear_class_registry():
62
+ torch._C._jit_clear_class_registry()
63
+ torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
64
+ torch.jit._state._clear_class_state()
65
+
66
+ def get_execution_plan(graph_executor_state):
67
+ execution_plans = list(graph_executor_state.execution_plans.values())
68
+ num_plans = len(execution_plans)
69
+ if num_plans != 1:
70
+ raise RuntimeError('This test assumes this GraphExecutor should '
71
+ f'only have one execution plan, got: {num_plans}')
72
+ return execution_plans[0]
73
+
74
+ class _AssertRaisesRegexWithHighlightContext:
75
+ """
76
+ A context manager that is useful for checking that error messages highlight
77
+ the correct part of the source code.
78
+ """
79
+
80
+ def __init__(self, test_case, exception, regex, highlight):
81
+ self.test_case = test_case
82
+ self.exception_type = exception
83
+ self.regex = regex
84
+ self.highlight = highlight
85
+
86
+ def __enter__(self):
87
+ return self
88
+
89
+ def __exit__(self, type, value, traceback):
90
+ with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
91
+ if type:
92
+ raise value
93
+
94
+ if self.highlight:
95
+ FileCheck().check_source_highlighted(self.highlight).run(str(value))
96
+
97
+ return True
98
+
99
+ FUSION_GROUP = "prim::TensorExprGroup"
100
+
101
+ class JitTestCase(JitCommonTestCase):
102
+ _do_cuda_memory_leak_check = True
103
+ _restored_warnings = False
104
+
105
+ class capture_stdout(list):
106
+ """
107
+ Replace sys.stdout with a temporary StringIO
108
+ """
109
+ def __enter__(self):
110
+ self.sys_stdout = sys.stdout
111
+ self.stringio = StringIO()
112
+ sys.stdout = self.stringio
113
+ return self
114
+
115
+ def __exit__(self, *args):
116
+ self.append(str(self.stringio.getvalue()))
117
+ del self.stringio
118
+ sys.stdout = self.sys_stdout
119
+
120
+ class capture_stderr(list):
121
+ """
122
+ Replace sys.stderr with a temporary StringIO
123
+ """
124
+ def __enter__(self):
125
+ self.sys_stderr = sys.stderr
126
+ self.stringio = StringIO()
127
+ sys.stderr = self.stringio
128
+ return self
129
+
130
+ def __exit__(self, *args):
131
+ self.append(str(self.stringio.getvalue()))
132
+ del self.stringio
133
+ sys.stderr = self.sys_stderr
134
+
135
+ def setHooks(self):
136
+ torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook)
137
+
138
+ def clearHooks(self):
139
+ torch._C._jit_set_emit_hooks(None, None)
140
+
141
+ def setUp(self):
142
+ super().setUp()
143
+ # unittest overrides all warning filters and forces all of them to show up
144
+ # after we install our own to silence those coming from inside PyTorch.
145
+ # This will ensure that our filter still takes precedence.
146
+ if not JitTestCase._restored_warnings:
147
+ torch.jit.TracerWarning.ignore_lib_warnings()
148
+ JitTestCase._restored_warnings = True
149
+ self.setHooks()
150
+
151
+ def tearDown(self):
152
+ super().tearDown()
153
+ # needs to be cleared because python might be unloaded before
154
+ # the callback gets destructed
155
+ self.clearHooks()
156
+ clear_class_registry()
157
+
158
+ def assertAllFused(self, graph, except_for=()):
159
+
160
+ # note this helper collects nodes on 'fast path' only
161
+ # i.e. the true blocks of specialized checks
162
+ def get_nodes_and_parents_recursively(block, kind, acc):
163
+ for node in block.nodes():
164
+ if node.kind() == kind:
165
+ acc[block].append(node)
166
+ elif node.kind() == 'prim::DifferentiableGraph':
167
+ get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc)
168
+ elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or
169
+ node.inputs().__next__().node().kind() == 'prim::TypeCheck' or
170
+ node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'):
171
+ get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc)
172
+ else:
173
+ for inner_block in node.blocks():
174
+ get_nodes_and_parents_recursively(inner_block, kind, acc)
175
+
176
+ allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate',
177
+ 'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for)
178
+
179
+ fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list)
180
+ get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups)
181
+ self.assertTrue(len(fusion_groups) == 1, f'got {graph}')
182
+ (graph, fusion_nodes) = next(iter(fusion_groups.items()))
183
+ # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes`
184
+ self.assertTrue(len(fusion_nodes) == 1, f'got {graph}')
185
+ self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
186
+ f'got {graph}')
187
+
188
+ def _isHookExceptionOk(self, e):
189
+ se = str(e)
190
+ allowed = ("Could not export Python function",
191
+ "closures are not exportable")
192
+ for a in allowed:
193
+ if a in se:
194
+ return True
195
+ return False
196
+
197
+ def _compared_saved_loaded(self, m):
198
+ def extract_files(buffer):
199
+ # crack open the zip format to get at the main module code
200
+ archive = zipfile.ZipFile(buffer)
201
+ # check that we have no duplicate names
202
+ self.assertEqual(len(set(archive.namelist())), len(archive.namelist()))
203
+ files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
204
+ # unwrap all the code files into strings
205
+ code_files_str = filter(lambda x: x.endswith('.py'), files)
206
+ code_files_stream = (archive.open(f) for f in code_files_str)
207
+ code_files = ("".join([line.decode() for line in file]) for file in code_files_stream)
208
+
209
+ # unpickle all the debug files
210
+ debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files)
211
+ debug_files_stream = (archive.open(f) for f in debug_files_str)
212
+ debug_files = (pickle.load(f) for f in debug_files_stream)
213
+ return code_files, debug_files
214
+
215
+ # disable the hook while we parse code, otherwise we will re-enter the hook
216
+ with torch._jit_internal._disable_emit_hooks():
217
+ try:
218
+ # short-circuit if this is an empty function or module
219
+ if len(m.code) == 0:
220
+ return
221
+ if isinstance(m, torch._C.ScriptModule):
222
+ if len(m._method_names()) == 0:
223
+ return
224
+
225
+ # save the module to a buffer
226
+ buffer = io.BytesIO()
227
+ torch.jit.save(m, buffer)
228
+ # copy the data in the buffer so we can restore it later. This
229
+ # is because py2 and py3 have different semantics with zipfile
230
+ # and it's easier to just work with a fresh copy each time.
231
+ buffer_copy = buffer.getvalue()
232
+
233
+ code_files, debug_files = extract_files(buffer)
234
+
235
+ except RuntimeError as e:
236
+ if not self._isHookExceptionOk(e):
237
+ raise
238
+ else:
239
+ return
240
+
241
+ # import the model again (from the copy we made of the original)
242
+ buffer2 = io.BytesIO(buffer_copy)
243
+ imported = torch.jit.load(buffer2)
244
+
245
+ # save it again
246
+ saved_module_buffer_2 = io.BytesIO()
247
+ torch.jit.save(imported, saved_module_buffer_2)
248
+
249
+ saved_module_buffer_2.seek(0)
250
+ code_files_2, debug_files_2 = extract_files(saved_module_buffer_2)
251
+
252
+ for a, b in zip(code_files, code_files_2):
253
+ self.assertMultiLineEqual(a, b)
254
+
255
+ if isinstance(m, torch._C.ScriptModule):
256
+ self.assertTrue(torch._C._ivalue_tags_match(m, imported._c))
257
+
258
+
259
+ def emitFunctionHook(self, func):
260
+ # func has invalid names for export, skip the jitter check
261
+ if func.name == "<lambda>" or "aten::" in func.name:
262
+ return
263
+ self._compared_saved_loaded(func)
264
+
265
+ def emitModuleHook(self, module):
266
+ self._compared_saved_loaded(module)
267
+
268
+
269
+ def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None):
270
+ buffer = io.BytesIO()
271
+ m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None)
272
+ torch.jit.save(m, buffer)
273
+ m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
274
+ buffer.seek(0)
275
+ imported = torch.jit.load(buffer, map_location=map_location)
276
+ imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
277
+
278
+ if not also_test_file:
279
+ return imported
280
+
281
+ # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
282
+ # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
283
+ # close the file after creation and try to remove it manually
284
+ f = tempfile.NamedTemporaryFile(delete=False)
285
+ try:
286
+ f.close()
287
+ imported.save(f.name)
288
+ result = torch.jit.load(f.name, map_location=map_location)
289
+ finally:
290
+ os.unlink(f.name)
291
+
292
+ result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
293
+ return result
294
+
295
+ def assertGraphContains(self, graph, kind, consider_subgraphs=False):
296
+
297
+ if consider_subgraphs:
298
+ strgraph = str(graph)
299
+ count = strgraph.count(kind) - strgraph.count(f'with {kind}')
300
+ self.assertTrue(count > 0)
301
+ return
302
+
303
+ def nodes(block):
304
+ out = []
305
+ for node in block.nodes():
306
+ if node.kind() == kind:
307
+ out.append(node)
308
+ for block in node.blocks():
309
+ out += nodes(block)
310
+ return out
311
+
312
+ out_nodes = nodes(graph)
313
+ self.assertTrue(len(out_nodes) > 0)
314
+
315
+ def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False):
316
+ def perform_assert(graph, kind, actual, expected, consider_subgraphs):
317
+ if actual == expected:
318
+ return
319
+ subgraph = 'including' if consider_subgraphs else 'excluding'
320
+ raise AssertionError(
321
+ f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}')
322
+
323
+ if consider_subgraphs:
324
+ strgraph = str(graph)
325
+ count = strgraph.count(kind) - strgraph.count(f'with {kind}')
326
+ perform_assert(graph, kind, count, num_kind_nodes,
327
+ consider_subgraphs)
328
+ return
329
+
330
+ def nodes(block):
331
+ out = []
332
+ for node in block.nodes():
333
+ if node.kind() == kind:
334
+ out.append(node)
335
+ for block in node.blocks():
336
+ out += nodes(block)
337
+ return out
338
+
339
+ out_nodes = nodes(graph)
340
+ perform_assert(graph, kind, len(out_nodes), num_kind_nodes,
341
+ consider_subgraphs)
342
+
343
+ def assertExpectedONNXGraph(self, g, *args, **kwargs):
344
+ g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX)
345
+ self.assertExpectedGraph(g, *args, **kwargs)
346
+
347
+ def assertExpectedGraph(self, trace, *args, **kwargs):
348
+ if isinstance(trace, torch._C.Graph):
349
+ graph = trace
350
+ else:
351
+ graph = trace.graph()
352
+
353
+ torch._C._jit_pass_lint(graph)
354
+ torch._C._jit_pass_dce(graph)
355
+ torch._C._jit_pass_lint(graph)
356
+ graph = torch._C._jit_pass_canonicalize(graph)
357
+ torch._C._jit_pass_lint(graph)
358
+ self.assertExpected(str(graph), *args, **kwargs)
359
+
360
+ def run_pass(self, name, trace):
361
+ if isinstance(trace, torch._C.Graph):
362
+ graph = trace
363
+ set_graph = False
364
+ else:
365
+ set_graph = True
366
+ graph = trace.graph()
367
+
368
+ torch._C._jit_pass_lint(graph)
369
+ result = getattr(torch._C, '_jit_pass_' + name)(graph)
370
+ if result is not None and not isinstance(result, bool):
371
+ graph = result
372
+ torch._C._jit_pass_lint(graph)
373
+
374
+ if set_graph:
375
+ trace.set_graph(graph)
376
+ return graph
377
+
378
+ def get_frame_vars(self, frames_up):
379
+ frame = inspect.currentframe()
380
+ if not frame:
381
+ raise RuntimeError("failed to inspect frame")
382
+ i = 0
383
+ while i < frames_up + 1:
384
+ frame = frame.f_back
385
+ if not frame:
386
+ raise RuntimeError("failed to get frame")
387
+ i += 1
388
+ defined_vars: Dict[str, Any] = {}
389
+ defined_vars.update(frame.f_locals)
390
+ defined_vars.update(frame.f_globals)
391
+ return defined_vars
392
+
393
+ def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
394
+ return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)
395
+
396
+ def checkScriptRaisesRegex(self, script, inputs, exception, regex,
397
+ name=None, outputs=None, capture_output=False,
398
+ frames_up=1, profiling=ProfilingMode.PROFILING):
399
+ """
400
+ Checks that a given function will throw the correct exception,
401
+ when executed with normal python, the string frontend, and the
402
+ AST frontend. Logic taken from `checkScript` (see comments there
403
+ for details)
404
+ """
405
+ with enable_profiling_mode_for_profiling_tests():
406
+ # Normal Python
407
+ with self.assertRaisesRegex(exception, regex):
408
+ if isinstance(script, str):
409
+ frame = self.get_frame_vars(frames_up)
410
+ the_locals: Dict[str, Any] = {}
411
+ execWrapper(script, glob=frame, loc=the_locals)
412
+ frame.update(the_locals)
413
+
414
+ python_fn = frame[name]
415
+ else:
416
+ python_fn = script
417
+
418
+ python_fn(*inputs)
419
+
420
+ # String frontend
421
+ with self.assertRaisesRegex(exception, regex):
422
+ if isinstance(script, str):
423
+ cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
424
+ string_frontend = getattr(cu, name)
425
+ else:
426
+ source = textwrap.dedent(inspect.getsource(script))
427
+ cu = torch.jit.CompilationUnit(source, _frames_up=frames_up)
428
+ string_frontend = getattr(cu, script.__name__)
429
+
430
+ string_frontend(*inputs)
431
+
432
+ # Python AST frontend
433
+ if not isinstance(script, str):
434
+ with self.assertRaisesRegex(exception, regex):
435
+ ge = torch.jit.script(python_fn)
436
+ ge(*inputs)
437
+
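# Illustrative usage sketch (added for exposition, not part of the upstream file):
# `checkScriptRaisesRegex` runs the failing function in eager mode, through the
# string frontend, and through torch.jit.script, asserting the same error each
# time. The always-failing function below is hypothetical.
def _example_check_script_raises(self):
    def bad_op(x):
        raise RuntimeError("expected failure")

    self.checkScriptRaisesRegex(bad_op, (torch.zeros(3),), Exception, "expected failure")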
438
+ def checkBailouts(self, model, inputs, expected):
439
+ state = model.get_debug_state()
440
+ plan = get_execution_plan(state)
441
+ num_bailouts = plan.code.num_bailouts()
442
+ for i in range(0, num_bailouts):
443
+ plan.code.request_bailout(i)
444
+ bailout_outputs = model(*inputs)
445
+ self.assertEqual(bailout_outputs, expected)
446
+
447
+ def checkScript(self,
448
+ script,
449
+ inputs,
450
+ name='func',
451
+ optimize=True,
452
+ inputs_requires_grad=False,
453
+ capture_output=False,
454
+ frames_up=1,
455
+ profiling=ProfilingMode.PROFILING,
456
+ atol=None,
457
+ rtol=None):
458
+ """
459
+ Checks that a given script generates the same output as the Python
460
+ version using the given inputs.
461
+ """
462
+ with torch.jit.optimized_execution(optimize):
463
+ with enable_profiling_mode_for_profiling_tests():
464
+ extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs)
465
+ if isinstance(script, str):
466
+ # Compile the string to a Script function
467
+ # with enable_profiling_mode():
468
+ cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
469
+
470
+ # Execute the Python function so we can run it later and get its
471
+ # outputs
472
+
473
+ frame = self.get_frame_vars(frames_up)
474
+ the_locals: Dict[str, Any] = {}
475
+ execWrapper(script, glob=frame, loc=the_locals)
476
+ frame.update(the_locals)
477
+
478
+ python_fn = frame[name]
479
+ scripted_fn = getattr(cu, name)
480
+ else:
481
+
482
+ # Check the string frontend first
483
+ source = textwrap.dedent(inspect.getsource(script))
484
+ self.checkScript(
485
+ source,
486
+ inputs,
487
+ script.__name__,
488
+ optimize=optimize,
489
+ inputs_requires_grad=inputs_requires_grad,
490
+ capture_output=capture_output,
491
+ profiling=profiling,
492
+ frames_up=2)
493
+
494
+ # Continue checking the Python frontend
495
+ scripted_fn = torch.jit.script(script, _frames_up=1)
496
+ python_fn = script
497
+
498
+ if inputs_requires_grad:
499
+ recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
500
+ else:
501
+ recording_inputs = inputs
502
+
503
+ if capture_output:
504
+ with self.capture_stdout() as script_stdout:
505
+ script_outputs = scripted_fn(*recording_inputs)
506
+ with self.capture_stdout() as opt_script_stdout:
507
+ opt_script_outputs = scripted_fn(*recording_inputs)
508
+ with self.capture_stdout() as _python_stdout:
509
+ python_outputs = python_fn(*inputs)
510
+ if not IS_WINDOWS:
511
+ self.assertExpected(script_stdout[0], subname='stdout')
512
+ self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol)
513
+ else:
514
+ # profiling run
515
+ script_outputs = scripted_fn(*recording_inputs)
516
+ if inputs_requires_grad or extra_profile_runs:
517
+ opt_script_outputs = scripted_fn(*recording_inputs)
518
+ # optimized run
519
+ opt_script_outputs = scripted_fn(*recording_inputs)
520
+ if TEST_BAILOUTS:
521
+ self.checkBailouts(scripted_fn, inputs, opt_script_outputs)
522
+ python_outputs = python_fn(*inputs)
523
+ self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol)
524
+ self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol)
525
+ return scripted_fn
526
+
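# Illustrative usage sketch (added for exposition, not part of the upstream file):
# `checkScript` compiles the function with both the string frontend and
# torch.jit.script, then compares their outputs against the eager result.
# The function and inputs below are hypothetical.
def _example_check_script(self):
    def fn(x, y):
        return x * 2 + y

    self.checkScript(fn, (torch.randn(3), torch.randn(3)))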
527
+ def checkTrace(self, func, reference_tensors, input_tensors=None,
528
+ drop=None, allow_unused=False, verbose=False,
529
+ inputs_require_grads=True, check_tolerance=1e-5, export_import=True,
530
+ _force_outplace=False, grad_atol=None, grad_rtol=None):
531
+
532
+ # TODO: check gradients for parameters, not just inputs
533
+ def allSum(vs):
534
+ # drop allows us to remove some values from ever being used
535
+ # to test unused outputs
536
+ if drop is not None:
537
+ vs = vs[:-drop]
538
+ # we don't want all the grad for all the outputs to be the same
539
+ # so we multiply each by a constant
540
+ return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
541
+ if input_tensors is None:
542
+ input_tensors = reference_tensors
543
+
544
+ def flatten_inputs(inputs):
545
+ def input_reduce(input, fn, acc):
546
+ if isinstance(input, torch.Tensor):
547
+ fn(input, acc)
548
+ elif isinstance(input, dict):
549
+ reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc)
550
+ else:
551
+ reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc)
552
+ return acc
553
+ return tuple(input_reduce(inputs, lambda t, acc: acc.append(t), []))
554
+
555
+ nograd_inputs = reference_tensors
556
+ if inputs_require_grads:
557
+ recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors)
558
+ flattened_recording_inputs = flatten_inputs(recording_inputs)
559
+ else:
560
+ recording_inputs = reference_tensors
561
+
562
+ # `check_trace` is set to False because check_trace is run with @no_grad
563
+ # Also, `checkTrace` already does all the checks
564
+ # against the Python function
565
+ ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance,
566
+ _force_outplace=_force_outplace, check_trace=False)
567
+
568
+ if export_import:
569
+ ge = self.getExportImportCopy(ge)
570
+
571
+ if verbose:
572
+ print(ge.graph)
573
+
574
+ # test no gradients case
575
+ outputs = func(*nograd_inputs)
576
+ outputs_ge = ge(*nograd_inputs)
577
+ self.assertEqual(outputs, outputs_ge)
578
+
579
+ # test gradients case
580
+ outputs = func(*recording_inputs)
581
+ if inputs_require_grads:
582
+ grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs,
583
+ allow_unused=allow_unused)
584
+
585
+ outputs_ge = ge(*recording_inputs)
586
+ if inputs_require_grads:
587
+ grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs,
588
+ allow_unused=allow_unused)
589
+ self.assertEqual(outputs, outputs_ge)
590
+ if inputs_require_grads:
591
+ self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
592
+
593
+ # test the grad grad case
594
+ outputs = func(*recording_inputs)
595
+ l1 = allSum(outputs)
596
+ if inputs_require_grads:
597
+ grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True,
598
+ allow_unused=allow_unused)
599
+ if inputs_require_grads:
600
+ l2 = (allSum(grads) * l1)
601
+ grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused)
602
+
603
+ if inputs_require_grads:
604
+ recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors)
605
+ flattened_recording_inputs = flatten_inputs(recording_inputs)
606
+
607
+ outputs_ge = ge(*recording_inputs)
608
+ l1_ge = allSum(outputs_ge)
609
+ if inputs_require_grads:
610
+ grads_ge = torch.autograd.grad(
611
+ l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused)
612
+
613
+ if inputs_require_grads:
614
+ l2_ge = (allSum(grads_ge) * l1_ge)
615
+ grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused)
616
+
617
+ self.assertEqual(outputs, outputs_ge)
618
+ if inputs_require_grads:
619
+ self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
620
+ for g2, g2_ge in zip(grads2, grads2_ge):
621
+ if g2 is None and g2_ge is None:
622
+ continue
623
+ self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4)
624
+
625
+ return ge
626
+
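# Illustrative usage sketch (added for exposition, not part of the upstream file):
# `checkTrace` traces the function, optionally round-trips it through save/load,
# and compares outputs plus first- and second-order gradients against eager.
# The function and inputs below are hypothetical.
def _example_check_trace(self):
    def fn(x, y):
        return (x + y) * y

    self.checkTrace(fn, (torch.randn(2, 2), torch.randn(2, 2)))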
627
+ def checkModule(self, nn_module, args):
628
+ """
629
+ Check that an nn.Module's results in script mode match eager mode and that it
630
+ can be exported
631
+ """
632
+ sm = torch.jit.script(nn_module)
633
+
634
+ with freeze_rng_state():
635
+ eager_out = nn_module(*args)
636
+
637
+ with freeze_rng_state():
638
+ script_out = sm(*args)
639
+
640
+ self.assertEqual(eager_out, script_out)
641
+ self.assertExportImportModule(sm, args)
642
+
643
+ return sm
644
+
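# Illustrative usage sketch (added for exposition, not part of the upstream file):
# `checkModule` scripts the module, compares eager and scripted outputs under a
# frozen RNG state, and checks export/import. The module and input are hypothetical.
def _example_check_module(self):
    self.checkModule(torch.nn.Linear(4, 2), (torch.randn(3, 4),))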
645
+ class NoTracerWarnContextManager:
646
+ def __enter__(self):
647
+ self.prev = torch._C._jit_get_tracer_state_warn()
648
+ torch._C._jit_set_tracer_state_warn(False)
649
+
650
+ def __exit__(self, *args):
651
+ torch._C._jit_set_tracer_state_warn(self.prev)
652
+
653
+ @contextmanager
654
+ def inline_everything_mode(should_inline):
655
+ old = torch._C._jit_get_inline_everything_mode()
656
+ torch._C._jit_set_inline_everything_mode(should_inline)
657
+ try:
658
+ yield
659
+ finally:
660
+ torch._C._jit_set_inline_everything_mode(old)
661
+
662
+ @contextmanager
663
+ def set_fusion_group_inlining(inlining):
664
+ old = torch._C._debug_get_fusion_group_inlining()
665
+ torch._C._debug_set_fusion_group_inlining(inlining)
666
+ try:
667
+ yield
668
+ finally:
669
+ torch._C._debug_set_fusion_group_inlining(old)
670
+
671
+ # note: not re-entrant, use unnested only
672
+ @contextmanager
673
+ def disable_autodiff_subgraph_inlining(enabled=True):
674
+ torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
675
+ try:
676
+ yield
677
+ finally:
678
+ torch._C._debug_set_autodiff_subgraph_inlining(True)
679
+
680
+ def _inline_everything(fn):
681
+ @functools.wraps(fn)
682
+ def wrapper(*args, **kwargs):
683
+ with inline_everything_mode(True):
684
+ fn(*args, **kwargs)
685
+ return wrapper
686
+
687
+ # this exists for forward compatibility reasons temporarily.
688
+ # TODO(suo) remove
689
+ def _tmp_donotuse_dont_inline_everything(fn):
690
+ @functools.wraps(fn)
691
+ def wrapper(*args, **kwargs):
692
+ with inline_everything_mode(False):
693
+ fn(*args, **kwargs)
694
+ return wrapper
695
+
696
+ # make it easy to quickly define/trace a function for these tests
697
+ def _trace(*args, **kwargs):
698
+ def wrapper(func):
699
+ return torch.jit.trace(func, args, **kwargs)
700
+ return wrapper
701
+
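# Illustrative usage sketch (added for exposition, not part of the upstream file):
# `_trace` takes the example inputs up front and returns a decorator, so a traced
# function can be declared inline in a test. The function below is hypothetical.
def _example_trace_decorator():
    @_trace(torch.rand(3, 4))
    def traced_relu(x):
        return torch.relu(x)

    return traced_relu(torch.rand(3, 4))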
702
+
703
+ def enable_cpu_fuser(fn):
704
+ def wrapper(*args, **kwargs):
705
+ torch._C._jit_override_can_fuse_on_cpu_legacy(True)
706
+ torch._C._jit_override_can_fuse_on_cpu(True)
707
+ torch._C._jit_set_te_must_use_llvm_cpu(False)
708
+ try:
709
+ fn(*args, **kwargs)
710
+ finally:
711
+ torch._C._jit_override_can_fuse_on_cpu_legacy(False)
712
+ torch._C._jit_override_can_fuse_on_cpu(False)
713
+ torch._C._jit_set_te_must_use_llvm_cpu(True)
714
+ return wrapper
715
+
716
+
717
+ def enable_cpu_fuser_if(cond):
718
+ if cond:
719
+ return enable_cpu_fuser
720
+ else:
721
+ def noop_fuser(fn):
722
+ def wrapper(*args, **kwargs):
723
+ return fn(*args, **kwargs)
724
+ return wrapper
725
+ return noop_fuser
726
+
727
+ def get_forward(c):
728
+ return c._get_method('forward')
729
+
730
+ def get_forward_graph(c):
731
+ return c._get_method('forward').graph
732
+
733
+ def get_module_method(m, module, method):
734
+ return m._c.getattr(module)._get_method(method)
735
+
736
+ def attrs_with_prefix(module, prefix):
737
+ return [x for x, _ in module._modules._c.items()
738
+ if x.startswith(prefix)]
739
+
740
+ def warmup_backward(f, *args):
741
+ profiling_count = 3
742
+ results = []
743
+ for i in range(profiling_count):
744
+ if len(args) > 0:
745
+ r = torch.autograd.grad(f, *args)
746
+ results.append(r)
747
+ else:
748
+ f.backward(retain_graph=True)
749
+
750
+ return results
751
+
752
+ # TODO: Remove me once https://bugs.python.org/issue42666 is resolved
753
+ def make_global(*args):
754
+ for arg in args:
755
+ setattr(sys.modules[arg.__module__], arg.__name__, arg)
756
+
757
+ # Helper function to eval Python3 code without causing a syntax error for
758
+ # this file under py2
759
+ def _get_py3_code(code, fn_name):
760
+ with tempfile.TemporaryDirectory() as tmp_dir:
761
+ script_path = os.path.join(tmp_dir, 'script.py')
762
+ with open(script_path, 'w') as f:
763
+ f.write(code)
764
+ spec = importlib.util.spec_from_file_location(fn_name, script_path)
765
+ module = importlib.util.module_from_spec(spec)
766
+ loader = spec.loader
767
+ assert isinstance(loader, Loader) # Assert type to meet MyPy requirement
768
+ loader.exec_module(module)
769
+ fn = getattr(module, fn_name)
770
+ return fn
771
+
772
+ class TensorExprTestOptions:
773
+ def __init__(self):
774
+ self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
775
+ self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)
776
+
777
+ self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
778
+ self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
779
+ torch._C._jit_override_can_fuse_on_cpu(True)
780
+ torch._C._jit_override_can_fuse_on_gpu(True)
781
+ self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
782
+ torch._C._jit_set_texpr_fuser_enabled(True)
783
+ self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
784
+ torch._C._debug_set_fusion_group_inlining(False)
785
+ self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
786
+ torch._C._jit_set_te_must_use_llvm_cpu(False)
787
+
788
+ def restore(self):
789
+ torch._C._jit_set_profiling_executor(self.old_profiling_executor)
790
+ torch._C._get_graph_executor_optimize(self.old_profiling_mode)
791
+
792
+ torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
793
+ torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
794
+ torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
795
+ torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
796
+ torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
797
+
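# Illustrative usage sketch (added for exposition, not part of the upstream file):
# a test case typically builds TensorExprTestOptions in setUp to force the
# TensorExpr fuser on, and restores the previous executor/fuser flags in tearDown.
class _ExampleTensorExprTestCase(JitTestCase):
    def setUp(self):
        super().setUp()
        self.tensorexpr_options = TensorExprTestOptions()

    def tearDown(self):
        self.tensorexpr_options.restore()
        super().tearDown()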
798
+ def clone_inputs(args):
799
+ inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
800
+
801
+ for arg in args:
802
+ if isinstance(arg, torch.Tensor):
803
+ inputs.append(arg.detach().clone())
804
+ elif is_iterable_of_tensors(arg):
805
+ inputs.append([t.detach().clone() for t in arg])
806
+ else:
807
+ inputs.append(arg)
808
+
809
+ return inputs
810
+
811
+ def get_traced_sample_variant_pairs(device, dtype, op):
812
+ # tuples of (variant, sample)
813
+ outputs: List[Tuple[Any, Any]] = []
814
+
815
+ samples = op.sample_inputs(device, dtype)
816
+
817
+ # Acquires variants to test
818
+ func = op.get_op()
819
+ method = op.get_method()
820
+ variants = {
821
+ # TODO: inplace tests currently fail, fix and add inplace variant
822
+ 'function': func, 'method': method,
823
+ }
824
+
825
+ # TODO: find better way to standardize on op registration itself..
826
+ has_fake_function = op.name in ["resize_", 'resize_as_']
827
+
828
+ if has_fake_function:
829
+ variants = {'method': getattr(torch.Tensor, op.name)}
830
+
831
+ # In eager mode, these ops can take (Tensor, bool) args; but in
832
+ # JIT they can only take (Tensor, Scalar), and bool is not a
833
+ # scalar in the JIT type system. So to test these in JIT, the bool
834
+ # is converted to an int for the test.
835
+ ops_with_unsupported_bool_args = [
836
+ {
837
+ "name": "div_floor_rounding",
838
+ "arg_idx": [0],
839
+ },
840
+ {
841
+ "name": "div_no_rounding_mode",
842
+ "arg_idx": [0],
843
+ },
844
+ {
845
+ "name": "div_trunc_rounding",
846
+ "arg_idx": [0],
847
+ },
848
+ {
849
+ "name": "index_fill",
850
+ "arg_idx": [2],
851
+ },
852
+ {
853
+ "name": "full_like",
854
+ "arg_idx": [0],
855
+ },
856
+ {
857
+ "name": "mul",
858
+ "arg_idx": [0],
859
+ },
860
+ {
861
+ "name": "new_full",
862
+ "arg_idx": [1],
863
+ },
864
+ ]
865
+
866
+ # doesn't support tracing
867
+ if has_fake_function:
868
+ return outputs
869
+
870
+ for sample in samples:
871
+ for variant in variants.values():
872
+ if variant is None:
873
+ continue
874
+
875
+ if is_lambda(variant):
876
+ continue
877
+
878
+ matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args)
879
+ for op_data in matching_ops:
880
+ for idx in op_data["arg_idx"]:
881
+ args = list(sample.args)
882
+ if len(sample.args) > idx and isinstance(sample.args[idx], bool):
883
+ args[idx] = int(args[idx])
884
+ sample.args = tuple(args)
885
+
886
+ outputs.append((variant, sample))
887
+
888
+ return outputs
889
+
890
+ # types.LambdaType gave false positives
891
+ def is_lambda(lamb):
892
+ LAMBDA = lambda: 0 # noqa: E731
893
+ return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__
venv/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py ADDED
@@ -0,0 +1,208 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch._dynamo.test_case
4
+ import unittest.mock
5
+ import os
6
+ import contextlib
7
+ import torch._logging
8
+ import torch._logging._internal
9
+ from torch._dynamo.utils import LazyString
10
+ import logging
11
+ import io
12
+
13
+ @contextlib.contextmanager
14
+ def preserve_log_state():
15
+ prev_state = torch._logging._internal._get_log_state()
16
+ torch._logging._internal._set_log_state(torch._logging._internal.LogState())
17
+ try:
18
+ yield
19
+ finally:
20
+ torch._logging._internal._set_log_state(prev_state)
21
+ torch._logging._internal._init_logs()
22
+
23
+ def log_settings(settings):
24
+ exit_stack = contextlib.ExitStack()
25
+ settings_patch = unittest.mock.patch.dict(os.environ, {"TORCH_LOGS": settings})
26
+ exit_stack.enter_context(preserve_log_state())
27
+ exit_stack.enter_context(settings_patch)
28
+ torch._logging._internal._init_logs()
29
+ return exit_stack
30
+
31
+ def log_api(**kwargs):
32
+ exit_stack = contextlib.ExitStack()
33
+ exit_stack.enter_context(preserve_log_state())
34
+ torch._logging.set_logs(**kwargs)
35
+ return exit_stack
36
+
37
+
38
+ def kwargs_to_settings(**kwargs):
39
+ INT_TO_VERBOSITY = {10: "+", 20: "", 40: "-"}
40
+
41
+ settings = []
42
+
43
+ def append_setting(name, level):
44
+ if isinstance(name, str) and isinstance(level, int) and level in INT_TO_VERBOSITY:
45
+ settings.append(INT_TO_VERBOSITY[level] + name)
46
+ return
47
+ else:
48
+ raise ValueError("Invalid value for setting")
49
+
50
+ for name, val in kwargs.items():
51
+ if isinstance(val, bool):
52
+ settings.append(name)
53
+ elif isinstance(val, int):
54
+ append_setting(name, val)
55
+ elif isinstance(val, dict) and name == "modules":
56
+ for module_qname, level in val.items():
57
+ append_setting(module_qname, level)
58
+ else:
59
+ raise ValueError("Invalid value for setting")
60
+
61
+ return ",".join(settings)
62
+
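# Illustrative example (added for exposition, not part of the upstream file):
# integer levels are mapped through INT_TO_VERBOSITY ("+" for DEBUG, "" for INFO,
# "-" for ERROR) and booleans keep just the name, so the hypothetical call below
# returns the TORCH_LOGS-style string "+dynamo,guards".
def _example_kwargs_to_settings():
    return kwargs_to_settings(dynamo=logging.DEBUG, guards=True)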
63
+
64
+ # Note on testing strategy:
65
+ # This test machinery does three things:
66
+ # 1. Runs two versions of a test:
67
+ # 1a. patches the env var log settings to some specific value
68
+ # 1b. calls torch._logging.set_logs(..)
69
+ # 2. patches the emit method of each setup handler to gather records
70
+ # that are emitted to each console stream
71
+ # 3. passes a ref to the gathered records to each test case for checking
72
+ #
73
+ # The goal of this testing in general is to ensure that, given some settings env var,
+ # the logs are set up correctly and capture the correct records.
75
+ def make_logging_test(**kwargs):
76
+ def wrapper(fn):
77
+ def test_fn(self):
78
+
79
+ torch._dynamo.reset()
80
+ records = []
81
+ # run with env var
82
+ if len(kwargs) == 0:
83
+ with self._handler_watcher(records):
84
+ fn(self, records)
85
+ else:
86
+ with log_settings(kwargs_to_settings(**kwargs)), self._handler_watcher(records):
87
+ fn(self, records)
88
+
89
+ # run with API
90
+ torch._dynamo.reset()
91
+ records.clear()
92
+ with log_api(**kwargs), self._handler_watcher(records):
93
+ fn(self, records)
94
+
95
+
96
+ return test_fn
97
+
98
+ return wrapper
99
+
100
+ def make_settings_test(settings):
101
+ def wrapper(fn):
102
+ def test_fn(self):
103
+ torch._dynamo.reset()
104
+ records = []
105
+ # run with env var
106
+ with log_settings(settings), self._handler_watcher(records):
107
+ fn(self, records)
108
+
109
+ return test_fn
110
+
111
+ return wrapper
112
+
113
+ class LoggingTestCase(torch._dynamo.test_case.TestCase):
114
+ @classmethod
115
+ def setUpClass(cls):
116
+ super().setUpClass()
117
+ cls._exit_stack.enter_context(
118
+ unittest.mock.patch.dict(os.environ, {"___LOG_TESTING": ""})
119
+ )
120
+ cls._exit_stack.enter_context(
121
+ unittest.mock.patch("torch._dynamo.config.suppress_errors", True)
122
+ )
123
+ cls._exit_stack.enter_context(
124
+ unittest.mock.patch("torch._dynamo.config.verbose", False)
125
+ )
126
+
127
+ @classmethod
128
+ def tearDownClass(cls):
129
+ cls._exit_stack.close()
130
+ torch._logging._internal.log_state.clear()
131
+ torch._logging._init_logs()
132
+
133
+ def getRecord(self, records, m):
134
+ record = None
135
+ for r in records:
136
+ # NB: not r.msg because it looks like 3.11 changed how they
137
+ # structure log records
138
+ if m in r.getMessage():
139
+ self.assertIsNone(
140
+ record,
141
+ msg=LazyString(
142
+ lambda: f"multiple matching records: {record} and {r} among {records}"
143
+ ),
144
+ )
145
+ record = r
146
+ if record is None:
147
+ self.fail(f"did not find record with {m} among {records}")
148
+ return record
149
+
150
+ # This patches the emit method of each handler to gather records
151
+ # as they are emitted
152
+ def _handler_watcher(self, record_list):
153
+ exit_stack = contextlib.ExitStack()
154
+
155
+ def emit_post_hook(record):
156
+ nonlocal record_list
157
+ record_list.append(record)
158
+
159
+ # registered logs are the only ones with handlers, so patch those
160
+ for log_qname in torch._logging._internal.log_registry.get_log_qnames():
161
+ logger = logging.getLogger(log_qname)
162
+ num_handlers = len(logger.handlers)
163
+ self.assertLessEqual(
164
+ num_handlers,
165
+ 2,
166
+ "All pt2 loggers should only have at most two handlers (debug artifacts and messages above debug level).",
167
+ )
168
+
169
+ self.assertGreater(num_handlers, 0, "All pt2 loggers should have more than zero handlers")
170
+
171
+ for handler in logger.handlers:
172
+ old_emit = handler.emit
173
+
174
+ def new_emit(record):
175
+ old_emit(record)
176
+ emit_post_hook(record)
177
+
178
+ exit_stack.enter_context(
179
+ unittest.mock.patch.object(handler, "emit", new_emit)
180
+ )
181
+
182
+ return exit_stack
183
+
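# Illustrative usage sketch (added for exposition, not part of the upstream file):
# a LoggingTestCase method decorated with `make_logging_test` runs once with the
# TORCH_LOGS env var patched and once via torch._logging.set_logs, receiving the
# gathered records both times. The compiled function and assertion are hypothetical.
class _ExampleLoggingTest(LoggingTestCase):
    @make_logging_test(dynamo=logging.DEBUG)
    def test_example_dynamo_debug(self, records):
        torch.compile(lambda x: x + 1, backend="eager")(torch.ones(3))
        self.assertGreater(len(records), 0)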
184
+
185
+ def logs_to_string(module, log_option):
186
+ """Example:
187
+ logs_to_string("torch._inductor.compile_fx", "post_grad_graphs")
188
+ returns the output of TORCH_LOGS="post_grad_graphs" from the
189
+ torch._inductor.compile_fx module.
190
+ """
191
+ log_stream = io.StringIO()
192
+ handler = logging.StreamHandler(stream=log_stream)
193
+
194
+ @contextlib.contextmanager
195
+ def tmp_redirect_logs():
196
+ try:
197
+ logger = torch._logging.getArtifactLogger(module, log_option)
198
+ logger.addHandler(handler)
199
+ yield
200
+ finally:
201
+ logger.removeHandler(handler)
202
+
203
+ def ctx_manager():
204
+ exit_stack = log_settings(log_option)
205
+ exit_stack.enter_context(tmp_redirect_logs())
206
+ return exit_stack
207
+
208
+ return log_stream, ctx_manager
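# Illustrative usage sketch (added for exposition, not part of the upstream file),
# reusing the module/artifact pair from the docstring above; running it assumes an
# environment where Inductor compilation succeeds.
def _example_logs_to_string():
    log_stream, ctx_manager = logs_to_string("torch._inductor.compile_fx", "post_grad_graphs")
    with ctx_manager():
        torch.compile(lambda x: x.sin())(torch.randn(4))
    return log_stream.getvalue()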
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch.testing._internal.opinfo.core
4
+ import torch.testing._internal.opinfo.definitions
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (306 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc ADDED
Binary file (55.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc ADDED
Binary file (3.63 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.07 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ # mypy: ignore-errors
2
+
3
+ from typing import List
4
+
5
+ from torch.testing._internal.opinfo.core import OpInfo
6
+ from torch.testing._internal.opinfo.definitions import (
7
+ _masked,
8
+ fft,
9
+ linalg,
10
+ signal,
11
+ special,
12
+ )
13
+
14
+ # Operator database
15
+ op_db: List[OpInfo] = [
16
+ *fft.op_db,
17
+ *linalg.op_db,
18
+ *signal.op_db,
19
+ *special.op_db,
20
+ *_masked.op_db,
21
+ ]
22
+
23
+ python_ref_db: List[OpInfo] = [
24
+ *fft.python_ref_db,
25
+ *linalg.python_ref_db,
26
+ *special.python_ref_db,
27
+ ]
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (618 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc ADDED
Binary file (16.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc ADDED
Binary file (9.81 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc ADDED
Binary file (43.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc ADDED
Binary file (8.88 kB). View file