applied-ai-018 committed on
Commit
81a734d
·
verified ·
1 Parent(s): 6a33523

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_logging/__init__.py +15 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_logging/_internal.py +826 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_logging/_registrations.py +110 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/profiler/__init__.py +48 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py +1201 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py +662 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/profiler/_utils.py +373 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/profiler/itt.py +78 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/profiler/profiler.py +754 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/profiler/python_tracer.py +20 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/special/__init__.py +1283 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/testing/__init__.py +3 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/testing/_comparison.py +1572 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/testing/_creation.py +253 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__init__.py +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py +367 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py +630 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py +163 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py +247 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py +1513 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py +109 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py +1255 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py +131 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py +1219 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py +321 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py +384 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py +225 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py +219 -0
env-llmeval/lib/python3.10/site-packages/torch/_logging/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # Top level logging module for torch logging
2
+ # Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
3
+ # Simple setup for onboarding (see above doc for more detail):
4
+ # 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
5
+ # 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
6
+ # a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
7
+ import torch._logging._registrations
8
+ from ._internal import (
9
+ _init_logs,
10
+ DEFAULT_LOGGING,
11
+ getArtifactLogger,
12
+ LazyString,
13
+ set_logs,
14
+ warning_once,
15
+ )
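
A minimal sketch of the onboarding flow described in the comment above, assuming a recent PyTorch build that ships torch._logging; the qualified module name passed to getArtifactLogger is illustrative only:

import logging
import torch

# Turn on DEBUG logs for the "dynamo" component and the "graph_code" artifact
# (both registered in _registrations.py, shown later in this diff).
torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)

# At a logging site inside a torch submodule, an artifact logger replaces the
# usual logging.getLogger(__name__); the module path below is hypothetical.
graph_code_log = torch._logging.getArtifactLogger("torch._dynamo.output_graph", "graph_code")
graph_code_log.debug("emitting generated code here")
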
env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc ADDED
Binary file (24.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc ADDED
Binary file (3.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_logging/_internal.py ADDED
@@ -0,0 +1,826 @@
1
+ import functools
2
+ import itertools
3
+ import logging
4
+ import os
5
+ import re
6
+ from dataclasses import dataclass, field
7
+ from importlib import __import__
8
+ from typing import Dict, List, Optional, Set, Union
9
+ from weakref import WeakSet
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+ DEFAULT_LOG_LEVEL = logging.WARNING
14
+ LOG_ENV_VAR = "TORCH_LOGS"
15
+ LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT"
16
+
17
+
18
+ @dataclass
19
+ class LogRegistry:
20
+ # shorthand name to log qualified name
21
+ # Note: this only contains loggers registered
22
+ # from register_log
23
+ # e.g. "dynamo" -> "torch._dynamo"
24
+ log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict)
25
+
26
+ # artifact logger qualified names,
27
+ # this is populated lazily, as calls to getArtifactLogger
28
+ # currently formatted as <module>.__<artifact_name>
29
+ # e.g. "torch._dynamo.convert_frame.__guards"
30
+ artifact_log_qnames: Set[str] = field(default_factory=set)
31
+
32
+ # child logs of registered logs if specified via open
33
+ # registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
34
+ # these need to be tracked so their levels can be reset properly
35
+ # e.g. "torch._dynamo.output_graph"
36
+ child_log_qnames: Set[str] = field(default_factory=set)
37
+
38
+ # artifact names, populated by register_artifact
39
+ # e.g. "guards"
40
+ artifact_names: Set[str] = field(default_factory=set)
41
+
42
+ # Artifacts that should be visible by default in the error message
43
+ visible_artifacts: Set[str] = field(default_factory=set)
44
+
45
+ # A short description of each artifact
46
+ artifact_descriptions: Dict[str, str] = field(default_factory=dict)
47
+
48
+ # artifacts which are not displayed unless explicitly named in the
49
+ # settings. Ex. output_code is NOT displayed even if the inductor
50
+ # log level is set to DEBUG. It must be explicitly named in the settings
51
+ off_by_default_artifact_names: Set[str] = field(default_factory=set)
52
+
53
+ # logging format string for artifacts
54
+ artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict)
55
+
56
+ def is_artifact(self, name):
57
+ return name in self.artifact_names
58
+
59
+ def is_log(self, alias):
60
+ return alias in self.log_alias_to_log_qnames
61
+
62
+ # register a log with an alias
63
+ def register_log(self, alias, log_qnames: Union[str, List[str]]):
64
+ if isinstance(log_qnames, str):
65
+ log_qnames = [log_qnames]
66
+ self.log_alias_to_log_qnames[alias] = log_qnames
67
+
68
+ # register an artifact name
69
+ def register_artifact_name(
70
+ self, name, description, visible, off_by_default, log_format
71
+ ):
72
+ self.artifact_names.add(name)
73
+ if visible:
74
+ self.visible_artifacts.add(name)
75
+ self.artifact_descriptions[name] = description
76
+
77
+ # if off by default, don't enable it
78
+ # when log_name's log_level is set to DEBUG
79
+ if off_by_default:
80
+ self.off_by_default_artifact_names.add(name)
81
+
82
+ if log_format is not None:
83
+ self.artifact_log_formatters[name] = logging.Formatter(log_format)
84
+
85
+ # register the qualified name of an artifact log
86
+ # this is needed to know which logs need to be reset
87
+ # whenever the log_state is changed
88
+ def register_artifact_log(self, artifact_log_qname):
89
+ self.artifact_log_qnames.add(artifact_log_qname)
90
+
91
+ def register_child_log(self, log_qname):
92
+ self.child_log_qnames.add(log_qname)
93
+
94
+ # flattens all the qnames together (TODO: consider memoizing?)
95
+ def get_log_qnames(self) -> Set[str]:
96
+ return {
97
+ qname
98
+ for qnames in self.log_alias_to_log_qnames.values()
99
+ for qname in qnames
100
+ }
101
+
102
+ def get_artifact_log_qnames(self):
103
+ return set(self.artifact_log_qnames)
104
+
105
+ def get_child_log_qnames(self):
106
+ return set(self.child_log_qnames)
107
+
108
+ def is_off_by_default(self, artifact_qname):
109
+ return artifact_qname in self.off_by_default_artifact_names
110
+
111
+
112
+ @dataclass
113
+ class LogState:
114
+ # qualified log names -> currently set log level
115
+ log_qname_to_level: Dict[str, str] = field(default_factory=dict)
116
+
117
+ # the set of currently enabled artifacts
118
+ artifact_names: Set[str] = field(default_factory=set)
119
+
120
+ def enable_artifact(self, artifact_name):
121
+ self.artifact_names.add(artifact_name)
122
+
123
+ def is_artifact_enabled(self, name):
124
+ return name in self.artifact_names
125
+
126
+ def enable_log(self, log_qnames, log_level):
127
+ if isinstance(log_qnames, str):
128
+ log_qnames = [log_qnames]
129
+ for log_qname in log_qnames:
130
+ self.log_qname_to_level[log_qname] = log_level
131
+
132
+ def get_log_level_pairs(self):
133
+ """Returns all qualified module names for which the user requested
134
+ explicit logging settings.
135
+
136
+ .. warning:
137
+
138
+ This function used to return all loggers, regardless of whether
139
+ or not the user specified them; it now only returns logs
140
+ which were explicitly mentioned by the user (and torch, which
141
+ always is implicitly requested when we initialize our logging
142
+ subsystem.)
143
+ """
144
+ return self.log_qname_to_level.items()
145
+
146
+ def clear(self):
147
+ self.log_qname_to_level.clear()
148
+ self.artifact_names.clear()
149
+
150
+
151
+ log_registry = LogRegistry()
152
+ log_state = LogState()
153
+
154
+ # sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
155
+ DEFAULT_LOGGING = {
156
+ "dynamo": logging.INFO,
157
+ "graph_code": True,
158
+ "aot": logging.INFO,
159
+ "graph_breaks": True,
160
+ "recompiles": True,
161
+ "dynamic": logging.INFO,
162
+ "guards": True,
163
+ "trace_source": True,
164
+ }
165
+
166
+
167
+ def set_logs(
168
+ *,
169
+ all: Optional[int] = None,
170
+ dynamo: Optional[int] = None,
171
+ aot: Optional[int] = None,
172
+ dynamic: Optional[int] = None,
173
+ inductor: Optional[int] = None,
174
+ distributed: Optional[int] = None,
175
+ onnx: Optional[int] = None,
176
+ bytecode: bool = False,
177
+ aot_graphs: bool = False,
178
+ aot_joint_graph: bool = False,
179
+ ddp_graphs: bool = False,
180
+ graph: bool = False,
181
+ graph_code: bool = False,
182
+ graph_breaks: bool = False,
183
+ graph_sizes: bool = False,
184
+ guards: bool = False,
185
+ recompiles: bool = False,
186
+ recompiles_verbose: bool = False,
187
+ trace_source: bool = False,
188
+ trace_call: bool = False,
189
+ output_code: bool = False,
190
+ schedule: bool = False,
191
+ perf_hints: bool = False,
192
+ post_grad_graphs: bool = False,
193
+ onnx_diagnostics: bool = False,
194
+ fusion: bool = False,
195
+ overlap: bool = False,
196
+ modules: Optional[Dict[str, Union[int, bool]]] = None,
197
+ ):
198
+ """
199
+ Sets the log level for individual components and toggles individual log
200
+ artifact types.
201
+
202
+ .. warning:: This feature is a prototype and may have compatibility
203
+ breaking changes in the future.
204
+
205
+ .. note:: The ``TORCH_LOGS`` environment variable has complete precedence
206
+ over this function, so if it was set, this function does nothing.
207
+
208
+ A component is a set of related features in PyTorch. All of the log
209
+ messages emitted from a given component have their own log levels. If the
210
+ log level of a particular message has priority greater than or equal to its
211
+ component's log level setting, it is emitted. Otherwise, it is suppressed.
212
+ This allows you to, for instance, silence large groups of log messages that
213
+ are not relevant to you and increase verbosity of logs for components that
214
+ are relevant. The expected log level values, ordered from highest to lowest
215
+ priority, are:
216
+
217
+ * ``logging.CRITICAL``
218
+ * ``logging.ERROR``
219
+ * ``logging.WARNING``
220
+ * ``logging.INFO``
221
+ * ``logging.DEBUG``
222
+ * ``logging.NOTSET``
223
+
224
+ See documentation for the Python ``logging`` module for more information on
225
+ log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
226
+
227
+ An artifact is a particular type of log message. Each artifact is assigned
228
+ to a parent component. A component can emit many different kinds of
229
+ artifacts. In general, an artifact is emitted if either its corresponding
230
+ setting in the argument list below is turned on or if its parent component
231
+ is set to a log level less than or equal to the log level of the artifact.
232
+
233
+ Keyword args:
234
+ all (:class:`Optional[int]`):
235
+ The default log level for all components. Default: ``logging.WARN``
236
+
237
+ dynamo (:class:`Optional[int]`):
238
+ The log level for the TorchDynamo component. Default: ``logging.WARN``
239
+
240
+ aot (:class:`Optional[int]`):
241
+ The log level for the AOTAutograd component. Default: ``logging.WARN``
242
+
243
+ inductor (:class:`Optional[int]`):
244
+ The log level for the TorchInductor component. Default: ``logging.WARN``
245
+
246
+ dynamic (:class:`Optional[int]`):
247
+ The log level for dynamic shapes. Default: ``logging.WARN``
248
+
249
+ distributed (:class:`Optional[int]`):
250
+ The log level for communication operations and other debug info from PyTorch distributed components.
251
+ Default: ``logging.WARN``
252
+
253
+ onnx (:class:`Optional[int]`):
254
+ The log level for the ONNX exporter component. Default: ``logging.WARN``
255
+
256
+ bytecode (:class:`bool`):
257
+ Whether to emit the original and generated bytecode from TorchDynamo.
258
+ Default: ``False``
259
+
260
+ aot_graphs (:class:`bool`):
261
+ Whether to emit the graphs generated by AOTAutograd. Default: ``False``
262
+
263
+ aot_joint_graph (:class:`bool`):
264
+ Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
265
+
266
+ ddp_graphs (:class:`bool`):
267
+ Whether to emit graphs generated by DDPOptimizer. Default: ``False``
268
+
269
+ graph (:class:`bool`):
270
+ Whether to emit the graph captured by TorchDynamo in tabular format.
271
+ Default: ``False``
272
+
273
+ graph_code (:class:`bool`):
274
+ Whether to emit the python source of the graph captured by TorchDynamo.
275
+ Default: ``False``
276
+
277
+ graph_breaks (:class:`bool`):
278
+ Whether to emit the graph breaks encountered by TorchDynamo.
279
+ Default: ``False``
280
+
281
+ graph_sizes (:class:`bool`):
282
+ Whether to emit tensor sizes of the graph captured by TorchDynamo.
283
+ Default: ``False``
284
+
285
+ guards (:class:`bool`):
286
+ Whether to emit the guards generated by TorchDynamo for each compiled
287
+ function. Default: ``False``
288
+
289
+ recompiles (:class:`bool`):
290
+ Whether to emit a guard failure reason and message every time
291
+ TorchDynamo recompiles a function. Default: ``False``
292
+
293
+ recompiles_verbose (:class:`bool`):
294
+ Whether to emit all guard failure reasons when TorchDynamo recompiles
295
+ a function, even those that are not actually run. Default: ``False``
296
+
297
+ trace_source (:class:`bool`):
298
+ Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
299
+
300
+ trace_call (:class:`bool`):
301
+ Whether to emit detailed line location when TorchDynamo creates an FX node
302
+ corresponding to a function call. Python 3.11+ only. Default: ``False``
303
+
304
+ output_code (:class:`bool`):
305
+ Whether to emit the TorchInductor output code. Default: ``False``
306
+
307
+ schedule (:class:`bool`):
308
+ Whether to emit the TorchInductor schedule. Default: ``False``
309
+
310
+ perf_hints (:class:`bool`):
311
+ Whether to emit the TorchInductor perf hints. Default: ``False``
312
+
313
+ post_grad_graphs (:class:`bool`):
314
+ Whether to emit the graphs generated by post-grad passes. Default: ``False``
315
+
316
+ onnx_diagnostics (:class:`bool`):
317
+ Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
318
+
319
+ fusion (:class:`bool`):
320
+ Whether to emit detailed Inductor fusion decisions. Default: ``False``
321
+
322
+ overlap (:class:`bool`):
323
+ Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False``
324
+
325
+ modules (dict):
326
+ This argument provides an alternate way to specify the above log
327
+ component and artifact settings, in the format of a keyword args
328
+ dictionary given as a single argument. There are two cases
329
+ where this is useful (1) if a new log component or artifact has
330
+ been registered but a keyword argument for it has not been added
331
+ to this function and (2) if the log level for an unregistered module
332
+ needs to be set. This can be done by providing the fully-qualified module
333
+ name as the key, with the log level as the value. Default: ``None``
334
+
335
+
336
+ Example::
337
+
338
+ >>> # xdoctest: +SKIP
339
+ >>> import logging
340
+
341
+ # The following changes the "dynamo" component to emit DEBUG-level
342
+ # logs, and to emit "graph_code" artifacts.
343
+
344
+ >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
345
+
346
+ # The following enables the logs for a different module
347
+
348
+ >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
349
+ """
350
+ # ignore if env var is set
351
+ if LOG_ENV_VAR in os.environ:
352
+ log.warning(
353
+ "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
354
+ )
355
+ return
356
+
357
+ log_state.clear()
358
+
359
+ modules = modules or {}
360
+
361
+ def _set_logs(**kwargs):
362
+ for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr]
363
+ if val is None:
364
+ continue
365
+
366
+ if log_registry.is_artifact(alias):
367
+ if not isinstance(val, bool):
368
+ raise ValueError(
369
+ f"Expected bool to enable artifact {alias}, received {val}"
370
+ )
371
+
372
+ if val:
373
+ log_state.enable_artifact(alias)
374
+ elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
375
+ if val not in logging._levelToName:
376
+ raise ValueError(
377
+ f"Unrecognized log level for log {alias}: {val}, valid level values "
378
+ f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
379
+ )
380
+
381
+ log_state.enable_log(
382
+ log_registry.log_alias_to_log_qnames.get(alias, alias), val
383
+ )
384
+ else:
385
+ raise ValueError(
386
+ f"Unrecognized log or artifact name passed to set_logs: {alias}"
387
+ )
388
+
389
+ _init_logs()
390
+
391
+ _set_logs(
392
+ torch=all,
393
+ dynamo=dynamo,
394
+ aot=aot,
395
+ inductor=inductor,
396
+ dynamic=dynamic,
397
+ bytecode=bytecode,
398
+ aot_graphs=aot_graphs,
399
+ aot_joint_graph=aot_joint_graph,
400
+ ddp_graphs=ddp_graphs,
401
+ distributed=distributed,
402
+ graph=graph,
403
+ graph_code=graph_code,
404
+ graph_breaks=graph_breaks,
405
+ graph_sizes=graph_sizes,
406
+ guards=guards,
407
+ recompiles=recompiles,
408
+ recompiles_verbose=recompiles_verbose,
409
+ trace_source=trace_source,
410
+ trace_call=trace_call,
411
+ output_code=output_code,
412
+ schedule=schedule,
413
+ perf_hints=perf_hints,
414
+ post_grad_graphs=post_grad_graphs,
415
+ onnx=onnx,
416
+ onnx_diagnostics=onnx_diagnostics,
417
+ fusion=fusion,
418
+ overlap=overlap,
419
+ )
420
+
421
+
422
+ def get_loggers():
423
+ """
424
+ Returns: a list of all registered loggers
425
+ """
426
+ return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
427
+
428
+
429
+ def register_log(setting_name, log_name):
430
+ """
431
+ Enables a log to be controlled by the env var and user API with the setting_name
432
+ Args:
433
+ setting_name: the shorthand name used in the env var and user API
434
+ log_name: the log name that the setting_name is associated with
435
+ """
436
+ log_registry.register_log(setting_name, log_name)
437
+
438
+
439
+ def register_artifact(
440
+ setting_name, description, visible=False, off_by_default=False, log_format=None
441
+ ):
442
+ """
443
+ Enables an artifact to be controlled by the env var and user API with name
444
+ Args:
445
+ setting_name: the shorthand name used in the env var and user API
446
+ description: A description of what this outputs
447
+ visible: Whether it gets suggested to users by default
448
+ off_by_default: whether this artifact should be logged when the ancestor loggers
449
+ are enabled at level DEBUG
450
+ """
451
+ log_registry.register_artifact_name(
452
+ setting_name, description, visible, off_by_default, log_format
453
+ )
454
+
455
+
456
+ def getArtifactLogger(module_qname, artifact_name):
457
+ if artifact_name not in log_registry.artifact_names:
458
+ raise ValueError(
459
+ f"Artifact name: {repr(artifact_name)} not registered,"
460
+ f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations."
461
+ )
462
+ qname = module_qname + f".__{artifact_name}"
463
+ log = logging.getLogger(qname)
464
+ log.artifact_name = artifact_name # type: ignore[attr-defined]
465
+ log_registry.register_artifact_log(qname)
466
+ configure_artifact_log(log)
467
+ return log
468
+
469
+
470
+ INCR_VERBOSITY_CHAR = "+"
471
+ DECR_VERBOSITY_CHAR = "-"
472
+ VERBOSITY_REGEX = (
473
+ "("
474
+ + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
475
+ + "?)"
476
+ )
477
+
478
+
479
+ def configure_artifact_log(log):
480
+ # If the artifact is off by default, then it should only be logged when explicitly
481
+ # enabled; set propagate to False so that this artifact is not propagated
482
+ # to its ancestor logger
483
+ if log_registry.is_off_by_default(log.artifact_name):
484
+ log.propagate = False
485
+
486
+ # enable artifact logging when explicitly enabled
487
+ if log_state.is_artifact_enabled(log.artifact_name):
488
+ log.setLevel(logging.DEBUG)
489
+ log.propagate = True
490
+
491
+
492
+ # match a comma separated list of loggable names (whitespace allowed after commas)
493
+ def _gen_settings_regex():
494
+ return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
495
+
496
+
497
+ def _validate_settings(settings):
498
+ return re.fullmatch(_gen_settings_regex(), settings) is not None
499
+
500
+
501
+ def help_message(verbose=False):
502
+ def pad_to(s, length=30):
503
+ assert len(s) <= length
504
+ return s + " " * (length - len(s))
505
+
506
+ if verbose:
507
+ printed_artifacts = log_registry.artifact_names
508
+ else:
509
+ printed_artifacts = log_registry.visible_artifacts
510
+
511
+ if verbose:
512
+ heading = "All registered names"
513
+ else:
514
+ heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
515
+ lines = (
516
+ ["all"]
517
+ + sorted(log_registry.log_alias_to_log_qnames.keys())
518
+ + sorted(
519
+ [
520
+ f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
521
+ for name in printed_artifacts
522
+ ]
523
+ )
524
+ )
525
+ setting_info = " " + "\n ".join(lines)
526
+ examples = """
527
+ Examples:
528
+ TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
529
+ logging.DEBUG and AOT to logging.INFO
530
+
531
+ TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
532
+ logging.ERROR and TorchInductor to logging.DEBUG
533
+
534
+ TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
535
+
536
+ TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo
537
+ to logging.DEBUG and enable the schedule artifact
538
+
539
+ TORCH_LOGS="+some.random.module,schedule" will set the log level of
540
+ some.random.module to logging.DEBUG and enable the schedule artifact
541
+
542
+ TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format
543
+ string will set the output format
544
+ Valid keys are "levelname", "message", "pathname", "levelno", "lineno",
545
+ "filename" and "name".
546
+ """ # flake8: noqa: B950
547
+ msg = f"""
548
+ TORCH_LOGS Info
549
+ {examples}
550
+
551
+ {heading}
552
+ {setting_info}
553
+ """
554
+ return msg
555
+
556
+
557
+ def _invalid_settings_err_msg(settings, verbose=False):
558
+ valid_settings = ", ".join(
559
+ ["all"]
560
+ + list(log_registry.log_alias_to_log_qnames.keys())
561
+ + list(log_registry.artifact_names)
562
+ )
563
+ msg = f"""
564
+ Invalid log settings: {settings}, must be a comma separated list of fully
565
+ qualified module names, registered log names or registered artifact names.
566
+ For more info on various settings, try TORCH_LOGS="help"
567
+ Valid settings:
568
+ {valid_settings}
569
+ """
570
+ return msg
571
+
572
+
573
+ @functools.lru_cache
574
+ def _parse_log_settings(settings):
575
+ if settings == "":
576
+ return dict()
577
+
578
+ if settings == "help":
579
+ raise ValueError(help_message(verbose=False))
580
+ elif settings == "+help":
581
+ raise ValueError(help_message(verbose=True))
582
+ if not _validate_settings(settings):
583
+ raise ValueError(_invalid_settings_err_msg(settings))
584
+
585
+ settings = re.sub(r"\s+", "", settings)
586
+ log_names = settings.split(",")
587
+
588
+ def get_name_level_pair(name):
589
+ clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
590
+ clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
591
+
592
+ if name[0] == INCR_VERBOSITY_CHAR:
593
+ level = logging.DEBUG
594
+ elif name[0] == DECR_VERBOSITY_CHAR:
595
+ level = logging.ERROR
596
+ else:
597
+ level = logging.INFO
598
+
599
+ return clean_name, level
600
+
601
+ log_state = LogState()
602
+
603
+ for name in log_names:
604
+ name, level = get_name_level_pair(name)
605
+
606
+ if name == "all":
607
+ name = "torch"
608
+
609
+ if log_registry.is_log(name):
610
+ assert level is not None
611
+ log_qnames = log_registry.log_alias_to_log_qnames[name]
612
+ log_state.enable_log(log_qnames, level)
613
+ elif log_registry.is_artifact(name):
614
+ log_state.enable_artifact(name)
615
+ elif _is_valid_module(name):
616
+ if not _has_registered_parent(name):
617
+ log_registry.register_log(name, name)
618
+ else:
619
+ log_registry.register_child_log(name)
620
+ log_state.enable_log(name, level)
621
+ else:
622
+ raise ValueError(_invalid_settings_err_msg(settings))
623
+
624
+ return log_state
625
+
626
+
627
+ def _is_valid_module(qname):
628
+ try:
629
+ __import__(qname)
630
+ return True
631
+ except ImportError:
632
+ return False
633
+
634
+
635
+ def _update_log_state_from_env():
636
+ global log_state
637
+ log_setting = os.environ.get(LOG_ENV_VAR, None)
638
+ if log_setting is not None:
639
+ log_state = _parse_log_settings(log_setting)
640
+
641
+
642
+ def _has_registered_parent(log_qname):
643
+ cur_log = logging.getLogger(log_qname)
644
+
645
+ registered_log_qnames = log_registry.get_log_qnames()
646
+
647
+ while cur_log.parent:
648
+ if cur_log.name in registered_log_qnames:
649
+ return True
650
+ cur_log = cur_log.parent
651
+
652
+ return False
653
+
654
+
655
+ # apply custom formats to artifacts when necessary
656
+ class TorchLogsFormatter(logging.Formatter):
657
+ def format(self, record):
658
+ artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None)
659
+ if artifact_name is not None:
660
+ artifact_formatter = log_registry.artifact_log_formatters.get(
661
+ artifact_name, None
662
+ )
663
+ if artifact_formatter is not None:
664
+ return artifact_formatter.format(record)
665
+
666
+ record.message = record.getMessage()
667
+ record.asctime = self.formatTime(record, self.datefmt)
668
+
669
+ # exception handling - copied from logging.Formatter.format
670
+ s = record.message
671
+ if record.exc_info:
672
+ # Cache the traceback text to avoid converting it multiple times
673
+ # (it's constant anyway)
674
+ if not record.exc_text:
675
+ record.exc_text = self.formatException(record.exc_info)
676
+ if record.exc_text:
677
+ if s[-1:] != "\n":
678
+ s = s + "\n"
679
+ s = s + record.exc_text
680
+ if record.stack_info:
681
+ if s[-1:] != "\n":
682
+ s = s + "\n"
683
+ s = s + self.formatStack(record.stack_info)
684
+
685
+ lines = s.split("\n")
686
+ record.rankprefix = ""
687
+ if dist.is_available() and dist.is_initialized():
688
+ record.rankprefix = f"[rank{dist.get_rank()}]:"
689
+
690
+ record.traceid = ""
691
+ if (trace_id := torch._guards.CompileContext.current_trace_id()) is not None:
692
+ record.traceid = f" [{trace_id}]"
693
+
694
+ prefix = f"{record.rankprefix}[{record.asctime}]{record.traceid} {record.name}: [{record.levelname}]"
695
+ return "\n".join(f"{prefix} {l}" for l in lines)
696
+
697
+
698
+ def _default_formatter():
699
+ fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None)
700
+ if fmt is None:
701
+ return TorchLogsFormatter()
702
+ else:
703
+ return logging.Formatter(fmt)
704
+
705
+
706
+ DEFAULT_FORMATTER = _default_formatter()
707
+
708
+
709
+ def _setup_handlers(create_handler_fn, log):
710
+ debug_handler = _track_handler(create_handler_fn())
711
+ debug_handler.setFormatter(DEFAULT_FORMATTER)
712
+ debug_handler.setLevel(logging.DEBUG)
713
+ log.addHandler(debug_handler)
714
+
715
+
716
+ handlers = WeakSet() # type: ignore[var-annotated]
717
+
718
+
719
+ # mark handlers that we've created
720
+ # so we don't modify user handlers
721
+ def _track_handler(handler):
722
+ handlers.add(handler)
723
+ return handler
724
+
725
+
726
+ def _is_torch_handler(handler):
727
+ return handler in handlers
728
+
729
+
730
+ # clears all torch handlers on specified loggers
731
+ def _clear_handlers(log):
732
+ to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)]
733
+ for handler in to_remove:
734
+ log.removeHandler(handler)
735
+
736
+
737
+ def _reset_logs():
738
+ # reset all registered logs
739
+ for log_qname in log_registry.get_log_qnames():
740
+ log = logging.getLogger(log_qname)
741
+ log.setLevel(logging.WARNING)
742
+ log.propagate = False
743
+ _clear_handlers(log)
744
+
745
+ # reset all artifact and child logs
746
+ for artifact_log_qname in itertools.chain(
747
+ log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames()
748
+ ):
749
+ log = logging.getLogger(artifact_log_qname)
750
+ log.setLevel(logging.NOTSET)
751
+ log.propagate = True
752
+
753
+
754
+ def _get_log_state():
755
+ return log_state
756
+
757
+
758
+ def _set_log_state(state):
759
+ global log_state
760
+ log_state = state
761
+
762
+
763
+ def _init_logs(log_file_name=None):
764
+ _reset_logs()
765
+ _update_log_state_from_env()
766
+
767
+ # First, reset all known (registered) loggers to NOTSET, so that they
768
+ # respect their parent log level
769
+ for log_qname in log_registry.get_log_qnames():
770
+ # But not the top level torch level: this defaults to WARNING so
771
+ # that our log messages don't leak to the lower levels
772
+ if log_qname == "torch":
773
+ continue
774
+ log = logging.getLogger(log_qname)
775
+ log.setLevel(logging.NOTSET)
776
+
777
+ # Now, for all loggers which the user requested to have non-standard
778
+ # logging behavior, modify their log levels
779
+ for log_qname, level in log_state.get_log_level_pairs():
780
+ log = logging.getLogger(log_qname)
781
+ log.setLevel(level)
782
+
783
+ # Finally, setup handlers for all registered loggers
784
+ for log_qname in log_registry.get_log_qnames():
785
+ log = logging.getLogger(log_qname)
786
+ _setup_handlers(
787
+ logging.StreamHandler,
788
+ log,
789
+ )
790
+
791
+ if log_file_name is not None:
792
+ _setup_handlers(
793
+ lambda: logging.FileHandler(log_file_name),
794
+ log,
795
+ )
796
+
797
+ # configure artifact loggers, note: this must happen last
798
+ # since the levels of ancestor loggers are taken into account
799
+ for artifact_log_qname in log_registry.get_artifact_log_qnames():
800
+ log = logging.getLogger(artifact_log_qname)
801
+ configure_artifact_log(log)
802
+
803
+
804
+ @functools.lru_cache(None)
805
+ def warning_once(logger_obj, *args, **kwargs):
806
+ """
807
+ This function is similar to `logger.warning()`, but will emit the warning with the same message only once
808
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
809
+ The assumption here is that all warning messages are unique across the code. If they aren't, we need to switch to
810
+ another type of cache that includes the caller frame information in the hashing function.
811
+ """
812
+ logger_obj.warning(*args, **kwargs)
813
+
814
+
815
+ class LazyString:
816
+ def __init__(self, func, *args, **kwargs):
817
+ self.func = func
818
+ self.args = args
819
+ self.kwargs = kwargs
820
+
821
+ def __str__(self):
822
+ return self.func(*self.args, **self.kwargs)
823
+
824
+
825
+ import torch._guards
826
+ import torch.distributed as dist
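
The two user-facing helpers exported from this module, LazyString and warning_once, are small; a usage sketch (the logger name and messages are illustrative, not part of the file above):

import logging
from torch._logging import LazyString, warning_once

log = logging.getLogger("torch._dynamo")

def expensive_summary():
    return "a string that is costly to build"

# LazyString defers the call until the record is actually formatted, so the
# cost is only paid when DEBUG logging is enabled for this logger.
log.debug("state: %s", LazyString(expensive_summary))

# warning_once is lru_cache'd on its arguments, so repeated calls with the
# same message emit a single warning.
warning_once(log, "fallback path triggered; results may be slower")
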
env-llmeval/lib/python3.10/site-packages/torch/_logging/_registrations.py ADDED
@@ -0,0 +1,110 @@
1
+ # flake8: noqa: B950
2
+ from ._internal import register_artifact, register_log
3
+
4
+ DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"]
5
+ DISTRIBUTED = ["torch.distributed", "torch._dynamo.backends.distributed"]
6
+
7
+ register_log("dynamo", ["torch._dynamo", *DYNAMIC])
8
+ register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"])
9
+ register_log("inductor", "torch._inductor")
10
+ register_log("dynamic", DYNAMIC)
11
+ register_log("torch", "torch")
12
+ register_log("distributed", DISTRIBUTED)
13
+ register_log("onnx", "torch.onnx")
14
+
15
+ register_artifact(
16
+ "guards",
17
+ "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
18
+ visible=True,
19
+ )
20
+ register_artifact("verbose_guards", "", off_by_default=True)
21
+ register_artifact(
22
+ "bytecode",
23
+ "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
24
+ off_by_default=True,
25
+ )
26
+ register_artifact(
27
+ "graph",
28
+ "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ",
29
+ )
30
+ register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
31
+ register_artifact(
32
+ "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
33
+ )
34
+ register_artifact(
35
+ "trace_source",
36
+ "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
37
+ )
38
+ register_artifact(
39
+ "trace_call",
40
+ "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
41
+ )
42
+ register_artifact(
43
+ "aot_graphs",
44
+ "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
45
+ visible=True,
46
+ )
47
+ register_artifact(
48
+ "aot_joint_graph",
49
+ "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
50
+ )
51
+ register_artifact(
52
+ "post_grad_graphs",
53
+ "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes",
54
+ )
55
+ register_artifact(
56
+ "compiled_autograd",
57
+ "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
58
+ visible=True,
59
+ )
60
+ register_artifact(
61
+ "ddp_graphs",
62
+ "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
63
+ )
64
+ register_artifact(
65
+ "recompiles",
66
+ "Prints the reason why we recompiled a graph. Very, very useful.",
67
+ visible=True,
68
+ )
69
+ register_artifact(
70
+ "recompiles_verbose",
71
+ "Prints all guard checks that fail during a recompilation. "
72
+ "At runtime, Dynamo will stop at the first failed check for each failing guard. "
73
+ "So not all logged failing checks are actually ran by Dynamo.",
74
+ visible=True,
75
+ off_by_default=True,
76
+ )
77
+ register_artifact(
78
+ "graph_breaks",
79
+ "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
80
+ visible=True,
81
+ )
82
+ register_artifact(
83
+ "not_implemented",
84
+ "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
85
+ )
86
+ register_artifact(
87
+ "output_code",
88
+ "Prints the code that Inductor generates (either Triton or C++)",
89
+ off_by_default=True,
90
+ visible=True,
91
+ )
92
+ register_artifact(
93
+ "schedule",
94
+ "Inductor scheduler information. Useful if working on Inductor fusion algo",
95
+ off_by_default=True,
96
+ )
97
+ register_artifact("perf_hints", "", off_by_default=True)
98
+ register_artifact("onnx_diagnostics", "", off_by_default=True)
99
+ register_artifact(
100
+ "fusion",
101
+ "Detailed Inductor fusion decisions. More detailed than 'schedule'",
102
+ off_by_default=True,
103
+ )
104
+ register_artifact(
105
+ "overlap",
106
+ "Detailed Inductor compute/comm overlap decisions",
107
+ off_by_default=True,
108
+ )
109
+
110
+ register_artifact("custom_format_test_artifact", "Testing only", log_format="")
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (26.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc ADDED
Binary file (985 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc ADDED
Binary file (7.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__init__.py ADDED
@@ -0,0 +1,48 @@
1
+ r"""
2
+ PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
3
+ Profiler's context manager API can be used to better understand what model operators are the most expensive,
4
+ examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
5
+
6
+ .. note::
7
+ An earlier version of the API in the :mod:`torch.autograd` module is considered legacy and will be deprecated.
8
+
9
+ """
10
+ import os
11
+
12
+ from torch._C._autograd import _supported_activities, DeviceType, kineto_available
13
+ from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope
14
+ from torch.autograd.profiler import KinetoStepTracker, record_function
15
+ from torch.optim.optimizer import register_optimizer_step_post_hook
16
+
17
+ from .profiler import (
18
+ _KinetoProfile,
19
+ ExecutionTraceObserver,
20
+ profile,
21
+ ProfilerAction,
22
+ schedule,
23
+ supported_activities,
24
+ tensorboard_trace_handler,
25
+ )
26
+
27
+ __all__ = [
28
+ "profile",
29
+ "schedule",
30
+ "supported_activities",
31
+ "tensorboard_trace_handler",
32
+ "ProfilerAction",
33
+ "ProfilerActivity",
34
+ "kineto_available",
35
+ "DeviceType",
36
+ "record_function",
37
+ "ExecutionTraceObserver",
38
+ ]
39
+
40
+ from . import itt
41
+
42
+
43
+ def _optimizer_post_hook(optimizer, args, kwargs):
44
+ KinetoStepTracker.increment_step("Optimizer")
45
+
46
+
47
+ if os.environ.get("KINETO_USE_DAEMON", None):
48
+ _ = register_optimizer_step_post_hook(_optimizer_post_hook)
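
A short usage sketch of the public API re-exported above (standard torch.profiler usage; the model and output directory are placeholders):

import torch
from torch.profiler import profile, schedule, tensorboard_trace_handler, ProfilerActivity

model = torch.nn.Linear(128, 128)
inputs = torch.randn(32, 128)

with profile(
    activities=[ProfilerActivity.CPU],
    schedule=schedule(wait=1, warmup=1, active=2),
    on_trace_ready=tensorboard_trace_handler("./log/profiler"),
) as prof:
    for _ in range(4):
        model(inputs)
        prof.step()  # advances the profiler schedule
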
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc ADDED
Binary file (42.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc ADDED
Binary file (23.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc ADDED
Binary file (2.33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py ADDED
@@ -0,0 +1,1201 @@
1
+ import collections
2
+ import dataclasses
3
+ import enum
4
+ import itertools as it
5
+ import logging
6
+ from typing import (
7
+ Any,
8
+ cast,
9
+ DefaultDict,
10
+ Dict,
11
+ Iterator,
12
+ List,
13
+ Optional,
14
+ Set,
15
+ Tuple,
16
+ Union,
17
+ )
18
+
19
+ from typing_extensions import Literal
20
+
21
+ import torch
22
+ from torch._C import FunctionSchema
23
+ from torch._C._autograd import _ProfilerResult
24
+ from torch._C._profiler import (
25
+ _EventType,
26
+ _ExtraFields_Allocation,
27
+ _ExtraFields_TorchOp,
28
+ _ProfilerEvent,
29
+ _TensorMetadata,
30
+ RecordScope,
31
+ )
32
+ from torch._utils import _element_size
33
+ from torch.profiler import _utils
34
+
35
+ KeyAndID = Tuple["Key", int]
36
+ TensorAndID = Tuple["TensorKey", int]
37
+
38
+ log = logging.getLogger(__name__)
39
+
40
+
41
+ class Category(enum.Enum):
42
+ INPUT = enum.auto()
43
+ TEMPORARY = enum.auto()
44
+ ACTIVATION = enum.auto()
45
+ GRADIENT = enum.auto()
46
+ AUTOGRAD_DETAIL = enum.auto()
47
+ PARAMETER = enum.auto()
48
+ OPTIMIZER_STATE = enum.auto()
49
+
50
+
51
+ _CATEGORY_TO_COLORS = {
52
+ Category.PARAMETER: "darkgreen",
53
+ Category.OPTIMIZER_STATE: "goldenrod",
54
+ Category.INPUT: "black",
55
+ Category.TEMPORARY: "mediumpurple",
56
+ Category.ACTIVATION: "red",
57
+ Category.GRADIENT: "mediumblue",
58
+ Category.AUTOGRAD_DETAIL: "royalblue",
59
+ None: "grey",
60
+ }
61
+
62
+ _CATEGORY_TO_INDEX = {c: i for i, c in enumerate(_CATEGORY_TO_COLORS)}
63
+
64
+
65
+ class Action(enum.Enum):
66
+ PREEXISTING = enum.auto()
67
+ CREATE = enum.auto()
68
+ INCREMENT_VERSION = enum.auto()
69
+ DESTROY = enum.auto()
70
+
71
+
72
+ _ACTION_TO_INDEX = {i: i.value for i in Action}
73
+
74
+
75
+ @dataclasses.dataclass(eq=True, unsafe_hash=False, frozen=True)
76
+ class Key:
77
+ device: torch.device
78
+
79
+
80
+ @dataclasses.dataclass
81
+ class _Storage:
82
+ """Bundle storage pointer and id.
83
+
84
+ All profiling logic should use `allocation_id`, however it is useful to
85
+ print storage pointers for debugging and unit tests sometimes look up
86
+ values using the storage data pointer of a live Tensor."""
87
+
88
+ ptr: int
89
+ allocation_id: int
90
+
91
+ def __repr__(self) -> str:
92
+ return f"{hex(self.ptr):>18} ({self.allocation_id})"
93
+
94
+ def __eq__(self, other: object) -> bool:
95
+ return isinstance(other, _Storage) and self.allocation_id == other.allocation_id
96
+
97
+ def __hash__(self) -> int:
98
+ return hash(self.allocation_id)
99
+
100
+
101
+ @dataclasses.dataclass(eq=True, unsafe_hash=True, frozen=True)
102
+ class TensorKey(Key):
103
+ """Hashable identifier for a storage which has been asigned an ID.
104
+
105
+ A detailed description of Tensor IDs and why they are needed is given in
106
+ `torch/csrc/profiler/collection.h` when `TensorID` is declared. To
107
+ summarize, multiple Storage buffers can map to the same logical Tensor.
108
+ This dataclass is used to refer to a concrete in-memory StorageImpl of
109
+ a Tensor.
110
+ """
111
+
112
+ id: int
113
+ storage: _Storage
114
+
115
+ def __repr__(self) -> str:
116
+ return f"id={self.id}: {repr(self.storage):<24} ({self.device})"
117
+
118
+ def __lt__(self, other: "TensorKey") -> bool:
119
+ return self._as_sortable < other._as_sortable
120
+
121
+ @staticmethod
122
+ def _make(
123
+ tensor_id: Optional[int],
124
+ storage_ptr: Optional[int],
125
+ allocation_id: Optional[int],
126
+ device: torch.device,
127
+ ) -> Optional["TensorKey"]:
128
+ if (
129
+ tensor_id is not None
130
+ and storage_ptr is not None
131
+ and allocation_id is not None
132
+ ):
133
+ return TensorKey(device, tensor_id, _Storage(storage_ptr, allocation_id))
134
+ return None
135
+
136
+ @classmethod
137
+ def from_allocation(cls, alloc: _ExtraFields_Allocation) -> Optional["TensorKey"]:
138
+ return cls._make(alloc.id, alloc.ptr, alloc.allocation_id, alloc.device)
139
+
140
+ @classmethod
141
+ def from_tensor(cls, t: Optional[_TensorMetadata]) -> Optional["TensorKey"]:
142
+ if t is not None:
143
+ return cls._make(t.id, t.storage_data_ptr, t.allocation_id, t.device)
144
+ return None
145
+
146
+ @property
147
+ def _as_sortable(self) -> Tuple[int, int, str, int]:
148
+ return self.id, self.storage.allocation_id, self.device.type, self.device.index
149
+
150
+
151
+ def _extract_parameters_and_gradients(
152
+ node: _ProfilerEvent,
153
+ ) -> Iterator[Tuple[Optional[TensorKey], Optional[TensorKey]]]:
154
+ children = node.children
155
+
156
+ # AccumulateGrad is used in the Autograd engine to handle gradient updates.
157
+ # There are two possible cases:
158
+ # 1) This is a newly created gradient Tensor. In that case there is nothing
159
+ # to accumulate, so autograd simply detaches the Tensor.
160
+ #
161
+ # 2) There is a preexisting gradient Tensor and we need to add the newly
162
+ # computed update. This is done with an in-place add (aten::add_) op.
163
+ # (The underscore suffix denotes "in-place".)
164
+ if (
165
+ node.typed[0] == _EventType.TorchOp
166
+ and node.typed[1].scope == RecordScope.BACKWARD_FUNCTION
167
+ # TODO(robieta): Move away from load bearing names
168
+ and node.name == "torch::autograd::AccumulateGrad"
169
+ and children
170
+ and children[0].typed[0] == _EventType.TorchOp
171
+ and children[0].name in ("aten::detach", "aten::add_")
172
+ and children[0].typed[1].inputs
173
+ and isinstance(children[0].typed[1].inputs[0], _TensorMetadata)
174
+ ):
175
+ yield None, TensorKey.from_tensor(children[0].typed[1].inputs[0])
176
+
177
+ # We directly instrument `torch.nn.Module` and `torch.optim.Optimizer`
178
+ # NOTE: The values captured by the python tracer are cached; they can be
179
+ # used to build up labels but do not imply that a Tensor was live at
180
+ # a particular time.
181
+ elif node.typed[0] == _EventType.PyCall:
182
+ typed_fields = node.typed[1]
183
+ assert typed_fields.module is None or typed_fields.optimizer is None
184
+ if typed_fields.module is not None:
185
+ for _, p, p_grad in typed_fields.module.parameters:
186
+ yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad)
187
+
188
+ if typed_fields.optimizer is not None:
189
+ for p, p_grad, _ in typed_fields.optimizer.parameters:
190
+ yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad)
191
+
192
+
193
+ def extract_parameters(node: _ProfilerEvent) -> Iterator[TensorKey]:
194
+ for p, p_grad in _extract_parameters_and_gradients(node):
195
+ if p is not None:
196
+ yield p
197
+
198
+
199
+ def extract_gradients(
200
+ node: _ProfilerEvent,
201
+ ) -> Iterator[Tuple[Optional[TensorKey], TensorKey]]:
202
+ for p, p_grad in _extract_parameters_and_gradients(node):
203
+ if p_grad is not None:
204
+ yield p, p_grad
205
+
206
+
207
+ def get_scopes(event: Optional[_ProfilerEvent]) -> Tuple[RecordScope, ...]:
208
+ scopes = []
209
+ while event:
210
+ if event.typed[0] == _EventType.TorchOp:
211
+ scopes.append(event.typed[1].scope)
212
+ event = event.parent
213
+ return tuple(scopes)
214
+
215
+
216
+ class SchemaMatcher:
217
+ """Lookup operator schema based on profiled name.
218
+
219
+ When profiling we record the operator's name but not the schema. However
220
+ some analysis requires that information. Fortunately we can look up
221
+ registered schema from the recorded name. We do not, however, record the
222
+ overload and so we must compare the profiled arguments with all overloads
223
+ to determine viable matches.
224
+
225
+ Note: Once https://github.com/pytorch/pytorch/issues/78871 is completed
226
+ this code will be obsolete.
227
+ """
228
+
229
+ @classmethod
230
+ def inputs_are_mutable(cls, t: _ExtraFields_TorchOp) -> Tuple[Optional[bool], ...]:
231
+ """Determine which inputs may have mutated based on function schema.
232
+
233
+ Note that we don't need to resolve down to a single schema to perform
234
+ this analysis. An input is mutable if it is mutable in any overload. In
235
+ practice, however, it is overwhelmingly common to match a single
236
+ overload. If we cannot find any valid schema then we must be
237
+ conservative and assume all inputs are mutable.
238
+ """
239
+ mutable: Optional[List[bool]] = None
240
+ for schema in cls.match_schemas(t):
241
+ mutable = mutable or [False for _ in schema.arguments]
242
+ for i, arg in enumerate(schema.arguments):
243
+ mutable[i] |= getattr(arg.alias_info, "is_write", False)
244
+
245
+ return tuple(mutable or (None for _ in t.inputs))
246
+
247
+ @classmethod
248
+ def match_schemas(cls, t: _ExtraFields_TorchOp) -> Tuple[FunctionSchema, ...]:
249
+ signature = tuple(
250
+ # Tensor
251
+ TensorKey.from_tensor(i) if isinstance(i, _TensorMetadata)
252
+ #
253
+ # TensorList
254
+ else [TensorKey.from_tensor(j) for j in i] if isinstance(i, list)
255
+ #
256
+ # Scalar and uncaptured inputs.
257
+ else i
258
+ for i in t.inputs
259
+ )
260
+
261
+ def matches(schema) -> bool:
262
+ return len(schema.arguments) == len(signature) and all(
263
+ cls._types_match(observed, schema_arg.type)
264
+ for observed, schema_arg in zip(signature, schema.arguments)
265
+ )
266
+
267
+ return tuple(s for s in cls.lookup_schemas(t.name) or () if matches(s))
268
+
269
+ @classmethod
270
+ def _types_match(cls, observed, schema_type) -> bool:
271
+ if isinstance(schema_type, torch._C.OptionalType):
272
+ schema_type = schema_type.getElementType()
273
+ return observed is None or cls._types_match(observed, schema_type)
274
+
275
+ if isinstance(schema_type, torch._C.AnyType):
276
+ return True
277
+
278
+ if schema_type.isSubtypeOf(torch._C.ListType.ofTensors()):
279
+ return isinstance(observed, list) and all(
280
+ isinstance(i, TensorKey) for i in observed
281
+ )
282
+
283
+ type_map: Tuple[Tuple[Any, Union[type, Tuple[type, ...]]], ...] = (
284
+ (torch._C.TensorType, TensorKey),
285
+ (torch._C.NoneType, type(None)),
286
+ (torch._C.BoolType, bool),
287
+ (torch._C.IntType, int),
288
+ (torch._C.FloatType, float),
289
+ (torch._C.ComplexType, complex),
290
+ (torch._C.NumberType, (bool, int, float, complex)),
291
+ )
292
+
293
+ for jit_type, py_types in type_map:
294
+ if isinstance(schema_type, jit_type):
295
+ return isinstance(observed, py_types)
296
+
297
+ # Profiler only records a subset of possible argument types. If we
298
+ # reach this point then the schema must call for a type that profiler
299
+ # does not record. Thus, the schema can only be a match if `observed`
300
+ # is also None.
301
+ return observed is None
302
+
303
+ @staticmethod
304
+ def lookup_schemas(name: str) -> Optional[Tuple[FunctionSchema, ...]]:
305
+ # TODO(robieta):
306
+ # _jit_get_schemas_for_operator is quite expensive. (~100us / call)
307
+ # Consider adding `functools.lru_cache` if that becomes an issue.
308
+
309
+ try:
310
+ # Schema lookup will throw if `name` is malformed. (For example,
311
+ # schemas must be namespaced and schema lookup will fail if name
312
+ # does not include "::".) We simply catch the exception and return
313
+ # `None` to denote that `name` cannot be an operator name.
314
+ #
315
+ # Note that record_function annotations also go through this path,
316
+ # so it is expected that some names will not correspond to PyTorch
317
+ # operators.
318
+ if "::" not in name:
319
+ return None
320
+ return tuple(torch._C._jit_get_schemas_for_operator(name))
321
+ except RuntimeError:
322
+ return None
323
+
324
+
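For orientation, here is a minimal sketch of the schema lookup that SchemaMatcher relies on above. The operator name "aten::add" and the printed fields are illustrative choices, not part of this module; the lookup call and `alias_info.is_write` are the same signals used by `lookup_schemas` and `inputs_are_mutable`.

```
# Sketch only: query registered overloads for a namespaced operator name and
# inspect which arguments may be written to.
import torch

for schema in torch._C._jit_get_schemas_for_operator("aten::add"):
    # alias_info is None for non-aliasing arguments, so is_write defaults to False.
    writes = [getattr(arg.alias_info, "is_write", False) for arg in schema.arguments]
    print(schema, writes)
```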
325
+ class OpTree:
326
+ def __init__(self, result: _ProfilerResult) -> None:
327
+ self._root_nodes = result.experimental_event_tree()
328
+ self._sorted_nodes = tuple(sorted(self.dfs(), key=lambda x: x.start_time_ns))
329
+
330
+ def dfs(self, *args, **kwargs) -> Iterator[_ProfilerEvent]:
331
+ yield from _utils.traverse_dfs(self._root_nodes, *args, **kwargs)
332
+
333
+ @property
334
+ def sorted_nodes(self) -> Tuple[_ProfilerEvent, ...]:
335
+ return self._sorted_nodes
336
+
337
+
338
+ class SizeMap:
339
+ def __init__(self, op_tree: OpTree) -> None:
340
+ self._values: Dict[TensorKey, int] = {}
341
+
342
+ for node in op_tree.sorted_nodes:
343
+ if node.typed[0] == _EventType.TorchOp:
344
+ for t in self._flat_tensor_inputs(node.typed[1]):
345
+ self._update_values(t)
346
+
347
+ elif node.typed[0] == _EventType.PyCall:
348
+ typed_fields = node.typed[1]
349
+ assert typed_fields.module is None or typed_fields.optimizer is None
350
+ if typed_fields.module is not None:
351
+ for _, p, p_grad in typed_fields.module.parameters:
352
+ self._update_values(p)
353
+ self._update_values(p_grad)
354
+
355
+ if typed_fields.optimizer is not None:
356
+ for p, p_grad, state in typed_fields.optimizer.parameters:
357
+ self._update_values(p)
358
+ self._update_values(p_grad)
359
+ for _, t in state:
360
+ self._update_values(t)
361
+
362
+ allocations: Dict[TensorKey, int] = {}
363
+ for node in op_tree.sorted_nodes:
364
+ if node.typed[0] == _EventType.Allocation:
365
+ alloc_fields = node.typed[1]
366
+ key = TensorKey.from_allocation(alloc_fields)
367
+ if key:
368
+ new_size = abs(alloc_fields.alloc_size)
369
+ prior_size = allocations.setdefault(key, new_size)
370
+
371
+ # It is possible to resize Storage in PyTorch, however we
372
+ # key on data pointer so most resizes will be treated as a
373
+ # change in storage. The one corner case that cannot be
374
+ # handled is `realloc` which successfully resizes the
375
+ # storage. At time of writing this is not done anywhere in
376
+ # the core PyTorch codebase.
377
+ if prior_size != new_size:
378
+ delta = f"{prior_size} vs. {new_size}"
379
+ log.warning("Mismatch between allocation and free: %s", delta)
380
+
381
+ self._values.update(allocations)
382
+
383
+ def _update_values(self, t: Optional[_TensorMetadata]) -> None:
384
+ key = TensorKey.from_tensor(t)
385
+ if key is not None and t is not None and t.layout == torch.strided:
386
+ # Scalars are represented as zero dim Tensors
387
+ n = max(i[0] * i[1] for i in zip(t.sizes or [1], t.strides or [1]))
388
+
389
+ num_bytes = n * _element_size(t.dtype)
390
+ assert num_bytes >= 0, f"{num_bytes}"
391
+ self._values[key] = max(self._values.get(key, 0), num_bytes)
392
+
393
+ @staticmethod
394
+ def _flat_tensor_inputs(op: _ExtraFields_TorchOp) -> Iterator[_TensorMetadata]:
395
+ for i in op.inputs:
396
+ if isinstance(i, _TensorMetadata):
397
+ yield i
398
+ elif isinstance(i, list):
399
+ yield from i
400
+
401
+ def __getitem__(self, key: TensorKey):
402
+ return self._values[key]
403
+
404
+
405
+ @dataclasses.dataclass()
406
+ class DataFlowEdge:
407
+ input_version: Optional[int] = None
408
+ mutated: Optional[bool] = False
409
+
410
+ @property
411
+ def is_allocation(self) -> bool:
412
+ return self.input_version is None
413
+
414
+ @property
415
+ def is_deletion(self) -> bool:
416
+ return self.mutated is None
417
+
418
+
419
+ class DataFlowNode:
420
+ def __init__(self, event: _ProfilerEvent, graph: "DataFlowGraph") -> None:
421
+ self._event = event
422
+ self._graph = graph
423
+ self._edges: Dict[TensorKey, DataFlowEdge] = self._determine_edges()
424
+
425
+ for key, edge in self._edges.items():
426
+ if edge.mutated and not edge.is_allocation:
427
+ self._graph.bump(key)
428
+
429
+ # Make sure the version bumping behavior matches what we expect.
430
+ versions = {k: (v, self._graph.lookup(k)) for k, v in self.outputs.items()}
431
+ assert all(i == j for i, j in versions.values()), f"{versions}, {self._edges}"
432
+
433
+ def _determine_edges(self) -> Dict[TensorKey, DataFlowEdge]:
434
+ subtree = tuple(_utils.traverse_dfs([self._event]))
435
+
436
+ # Start by populating edges from op inputs and outputs.
437
+ mutable_by_key: Dict[Optional[TensorKey], Set[Optional[bool]]] = {}
438
+ for op in (i.typed[1] for i in subtree if i.typed[0] == _EventType.TorchOp):
439
+ for op_input, mutable in zip(
440
+ op.inputs, SchemaMatcher.inputs_are_mutable(op)
441
+ ):
442
+ # Tensor
443
+ if isinstance(op_input, _TensorMetadata):
444
+ key = TensorKey.from_tensor(op_input)
445
+ mutable_by_key.setdefault(key, set()).add(mutable)
446
+
447
+ # TensorList
448
+ elif isinstance(op_input, list):
449
+ for op_input_i in op_input:
450
+ key = TensorKey.from_tensor(op_input_i)
451
+ mutable_by_key.setdefault(key, set()).add(mutable)
452
+
453
+ edges: DefaultDict[Optional[TensorKey], DataFlowEdge]
454
+ edges = collections.defaultdict(DataFlowEdge)
455
+ for key, mutable_set in mutable_by_key.items():
456
+ if key is not None:
457
+ edges[key].input_version = self._graph.lookup(key) if key else -1
458
+
459
+ # We consider an op to be mutated if we encounter a schema where it
460
+ # is a mutable argument OR if it is ambiguous. (We never explicitly
461
+ # see it in any schema.)
462
+ mutated = (True in mutable_set) or (tuple(mutable_set) == (None,))
463
+ edges[key].mutated = mutated
464
+
465
+ # Then handle deletions. Note that deleting a Tensor implicitly adds
466
+ # it as an input edge.
467
+ for i in subtree:
468
+ if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size < 0:
469
+ key = TensorKey.from_allocation(i.typed[1])
470
+ edge = edges[key]
471
+ assert key is None or edge.mutated is not None, f"Double delete: {key}"
472
+ edge.mutated = None
473
+ edge.input_version = self._graph.lookup(key) if key else -1
474
+
475
+ # And finally handle allocations. This step must be last, because the
476
+ # previous two steps optimistically add input edges.
477
+ for i in subtree:
478
+ if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size > 0:
479
+ edges[TensorKey.from_allocation(i.typed[1])].input_version = None
480
+
481
+ # We don't need to sort the inputs, but it makes debugging and unit tests nicer.
482
+ return dict(sorted((k, v) for k, v in edges.items() if k is not None))
483
+
484
+ @property
485
+ def inputs(self) -> Dict[TensorKey, Tuple[bool, int]]:
486
+ return {
487
+ # MyPy can't see through `is_allocation` to know that
488
+ # `v.input_version` is not None.
489
+ k: (bool(v.mutated), cast(int, v.input_version))
490
+ for k, v in self._edges.items()
491
+ if not v.is_allocation
492
+ }
493
+
494
+ @property
495
+ def outputs(self) -> Dict[TensorKey, int]:
496
+ return {
497
+ k: 0 if v.input_version is None else v.input_version + 1
498
+ for k, v in self._edges.items()
499
+ if (v.is_allocation and not v.is_deletion) or v.mutated
500
+ }
501
+
502
+ @property
503
+ def intermediates(self) -> Tuple[TensorKey, ...]:
504
+ return tuple(
505
+ k for k, v in self._edges.items() if v.is_allocation and v.is_deletion
506
+ )
507
+
508
+ @property
509
+ def start_time(self) -> int:
510
+ return self._event.start_time_ns
511
+
512
+
513
+ class DataFlowGraph:
514
+ def __init__(self, op_tree: OpTree) -> None:
515
+ self._op_tree = op_tree
516
+ self._leaf_events = self._extract_leaf_events(op_tree)
517
+ self._active_version: Dict[TensorKey, Optional[int]] = {}
518
+ self._flow_nodes = [DataFlowNode(e, self) for e in self.leaf_events]
519
+ self._flow_nodes.sort(key=lambda x: x.start_time)
520
+ self.validate()
521
+
522
+ @property
523
+ def flow_nodes(self) -> Tuple[DataFlowNode, ...]:
524
+ return tuple(self._flow_nodes)
525
+
526
+ def validate(self):
527
+ # Check that each (Tensor, version) pair has a unique creation node
528
+ outputs: Set[Tuple[TensorKey, int]] = set()
529
+ for node in self.flow_nodes:
530
+ node_outputs = set(node.outputs.items())
531
+ duplicates = outputs & node_outputs
532
+ assert not duplicates, f"{node._event.name} {node._edges} {duplicates}"
533
+ outputs |= node_outputs
534
+
535
+ # And check that `self._nodes` forms a valid topologically sorted DAG.
536
+ tensor_versions: Dict[TensorKey, int] = {}
537
+ for node in self.flow_nodes:
538
+ for key, (_, version) in node.inputs.items():
539
+ expected = tensor_versions.get(key, 0)
540
+ assert expected == version, (expected, version)
541
+
542
+ for key, version in node.outputs.items():
543
+ prior_version = tensor_versions.get(key, version)
544
+ assert version >= prior_version, (version, prior_version)
545
+ tensor_versions[key] = version
546
+
547
+ @property
548
+ def leaf_events(self) -> Tuple[_ProfilerEvent, ...]:
549
+ return self._leaf_events
550
+
551
+ @staticmethod
552
+ def _extract_leaf_events(op_tree: OpTree) -> Tuple[_ProfilerEvent, ...]:
553
+ """Partially traverse the op tree and extract top level ops.
554
+
555
+ Consider the following code:
556
+ ```
557
+ with record_function("My annotation"):
558
+ x.zero_()
559
+ y.zero_()
560
+ ```
561
+
562
+ The op tree (assuming no Autograd) will look like:
563
+ <Python context>
564
+ TorchOp: "My annotation"
565
+ TorchOp: zero_
566
+ TorchOp: fill_
567
+ TorchOp: zero_
568
+ TorchOp: fill_
569
+
570
+ The recursive structure of operator calls makes data flow unwieldy.
571
+ In order to simplify analysis we would like to select the highest level
572
+ ops to represent in the graph. In this case those are the `zero_` ops;
573
+ the fact that `fill_` is called is an implementation detail. We also
574
+ do not want to group everything under "My annotation" as this could
575
+ create overly coarse bundles and lose critical semantics.
576
+
577
+ To address this issue we walk over the graph and select the topmost
578
+ torch ops ** which match at least one operator schema **. These form
579
+ the leaves of the first pass through the op tree. (As well as any
580
+ allocations or frees which are not part of a kernel.) These events
581
+ form the logical nodes in our data flow graph.
582
+ """
583
+
584
+ leaf_events: List[_ProfilerEvent] = []
585
+
586
+ def leaf_op(e: _ProfilerEvent) -> bool:
587
+ return e.typed[0] == _EventType.TorchOp and (
588
+ e.typed[1].scope == RecordScope.BACKWARD_FUNCTION
589
+ or bool(SchemaMatcher.match_schemas(e.typed[1]))
590
+ )
591
+
592
+ def children_fn(e: _ProfilerEvent):
593
+ if leaf_op(e) or e.tag == _EventType.Allocation:
594
+ leaf_events.append(e)
595
+ return []
596
+
597
+ return e.children
598
+
599
+ for _ in op_tree.dfs(children_fn=children_fn):
600
+ pass
601
+
602
+ return tuple(sorted(leaf_events, key=lambda x: x.start_time_ns))
603
+
604
+ def lookup(self, key: TensorKey) -> int:
605
+ version = self._active_version.setdefault(key, 0)
606
+ assert version is not None
607
+ return version
608
+
609
+ def bump(self, key: TensorKey) -> None:
610
+ prior_version = self._active_version.get(key, None)
611
+ assert prior_version is not None
612
+ self._active_version[key] = prior_version + 1
613
+
614
+ def delete(self, key: TensorKey) -> None:
615
+ assert self._active_version.setdefault(key, 0) is not None
616
+ self._active_version[key] = None
617
+
618
+
619
+ @dataclasses.dataclass
620
+ class CategoryElement:
621
+ by_id: Optional[Category] = None
622
+ by_key: Dict[TensorKey, Category] = dataclasses.field(default_factory=dict)
623
+ by_version: Dict[TensorAndID, Category] = dataclasses.field(default_factory=dict)
624
+
625
+ # Used by unit tests to check internals. (And consequently by
626
+ # MemoryProfile.lookup) This should not be used in any other capacity.
627
+ _by_id_keyset: Set[TensorKey] = dataclasses.field(default_factory=set)
628
+
629
+
630
+ @dataclasses.dataclass
631
+ class CategoryDict:
632
+ _values: DefaultDict[int, CategoryElement] = dataclasses.field(
633
+ default_factory=lambda: collections.defaultdict(CategoryElement)
634
+ )
635
+
636
+ def set_by_id(self, key: TensorKey, category: Category) -> None:
637
+ self._values[key.id].by_id = category
638
+ self._values[key.id]._by_id_keyset.add(key)
639
+
640
+ def set_by_key(self, key: TensorKey, category: Category) -> None:
641
+ self._values[key.id].by_key[key] = category
642
+
643
+ def set_by_version(self, key: TensorKey, version: int, category: Category) -> None:
644
+ self._values[key.id].by_version[(key, version)] = category
645
+
646
+ def setdefault_by_version(
647
+ self, key: TensorKey, version: int, category: Category
648
+ ) -> None:
649
+ self._values[key.id].by_version.setdefault((key, version), category)
650
+
651
+ def get(self, key: Key, version: int) -> Optional[Category]:
652
+ if isinstance(key, Key) and not isinstance(key, TensorKey):
653
+ return None
654
+ element = self._values[key.id]
655
+ return (
656
+ element.by_id
657
+ or element.by_key.get(key, None)
658
+ or element.by_version.get((key, version), None)
659
+ )
660
+
661
+
662
+ class MemoryProfile:
663
+ def __init__(self, result: _ProfilerResult) -> None:
664
+ self._op_tree = OpTree(result)
665
+ self._data_flow_graph = DataFlowGraph(self._op_tree)
666
+ self._size_map = SizeMap(self._op_tree)
667
+ self._categories = CategoryDict()
668
+
669
+ self._set_gradients_and_temporaries()
670
+ self._set_parameters_using_python_tracer()
671
+ self._set_inputs()
672
+ self._set_parameters_using_data_flow()
673
+ self._set_activations()
674
+ self._set_optimizer_state()
675
+ self._set_autograd_detail()
676
+
677
+ @property
678
+ def timeline(self) -> Tuple[Tuple[int, Action, KeyAndID, int], ...]:
679
+ output: List[Tuple[int, Action, KeyAndID, int]] = []
680
+ allocation_times: Dict[Tuple[TensorKey, bool], int] = {}
681
+ live_unknown: Dict[Tuple[int, torch.device], Literal[True]] = {}
682
+ for event in self._op_tree.dfs():
683
+ if event.typed[0] == _EventType.Allocation:
684
+ alloc_fields = event.typed[1]
685
+ alloc_size = alloc_fields.alloc_size
686
+ is_allocation = alloc_size > 0
687
+ t = event.start_time_ns
688
+
689
+ tkey = TensorKey.from_allocation(alloc_fields)
690
+ if tkey is not None:
691
+ allocation_times[(tkey, is_allocation)] = t
692
+
693
+ else:
694
+ key = Key(alloc_fields.device)
695
+ ptr_and_device = (alloc_fields.ptr, key.device)
696
+ if is_allocation:
697
+ if ptr_and_device in live_unknown:
698
+ output.append(
699
+ (t, Action.INCREMENT_VERSION, (key, 0), alloc_size)
700
+ )
701
+ else:
702
+ live_unknown[ptr_and_device] = True
703
+ output.append((t, Action.CREATE, (key, 0), alloc_size))
704
+ else:
705
+ output.append((t, Action.DESTROY, (key, 0), -alloc_size))
706
+ if not live_unknown.pop(ptr_and_device, False):
707
+ output.append(
708
+ (-1, Action.PREEXISTING, (key, 0), -alloc_size)
709
+ )
710
+
711
+ snapshot = self._category_snapshot()
712
+ last_version = dict(sorted(snapshot.keys()))
713
+
714
+ events: List[Tuple[int, Action, TensorAndID]] = [
715
+ (-1, Action.PREEXISTING, (key, version))
716
+ for key, version in snapshot.keys()
717
+ if (key, True) not in allocation_times and version == 0
718
+ ]
719
+
720
+ for node in self._data_flow_graph.flow_nodes:
721
+ for key, edge in node._edges.items():
722
+ if edge.is_allocation:
723
+ t = allocation_times[(key, True)]
724
+ events.append((t, Action.CREATE, (key, 0)))
725
+
726
+ elif edge.mutated:
727
+ t = node._event.start_time_ns
728
+ version = edge.input_version
729
+ assert version is not None
730
+ events.append((t, Action.INCREMENT_VERSION, (key, version)))
731
+
732
+ if edge.is_deletion:
733
+ t = allocation_times[(key, False)]
734
+ events.append((t, Action.DESTROY, (key, last_version[key])))
735
+
736
+ output.extend(
737
+ (time, action, (key, version), self._size_map[key])
738
+ for time, action, (key, version) in events
739
+ )
740
+
741
+ output.sort(key=lambda x: (x[0], x[1].value))
742
+ return tuple(output)
743
+
744
+ def _is_gradient(self, *args, **kwargs) -> bool:
745
+ return self._categories.get(*args, **kwargs) == Category.GRADIENT
746
+
747
+ def _category_snapshot(self) -> Dict[TensorAndID, Optional[Category]]:
748
+ all_tensor_versions: Set[TensorAndID] = set()
749
+
750
+ for node in self._data_flow_graph.flow_nodes:
751
+ all_tensor_versions.update(((k, v) for k, (_, v) in node.inputs.items()))
752
+ all_tensor_versions.update((key, 0) for key in node.intermediates)
753
+ all_tensor_versions.update(node.outputs.items())
754
+
755
+ for i in self._categories._values.values():
756
+ all_tensor_versions.update((key, 0) for key in i._by_id_keyset)
757
+
758
+ return {
759
+ (key, version): self._categories.get(key, version)
760
+ for key, version in sorted(all_tensor_versions)
761
+ }
762
+
763
+ def _any_version_depends_on_gradient(self) -> Set[int]:
764
+ """Extract IDs of Tensors which depend or will depend on a gradient.
765
+
766
+ Note that this weakened definition of "depends" requires us to loop
767
+ over the data flow graph multiple times because it allows dependency
768
+ information to flow backward through edges and removes the guarantee
769
+ that nodes are topologically sorted. (Or indeed, even that a valid
770
+ topological order exists.) Put another way, we have converted an
771
+ acyclic data flow graph into a cyclic graph and we are attempting to
772
+ partition cycles involving a gradient from the rest of the graph.
773
+ """
774
+ depends_on_gradient: Set[int] = set()
775
+ while True:
776
+ start_size = len(depends_on_gradient)
777
+ for node in self._data_flow_graph.flow_nodes:
778
+ ids = tuple(
779
+ key.id
780
+ for key, (_, version) in node.inputs.items()
781
+ if self._categories.get(key, version)
782
+ in (Category.GRADIENT, Category.PARAMETER)
783
+ or key.id in depends_on_gradient
784
+ )
785
+
786
+ if ids:
787
+ depends_on_gradient.update(ids)
788
+ depends_on_gradient.update(key.id for key in node.outputs)
789
+
790
+ # We are guaranteed to exit because there is a finite set of
791
+ # TensorAndID pairs. In practice we do not expect to loop more than
792
+ # three times: once to identify the core parameter update loop,
793
+ # once to fold the first step into that loop, and a third time
794
+ # where no new elements are added.
795
+ if len(depends_on_gradient) == start_size:
796
+ return depends_on_gradient
797
+
798
+ def _set_gradients_and_temporaries(self) -> None:
799
+ """Mark Tensors which are unambiguous and simple to reason about."""
800
+
801
+ # Gradients are straightforward to detect. We directly check the
802
+ # `.grad` property in the Python tracer, and we can detect any new
803
+ # gradient Tensors from `AccumulateGrad` ops.
804
+ for event in self._op_tree.dfs():
805
+ for _, p_grad in extract_gradients(event):
806
+ self._categories.set_by_id(p_grad, Category.GRADIENT)
807
+
808
+ # Similarly, temporary Tensors are easy to identify and are useful to
809
+ # flag since they can make memory use "spikier" than one would
810
+ # otherwise expect.
811
+ for node in self._data_flow_graph.flow_nodes:
812
+ for i in node.intermediates:
813
+ self._categories.set_by_key(i, Category.TEMPORARY)
814
+
815
+ def _set_parameters_using_python_tracer(self) -> None:
816
+ for event in self._op_tree.dfs():
817
+ for p in extract_parameters(event):
818
+ if p is not None:
819
+ self._categories.set_by_id(p, Category.PARAMETER)
820
+
821
+ def _set_inputs(self) -> None:
822
+ """Mark inputs based on which Tensors are updated using gradients.
823
+
824
+ The process for differentiating between inputs and activations is more
825
+ involved. Most Tensors in a training loop depend on at least one
826
+ gradient: parameters depend on them through updates, and activations
827
+ and optimizer state depend on them transitively through parameters.
828
+ Critically, we do not need to know which Tensors are parameters to
829
+ apply this method; we can simply walk the data flow graph to build the
830
+ set of all values which depend on a gradient and then obtain the set
831
+ of inputs from the conjugate set.
832
+
833
+ There is, however, one hiccup. The first time we see a parameter is
834
+ generally on the forward pass of the first step. We know from
835
+ inspection of the data flow graph that v1 of that Tensor depends on
836
+ a gradient (provided we profile an optimizer step), but not v0. To
837
+ address this problem we weaken the definition of "depends on a
838
+ gradient" to "any version of this Tensor depends on a gradient",
839
+ which in turn strengthens the criteria for the input set enough to
840
+ filter the activations in the forward pass of the first step."""
841
+
842
+ # All of this analysis is predicated on using at least one training
843
+ # step (or parameters from the python tracer) to partition the graph.
844
+ # Absent that we cannot determine which Tensors are inputs and which
845
+ # ones are part of the model.
846
+ depends_on_gradient = self._any_version_depends_on_gradient()
847
+
848
+ # We only want to annotate Tensors which actually contribute to the
849
+ # model calculation.
850
+ produces_gradient: Set[TensorAndID] = set()
851
+ for node in reversed(self._data_flow_graph.flow_nodes):
852
+ tensors = {(key, version) for key, (_, version) in node.inputs.items()}
853
+ tensors |= node.outputs.items()
854
+ if any(
855
+ self._categories.get(*i) in (Category.GRADIENT, Category.PARAMETER)
856
+ or i in produces_gradient
857
+ for i in tensors
858
+ ):
859
+ produces_gradient |= tensors
860
+
861
+ # Don't include Tensors created in the backward pass, as these are
862
+ # generally Autograd implementation details rather than proper inputs.
863
+ input_candidates = produces_gradient.copy()
864
+ for node in self._data_flow_graph.flow_nodes:
865
+ if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):
866
+ input_candidates -= set(node.outputs.items())
867
+
868
+ for key, version in input_candidates:
869
+ if key.id not in depends_on_gradient:
870
+ self._categories.setdefault_by_version(key, version, Category.INPUT)
871
+
872
+ def _set_parameters_using_data_flow(self) -> None:
873
+ """Deduce which Tensors are parameters.
874
+
875
+ Consider the following code for the step of SGD with momentum
876
+ (nesterov=False), where `d_p` is the gradient of `param` and `buf` is
877
+ the momentum buffer.
878
+ ```
879
+ buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
880
+ d_p = buf
881
+ param.add_(d_p, alpha=-lr)
882
+ ```
883
+ Both `param` and `buf` take a gradient and perform an in-place update.
884
+
885
+ The python tracer will inspect calls to `nn.Module.forward` and
886
+ `optim.Optimizer.step` to extract parameter and optimizer state
887
+ respectively (including parameters), so this is generally a non-issue.
888
+
889
+ However as a fallback we can also exploit several properties of
890
+ parameters to distinguish them from other model state.
891
+
892
+ First, they are directly used in the forward pass. (At this point we
893
+ haven't established which parts of the graph correspond to the forward
894
+ pass but we can deduce enough to suffice.) Some mutable state such as
895
+ batch norm moving averages also contribute to the forward pass, but
896
+ optimizer state does not.
897
+
898
+ Second, a parameter is by definition used to compute at least one
899
+ gradient and depends on at least one gradient.
900
+ """
901
+ snapshot = self._category_snapshot()
902
+
903
+ # Determine which Tensors might be parameters based on forward pass
904
+ # data flow. Note that these are only candidates; we filter nodes that
905
+ # we know are part of the backward pass but that doesn't guarantee that
906
+ # they are part of the forward pass.
907
+ candidate_parameters: Set[TensorAndID] = set()
908
+ candidate_fwd_tensors: Set[TensorAndID] = {
909
+ i for i, category in snapshot.items() if category == Category.INPUT
910
+ }
911
+
912
+ for node in self._data_flow_graph.flow_nodes:
913
+ inputs = {(key, value) for key, (_, value) in node.inputs.items()}
914
+ if (
915
+ # Don't check nodes in the backward pass.
916
+ RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event)
917
+ and not any(self._is_gradient(*i) for i in inputs)
918
+ and not any(self._is_gradient(*i) for i in node.outputs.items())
919
+ #
920
+ # and only check nodes which depend on an input.
921
+ and candidate_fwd_tensors.intersection(inputs)
922
+ ):
923
+ candidate_fwd_tensors |= node.outputs.items()
924
+ candidate_parameters |= inputs.difference(candidate_fwd_tensors)
925
+
926
+ # Require that each parameter eventually contributes to the value of a gradient
927
+ used_for_gradient: Set[TensorAndID] = set()
928
+ for node in reversed(self._data_flow_graph.flow_nodes):
929
+ if any(
930
+ self._is_gradient(*i) or i in used_for_gradient
931
+ for i in node.outputs.items()
932
+ ):
933
+ for key, (_, version) in node.inputs.items():
934
+ used_for_gradient.add((key, version))
935
+ candidate_parameters.intersection_update(used_for_gradient)
936
+
937
+ # and depends on a gradient.
938
+ parameter_keys = {key.id for key, _ in candidate_parameters}
939
+ parameter_keys &= self._any_version_depends_on_gradient()
940
+
941
+ for key, _ in snapshot.keys():
942
+ if key.id in parameter_keys:
943
+ self._categories.set_by_id(key, Category.PARAMETER)
944
+
945
+ def _set_activations(self) -> None:
946
+ """Flood the graph to identify activations."""
947
+
948
+ required = {Category.INPUT, Category.ACTIVATION}
949
+ also_allowed = {Category.PARAMETER, Category.TEMPORARY}
950
+ for node in self._data_flow_graph.flow_nodes:
951
+ inputs = {(key, value) for key, (_, value) in node.inputs.items()}
952
+ input_categories = {self._categories.get(*i) for i in inputs}
953
+
954
+ if (
955
+ (input_categories & required)
956
+ and not (input_categories - (required | also_allowed))
957
+ #
958
+ # Stop filling when we reach the backward pass.
959
+ and RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event)
960
+ ):
961
+ for i in node.outputs.items():
962
+ self._categories.setdefault_by_version(*i, Category.ACTIVATION)
963
+
964
+ def _set_optimizer_state(self) -> None:
965
+ for event in self._op_tree.dfs():
966
+ if event.typed[0] == _EventType.PyCall and event.typed[1].optimizer:
967
+ parameters = event.typed[1].optimizer.parameters
968
+ for _, t in it.chain(*[state for _, _, state in parameters]):
969
+ key = TensorKey.from_tensor(t)
970
+ if key is not None:
971
+ self._categories.set_by_id(key, Category.OPTIMIZER_STATE)
972
+
973
+ def _set_autograd_detail(self):
974
+ prior = {None, Category.AUTOGRAD_DETAIL}
975
+ for node in self._data_flow_graph.flow_nodes:
976
+ if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):
977
+ for key, version in node.outputs.items():
978
+ if version == 0 or self._categories.get(key, version - 1) in prior:
979
+ self._categories.setdefault_by_version(
980
+ key, version, Category.AUTOGRAD_DETAIL
981
+ )
982
+
983
+
984
+ class MemoryProfileTimeline:
985
+ def __init__(self, memory_profile):
986
+ """The minimum representation of the memory profile timeline
987
+ includes the memory timeline and categories. The timeline
988
+ consists of [timestamp, action, (TensorKey, version), numbytes]
989
+ elements, to denote any actions (pre-existing, create, destroy,
990
+ or increment_version) that occurred to a specific Tensor for a
991
+ chunk of memory. The categories help map each (TensorKey,
992
+ version) pair into a category."""
993
+ self.timeline = memory_profile.timeline
994
+ self.categories = memory_profile._categories
995
+
996
+ def _coalesce_timeline(self, device_str):
997
+ """Convert the memory timeline and categories into a memory plot
998
+ consisting of timestamps and their respective sizes by category
999
+ for a given device.
1000
+
1001
+ Input: device
1002
+ Output: [timestamps, sizes by category]
1003
+ """
1004
+ device = torch.device(device_str)
1005
+ times: List[int] = []
1006
+ sizes: List[List[int]] = []
1007
+
1008
+ def update(key, version, delta):
1009
+ category = (
1010
+ self.categories.get(key, version)
1011
+ if isinstance(key, TensorKey)
1012
+ else None
1013
+ )
1014
+ index = _CATEGORY_TO_INDEX[category] + 1
1015
+ sizes[-1][index] += int(delta)
1016
+
1017
+ t_min = -1
1018
+ for t, action, (key, version), numbytes in self.timeline:
1019
+ if key.device != device:
1020
+ continue
1021
+
1022
+ # Convert timestamps from ns to us, to match trace events.
1023
+ if t != -1:
1024
+ t = int(t / 1000)
1025
+
1026
+ # Save the smallest timestamp to populate pre-existing allocs.
1027
+ if t_min == -1 or (t < t_min and t > 0):
1028
+ t_min = t
1029
+
1030
+ # Handle timestep
1031
+ if len(times) == 0:
1032
+ times.append(t)
1033
+ sizes.append([0] + [0 for _ in _CATEGORY_TO_INDEX])
1034
+
1035
+ elif t != times[-1]:
1036
+ times.append(t)
1037
+ sizes.append(sizes[-1].copy())
1038
+
1039
+ # Handle memory and categories
1040
+ if action in (Action.PREEXISTING, Action.CREATE):
1041
+ update(key, version, numbytes)
1042
+
1043
+ elif action == Action.INCREMENT_VERSION:
1044
+ update(key, version, -numbytes)
1045
+ update(key, version + 1, numbytes)
1046
+
1047
+ elif action == Action.DESTROY:
1048
+ update(key, version, -numbytes)
1049
+
1050
+ else:
1051
+ raise ValueError(f"Unknown action: {action}")
1052
+
1053
+ times = [t_min if t < 0 else t for t in times]
1054
+ return times, sizes
1055
+
1056
+ def export_memory_timeline(self, path, device) -> None:
1057
+ """Saves the memory timeline as [times, sizes by category]
1058
+ as a JSON formatted file to the given path for the given
1059
+ device."""
1060
+ times, sizes = self._coalesce_timeline(device)
1061
+ # TODO: Write a faster serialize (orjson not available in CI)
1062
+ import json
1063
+
1064
+ with open(path, "w") as f:
1065
+ json.dump([times, sizes], f)
1066
+
1067
+ def export_memory_timeline_raw(self, path, device_str) -> None:
1068
+ """Saves the memory timeline as raw memory event tuples in the
1069
+ form of (timestamp, action, numbytes, category)
1070
+ as a JSON formatted file to the given path for the given
1071
+ device."""
1072
+ device = torch.device(device_str)
1073
+ raw_events: List[Tuple[int, int, int, int]] = []
1074
+
1075
+ def get_category_index(key, version):
1076
+ category = (
1077
+ self.categories.get(key, version)
1078
+ if isinstance(key, TensorKey)
1079
+ else None
1080
+ )
1081
+ return _CATEGORY_TO_INDEX[category]
1082
+
1083
+ for t, action, (key, version), numbytes in self.timeline:
1084
+ if key.device != device:
1085
+ continue
1086
+
1087
+ if action in (Action.PREEXISTING, Action.CREATE):
1088
+ raw_events.append(
1089
+ (
1090
+ t,
1091
+ _ACTION_TO_INDEX[action],
1092
+ numbytes,
1093
+ get_category_index(key, version),
1094
+ )
1095
+ )
1096
+
1097
+ elif action == Action.INCREMENT_VERSION:
1098
+ raw_events.append(
1099
+ (
1100
+ t,
1101
+ _ACTION_TO_INDEX[action],
1102
+ -numbytes,
1103
+ get_category_index(key, version),
1104
+ )
1105
+ )
1106
+ raw_events.append(
1107
+ (
1108
+ t,
1109
+ _ACTION_TO_INDEX[action],
1110
+ numbytes,
1111
+ get_category_index(key, version + 1),
1112
+ )
1113
+ )
1114
+
1115
+ elif action == Action.DESTROY:
1116
+ raw_events.append(
1117
+ (
1118
+ t,
1119
+ _ACTION_TO_INDEX[action],
1120
+ -numbytes,
1121
+ get_category_index(key, version),
1122
+ )
1123
+ )
1124
+
1125
+ else:
1126
+ raise ValueError(f"Unknown action: {action}")
1127
+
1128
+ import json
1129
+
1130
+ with open(path, "w") as f:
1131
+ json.dump(raw_events, f)
1132
+
1133
+ def export_memory_timeline_html(
1134
+ self, path, device, figsize=(20, 12), title=None
1135
+ ) -> None:
1136
+ """Exports the memory timeline as an HTML file which contains
1137
+ the memory timeline plot embedded as a PNG file."""
1138
+ # Check if user has matplotlib installed, return gracefully if not.
1139
+ import importlib.util
1140
+
1141
+ matplotlib_spec = importlib.util.find_spec("matplotlib")
1142
+ if matplotlib_spec is None:
1143
+ print(
1144
+ "export_memory_timeline_html failed because matplotlib was not found."
1145
+ )
1146
+ return
1147
+
1148
+ from base64 import b64encode
1149
+ from os import remove
1150
+ from tempfile import NamedTemporaryFile
1151
+
1152
+ import matplotlib.pyplot as plt
1153
+ import numpy as np
1154
+
1155
+ mt = self._coalesce_timeline(device)
1156
+ times, sizes = np.array(mt[0]), np.array(mt[1])
1157
+ # For this timeline, start at 0 to match Chrome traces.
1158
+ t_min = min(times)
1159
+ times -= t_min
1160
+ stacked = np.cumsum(sizes, axis=1) / 1024**3
1161
+ max_memory_allocated = torch.cuda.max_memory_allocated()
1162
+ max_memory_reserved = torch.cuda.max_memory_reserved()
1163
+
1164
+ # Plot memory timeline as stacked data
1165
+ fig = plt.figure(figsize=figsize, dpi=80)
1166
+ axes = fig.gca()
1167
+ for category, color in _CATEGORY_TO_COLORS.items():
1168
+ i = _CATEGORY_TO_INDEX[category]
1169
+ axes.fill_between(
1170
+ times / 1e3, stacked[:, i], stacked[:, i + 1], color=color, alpha=0.7
1171
+ )
1172
+ fig.legend(["Unknown" if i is None else i.name for i in _CATEGORY_TO_COLORS])
1173
+ # Usually training steps are in magnitude of ms.
1174
+ axes.set_xlabel("Time (ms)")
1175
+ axes.set_ylabel("Memory (GB)")
1176
+ title = "\n\n".join(
1177
+ ([title] if title else [])
1178
+ + [
1179
+ f"Max memory allocated: {max_memory_allocated/(10**9):.2f} GB \n"
1180
+ f"Max memory reserved: {max_memory_reserved/(10**9):.2f} GB"
1181
+ ]
1182
+ )
1183
+ axes.set_title(title)
1184
+
1185
+ # Embed the memory timeline image into the HTML file
1186
+ tmpfile = NamedTemporaryFile("wb", suffix=".png", delete=False)
1187
+ tmpfile.close()
1188
+ fig.savefig(tmpfile.name, format="png")
1189
+
1190
+ with open(tmpfile.name, "rb") as tmp:
1191
+ encoded = b64encode(tmp.read()).decode("utf-8")
1192
+ html = f"""<html>
1193
+ <head><meta charset="utf-8" /><title>GPU Memory Timeline HTML</title></head>
1194
+ <body>
1195
+ <img src='data:image/png;base64,{encoded}'>
1196
+ </body>
1197
+ </html>"""
1198
+
1199
+ with open(path, "w") as f:
1200
+ f.write(html)
1201
+ remove(tmpfile.name)
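As a usage sketch (not part of this file), the classes above are typically driven through the public profiler API. The `export_memory_timeline` call on the profile object and the toy model below are assumptions for illustration; the flags `profile_memory`, `record_shapes`, and `with_stack` are what feed MemoryProfile with allocation, size, and scope information.

```
# Sketch, assuming a CUDA build and a recent PyTorch release.
import torch
from torch.profiler import ProfilerActivity, profile

model = torch.nn.Linear(128, 128).cuda()
inputs = torch.randn(64, 128, device="cuda")

with profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    profile_memory=True,
    record_shapes=True,
    with_stack=True,
) as prof:
    model(inputs).sum().backward()

# Builds a MemoryProfile/MemoryProfileTimeline internally and writes the HTML plot.
prof.export_memory_timeline("memory_timeline.html", device="cuda:0")
```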
env-llmeval/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py ADDED
@@ -0,0 +1,662 @@
 
1
+ import json
2
+ import math
3
+ import os
4
+ import re
5
+ from typing import Dict, List, Optional, Set
6
+
7
+ import torch
8
+ import torch.utils.benchmark as benchmark
9
+ from torch._C._profiler import (
10
+ _EventType,
11
+ _ExtraFields_PyCall,
12
+ _ExtraFields_PyCCall,
13
+ _ExtraFields_TorchOp,
14
+ _ProfilerEvent,
15
+ )
16
+ from torch.profiler import profile
17
+ from torch.profiler._utils import index_of_first_match, traverse_bfs, traverse_dfs
18
+
19
+
20
+ class Pattern:
21
+ """
22
+ Base class for all patterns, subclass this class and implement match()
23
+ to define custom patterns.
24
+
25
+ In subclass, define description and skip property.
26
+ """
27
+
28
+ def __init__(self, prof: profile, should_benchmark: bool = False):
29
+ self.prof = prof
30
+ self.should_benchmark = should_benchmark
31
+ self.name = "Please specify a name for pattern"
32
+ self.description = "Please specify a description for pattern"
33
+ self.url = ""
34
+ assert prof.profiler is not None and prof.profiler.kineto_results is not None
35
+ self.event_tree = prof.profiler.kineto_results.experimental_event_tree()
36
+ self.tid_root: Dict[int, List[_ProfilerEvent]] = {}
37
+ for event in self.event_tree:
38
+ self.tid_root.setdefault(event.start_tid, []).append(event)
39
+
40
+ @property
41
+ def skip(self):
42
+ return False
43
+
44
+ def report(self, event: _ProfilerEvent):
45
+ msg = (
46
+ f"{self.description}\n[Source Code Location] {source_code_location(event)}"
47
+ )
48
+ return msg
49
+
50
+ def eventTreeTraversal(self):
51
+ """
52
+ Traverse the event tree and yield all events.
53
+ Override this method in subclass to customize the traversal.
54
+ """
55
+ yield from traverse_dfs(self.event_tree)
56
+
57
+ def summary(self, events: List[_ProfilerEvent]):
58
+ default_summary = f"{self.name}: {len(events)} events matched."
59
+ if self.should_benchmark:
60
+ # If benchmark summary is not empty, use it.
61
+ return (
62
+ self.benchmark_summary(events)
63
+ if hasattr(self, "benchmark") # type: ignore[attr-defined]
64
+ else default_summary
65
+ )
66
+ return default_summary
67
+
68
+ def benchmark_summary(self, events: List[_ProfilerEvent]):
69
+ def format_time(time_ns: int):
70
+ unit_lst = ["ns", "us", "ms"]
71
+ for unit in unit_lst:
72
+ if time_ns < 1000:
73
+ return f"{time_ns:.2f} {unit}"
74
+ time_ns //= 1000
75
+ return f"{time_ns:.2f} s"
76
+
77
+ assert hasattr(self, "benchmark"), "Please implement benchmark()"
78
+ shapes_factor_map = self.benchmark(events) # type: ignore[attr-defined]
79
+ original_time = sum(event.duration_time_ns for event in events)
80
+ new_time = sum(
81
+ shapes_factor_map[input_shapes(event)] * event.duration_time_ns
82
+ for event in events
83
+ )
84
+ return (
85
+ f"{self.name}: {len(events)} events matched. "
86
+ f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
87
+ )
88
+
89
+ def match(self, event: _ProfilerEvent):
90
+ """
91
+ Return True if the event matches the pattern.
92
+ This method should be overridden in a subclass.
93
+ """
94
+ raise NotImplementedError
95
+
96
+ def matched_events(self):
97
+ if self.skip:
98
+ return []
99
+ matched_events = []
100
+ for event in self.eventTreeTraversal():
101
+ if self.match(event):
102
+ matched_events.append(event)
103
+ return matched_events
104
+
105
+ def root_of(self, event: _ProfilerEvent):
106
+ while event.parent:
107
+ event = event.parent
108
+ return event
109
+
110
+ def siblings_of(self, event: _ProfilerEvent):
111
+ if event.parent:
112
+ children = event.parent.children
113
+ else:
114
+ children = self.tid_root[event.start_tid]
115
+ index = children.index(event)
116
+ return children[:index], children[index + 1 :]
117
+
118
+ def next_of(self, event: _ProfilerEvent):
119
+ _, next_events = self.siblings_of(event)
120
+ return next_events[0] if next_events else None
121
+
122
+ def prev_of(self, event: _ProfilerEvent):
123
+ prev_events, _ = self.siblings_of(event)
124
+ return prev_events[-1] if prev_events else None
125
+
126
+ def go_up_until(self, event: _ProfilerEvent, predicate):
127
+ if not event:
128
+ return None
129
+ while event.parent and not predicate(event):
130
+ event = event.parent
131
+ return event
132
+
133
+
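A minimal sketch of the subclassing contract described in the Pattern docstring above; the pattern name, description, and matched op are invented for illustration and assume the definitions earlier in this module.

```
# Sketch only: a custom pattern that flags every aten::nonzero call.
class ExampleNonzeroPattern(Pattern):
    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Example Nonzero Pattern"
        self.description = "Illustrative only: flags aten::nonzero calls."

    def match(self, event: _ProfilerEvent):
        return event.name == "aten::nonzero"

# matched = ExampleNonzeroPattern(prof).matched_events()
```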
134
+ # Patterns
135
+
136
+
137
+ class NamePattern(Pattern):
138
+ def __init__(self, prof: profile, name: str, should_benchmark: bool = False):
139
+ super().__init__(prof, should_benchmark)
140
+ self.description = f"Matched Name Event: {name}"
141
+ self.name = name
142
+
143
+ def match(self, event: _ProfilerEvent):
144
+ return re.search(self.name, event.name) is not None
145
+
146
+
147
+ class ExtraCUDACopyPattern(Pattern):
148
+ """
149
+ This pattern identifies if we create a constant tensor on CPU and immediately move it to GPU.
150
+ example: torch.zeros((100, 100)).to("cuda")
151
+
152
+ Pattern:
153
+ built-in method | built-in method
154
+ ... | aten::to
155
+ aten::fill_/aten::zero_ | aten::_to_copy
156
+
157
+ Algorithm:
158
+ We start at the aten::to node, go to the parent event's previous sibling,
159
+ and check if we find an aten::fill_/aten::zero_ as we keep going down the tree.
160
+ We always select the last child in the children list when we go down the tree.
161
+ If at any step we failed, it is not a match.
162
+ """
163
+
164
+ def __init__(self, prof: profile, should_benchmark: bool = False):
165
+ super().__init__(prof, should_benchmark)
166
+ self.name = "Extra CUDA Copy Pattern"
167
+ self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU."
168
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device"
169
+ self.init_ops = {
170
+ "aten::fill_",
171
+ "aten::zero_",
172
+ "aten::normal_",
173
+ "aten::uniform_",
174
+ }
175
+
176
+ @property
177
+ def skip(self):
178
+ return not self.prof.with_stack or not self.prof.record_shapes
179
+
180
+ def match(self, event):
181
+ # TODO: We should also check tensor identities
182
+ if event.name != "aten::to":
183
+ return False
184
+ to_event = event
185
+ if not event.children:
186
+ return False
187
+ event = event.children[-1]
188
+ if event.name != "aten::_to_copy":
189
+ return False
190
+ if not event.children:
191
+ return False
192
+ event = event.children[-1]
193
+ if event.name != "aten::copy_":
194
+ return False
195
+ # aten::copy_ should have the first 2 args dtype the same
196
+ dtypes = input_dtypes(event)
197
+ if len(dtypes) < 2:
198
+ return False
199
+ if dtypes[0] is None or dtypes[0] != dtypes[1]:
200
+ return False
201
+ event = to_event
202
+ # Up one level
203
+ event = event.parent
204
+ if event is None:
205
+ return False
206
+ # Check if we have a aten::fill_ in previous leaf
207
+ event = self.prev_of(event)
208
+ if event is None:
209
+ return False
210
+ while event.children:
211
+ event = event.children[-1]
212
+ # aten::zero_ is a special optimization case where fill_ is not called
213
+ if event.name in self.init_ops:
214
+ return True
215
+ return event.name in self.init_ops
216
+ # TODO: Check if tensor is reused
217
+
218
+ def benchmark(self, events: List[_ProfilerEvent]):
219
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
220
+ for shape in shapes_factor_map:
221
+ size = shape[0]
222
+ to_timer = benchmark.Timer(
223
+ stmt='torch.ones(size).to("cuda")', globals={"size": size}
224
+ )
225
+ de_timer = benchmark.Timer(
226
+ stmt='torch.ones(size, device="cuda")', globals={"size": size}
227
+ )
228
+ to_time = to_timer.timeit(10).mean
229
+ de_time = de_timer.timeit(10).mean
230
+ shapes_factor_map[shape] = de_time / to_time
231
+ return shapes_factor_map
232
+
233
+
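For reference, a small sketch of the code shape ExtraCUDACopyPattern flags and the rewrite its description suggests (a CUDA device is assumed to be available):

```
import torch

# Flagged: the tensor is filled on CPU and then copied to the GPU.
x = torch.zeros((100, 100)).to("cuda")

# Preferred: allocate and fill directly on the target device.
y = torch.zeros((100, 100), device="cuda")
```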
234
+ class ForLoopIndexingPattern(Pattern):
235
+ """
236
+ This pattern identifies if we use a for loop to index a tensor that
237
+ can be vectorized.
238
+ example:
239
+ tensor = torch.empty((100, 100))
240
+ for i in range(100):
241
+ tensor[i] = i
242
+
243
+ Pattern:
244
+ aten::select | ... | aten::select | ... (Repeat)
245
+
246
+ Algorithm:
247
+ We start at node aten::select and check if we can find this alternating pattern.
248
+ We also keep a set of visited events to avoid duplicate matches in the for loop.
249
+ """
250
+
251
+ def __init__(self, prof: profile, should_benchmark: bool = False):
252
+ super().__init__(prof, should_benchmark)
253
+ self.name = "For Loop Indexing Pattern"
254
+ self.description = "For loop indexing detected. Vectorization recommended."
255
+ self.visited: Set[int] = set()
256
+
257
+ def eventTreeTraversal(self):
258
+ """
259
+ We need to use BFS traversal order to avoid duplicate match.
260
+ """
261
+ yield from traverse_bfs(self.event_tree)
262
+
263
+ def match(self, event: _ProfilerEvent):
264
+ if event.name != "aten::select":
265
+ return False
266
+ if event.id in self.visited:
267
+ return False
268
+ repeat_count = 1
269
+ _, next = self.siblings_of(event)
270
+ if len(next) <= 1:
271
+ return False
272
+
273
+ # Custom event list matching
274
+ def same_ops(list1, list2):
275
+ if len(list1) != len(list2):
276
+ return False
277
+ for op1, op2 in zip(list1, list2):
278
+ if op1.name != op2.name:
279
+ return False
280
+ return True
281
+
282
+ # Record the ops between two aten::select
283
+ next_select_idx = index_of_first_match(next, lambda e: e.name == "aten::select")
284
+ if next_select_idx is None:
285
+ return False
286
+ indexing_ops = [event] + next[:next_select_idx]
287
+ next = next[len(indexing_ops) - 1 :]
288
+ for i in range(0, len(next), len(indexing_ops)):
289
+ if same_ops(indexing_ops, next[i : i + len(indexing_ops)]):
290
+ repeat_count += 1
291
+ self.visited.add(next[i].id)
292
+ else:
293
+ break
294
+ return repeat_count >= 10
295
+
296
+
297
+ class FP32MatMulPattern(Pattern):
298
+ def __init__(self, prof: profile, should_benchmark: bool = False):
299
+ super().__init__(prof, should_benchmark)
300
+ self.name = "FP32 MatMul Pattern"
301
+ self.description = (
302
+ "You are currently using GPU that supports TF32. "
303
+ "Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'"
304
+ )
305
+ self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
306
+
307
+ @property
308
+ def skip(self):
309
+ if torch.version.hip is not None:
310
+ has_tf32 = False
311
+ else:
312
+ # Anything below sm_80 is pre-Ampere and does not support TF32
313
+ has_tf32 = all(int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list())
314
+ return has_tf32 is False or super().skip or not self.prof.record_shapes
315
+
316
+ def match(self, event: _ProfilerEvent):
317
+ # If we saw this pattern once, we don't need to match it again
318
+ if event.tag != _EventType.TorchOp:
319
+ return False
320
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
321
+ if event.name == "aten::mm":
322
+ if event.extra_fields.allow_tf32_cublas is False:
323
+ return True
324
+ return False
325
+
326
+ def report(self, event: _ProfilerEvent):
327
+ return self.description
328
+
329
+ def benchmark(self, events: List[_ProfilerEvent]):
330
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
331
+ for shape in shapes_factor_map:
332
+ matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32)
333
+ matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32)
334
+ fp32_timer = benchmark.Timer(
335
+ stmt="torch.mm(matrixA, matrixB)",
336
+ globals={"matrixA": matrixA, "matrixB": matrixB},
337
+ )
338
+ tf32_timer = benchmark.Timer(
339
+ stmt="torch.mm(matrixA, matrixB)",
340
+ setup="torch.backends.cuda.matmul.allow_tf32 = True",
341
+ globals={"matrixA": matrixA, "matrixB": matrixB},
342
+ )
343
+ torch.backends.cuda.matmul.allow_tf32 = False
344
+ fp32_time = fp32_timer.timeit(10).mean
345
+ tf32_time = tf32_timer.timeit(10).mean
346
+ shapes_factor_map[shape] = tf32_time / fp32_time
347
+ return shapes_factor_map
348
+
349
+
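The change FP32MatMulPattern recommends is a single setting, shown below as a sketch; it trades a small amount of float32 matmul precision for speed on Ampere-class and newer GPUs.

```
import torch

# Allow TF32 tensor cores for float32 matmuls (the setting named in the description above).
torch.backends.cuda.matmul.allow_tf32 = True
```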
350
+ class OptimizerSingleTensorPattern(Pattern):
351
+ """
352
+ This pattern identifies if we are using the single-tensor version of an optimizer.
353
+ example:
354
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
355
+ By adding foreach=True to enable the multi-tensor implementation, we can gain a speedup when
356
+ the kernels are relatively small.
357
+
358
+ Pattern:
359
+ XXXXX: _single_tensor_<OPTIMIZER_NAME>
360
+
361
+ Algorithm:
362
+ String match
363
+ """
364
+
365
+ def __init__(self, prof: profile, should_benchmark: bool = False):
366
+ super().__init__(prof, should_benchmark)
367
+ self.name = "Optimizer Single Tensor Pattern"
368
+ self.optimizers_with_foreach = ["adam", "sgd", "adamw"]
369
+ self.description = (
370
+ "Detected optimizer running with single tensor implementation. "
371
+ "Please enable multi tensor implementation by passing 'foreach=True' into optimizer."
372
+ )
373
+ self.url = ""
374
+
375
+ def match(self, event: _ProfilerEvent):
376
+ for optimizer in self.optimizers_with_foreach:
377
+ if event.name.endswith(f"_single_tensor_{optimizer}"):
378
+ return True
379
+ return False
380
+
381
+
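A sketch of the multi-tensor opt-in this pattern recommends; the model here is a placeholder, and foreach support is assumed for the chosen optimizer.

```
import torch

model = torch.nn.Linear(8, 8)
# foreach=True selects the multi-tensor implementation, batching per-parameter updates.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, foreach=True)
```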
382
+ class SynchronizedDataLoaderPattern(Pattern):
383
+ """
384
+ This pattern identifies if we are using num_workers=0 in DataLoader.
385
+ example:
386
+ torch.utils.data.DataLoader(dataset, batch_size=batch_size)
387
+ Add num_workers=N to the arguments. N depends on system configuration.
388
+
389
+ Pattern:
390
+ dataloader.py(...): __iter__
391
+ dataloader.py(...): _get_iterator
392
+ NOT dataloader.py(...): check_worker_number_rationality
393
+
394
+ Algorithm:
395
+ If we don't see a check_worker_number_rationality call in the dataloader __iter__,
396
+ it is not an asynchronous dataloader.
397
+
398
+ """
399
+
400
+ def __init__(self, prof: profile, should_benchmark: bool = False):
401
+ super().__init__(prof, should_benchmark)
402
+ self.name = "Synchronized DataLoader Pattern"
403
+ self.description = (
404
+ "Detected DataLoader running with synchronized implementation. "
405
+ "Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader."
406
+ )
407
+ self.url = (
408
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
409
+ "#enable-async-data-loading-and-augmentation"
410
+ )
411
+
412
+ def match(self, event: _ProfilerEvent):
413
+ def is_dataloader_function(name: str, function_name: str):
414
+ return name.startswith(
415
+ os.path.join("torch", "utils", "data", "dataloader.py")
416
+ ) and name.endswith(function_name)
417
+
418
+ # TODO: fixme! Due to lifetime issues of the function name, this field might
419
+ # actually point to an already freed string when the event is a PyCall.
420
+ # Just silently skip this to unblock testing.
421
+ try:
422
+ event.name
423
+ except UnicodeDecodeError:
424
+ return False
425
+
426
+ if not is_dataloader_function(event.name, "__iter__"):
427
+ return False
428
+ if not event.children:
429
+ return False
430
+ event = event.children[0]
431
+ if not is_dataloader_function(event.name, "_get_iterator"):
432
+ return False
433
+ if not event.children:
434
+ return False
435
+ event = event.children[0]
436
+ return not is_dataloader_function(event.name, "check_worker_number_rationality")
437
+ # TODO: We should also check if the loader is bottleneck.
438
+
439
+
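A sketch of the asynchronous-loading fix suggested above; the dataset is a stand-in and the worker count should be tuned per system.

```
import torch

dataset = torch.utils.data.TensorDataset(torch.randn(1024, 16))
# num_workers > 0 moves batch loading into background worker processes.
loader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=4)
```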
440
+ class GradNotSetToNonePattern(Pattern):
441
+ """
442
+ This pattern identifies if we are not setting grad to None in zero_grad.
443
+ example:
444
+ optimizer.zero_grad()
445
+ By setting set_to_none=True, we can gain a speedup.
446
+
447
+ Pattern:
448
+ XXXXX: _zero_grad
449
+ NOT aten::zeros
450
+ aten::zero_
451
+
452
+ aten::zero_ is called on each parameter in the model.
453
+ We also want to make sure it is not called by aten::zeros.
454
+
455
+ Algorithm:
456
+ String match
457
+ """
458
+
459
+ def __init__(self, prof: profile, should_benchmark: bool = False):
460
+ super().__init__(prof, should_benchmark)
461
+ self.name = "Gradient Set To Zero Instead of None Pattern"
462
+ self.description = (
463
+ "Detected gradient set to zero instead of None. "
464
+ "Please add 'set_to_none=True' when calling zero_grad()."
465
+ )
466
+ self.url = (
467
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
468
+ "#disable-gradient-calculation-for-validation-or-inference"
469
+ )
470
+
471
+ def match(self, event: _ProfilerEvent):
472
+ if not event.name.endswith(": zero_grad"):
473
+ return False
474
+ if not event.children:
475
+ return False
476
+
477
+ for sub_event in traverse_dfs(event.children):
478
+ if (
479
+ sub_event.name == "aten::zero_"
480
+ and sub_event.parent.name != "aten::zeros"
481
+ ):
482
+ return True
483
+ # TODO: We should also check if the optimizer's numerical behavior will change.
484
+ return False
485
+
486
+
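A sketch of the call this pattern recommends; on recent PyTorch releases set_to_none=True is already the default, so the explicit argument mainly matters on older versions.

```
import torch

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# Setting gradients to None skips the per-parameter zero-fill kernels this pattern detects.
optimizer.zero_grad(set_to_none=True)
```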
487
+ class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):
488
+ """
489
+ This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d.
490
+ Bias doesn't do anything when followed by batchnorm.
491
+ Pattern:
492
+ nn.Module: Conv2d | nn.Module: BatchNorm2d
493
+ ...
494
+ aten::conv2d AND dtype of third argument is not null
495
+ The third argument is the bias
496
+ Algorithm:
497
+ String match
498
+ """
499
+
500
+ def __init__(self, prof: profile, should_benchmark: bool = False):
501
+ super().__init__(prof, should_benchmark)
502
+ self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern"
503
+ self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d."
504
+ self.url = (
505
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
506
+ "#disable-bias-for-convolutions-directly-followed-by-a-batch-norm"
507
+ )
508
+
509
+ @property
510
+ def skip(self):
511
+ return self.prof.record_shapes is False or super().skip
512
+
513
+ def match(self, event: _ProfilerEvent):
514
+ if event.name != "aten::conv2d":
515
+ return False
516
+ if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None:
517
+ return False
518
+ # This means bias=True
519
+ event = self.go_up_until(
520
+ event, lambda e: e.name.startswith("nn.Module: Conv2d")
521
+ )
522
+ if not event:
523
+ return False
524
+ event = self.next_of(event)
525
+ if not event:
526
+ return False
527
+ return event.name.startswith("nn.Module: BatchNorm2d")
528
+
529
+
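A sketch of the module arrangement this pattern flags and the suggested fix; channel counts and kernel size are arbitrary.

```
import torch.nn as nn

# Flagged: the Conv2d bias is redundant because BatchNorm2d immediately re-centers the output.
flagged = nn.Sequential(nn.Conv2d(3, 16, 3, bias=True), nn.BatchNorm2d(16))

# Preferred:
fixed = nn.Sequential(nn.Conv2d(3, 16, 3, bias=False), nn.BatchNorm2d(16))
```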
530
+ class MatMulDimInFP16Pattern(Pattern):
531
+ def __init__(self, prof: profile, should_benchmark: bool = False):
532
+ super().__init__(prof, should_benchmark)
533
+ self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
534
+ self.description = "Detected matmul with dimensions not aligned. Please use matmuls with aligned dimensions."
535
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"
536
+
537
+ @property
538
+ def skip(self):
539
+ return not self.prof.with_stack or not self.prof.record_shapes
540
+
541
+ def match(self, event: _ProfilerEvent):
542
+ def multiple_of(shapes, multiple):
543
+ return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])
544
+
545
+ if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"):
546
+ return False
547
+ if not input_dtypes(event):
548
+ return False
549
+ arg_dtype = input_dtypes(event)[0]
550
+ if arg_dtype in (torch.bfloat16, torch.half) and not multiple_of(
551
+ input_shapes(event), 8
552
+ ):
553
+ return True
554
+ return False
555
+
556
+ def benchmark(self, events: List[_ProfilerEvent]):
557
+ def closest_multiple(shapes, multiple):
558
+ return [multiple * math.ceil(shape / multiple) for shape in shapes]
559
+
560
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
561
+ for shape in shapes_factor_map:
562
+ matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16)
563
+ matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16)
564
+ not_aligned_dim_timer = benchmark.Timer(
565
+ stmt="torch.mm(matrixA, matrixB)",
566
+ globals={"matrixA": matrixA, "matrixB": matrixB},
567
+ )
568
+ matrixA = torch.randn(
569
+ closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16
570
+ )
571
+ matrixB = torch.randn(
572
+ closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16
573
+ )
574
+ aligned_dim_timer = benchmark.Timer(
575
+ stmt="torch.mm(matrixA, matrixB)",
576
+ globals={"matrixA": matrixA, "matrixB": matrixB},
577
+ )
578
+ not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean
579
+ aligned_dim_time = aligned_dim_timer.timeit(10).mean
580
+ shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time
581
+ return shapes_factor_map
582
+
583
+
584
+ def source_code_location(event: Optional[_ProfilerEvent]):
585
+ while event:
586
+ if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
587
+ assert isinstance(
588
+ event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)
589
+ )
590
+ if not event.extra_fields.caller.file_name.startswith("torch" + os.sep):
591
+ return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}"
592
+ event = event.parent
593
+ return "No source code location found"
594
+
595
+
596
+ def input_shapes(event: _ProfilerEvent):
597
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
598
+ return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs)
599
+
600
+
601
+ def input_dtypes(event: _ProfilerEvent):
602
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
603
+ return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs)
604
+
605
+
606
+ def report_all_anti_patterns(
607
+ prof,
608
+ should_benchmark: bool = False,
609
+ print_enable: bool = True,
610
+ json_report_dir: Optional[str] = None,
611
+ ):
612
+ report_dict: Dict = {}
613
+ anti_patterns = [
614
+ ExtraCUDACopyPattern(prof, should_benchmark),
615
+ # ForLoopIndexingPattern(prof, should_benchmark),
616
+ FP32MatMulPattern(prof, should_benchmark),
617
+ OptimizerSingleTensorPattern(prof, should_benchmark),
618
+ SynchronizedDataLoaderPattern(prof, should_benchmark),
619
+ GradNotSetToNonePattern(prof, should_benchmark),
620
+ Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark),
621
+ MatMulDimInFP16Pattern(prof, should_benchmark),
622
+ ]
623
+ reported = set()
624
+ summaries = []
625
+ message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
626
+ message_list.append("Matched Events:")
627
+
628
+ for anti_pattern in anti_patterns:
629
+ matched_events = anti_pattern.matched_events()
630
+ if not matched_events:
631
+ continue
632
+ summaries.append(anti_pattern.summary(matched_events))
633
+ for event in matched_events:
634
+ report_msg = anti_pattern.report(event)
635
+ if report_msg not in reported:
636
+ message_list.append(report_msg)
637
+ reported.add(report_msg)
638
+ src_location, line_no = source_code_location(event).split(":")
639
+ report_dict.setdefault(src_location, []).append(
640
+ {
641
+ "line_number": int(line_no),
642
+ "name": anti_pattern.name,
643
+ "url": anti_pattern.url,
644
+ "message": anti_pattern.description,
645
+ }
646
+ )
647
+
648
+ if json_report_dir is not None:
649
+ json_report_path = os.path.join(json_report_dir, "torchtidy_report.json")
650
+ if os.path.exists(json_report_path):
651
+ with open(json_report_path) as f:
652
+ existing_report = json.load(f)
653
+ existing_report.update(report_dict)
654
+ report_dict = existing_report
655
+ with open(json_report_path, "w") as f:
656
+ json.dump(report_dict, f, indent=4)
657
+
658
+ message_list.append("Summary:")
659
+ message_list += summaries
660
+ message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
661
+ if print_enable:
662
+ print("\n".join(message_list))
env-llmeval/lib/python3.10/site-packages/torch/profiler/_utils.py ADDED
@@ -0,0 +1,373 @@
1
+ import functools
2
+ import re
3
+ from collections import deque
4
+ from dataclasses import dataclass
5
+ from typing import Dict, List
6
+
7
+ from torch.autograd import _KinetoEvent
8
+ from torch.autograd.profiler import profile
9
+
10
+ from torch.profiler import DeviceType
11
+
12
+
13
+ def _traverse(tree, next_fn, children_fn=lambda x: x.children, reverse: bool = False):
14
+ order = reversed if reverse else lambda x: x
15
+ remaining = deque(order(tree))
16
+ while remaining:
17
+ curr_event = next_fn(remaining)
18
+ yield curr_event
19
+ for child_event in order(children_fn(curr_event)):
20
+ remaining.append(child_event)
21
+
22
+
23
+ traverse_dfs = functools.partial(_traverse, next_fn=lambda x: x.pop(), reverse=True)
24
+ traverse_bfs = functools.partial(
25
+ _traverse, next_fn=lambda x: x.popleft(), reverse=False
26
+ )
27
+
28
+
29
+ @dataclass
30
+ class EventMetrics:
31
+ duration_time_ns: int = 0
32
+ self_time_ns: int = 0
33
+ idle_time_ns: int = 0
34
+ queue_depth: int = 0
35
+
36
+ @property
37
+ def fraction_idle_time(self):
38
+ if self.duration_time_ns == 0:
39
+ return 0.0
40
+ return self.idle_time_ns / self.duration_time_ns
41
+
42
+
43
+ @dataclass
44
+ class Interval:
45
+ start: int
46
+ end: int
47
+ queue_depth: int = 0
48
+
49
+
50
+ class EventKey:
51
+ def __init__(self, event):
52
+ self.event = event
53
+
54
+ def __hash__(self):
55
+ return hash(self.event.id)
56
+
57
+ def __eq__(self, other):
58
+ return self.event.id == other.event.id
59
+
60
+ def __repr__(self):
61
+ return f"{self.event.name}"
62
+
63
+ def intervals_overlap(self, intervals: List[Interval]):
64
+ overlap_time = 0
65
+ intervals = sorted(intervals, key=lambda x: x.start)
66
+
67
+ if intervals:
68
+ overlap_start = max(self.event.start_time_ns, intervals[0].start)
69
+ overlap_end = min(self.event.end_time_ns, intervals[0].end)
70
+
71
+ if overlap_start < overlap_end:
72
+ overlap_time += overlap_end - overlap_start
73
+
74
+ i, j = 0, 1
75
+ while j < len(intervals):
76
+ prev_interval = intervals[i]
77
+ curr_interval = intervals[j]
78
+ j += 1
79
+ if prev_interval.end > curr_interval.start:
80
+ # Completely subsumed by previous interval
81
+ if prev_interval.end > curr_interval.end:
82
+ j += 1
83
+ continue
84
+ else:
85
+ curr_interval.start = prev_interval.end
86
+ i = j
87
+
88
+ overlap_start = max(self.event.start_time_ns, curr_interval.start)
89
+ overlap_end = min(self.event.end_time_ns, curr_interval.end)
90
+ if overlap_start < overlap_end:
91
+ overlap_time += overlap_end - overlap_start
92
+
93
+ return overlap_time
94
+
95
+
96
+ class BasicEvaluation:
97
+ def __init__(self, prof: profile):
98
+ self.profile = prof
99
+ self.metrics: Dict[EventKey, EventMetrics] = {}
100
+ self.compute_self_time()
101
+ self.event_keys = sorted(
102
+ (e for e in self.metrics.keys()), key=lambda x: x.event.start_time_ns
103
+ )
104
+ self.events = [e.event for e in self.event_keys]
105
+ self.cuda_events: List[_KinetoEvent] = []
106
+ self.queue_depth_list = self.compute_queue_depth()
107
+ self.compute_idle_time()
108
+
109
+ def compute_self_time(self):
110
+ """
111
+ Computes each event's self time (total time minus time spent in child ops).
112
+ """
113
+ assert self.profile.kineto_results is not None
114
+ stack = deque(self.profile.kineto_results.experimental_event_tree())
115
+
116
+ # standard iterating dfs
117
+ while stack:
118
+ curr_event = stack.pop()
119
+ self_time = curr_event.duration_time_ns
120
+ for child_event in curr_event.children:
121
+ self_time -= child_event.duration_time_ns
122
+ stack.append(child_event)
123
+ assert (
124
+ EventKey(curr_event) not in self.metrics
125
+ ), f"Duplicate id: {curr_event.id}, {curr_event.name}"
126
+ self.metrics[EventKey(curr_event)] = EventMetrics(self_time_ns=self_time)
127
+ self.metrics[
128
+ EventKey(curr_event)
129
+ ].duration_time_ns = curr_event.duration_time_ns
130
+
131
+ def compute_queue_depth(self):
132
+ """
133
+ Computes queue_depth at each event. This will calculate the queue depth data for
134
+ all the events in the tree.
135
+ It returns a list of Interval objects holding queue depth data for CUDA launches and kernels.
136
+ """
137
+ assert self.profile.kineto_results is not None
138
+ cuda_event_list = self.profile.kineto_results.events()
139
+
140
+ def is_cuda_launch_kernel(e):
141
+ # TODO: find a better way to identify cudaLaunchKernel
142
+ return e.name == "cudaLaunchKernel"
143
+
144
+ def is_cuda_kernel(e):
145
+ # TODO: find a better way to identify CUDA Kernel
146
+ return e.device_type() == DeviceType.CUDA and "mem" not in e.name.lower()
147
+
148
+ cuda_launch_events = sorted(
149
+ (e for e in cuda_event_list if is_cuda_launch_kernel(e)),
150
+ key=lambda x: x.start_us(),
151
+ )
152
+ cuda_kernel_events = sorted(
153
+ (e for e in cuda_event_list if is_cuda_kernel(e)),
154
+ key=lambda x: x.start_us(),
155
+ )
156
+
157
+ self.cuda_events = sorted(
158
+ cuda_launch_events + cuda_kernel_events, key=lambda x: x.start_us()
159
+ )
160
+
161
+ kernel_mapping: Dict[_KinetoEvent, int] = {}
162
+ last_mapped_kernel = 0
163
+ for cuda_launch_event in cuda_launch_events:
164
+ index = index_of_first_match(
165
+ cuda_kernel_events,
166
+ lambda x: x.linked_correlation_id()
167
+ == cuda_launch_event.linked_correlation_id(),
168
+ start=last_mapped_kernel,
169
+ )
170
+ kernel_mapping[cuda_launch_event] = index
171
+ last_mapped_kernel = index if index is not None else last_mapped_kernel
172
+
173
+ current_kernel_index = 0
174
+ spawned_kernel_index = -1
175
+
176
+ all_events = cuda_launch_events + cuda_kernel_events + self.events
177
+
178
+ def new_old_event_comparator(event):
179
+ if hasattr(event, "start_us"):
180
+ return event.start_us() * 1000
181
+ if hasattr(event, "start_time_ns"):
182
+ return event.start_time_ns
183
+ raise Exception("Unknown Event Type")
184
+
185
+ queue_depth_list: List[Interval] = []
186
+ all_events.sort(key=new_old_event_comparator)
187
+ for event in all_events:
188
+ # Find latest cuda kernel event
189
+ if hasattr(event, "start_us"):
190
+ start_time = event.start_us() * 1000
191
+ end_time = (event.start_us() + event.duration_us()) * 1000
192
+ # Find current spawned cuda kernel event
193
+ if event in kernel_mapping and kernel_mapping[event] is not None:
194
+ spawned_kernel_index = kernel_mapping[event]
195
+ elif hasattr(event, "start_time_ns"):
196
+ start_time = event.start_time_ns # type: ignore[attr-defined]
197
+ end_time = event.end_time_ns # type: ignore[attr-defined]
198
+
199
+ while (
200
+ current_kernel_index < len(cuda_kernel_events)
201
+ and (cuda_kernel_events[current_kernel_index].start_us()) * 1000
202
+ <= start_time
203
+ ):
204
+ current_kernel_index += 1
205
+ current_queue_depth = spawned_kernel_index - current_kernel_index + 1
206
+ current_queue_depth = max(current_queue_depth, 0)
207
+
208
+ if hasattr(event, "start_us"):
209
+ queue_depth_list.append(
210
+ Interval(start_time, end_time, current_queue_depth)
211
+ )
212
+ elif hasattr(event, "start_time_ns"):
213
+ self.metrics[EventKey(event)].queue_depth = current_queue_depth
214
+
215
+ return queue_depth_list
216
+
217
+ def compute_idle_time(self):
218
+ """
219
+ Computes idle time of the profile.
220
+ """
221
+ # Based on queue_depth_list, we can calculate idle time for all the events
222
+ idle = False
223
+ idle_start = 0
224
+ idle_intervals: List[Interval] = []
225
+ if self.queue_depth_list and self.events:
226
+ idle_intervals += [
227
+ Interval(self.events[0].start_time_ns, self.queue_depth_list[0].start),
228
+ Interval(self.queue_depth_list[-1].end, self.events[-1].end_time_ns),
229
+ ]
230
+
231
+ for data_point in self.queue_depth_list:
232
+ if data_point.queue_depth == 0 and not idle:
233
+ idle_start = data_point.end
234
+ idle = True
235
+ if data_point.queue_depth > 0 and idle:
236
+ idle_intervals.append(Interval(idle_start, data_point.start))
237
+ idle = False
238
+
239
+ event_list = [e.event for e in self.metrics.keys()]
240
+ for event in event_list:
241
+ self.metrics[EventKey(event)].idle_time_ns = EventKey(
242
+ event
243
+ ).intervals_overlap(idle_intervals)
244
+
245
+ def rank_events(self, length):
246
+ """
247
+ Filter and rank the events based on the following heuristics:
248
+ 1) Events that are in the falling phase of the queue depth.
249
+ 2) Events that have a high idle_time, self_time difference.
250
+
251
+ Parameters:
252
+ length: The number of events to return.
253
+ """
254
+
255
+ # Find the interval when qd is falling to 0
256
+ import torch
257
+
258
+ queue_depth_list = list(reversed(self.queue_depth_list))
259
+ qd_values = [e.queue_depth for e in queue_depth_list]
260
+
261
+ bottom_threshold = 0
262
+ top_threshold = 4
263
+ decrease_interval = []
264
+ i = 0
265
+ while i < len(qd_values):
266
+ if qd_values[i] > bottom_threshold:
267
+ i += 1
268
+ continue
269
+ for j in range(i + 1, len(qd_values)):
270
+ # Find next zero and if the max value between them exceeds
271
+ # the threshold, then we have a falling interval
272
+ next_minimum_idx = index_of_first_match(
273
+ qd_values, lambda x: x <= bottom_threshold, start=j
274
+ )
275
+ peak_idx = argmax(qd_values, start=j, end=next_minimum_idx)
276
+
277
+ # if is a valid peak, we add to list and continue
278
+ if peak_idx is not None and qd_values[peak_idx] >= top_threshold:
279
+ decrease_interval.append(
280
+ Interval(
281
+ queue_depth_list[peak_idx].start, queue_depth_list[i].start
282
+ )
283
+ )
284
+ i = next_minimum_idx if next_minimum_idx is not None else i
285
+ break
286
+ i += 1
287
+ # Filter out events that are not in the decrease interval
288
+ event_list = [
289
+ event
290
+ for event in self.metrics.keys()
291
+ if event.intervals_overlap(decrease_interval)
292
+ ]
293
+ if event_list:
294
+ self_time = torch.tensor(
295
+ [self.metrics[event].self_time_ns for event in event_list],
296
+ dtype=torch.float32,
297
+ )
298
+ idle_time = torch.tensor(
299
+ [self.metrics[event].fraction_idle_time for event in event_list],
300
+ dtype=torch.float32,
301
+ )
302
+ normalized_gain = (idle_time - torch.mean(idle_time)) / torch.std(idle_time)
303
+ normalized_self = (self_time - torch.mean(self_time)) / torch.std(self_time)
304
+ heuristic_score_list = normalized_gain + 0.6 * normalized_self
305
+
306
+ # Sort events by heuristic
307
+ event_list = [
308
+ event
309
+ for _, event in sorted(
310
+ zip(heuristic_score_list, event_list),
311
+ key=lambda x: x[0],
312
+ reverse=True,
313
+ )
314
+ ]
315
+ event_list = event_list[:length]
316
+ return event_list
317
+
318
+ def get_optimizable_events(self, length: int = 1, print_enable: bool = True):
319
+ event_list = self.rank_events(length)
320
+ if not print_enable:
321
+ return event_list
322
+ output = "Optimizable events:\n" if event_list else "No events to optimize\n"
323
+
324
+ output += "\n".join(
325
+ [
326
+ f"""{'-'*80}
327
+ Event: {event}
328
+ Source code location: {source_code_location(event.event)}
329
+ Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
330
+ {'-'*80}"""
331
+ for event in event_list
332
+ ]
333
+ )
334
+ if print_enable:
335
+ print(output)
336
+ return event_list
337
+
338
+
339
+ def index_of_first_match(seq, predicate, start=0, end=None):
340
+ if end is None or end >= len(seq):
341
+ end = len(seq)
342
+ for i in range(start, end):
343
+ if predicate(seq[i]):
344
+ return i
345
+ return None
346
+
347
+
348
+ def argmax(seq, key=lambda x: x, start=0, end=None):
349
+ seq = seq[start:end]
350
+ if len(seq) == 0:
351
+ return None
352
+ return seq.index(max(seq, key=key)) + start
353
+
354
+
355
+ def source_code_location(event):
356
+ while event is not None:
357
+ match = re.search(r"\.py\(.*\)", event.name)
358
+ if match is None:
359
+ event = event.parent
360
+ continue
361
+ return event.name
362
+ return "No source code location found"
363
+
364
+
365
+ # Provide an OSS workaround for cudagraphs + CUPTI issue
366
+ # https://github.com/pytorch/pytorch/issues/75504
367
+ # TODO(dberard) - deprecate / remove workaround for CUDA >= 12, when
368
+ # we stop supporting older CUDA versions.
369
+ def _init_for_cuda_graphs():
370
+ from torch.autograd.profiler import profile
371
+
372
+ with profile():
373
+ pass
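A rough sketch of how the evaluation utilities above can be driven (assumes a CUDA-capable machine; the matmul workload and the length argument are illustrative):

import torch
from torch.profiler import profile, ProfilerActivity
from torch.profiler._utils import BasicEvaluation

a = torch.randn(1024, 1024, device="cuda")
b = torch.randn(1024, 1024, device="cuda")

with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    for _ in range(10):
        torch.mm(a, b)

# BasicEvaluation expects the underlying autograd profile object
evaluation = BasicEvaluation(prof.profiler)
evaluation.get_optimizable_events(length=3, print_enable=True)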
env-llmeval/lib/python3.10/site-packages/torch/profiler/itt.py ADDED
@@ -0,0 +1,78 @@
1
+ from contextlib import contextmanager
2
+
3
+ try:
4
+ from torch._C import _itt
5
+ except ImportError:
6
+
7
+ class _ITTStub:
8
+ @staticmethod
9
+ def _fail(*args, **kwargs):
10
+ raise RuntimeError(
11
+ "ITT functions not installed. Are you sure you have a ITT build?"
12
+ )
13
+
14
+ @staticmethod
15
+ def is_available():
16
+ return False
17
+
18
+ rangePush = _fail
19
+ rangePop = _fail
20
+ mark = _fail
21
+
22
+ _itt = _ITTStub() # type: ignore[assignment]
23
+
24
+
25
+ __all__ = ["is_available", "range_push", "range_pop", "mark", "range"]
26
+
27
+
28
+ def is_available():
29
+ """
30
+ Check if ITT feature is available or not
31
+ """
32
+ return _itt.is_available()
33
+
34
+
35
+ def range_push(msg):
36
+ """
37
+ Pushes a range onto a stack of nested range span. Returns zero-based
38
+ depth of the range that is started.
39
+
40
+ Arguments:
41
+ msg (str): ASCII message to associate with range
42
+ """
43
+ return _itt.rangePush(msg)
44
+
45
+
46
+ def range_pop():
47
+ """
48
+ Pops a range off of a stack of nested range spans. Returns the
49
+ zero-based depth of the range that is ended.
50
+ """
51
+ return _itt.rangePop()
52
+
53
+
54
+ def mark(msg):
55
+ """
56
+ Describe an instantaneous event that occurred at some point.
57
+
58
+ Arguments:
59
+ msg (str): ASCII message to associate with the event.
60
+ """
61
+ return _itt.mark(msg)
62
+
63
+
64
+ @contextmanager
65
+ def range(msg, *args, **kwargs):
66
+ """
67
+ Context manager / decorator that pushes an ITT range at the beginning
68
+ of its scope, and pops it at the end. If extra arguments are given,
69
+ they are passed as arguments to msg.format().
70
+
71
+ Args:
72
+ msg (str): message to associate with the range
73
+ """
74
+ range_push(msg.format(*args, **kwargs))
75
+ try:
76
+ yield
77
+ finally:
78
+ range_pop()
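A small illustrative example of using these helpers (it assumes PyTorch was built with ITT support; on other builds range_push/range_pop/mark raise RuntimeError):

import torch
from torch.profiler import itt

if itt.is_available():
    itt.mark("begin warmup")
    with itt.range("iteration_{}", 0):
        torch.randn(8, 8).mm(torch.randn(8, 8))  # placeholder workload to annotate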
env-llmeval/lib/python3.10/site-packages/torch/profiler/profiler.py ADDED
@@ -0,0 +1,754 @@
1
+ import gzip
2
+ import json
3
+ import os
4
+ import tempfile
5
+ from enum import Enum
6
+ from functools import partial
7
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
8
+ from warnings import warn
9
+
10
+ import torch
11
+ import torch.autograd.profiler as prof
12
+ from torch._C import _get_privateuse1_backend_name
13
+ from torch._C._profiler import (
14
+ _add_execution_trace_observer,
15
+ _disable_execution_trace_observer,
16
+ _enable_execution_trace_observer,
17
+ _ExperimentalConfig,
18
+ _remove_execution_trace_observer,
19
+ )
20
+ from torch.autograd import kineto_available, ProfilerActivity
21
+ from torch.profiler._memory_profiler import MemoryProfile, MemoryProfileTimeline
22
+
23
+
24
+ __all__ = [
25
+ "supported_activities",
26
+ "ProfilerAction",
27
+ "schedule",
28
+ "tensorboard_trace_handler",
29
+ "profile",
30
+ "ExecutionTraceObserver",
31
+ ]
32
+ PROFILER_STEP_NAME = "ProfilerStep"
33
+
34
+
35
+ def supported_activities():
36
+ """
37
+ Returns a set of supported profiler tracing activities.
38
+
39
+ Note: profiler uses CUPTI library to trace on-device CUDA kernels.
40
+ In case when CUDA is enabled but CUPTI is not available, passing
41
+ ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA
42
+ profiling code (same as in the legacy ``torch.autograd.profiler``).
43
+ This, in turn, results in including CUDA time in the profiler table output,
44
+ but not in the JSON trace.
45
+ """
46
+ return torch.autograd._supported_activities()
47
+
48
+
49
+ class _KinetoProfile:
50
+ """Low-level profiler wrap the autograd profile
51
+
52
+ Args:
53
+ activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
54
+ ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
55
+ Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
56
+ record_shapes (bool): save information about operator's input shapes.
57
+ profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline``
58
+ for more details).
59
+ with_stack (bool): record source information (file and line number) for the ops.
60
+ with_flops (bool): use formula to estimate the FLOPS of specific operators
61
+ (matrix multiplication and 2D convolution).
62
+ with_modules (bool): record module hierarchy (including function names)
63
+ corresponding to the callstack of the op. e.g. If module A's forward calls
64
+ module B's forward which contains an aten::add op,
65
+ then aten::add's module hierarchy is A.B
66
+ Note that this support exists, at the moment, only for TorchScript models
67
+ and not eager mode models.
68
+
69
+ experimental_config (_ExperimentalConfig) : A set of experimental options
70
+ used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
71
+
72
+ .. note::
73
+ This API is experimental and subject to change in the future.
74
+
75
+ Enabling shape and stack tracing results in additional overhead.
76
+ When record_shapes=True is specified, profiler will temporarily hold references to the tensors;
77
+ that may further prevent certain optimizations that depend on the reference count and introduce
78
+ extra tensor copies.
79
+ """
80
+
81
+ def __init__(
82
+ self,
83
+ *,
84
+ activities: Optional[Iterable[ProfilerActivity]] = None,
85
+ record_shapes: bool = False,
86
+ profile_memory: bool = False,
87
+ with_stack: bool = False,
88
+ with_flops: bool = False,
89
+ with_modules: bool = False,
90
+ experimental_config: Optional[_ExperimentalConfig] = None,
91
+ ):
92
+ self.activities = set(activities) if activities else supported_activities()
93
+ self.record_shapes = record_shapes
94
+ self.with_flops = with_flops
95
+ self.profile_memory = profile_memory
96
+ self.with_stack = with_stack
97
+ self.with_modules = with_modules
98
+ self.experimental_config = experimental_config
99
+ self.profiler: Optional[prof.profile] = None
100
+ self.mem_tl: Optional[MemoryProfileTimeline] = None
101
+ self.use_device = None
102
+ privateuse1_backend = _get_privateuse1_backend_name()
103
+ if privateuse1_backend != "privateuseone":
104
+ self.use_device = privateuse1_backend
105
+
106
+ def start(self):
107
+ self.prepare_trace()
108
+ self.start_trace()
109
+
110
+ def stop(self):
111
+ self.stop_trace()
112
+
113
+ def prepare_trace(self):
114
+ self.profiler = prof.profile(
115
+ use_cuda=(ProfilerActivity.CUDA in self.activities),
116
+ use_cpu=(ProfilerActivity.CPU in self.activities),
117
+ use_mtia=(ProfilerActivity.MTIA in self.activities),
118
+ use_device=None,
119
+ record_shapes=self.record_shapes,
120
+ with_flops=self.with_flops,
121
+ profile_memory=self.profile_memory,
122
+ with_stack=self.with_stack,
123
+ with_modules=self.with_modules,
124
+ use_kineto=True,
125
+ experimental_config=self.experimental_config,
126
+ )
127
+ self.profiler._prepare_trace()
128
+
129
+ def start_trace(self):
130
+ assert self.profiler is not None
131
+ self.profiler._start_trace()
132
+
133
+ if self.profile_memory:
134
+ self.add_metadata_json("profile_memory", "1")
135
+ if self.with_stack:
136
+ self.add_metadata_json("with_stack", "1")
137
+ if self.record_shapes:
138
+ self.add_metadata_json("record_shapes", "1")
139
+ if self.with_modules:
140
+ self.add_metadata_json("with_modules", "1")
141
+ if self.with_flops:
142
+ self.add_metadata_json("with_flops", "1")
143
+
144
+ if kineto_available():
145
+ dist_info = self._get_distributed_info()
146
+ if dist_info:
147
+ self.add_metadata_json("distributedInfo", json.dumps(dist_info))
148
+
149
+ if hasattr(torch, "_inductor"):
150
+ import torch._inductor.config as inductor_config
151
+
152
+ if inductor_config.triton.cudagraphs:
153
+ os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
154
+ self.add_metadata_json("DISABLE_CUPTI_LAZY_REINIT", "1")
155
+ # FIXME: CUDA Graph does not work well with CUPTI teardown.
156
+ # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
157
+ # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
158
+ # Workaround: turn off CUPTI teardown when using CUDA Graphs.
159
+ os.environ["TEARDOWN_CUPTI"] = "0"
160
+
161
+ def stop_trace(self):
162
+ assert self.profiler is not None
163
+ self.profiler.__exit__(None, None, None)
164
+
165
+ def export_chrome_trace(self, path: str):
166
+ """
167
+ Exports the collected trace in Chrome JSON format.
168
+ """
169
+ assert self.profiler
170
+ if path.endswith(".gz"):
171
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False)
172
+ fp.close()
173
+ retvalue = self.profiler.export_chrome_trace(fp.name)
174
+ with open(fp.name) as fin:
175
+ with gzip.open(path, "wt") as fout:
176
+ fout.writelines(fin)
177
+ os.remove(fp.name)
178
+ return retvalue
179
+ else:
180
+ return self.profiler.export_chrome_trace(path)
181
+
182
+ def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
183
+ """Save stack traces in a file in a format suitable for visualization.
184
+
185
+ Args:
186
+ path (str): save stacks file to this location;
187
+ metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total"
188
+
189
+ .. note::
190
+ Example of using FlameGraph tool:
191
+
192
+ - git clone https://github.com/brendangregg/FlameGraph
193
+ - cd FlameGraph
194
+ - ./flamegraph.pl --title "CPU time" --countname "us." profiler.stacks > perf_viz.svg
195
+ """
196
+ assert self.profiler
197
+ return self.profiler.export_stacks(path, metric)
198
+
199
+ def key_averages(
200
+ self, group_by_input_shape: bool = False, group_by_stack_n: int = 0
201
+ ):
202
+ """Averages events, grouping them by operator name and (optionally) input shapes and
203
+ stack.
204
+
205
+ .. note::
206
+ To use shape/stack functionality make sure to set record_shapes/with_stack
207
+ when creating profiler context manager.
208
+ """
209
+ assert self.profiler
210
+ return self.profiler.key_averages(group_by_input_shape, group_by_stack_n)
211
+
212
+ def events(self):
213
+ """
214
+ Returns the list of unaggregated profiler events,
215
+ to be used in the trace callback or after the profiling is finished.
216
+ """
217
+ assert self.profiler
218
+ return self.profiler.function_events
219
+
220
+ def add_metadata(self, key: str, value: str):
221
+ """
222
+ Adds user-defined metadata with a string key and a string value
223
+ into the trace file
224
+ """
225
+ wrapped_value = '"' + value.replace('"', '\\"') + '"'
226
+ torch.autograd._add_metadata_json(key, wrapped_value)
227
+
228
+ def add_metadata_json(self, key: str, value: str):
229
+ """
230
+ Adds user-defined metadata with a string key and a valid JSON value
231
+ into the trace file
232
+ """
233
+ torch.autograd._add_metadata_json(key, value)
234
+
235
+ def _get_distributed_info(self):
236
+ import torch.distributed as dist
237
+
238
+ if not dist.is_available() or not dist.is_initialized():
239
+ return None
240
+
241
+ return {
242
+ "backend": dist.get_backend(),
243
+ "rank": dist.get_rank(),
244
+ "world_size": dist.get_world_size(),
245
+ }
246
+
247
+ def _memory_profile(self) -> MemoryProfile:
248
+ required = ("record_shapes", "profile_memory", "with_stack")
249
+ missing = [f"{i}=True" for i in required if not getattr(self, i)]
250
+ if missing:
251
+ raise ValueError(f"{', '.join(missing)} required for memory profiling.")
252
+
253
+ assert self.profiler is not None and self.profiler.kineto_results is not None
254
+ return MemoryProfile(self.profiler.kineto_results)
255
+
256
+ def export_memory_timeline(self, path: str, device: Optional[str] = None) -> None:
257
+ """Export memory event information from the profiler collected
258
+ tree for a given device, and export a timeline plot. There are 3
259
+ exportable files using ``export_memory_timeline``, each controlled by the
260
+ ``path``'s suffix.
261
+
262
+ - For an HTML compatible plot, use the suffix ``.html``, and a memory timeline
263
+ plot will be embedded as a PNG file in the HTML file.
264
+
265
+ - For plot points consisting of ``[times, [sizes by category]]``, where
266
+ ``times`` are timestamps and ``sizes`` are memory usage for each category.
267
+ The memory timeline plot will be saved a JSON (``.json``) or gzipped JSON
268
+ (``.json.gz``) depending on the suffix.
269
+
270
+ - For raw memory points, use the suffix ``.raw.json.gz``. Each raw memory
271
+ event will consist of ``(timestamp, action, numbytes, category)``, where
272
+ ``action`` is one of ``[PREEXISTING, CREATE, INCREMENT_VERSION, DESTROY]``,
273
+ and ``category`` is one of the enums from
274
+ ``torch.profiler._memory_profiler.Category``.
275
+
276
+ Output: Memory timeline written as gzipped JSON, JSON, or HTML.
277
+ """
278
+ # Default to device 0, if unset. Fallback on cpu.
279
+ if device is None and self.use_device and self.use_device != "cuda":
280
+ device = self.use_device + ":0"
281
+
282
+ if device is None:
283
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
284
+
285
+ # Construct the memory timeline plot data
286
+ self.mem_tl = MemoryProfileTimeline(self._memory_profile())
287
+
288
+ # Depending on the file suffix, save the data as json.gz or json.
289
+ # For html, we can embed the image into an HTML file.
290
+ if path.endswith(".html"):
291
+ self.mem_tl.export_memory_timeline_html(path, device)
292
+ elif path.endswith(".gz"):
293
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False)
294
+ fp.close()
295
+ if path.endswith("raw.json.gz"):
296
+ self.mem_tl.export_memory_timeline_raw(fp.name, device)
297
+ else:
298
+ self.mem_tl.export_memory_timeline(fp.name, device)
299
+ with open(fp.name) as fin:
300
+ with gzip.open(path, "wt") as fout:
301
+ fout.writelines(fin)
302
+ os.remove(fp.name)
303
+ else:
304
+ self.mem_tl.export_memory_timeline(path, device)
305
+
306
+
307
+ class ProfilerAction(Enum):
308
+ """
309
+ Profiler actions that can be taken at the specified intervals
310
+ """
311
+
312
+ NONE = 0
313
+ WARMUP = 1
314
+ RECORD = 2
315
+ RECORD_AND_SAVE = 3
316
+
317
+
318
+ def schedule(
319
+ *, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0
320
+ ) -> Callable:
321
+ """
322
+ Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip
323
+ the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps,
324
+ then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps.
325
+ The optional number of cycles is specified with the ``repeat`` parameter; a value of zero means that
326
+ the cycles will continue until the profiling is finished.
327
+ """
328
+
329
+ def schedule_fn(step: int) -> ProfilerAction:
330
+ assert step >= 0
331
+ if step < skip_first:
332
+ return ProfilerAction.NONE
333
+ else:
334
+ step -= skip_first
335
+ num_steps = wait + warmup + active
336
+ if repeat > 0 and step / num_steps >= repeat:
337
+ return ProfilerAction.NONE
338
+ mod_step = step % num_steps
339
+ if mod_step < wait:
340
+ return ProfilerAction.NONE
341
+ elif mod_step < wait + warmup:
342
+ return ProfilerAction.WARMUP
343
+ else:
344
+ return (
345
+ ProfilerAction.RECORD
346
+ if mod_step < num_steps - 1
347
+ else ProfilerAction.RECORD_AND_SAVE
348
+ )
349
+
350
+ assert (
351
+ wait >= 0 and warmup >= 0 and active > 0 and repeat >= 0 and skip_first >= 0
352
+ ), "Invalid profiler schedule arguments"
353
+ if warmup == 0:
354
+ warn("Profiler won't be using warmup, this can skew profiler results")
355
+ return schedule_fn
356
+
357
+
358
+ def _default_schedule_fn(_: int) -> ProfilerAction:
359
+ """
360
+ Default profiler behavior - immediately starts recording the events,
361
+ keeps doing it on every profiler step.
362
+ """
363
+ return ProfilerAction.RECORD
364
+
365
+
366
+ def tensorboard_trace_handler(
367
+ dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False
368
+ ):
369
+ """
370
+ Outputs tracing files to the directory ``dir_name``, which can then be
371
+ passed directly to tensorboard as the logdir.
372
+ ``worker_name`` should be unique for each worker in a distributed scenario;
373
+ it will be set to '[hostname]_[pid]' by default.
374
+ """
375
+ import os
376
+ import socket
377
+ import time
378
+
379
+ def handler_fn(prof) -> None:
380
+ nonlocal worker_name
381
+ if not os.path.isdir(dir_name):
382
+ try:
383
+ os.makedirs(dir_name, exist_ok=True)
384
+ except Exception as e:
385
+ raise RuntimeError("Can't create directory: " + dir_name) from e
386
+ if not worker_name:
387
+ worker_name = f"{socket.gethostname()}_{os.getpid()}"
388
+ # Use nanosecond here to avoid naming clash when exporting the trace
389
+ file_name = f"{worker_name}.{time.time_ns()}.pt.trace.json"
390
+ if use_gzip:
391
+ file_name = file_name + ".gz"
392
+ prof.export_chrome_trace(os.path.join(dir_name, file_name))
393
+
394
+ return handler_fn
395
+
396
+
397
+ class profile(_KinetoProfile):
398
+ """Profiler context manager.
399
+
400
+ Args:
401
+ activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
402
+ ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
403
+ Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
404
+ schedule (Callable): callable that takes step (int) as a single parameter and returns
405
+ ``ProfilerAction`` value that specifies the profiler action to perform at each step.
406
+ on_trace_ready (Callable): callable that is called at each step when ``schedule``
407
+ returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
408
+ record_shapes (bool): save information about operator's input shapes.
409
+ profile_memory (bool): track tensor memory allocation/deallocation.
410
+ with_stack (bool): record source information (file and line number) for the ops.
411
+ with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators
412
+ (matrix multiplication and 2D convolution).
413
+ with_modules (bool): record module hierarchy (including function names)
414
+ corresponding to the callstack of the op. e.g. If module A's forward calls
415
+ module B's forward which contains an aten::add op,
416
+ then aten::add's module hierarchy is A.B
417
+ Note that this support exists, at the moment, only for TorchScript models
418
+ and not eager mode models.
419
+ experimental_config (_ExperimentalConfig) : A set of experimental options
420
+ used for Kineto library features. Note, backward compatibility is not guaranteed.
421
+
422
+ use_cuda (bool):
423
+ .. deprecated:: 1.8.1
424
+ use ``activities`` instead.
425
+
426
+ .. note::
427
+ Use :func:`~torch.profiler.schedule` to generate the callable schedule.
428
+ Non-default schedules are useful when profiling long training jobs
429
+ and allow the user to obtain multiple traces at the different iterations
430
+ of the training process.
431
+ The default schedule simply records all the events continuously for the
432
+ duration of the context manager.
433
+
434
+ .. note::
435
+ Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:
436
+
437
+ ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``
438
+
439
+ After profiling, result files can be found in the specified directory. Use the command:
440
+
441
+ ``tensorboard --logdir dir_name``
442
+
443
+ to see the results in TensorBoard.
444
+ For more information, see
445
+ `PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__
446
+
447
+ .. note::
448
+ Enabling shape and stack tracing results in additional overhead.
449
+ When record_shapes=True is specified, profiler will temporarily hold references to the tensors;
450
+ that may further prevent certain optimizations that depend on the reference count and introduce
451
+ extra tensor copies.
452
+
453
+ Examples:
454
+
455
+ .. code-block:: python
456
+
457
+ with torch.profiler.profile(
458
+ activities=[
459
+ torch.profiler.ProfilerActivity.CPU,
460
+ torch.profiler.ProfilerActivity.CUDA,
461
+ ]
462
+ ) as p:
463
+ code_to_profile()
464
+ print(p.key_averages().table(
465
+ sort_by="self_cuda_time_total", row_limit=-1))
466
+
467
+ Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:
468
+
469
+ .. code-block:: python
470
+
471
+ # Non-default profiler schedule allows user to turn profiler on and off
472
+ # on different iterations of the training loop;
473
+ # trace_handler is called every time a new trace becomes available
474
+ def trace_handler(prof):
475
+ print(prof.key_averages().table(
476
+ sort_by="self_cuda_time_total", row_limit=-1))
477
+ # prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json")
478
+
479
+ with torch.profiler.profile(
480
+ activities=[
481
+ torch.profiler.ProfilerActivity.CPU,
482
+ torch.profiler.ProfilerActivity.CUDA,
483
+ ],
484
+
485
+ # In this example with wait=1, warmup=1, active=2, repeat=1,
486
+ # profiler will skip the first step/iteration,
487
+ # start warming up on the second, record
488
+ # the third and the fourth iterations,
489
+ # after which the trace will become available
490
+ # and on_trace_ready (when set) is called;
491
+ # the cycle repeats starting with the next step
492
+
493
+ schedule=torch.profiler.schedule(
494
+ wait=1,
495
+ warmup=1,
496
+ active=2,
497
+ repeat=1),
498
+ on_trace_ready=trace_handler
499
+ # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')
500
+ # used when outputting for tensorboard
501
+ ) as p:
502
+ for iter in range(N):
503
+ code_iteration_to_profile(iter)
504
+ # send a signal to the profiler that the next iteration has started
505
+ p.step()
506
+ """
507
+
508
+ def __init__(
509
+ self,
510
+ *,
511
+ activities: Optional[Iterable[ProfilerActivity]] = None,
512
+ schedule: Optional[Callable[[int], ProfilerAction]] = None,
513
+ on_trace_ready: Optional[Callable[..., Any]] = None,
514
+ record_shapes: bool = False,
515
+ profile_memory: bool = False,
516
+ with_stack: bool = False,
517
+ with_flops: bool = False,
518
+ with_modules: bool = False,
519
+ experimental_config: Optional[_ExperimentalConfig] = None,
520
+ # deprecated:
521
+ use_cuda: Optional[bool] = None,
522
+ ):
523
+ activities_set = set(activities) if activities else supported_activities()
524
+ if use_cuda is not None:
525
+ warn("use_cuda is deprecated, use activities argument instead")
526
+ if use_cuda:
527
+ activities_set.add(ProfilerActivity.CUDA)
528
+ elif ProfilerActivity.CUDA in activities_set:
529
+ activities_set.remove(ProfilerActivity.CUDA)
530
+ assert len(activities_set) > 0, "No valid profiler activities found"
531
+
532
+ super().__init__(
533
+ activities=activities,
534
+ record_shapes=record_shapes,
535
+ profile_memory=profile_memory,
536
+ with_stack=with_stack,
537
+ with_flops=with_flops,
538
+ with_modules=with_modules,
539
+ experimental_config=experimental_config,
540
+ )
541
+
542
+ if schedule:
543
+ self.schedule = schedule
544
+ # add step markers into the trace and table view
545
+ self.record_steps = True
546
+ else:
547
+ self.schedule = _default_schedule_fn
548
+ self.record_steps = False
549
+ self.on_trace_ready = on_trace_ready
550
+ self.step_num = 0
551
+ self.current_action = self.schedule(self.step_num)
552
+ self.step_rec_fn: Optional[prof.record_function] = None
553
+
554
+ self.action_map: Dict[
555
+ Tuple[ProfilerAction, Optional[ProfilerAction]], List[Any]
556
+ ] = {
557
+ # key is (prev_action, current_action), value is action list corresponding to the state pair.
558
+ (ProfilerAction.NONE, ProfilerAction.NONE): [],
559
+ (ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace],
560
+ (ProfilerAction.NONE, ProfilerAction.RECORD): [
561
+ self.prepare_trace,
562
+ self.start_trace,
563
+ ],
564
+ (ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [
565
+ self.prepare_trace,
566
+ self.start_trace,
567
+ ],
568
+ (ProfilerAction.WARMUP, ProfilerAction.NONE): [
569
+ partial(warn, "Incorrect schedule: WARMUP followed by NONE"),
570
+ self.start_trace,
571
+ self.stop_trace,
572
+ ],
573
+ (ProfilerAction.WARMUP, ProfilerAction.WARMUP): [],
574
+ (ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace],
575
+ (ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace],
576
+ (ProfilerAction.RECORD, ProfilerAction.NONE): [
577
+ partial(warn, "Incorrect schedule: RECORD followed by NONE"),
578
+ self.stop_trace,
579
+ ],
580
+ (ProfilerAction.RECORD, ProfilerAction.WARMUP): [
581
+ partial(warn, "Incorrect schedule: RECORD followed by WARMUP"),
582
+ self.stop_trace,
583
+ ],
584
+ (ProfilerAction.RECORD, ProfilerAction.RECORD): [],
585
+ (ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [],
586
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [
587
+ self.stop_trace,
588
+ self._trace_ready,
589
+ ],
590
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [
591
+ self.stop_trace,
592
+ self._trace_ready,
593
+ self.prepare_trace,
594
+ ],
595
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [
596
+ self.stop_trace,
597
+ self._trace_ready,
598
+ self.prepare_trace,
599
+ self.start_trace,
600
+ ],
601
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [
602
+ self.stop_trace,
603
+ self._trace_ready,
604
+ self.prepare_trace,
605
+ self.start_trace,
606
+ ],
607
+ # used for exit action
608
+ (ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace],
609
+ (ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready],
610
+ (ProfilerAction.RECORD_AND_SAVE, None): [
611
+ self.stop_trace,
612
+ self._trace_ready,
613
+ ],
614
+ }
615
+ # Start tracking increments to profiler step, this will be used
616
+ # by Kineto
617
+ prof.KinetoStepTracker.init_step_count(PROFILER_STEP_NAME)
618
+
619
+ def __enter__(self):
620
+ self.start()
621
+ return self
622
+
623
+ def __exit__(self, exc_type, exc_val, exc_tb):
624
+ self.stop()
625
+ prof.KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME)
626
+
627
+ def start(self):
628
+ self._transit_action(ProfilerAction.NONE, self.current_action)
629
+ if self.record_steps:
630
+ self.step_rec_fn = prof.record_function(
631
+ "ProfilerStep#" + str(self.step_num)
632
+ )
633
+ self.step_rec_fn.__enter__()
634
+
635
+ def stop(self):
636
+ if self.record_steps and self.step_rec_fn:
637
+ self.step_rec_fn.__exit__(None, None, None)
638
+ self._transit_action(self.current_action, None)
639
+
640
+ def step(self):
641
+ """
642
+ Signals the profiler that the next profiling step has started.
643
+ """
644
+ if self.record_steps and self.step_rec_fn:
645
+ self.step_rec_fn.__exit__(None, None, None)
646
+ prev_action = self.current_action
647
+ cur_step = self.step_num
648
+ self.step_num += 1
649
+ self.current_action = self.schedule(self.step_num)
650
+
651
+ self._transit_action(prev_action, self.current_action)
652
+ prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME)
653
+
654
+ if self.record_steps:
655
+ self.step_rec_fn = prof.record_function("ProfilerStep#" + str(cur_step))
656
+ self.step_rec_fn.__enter__()
657
+
658
+ def _trace_ready(self):
659
+ if self.on_trace_ready:
660
+ self.on_trace_ready(self)
661
+
662
+ def _transit_action(self, prev_action, current_action):
663
+ action_list = self.action_map.get((prev_action, current_action))
664
+ if action_list:
665
+ for action in action_list:
666
+ action()
667
+
668
+
669
+ class ExecutionTraceObserver:
670
+ """Execution Trace Observer
671
+
672
+ Each process can have a single ExecutionTraceObserver instance. The observer
673
+ can be added to record function callbacks via calling register_callback()
674
+ explicitly. Without calling unregister_callback(), repeated calls to
675
+ register_callback() will not add additional observers to record function
676
+ callbacks. Once an ExecutionTraceObserver is created, the start() and stop()
677
+ methods control when the event data is recorded.
678
+
679
+ Deleting or calling unregister_callback() will remove the observer from the
680
+ record function callbacks, finalize the output file, and stop
681
+ incurring any overhead.
682
+ """
683
+
684
+ def __init__(self):
685
+ """
686
+ Initializes the default states.
687
+ """
688
+ self._registered = False
689
+ self._execution_trace_running = False
690
+
691
+ def __del__(self):
692
+ """
693
+ Calls unregister_callback() to make sure to finalize outputs.
694
+ """
695
+ self.unregister_callback()
696
+
697
+ def register_callback(self, output_file_path: str):
698
+ """
699
+ Adds ET observer to record function callbacks. The data will be
700
+ written to output_file_path.
701
+ """
702
+ if not self._registered:
703
+ self._output_file_path = output_file_path
704
+ self._registered = _add_execution_trace_observer(output_file_path)
705
+
706
+ def unregister_callback(self):
707
+ """
708
+ Removes ET observer from record function callbacks.
709
+ """
710
+ if self._registered:
711
+ self.stop()
712
+ _remove_execution_trace_observer()
713
+ self._registered = False
714
+
715
+ @property
716
+ def is_registered(self):
717
+ """
718
+ Returns True if the execution trace observer is registered, otherwise False.
719
+ """
720
+ return self._registered
721
+
722
+ def is_running(self):
723
+ """
724
+ Returns True if the observer is running, otherwise False.
725
+ """
726
+ return self._execution_trace_running
727
+
728
+ def start(self):
729
+ """
730
+ Starts capturing.
731
+ """
732
+ if self._registered and not self._execution_trace_running:
733
+ _enable_execution_trace_observer()
734
+ self._execution_trace_running = True
735
+
736
+ def stop(self):
737
+ """
738
+ Stops capturing.
739
+ """
740
+ if self._execution_trace_running:
741
+ _disable_execution_trace_observer()
742
+ self._execution_trace_running = False
743
+
744
+ def get_output_file_path(self) -> str:
745
+ """
746
+ Returns the output file name.
747
+ """
748
+ if self.is_registered:
749
+ return self._output_file_path
750
+ else:
751
+ raise RuntimeError(
752
+ "A callback to the ET profiler needs to be registered "
753
+ "first before getting the output file path"
754
+ )
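A minimal sketch of driving the ExecutionTraceObserver defined above; the output path and workload are placeholders:

import torch
from torch.profiler import ExecutionTraceObserver

et = ExecutionTraceObserver()
et.register_callback("/tmp/execution_trace.json")

et.start()
torch.randn(4, 4) + torch.randn(4, 4)  # region to capture
et.stop()

# Finalizes the output file and removes the record-function callback
et.unregister_callback()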
env-llmeval/lib/python3.10/site-packages/torch/profiler/python_tracer.py ADDED
@@ -0,0 +1,20 @@
1
+ import os
2
+ import site
3
+ import sys
4
+ import typing
5
+
6
+ import torch
7
+
8
+
9
+ def _prefix_regex() -> typing.List[str]:
10
+ raw_paths = (
11
+ site.getsitepackages()
12
+ + sys.path
13
+ + [site.getuserbase()]
14
+ + [site.getusersitepackages()]
15
+ + [os.path.dirname(os.path.dirname(torch.__file__))]
16
+ )
17
+
18
+ path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True)
19
+ assert all(isinstance(i, str) for i in path_prefixes)
20
+ return [i + os.sep for i in path_prefixes]
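For reference, the helper above returns site-package and torch install prefixes so the Python tracer can classify frames; a quick illustrative check (the printed slice is arbitrary):

import os
from torch.profiler.python_tracer import _prefix_regex

prefixes = _prefix_regex()
assert all(p.endswith(os.sep) for p in prefixes)  # every entry ends with the path separator
print(prefixes[:3])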
env-llmeval/lib/python3.10/site-packages/torch/special/__init__.py ADDED
@@ -0,0 +1,1283 @@
1
+ import torch
2
+ from torch._C import _add_docstr, _special # type: ignore[attr-defined]
3
+ from torch._torch_docs import common_args, multi_dim_common
4
+
5
+ __all__ = [
6
+ 'airy_ai',
7
+ 'bessel_j0',
8
+ 'bessel_j1',
9
+ 'bessel_y0',
10
+ 'bessel_y1',
11
+ 'chebyshev_polynomial_t',
12
+ 'chebyshev_polynomial_u',
13
+ 'chebyshev_polynomial_v',
14
+ 'chebyshev_polynomial_w',
15
+ 'digamma',
16
+ 'entr',
17
+ 'erf',
18
+ 'erfc',
19
+ 'erfcx',
20
+ 'erfinv',
21
+ 'exp2',
22
+ 'expit',
23
+ 'expm1',
24
+ 'gammainc',
25
+ 'gammaincc',
26
+ 'gammaln',
27
+ 'hermite_polynomial_h',
28
+ 'hermite_polynomial_he',
29
+ 'i0',
30
+ 'i0e',
31
+ 'i1',
32
+ 'i1e',
33
+ 'laguerre_polynomial_l',
34
+ 'legendre_polynomial_p',
35
+ 'log1p',
36
+ 'log_ndtr',
37
+ 'log_softmax',
38
+ 'logit',
39
+ 'logsumexp',
40
+ 'modified_bessel_i0',
41
+ 'modified_bessel_i1',
42
+ 'modified_bessel_k0',
43
+ 'modified_bessel_k1',
44
+ 'multigammaln',
45
+ 'ndtr',
46
+ 'ndtri',
47
+ 'polygamma',
48
+ 'psi',
49
+ 'round',
50
+ 'shifted_chebyshev_polynomial_t',
51
+ 'shifted_chebyshev_polynomial_u',
52
+ 'shifted_chebyshev_polynomial_v',
53
+ 'shifted_chebyshev_polynomial_w',
54
+ 'scaled_modified_bessel_k0',
55
+ 'scaled_modified_bessel_k1',
56
+ 'sinc',
57
+ 'softmax',
58
+ 'spherical_bessel_j0',
59
+ 'xlog1py',
60
+ 'xlogy',
61
+ 'zeta',
62
+ ]
63
+
64
+ Tensor = torch.Tensor
65
+
66
+ entr = _add_docstr(_special.special_entr,
67
+ r"""
68
+ entr(input, *, out=None) -> Tensor
69
+ Computes the entropy on :attr:`input` (as defined below), elementwise.
70
+
71
+ .. math::
72
+ \begin{align}
73
+ \text{entr(x)} = \begin{cases}
74
+ -x * \ln(x) & x > 0 \\
75
+ 0 & x = 0.0 \\
76
+ -\infty & x < 0
77
+ \end{cases}
78
+ \end{align}
79
+ """ + """
80
+
81
+ Args:
82
+ input (Tensor): the input tensor.
83
+
84
+ Keyword args:
85
+ out (Tensor, optional): the output tensor.
86
+
87
+ Example::
88
+ >>> a = torch.arange(-0.5, 1, 0.5)
89
+ >>> a
90
+ tensor([-0.5000, 0.0000, 0.5000])
91
+ >>> torch.special.entr(a)
92
+ tensor([ -inf, 0.0000, 0.3466])
93
+ """)
94
+
95
+ psi = _add_docstr(_special.special_psi,
96
+ r"""
97
+ psi(input, *, out=None) -> Tensor
98
+
99
+ Alias for :func:`torch.special.digamma`.
100
+ """)
101
+
102
+ digamma = _add_docstr(_special.special_digamma,
103
+ r"""
104
+ digamma(input, *, out=None) -> Tensor
105
+
106
+ Computes the logarithmic derivative of the gamma function on `input`.
107
+
108
+ .. math::
109
+ \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
110
+ """ + r"""
111
+ Args:
112
+ input (Tensor): the tensor to compute the digamma function on
113
+
114
+ Keyword args:
115
+ {out}
116
+
117
+ .. note:: This function is similar to SciPy's `scipy.special.digamma`.
118
+
119
+ .. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`.
120
+ Previously it returned `NaN` for `0`.
121
+
122
+ Example::
123
+
124
+ >>> a = torch.tensor([1, 0.5])
125
+ >>> torch.special.digamma(a)
126
+ tensor([-0.5772, -1.9635])
127
+
128
+ """.format(**common_args))
129
+
130
+ gammaln = _add_docstr(_special.special_gammaln,
131
+ r"""
132
+ gammaln(input, *, out=None) -> Tensor
133
+
134
+ Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
135
+
136
+ .. math::
137
+ \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
138
+ """ + """
139
+ Args:
140
+ {input}
141
+
142
+ Keyword args:
143
+ {out}
144
+
145
+ Example::
146
+
147
+ >>> a = torch.arange(0.5, 2, 0.5)
148
+ >>> torch.special.gammaln(a)
149
+ tensor([ 0.5724, 0.0000, -0.1208])
150
+
151
+ """.format(**common_args))
152
+
153
+ polygamma = _add_docstr(_special.special_polygamma,
154
+ r"""
155
+ polygamma(n, input, *, out=None) -> Tensor
156
+
157
+ Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`.
158
+ :math:`n \geq 0` is called the order of the polygamma function.
159
+
160
+ .. math::
161
+ \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x)
162
+
163
+ .. note::
164
+ This function is implemented only for nonnegative integers :math:`n \geq 0`.
165
+ """ + """
166
+ Args:
167
+ n (int): the order of the polygamma function
168
+ {input}
169
+
170
+ Keyword args:
171
+ {out}
172
+
173
+ Example::
174
+ >>> a = torch.tensor([1, 0.5])
175
+ >>> torch.special.polygamma(1, a)
176
+ tensor([1.64493, 4.9348])
177
+ >>> torch.special.polygamma(2, a)
178
+ tensor([ -2.4041, -16.8288])
179
+ >>> torch.special.polygamma(3, a)
180
+ tensor([ 6.4939, 97.4091])
181
+ >>> torch.special.polygamma(4, a)
182
+ tensor([ -24.8863, -771.4742])
183
+ """.format(**common_args))
184
+
185
+ erf = _add_docstr(_special.special_erf,
186
+ r"""
187
+ erf(input, *, out=None) -> Tensor
188
+
189
+ Computes the error function of :attr:`input`. The error function is defined as follows:
190
+
191
+ .. math::
192
+ \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
193
+ """ + r"""
194
+ Args:
195
+ {input}
196
+
197
+ Keyword args:
198
+ {out}
199
+
200
+ Example::
201
+
202
+ >>> torch.special.erf(torch.tensor([0, -1., 10.]))
203
+ tensor([ 0.0000, -0.8427, 1.0000])
204
+ """.format(**common_args))
205
+
206
+ erfc = _add_docstr(_special.special_erfc,
207
+ r"""
208
+ erfc(input, *, out=None) -> Tensor
209
+
210
+ Computes the complementary error function of :attr:`input`.
211
+ The complementary error function is defined as follows:
212
+
213
+ .. math::
214
+ \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
215
+ """ + r"""
216
+ Args:
217
+ {input}
218
+
219
+ Keyword args:
220
+ {out}
221
+
222
+ Example::
223
+
224
+ >>> torch.special.erfc(torch.tensor([0, -1., 10.]))
225
+ tensor([ 1.0000, 1.8427, 0.0000])
226
+ """.format(**common_args))
227
+
228
+ erfcx = _add_docstr(_special.special_erfcx,
229
+ r"""
230
+ erfcx(input, *, out=None) -> Tensor
231
+
232
+ Computes the scaled complementary error function for each element of :attr:`input`.
233
+ The scaled complementary error function is defined as follows:
234
+
235
+ .. math::
236
+ \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x)
237
+ """ + r"""
238
+
239
+ """ + r"""
240
+ Args:
241
+ {input}
242
+
243
+ Keyword args:
244
+ {out}
245
+
246
+ Example::
247
+
248
+ >>> torch.special.erfcx(torch.tensor([0, -1., 10.]))
249
+ tensor([ 1.0000, 5.0090, 0.0561])
250
+ """.format(**common_args))
251
+
252
+ erfinv = _add_docstr(_special.special_erfinv,
253
+ r"""
254
+ erfinv(input, *, out=None) -> Tensor
255
+
256
+ Computes the inverse error function of :attr:`input`.
257
+ The inverse error function is defined in the range :math:`(-1, 1)` as:
258
+
259
+ .. math::
260
+ \mathrm{erfinv}(\mathrm{erf}(x)) = x
261
+ """ + r"""
262
+
263
+ Args:
264
+ {input}
265
+
266
+ Keyword args:
267
+ {out}
268
+
269
+ Example::
270
+
271
+ >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.]))
272
+ tensor([ 0.0000, 0.4769, -inf])
273
+ """.format(**common_args))
274
+
275
+ logit = _add_docstr(_special.special_logit,
276
+ r"""
277
+ logit(input, eps=None, *, out=None) -> Tensor
278
+
279
+ Returns a new tensor with the logit of the elements of :attr:`input`.
280
+ :attr:`input` is clamped to [eps, 1 - eps] when eps is not None.
281
+ When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function yields NaN.
282
+
283
+ .. math::
284
+ \begin{align}
285
+ y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\
286
+ z_{i} &= \begin{cases}
287
+ x_{i} & \text{if eps is None} \\
288
+ \text{eps} & \text{if } x_{i} < \text{eps} \\
289
+ x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
290
+ 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps}
291
+ \end{cases}
292
+ \end{align}
293
+ """ + r"""
294
+ Args:
295
+ {input}
296
+ eps (float, optional): the epsilon for input clamp bound. Default: ``None``
297
+
298
+ Keyword args:
299
+ {out}
300
+
301
+ Example::
302
+
303
+ >>> a = torch.rand(5)
304
+ >>> a
305
+ tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516])
306
+ >>> torch.special.logit(a, eps=1e-6)
307
+ tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261])
308
+ """.format(**common_args))
309
+
310
+ logsumexp = _add_docstr(_special.special_logsumexp,
311
+ r"""
312
+ logsumexp(input, dim, keepdim=False, *, out=None)
313
+
314
+ Alias for :func:`torch.logsumexp`.
315
+ """.format(**multi_dim_common))
316
+
317
+ expit = _add_docstr(_special.special_expit,
318
+ r"""
319
+ expit(input, *, out=None) -> Tensor
320
+
321
+ Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`.
322
+
323
+ .. math::
324
+ \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
325
+ """ + r"""
326
+ Args:
327
+ {input}
328
+
329
+ Keyword args:
330
+ {out}
331
+
332
+ Example::
333
+
334
+ >>> t = torch.randn(4)
335
+ >>> t
336
+ tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
337
+ >>> torch.special.expit(t)
338
+ tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
339
+ """.format(**common_args))
340
+
341
+ exp2 = _add_docstr(_special.special_exp2,
342
+ r"""
343
+ exp2(input, *, out=None) -> Tensor
344
+
345
+ Computes the base two exponential function of :attr:`input`.
346
+
347
+ .. math::
348
+ y_{i} = 2^{x_{i}}
349
+
350
+ """ + r"""
351
+ Args:
352
+ {input}
353
+
354
+ Keyword args:
355
+ {out}
356
+
357
+ Example::
358
+
359
+ >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
360
+ tensor([ 1., 2., 8., 16.])
361
+ """.format(**common_args))
362
+
363
+ expm1 = _add_docstr(_special.special_expm1,
364
+ r"""
365
+ expm1(input, *, out=None) -> Tensor
366
+
367
+ Computes the exponential of the elements of :attr:`input`, minus 1.
369
+
370
+ .. math::
371
+ y_{i} = e^{x_{i}} - 1
372
+
373
+ .. note:: This function provides greater precision than exp(x) - 1 for small values of x.
374
+
375
+ """ + r"""
376
+ Args:
377
+ {input}
378
+
379
+ Keyword args:
380
+ {out}
381
+
382
+ Example::
383
+
384
+ >>> torch.special.expm1(torch.tensor([0, math.log(2.)]))
385
+ tensor([ 0., 1.])
386
+ """.format(**common_args))
387
+
388
+ xlog1py = _add_docstr(_special.special_xlog1py,
389
+ r"""
390
+ xlog1py(input, other, *, out=None) -> Tensor
391
+
392
+ Computes ``input * log1p(other)`` with the following cases.
393
+
394
+ .. math::
395
+ \text{out}_{i} = \begin{cases}
396
+ \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
397
+ 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\
398
+ \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise}
399
+ \end{cases}
400
+
401
+ Similar to SciPy's `scipy.special.xlog1py`.
402
+
403
+ """ + r"""
404
+
405
+ Args:
406
+ input (Number or Tensor) : Multiplier
407
+ other (Number or Tensor) : Argument
408
+
409
+ .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
410
+
411
+ Keyword args:
412
+ {out}
413
+
414
+ Example::
415
+
416
+ >>> x = torch.zeros(5,)
417
+ >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
418
+ >>> torch.special.xlog1py(x, y)
419
+ tensor([0., 0., 0., 0., nan])
420
+ >>> x = torch.tensor([1, 2, 3])
421
+ >>> y = torch.tensor([3, 2, 1])
422
+ >>> torch.special.xlog1py(x, y)
423
+ tensor([1.3863, 2.1972, 2.0794])
424
+ >>> torch.special.xlog1py(x, 4)
425
+ tensor([1.6094, 3.2189, 4.8283])
426
+ >>> torch.special.xlog1py(2, y)
427
+ tensor([2.7726, 2.1972, 1.3863])
428
+ """.format(**common_args))
429
+
430
+ xlogy = _add_docstr(_special.special_xlogy,
431
+ r"""
432
+ xlogy(input, other, *, out=None) -> Tensor
433
+
434
+ Computes ``input * log(other)`` with the following cases.
435
+
436
+ .. math::
437
+ \text{out}_{i} = \begin{cases}
438
+ \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
439
+ 0 & \text{if } \text{input}_{i} = 0.0 \\
440
+ \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise}
441
+ \end{cases}
442
+
443
+ Similar to SciPy's `scipy.special.xlogy`.
444
+
445
+ """ + r"""
446
+
447
+ Args:
448
+ input (Number or Tensor) : Multiplier
449
+ other (Number or Tensor) : Argument
450
+
451
+ .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
452
+
453
+ Keyword args:
454
+ {out}
455
+
456
+ Example::
457
+
458
+ >>> x = torch.zeros(5,)
459
+ >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
460
+ >>> torch.special.xlogy(x, y)
461
+ tensor([0., 0., 0., 0., nan])
462
+ >>> x = torch.tensor([1, 2, 3])
463
+ >>> y = torch.tensor([3, 2, 1])
464
+ >>> torch.special.xlogy(x, y)
465
+ tensor([1.0986, 1.3863, 0.0000])
466
+ >>> torch.special.xlogy(x, 4)
467
+ tensor([1.3863, 2.7726, 4.1589])
468
+ >>> torch.special.xlogy(2, y)
469
+ tensor([2.1972, 1.3863, 0.0000])
470
+ """.format(**common_args))
471
+
472
+ i0 = _add_docstr(_special.special_i0,
473
+ r"""
474
+ i0(input, *, out=None) -> Tensor
475
+
476
+ Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`.
477
+
478
+ .. math::
479
+ \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
480
+
481
+ """ + r"""
482
+ Args:
483
+ input (Tensor): the input tensor
484
+
485
+ Keyword args:
486
+ {out}
487
+
488
+ Example::
489
+
490
+ >>> torch.i0(torch.arange(5, dtype=torch.float32))
491
+ tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019])
492
+
493
+ """.format(**common_args))
494
+
495
+ i0e = _add_docstr(_special.special_i0e,
496
+ r"""
497
+ i0e(input, *, out=None) -> Tensor
498
+ Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below)
499
+ for each element of :attr:`input`.
500
+
501
+ .. math::
502
+ \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
503
+
504
+ """ + r"""
505
+ Args:
506
+ {input}
507
+
508
+ Keyword args:
509
+ {out}
510
+
511
+ Example::
512
+ >>> torch.special.i0e(torch.arange(5, dtype=torch.float32))
513
+ tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070])
514
+ """.format(**common_args))
515
+
516
+ i1 = _add_docstr(_special.special_i1,
517
+ r"""
518
+ i1(input, *, out=None) -> Tensor
519
+ Computes the first order modified Bessel function of the first kind (as defined below)
520
+ for each element of :attr:`input`.
521
+
522
+ .. math::
523
+ \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
524
+
525
+ """ + r"""
526
+ Args:
527
+ {input}
528
+
529
+ Keyword args:
530
+ {out}
531
+
532
+ Example::
533
+ >>> torch.special.i1(torch.arange(5, dtype=torch.float32))
534
+ tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595])
535
+ """.format(**common_args))
536
+
537
+ i1e = _add_docstr(_special.special_i1e,
538
+ r"""
539
+ i1e(input, *, out=None) -> Tensor
540
+ Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below)
541
+ for each element of :attr:`input`.
542
+
543
+ .. math::
544
+ \text{out}_{i} = \exp(-|x|) * i1(x) =
545
+ \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
546
+
547
+ """ + r"""
548
+ Args:
549
+ {input}
550
+
551
+ Keyword args:
552
+ {out}
553
+
554
+ Example::
555
+ >>> torch.special.i1e(torch.arange(5, dtype=torch.float32))
556
+ tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788])
557
+ """.format(**common_args))
558
+
559
+ ndtr = _add_docstr(_special.special_ndtr,
560
+ r"""
561
+ ndtr(input, *, out=None) -> Tensor
562
+ Computes the area under the standard Gaussian probability density function,
563
+ integrated from minus infinity to :attr:`input`, elementwise.
564
+
565
+ .. math::
566
+ \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt
567
+
568
+ """ + r"""
569
+ Args:
570
+ {input}
571
+
572
+ Keyword args:
573
+ {out}
574
+
575
+ Example::
576
+ >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
577
+ tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987])
578
+ """.format(**common_args))
579
+
580
+ ndtri = _add_docstr(_special.special_ndtri,
581
+ r"""
582
+ ndtri(input, *, out=None) -> Tensor
583
+ Computes the argument, x, for which the area under the Gaussian probability density function
584
+ (integrated from minus infinity to x) is equal to :attr:`input`, elementwise.
585
+
586
+ .. math::
587
+ \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1)
588
+
589
+ .. note::
590
+ Also known as quantile function for Normal Distribution.
591
+
592
+ """ + r"""
593
+ Args:
594
+ {input}
595
+
596
+ Keyword args:
597
+ {out}
598
+
599
+ Example::
600
+ >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1]))
601
+ tensor([ -inf, -0.6745, 0.0000, 0.6745, inf])
602
+ """.format(**common_args))
603
+
604
+ log_ndtr = _add_docstr(_special.special_log_ndtr,
605
+ r"""
606
+ log_ndtr(input, *, out=None) -> Tensor
607
+ Computes the log of the area under the standard Gaussian probability density function,
608
+ integrated from minus infinity to :attr:`input`, elementwise.
609
+
610
+ .. math::
611
+ \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right)
612
+
613
+ """ + r"""
614
+ Args:
615
+ {input}
616
+
617
+ Keyword args:
618
+ {out}
619
+
620
+ Example::
621
+ >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
622
+ tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014])
623
+ """.format(**common_args))
624
+
625
+ log1p = _add_docstr(_special.special_log1p,
626
+ r"""
627
+ log1p(input, *, out=None) -> Tensor
628
+
629
+ Alias for :func:`torch.log1p`.
630
+ """)
631
+
632
+ sinc = _add_docstr(_special.special_sinc,
633
+ r"""
634
+ sinc(input, *, out=None) -> Tensor
635
+
636
+ Computes the normalized sinc of :attr:`input`.
637
+
638
+ .. math::
639
+ \text{out}_{i} =
640
+ \begin{cases}
641
+ 1, & \text{if}\ \text{input}_{i}=0 \\
642
+ \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise}
643
+ \end{cases}
644
+ """ + r"""
645
+
646
+ Args:
647
+ {input}
648
+
649
+ Keyword args:
650
+ {out}
651
+
652
+ Example::
653
+ >>> t = torch.randn(4)
654
+ >>> t
655
+ tensor([ 0.2252, -0.2948, 1.0267, -1.1566])
656
+ >>> torch.special.sinc(t)
657
+ tensor([ 0.9186, 0.8631, -0.0259, -0.1300])
658
+ """.format(**common_args))
659
+
660
+ round = _add_docstr(_special.special_round,
661
+ r"""
662
+ round(input, *, out=None) -> Tensor
663
+
664
+ Alias for :func:`torch.round`.
665
+ """)
666
+
667
+ softmax = _add_docstr(_special.special_softmax,
668
+ r"""
669
+ softmax(input, dim, *, dtype=None) -> Tensor
670
+
671
+ Computes the softmax function.
672
+
673
+ Softmax is defined as:
674
+
675
+ :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
676
+
677
+ It is applied to all slices along dim, and will re-scale them so that the elements
678
+ lie in the range `[0, 1]` and sum to 1.
679
+
680
+ Args:
681
+ input (Tensor): input
682
+ dim (int): A dimension along which softmax will be computed.
683
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
684
+ If specified, the input tensor is cast to :attr:`dtype` before the operation
685
+ is performed. This is useful for preventing data type overflows. Default: None.
686
+
687
+ Examples::
688
+ >>> t = torch.ones(2, 2)
689
+ >>> torch.special.softmax(t, 0)
690
+ tensor([[0.5000, 0.5000],
691
+ [0.5000, 0.5000]])
692
+
693
+ """)
694
+
695
+ log_softmax = _add_docstr(_special.special_log_softmax,
696
+ r"""
697
+ log_softmax(input, dim, *, dtype=None) -> Tensor
698
+
699
+ Computes softmax followed by a logarithm.
700
+
701
+ While mathematically equivalent to log(softmax(x)), doing these two
702
+ operations separately is slower and numerically unstable. This function
703
+ is computed as:
704
+
705
+ .. math::
706
+ \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
707
+ """ + r"""
708
+
709
+ Args:
710
+ input (Tensor): input
711
+ dim (int): A dimension along which log_softmax will be computed.
712
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
713
+ If specified, the input tensor is cast to :attr:`dtype` before the operation
714
+ is performed. This is useful for preventing data type overflows. Default: None.
715
+
716
+ Example::
717
+ >>> t = torch.ones(2, 2)
718
+ >>> torch.special.log_softmax(t, 0)
719
+ tensor([[-0.6931, -0.6931],
720
+ [-0.6931, -0.6931]])
721
+ """)
722
+
723
+ zeta = _add_docstr(_special.special_zeta,
724
+ r"""
725
+ zeta(input, other, *, out=None) -> Tensor
726
+
727
+ Computes the Hurwitz zeta function, elementwise.
728
+
729
+ .. math::
730
+ \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}
731
+
732
+ """ + r"""
733
+ Args:
734
+ input (Tensor): the input tensor corresponding to `x`.
735
+ other (Tensor): the input tensor corresponding to `q`.
736
+
737
+ .. note::
738
+ The Riemann zeta function corresponds to the case when `q = 1`
739
+
740
+ Keyword args:
741
+ {out}
742
+
743
+ Example::
744
+ >>> x = torch.tensor([2., 4.])
745
+ >>> torch.special.zeta(x, 1)
746
+ tensor([1.6449, 1.0823])
747
+ >>> torch.special.zeta(x, torch.tensor([1., 2.]))
748
+ tensor([1.6449, 0.0823])
749
+ >>> torch.special.zeta(2, torch.tensor([1., 2.]))
750
+ tensor([1.6449, 0.6449])
751
+ """.format(**common_args))
752
+
753
+ multigammaln = _add_docstr(_special.special_multigammaln,
754
+ r"""
755
+ multigammaln(input, p, *, out=None) -> Tensor
756
+
757
+ Computes the `multivariate log-gamma function
758
+ <https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
759
+ :math:`p` element-wise, given by
760
+
761
+ .. math::
762
+ \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)
763
+
764
+ where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function.
765
+
766
+ All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefined.
767
+ """ + """
768
+
769
+ Args:
770
+ input (Tensor): the tensor to compute the multivariate log-gamma function
771
+ p (int): the number of dimensions
772
+
773
+ Keyword args:
774
+ {out}
775
+
776
+ Example::
777
+
778
+ >>> a = torch.empty(2, 3).uniform_(1, 2)
779
+ >>> a
780
+ tensor([[1.6835, 1.8474, 1.1929],
781
+ [1.0475, 1.7162, 1.4180]])
782
+ >>> torch.special.multigammaln(a, 2)
783
+ tensor([[0.3928, 0.4007, 0.7586],
784
+ [1.0311, 0.3901, 0.5049]])
785
+ """.format(**common_args))
786
+
787
+ gammainc = _add_docstr(_special.special_gammainc,
788
+ r"""
789
+ gammainc(input, other, *, out=None) -> Tensor
790
+
791
+ Computes the regularized lower incomplete gamma function:
792
+
793
+ .. math::
794
+ \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt
795
+
796
+ where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
797
+ and at least one is strictly positive.
798
+ If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
799
+ :math:`\Gamma(\cdot)` in the equation above is the gamma function,
800
+
801
+ .. math::
802
+ \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
803
+
804
+ See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions.
805
+
806
+ Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
807
+ and float inputs.
808
+
809
+ .. note::
810
+ The backward pass with respect to :attr:`input` is not yet supported.
811
+ Please open an issue on PyTorch's Github to request it.
812
+
813
+ """ + r"""
814
+ Args:
815
+ input (Tensor): the first non-negative input tensor
816
+ other (Tensor): the second non-negative input tensor
817
+
818
+ Keyword args:
819
+ {out}
820
+
821
+ Example::
822
+
823
+ >>> a1 = torch.tensor([4.0])
824
+ >>> a2 = torch.tensor([3.0, 4.0, 5.0])
825
+ >>> a = torch.special.gammainc(a1, a2)
+ tensor([0.3528, 0.5665, 0.7350])
828
+ >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
829
+ tensor([1., 1., 1.])
830
+
831
+ """.format(**common_args))
832
+
833
+ gammaincc = _add_docstr(_special.special_gammaincc,
834
+ r"""
835
+ gammaincc(input, other, *, out=None) -> Tensor
836
+
837
+ Computes the regularized upper incomplete gamma function:
838
+
839
+ .. math::
840
+ \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt
841
+
842
+ where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
843
+ and at least one is strictly positive.
844
+ If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
845
+ :math:`\Gamma(\cdot)` in the equation above is the gamma function,
846
+
847
+ .. math::
848
+ \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
849
+
850
+ See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions.
851
+
852
+ Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
853
+ and float inputs.
854
+
855
+ .. note::
856
+ The backward pass with respect to :attr:`input` is not yet supported.
857
+ Please open an issue on PyTorch's Github to request it.
858
+
859
+ """ + r"""
860
+ Args:
861
+ input (Tensor): the first non-negative input tensor
862
+ other (Tensor): the second non-negative input tensor
863
+
864
+ Keyword args:
865
+ {out}
866
+
867
+ Example::
868
+
869
+ >>> a1 = torch.tensor([4.0])
870
+ >>> a2 = torch.tensor([3.0, 4.0, 5.0])
871
+ >>> a = torch.special.gammaincc(a1, a2)
872
+ tensor([0.6472, 0.4335, 0.2650])
873
+ >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
874
+ tensor([1., 1., 1.])
875
+
876
+ """.format(**common_args))
877
+
878
+ airy_ai = _add_docstr(_special.special_airy_ai,
879
+ r"""
880
+ airy_ai(input, *, out=None) -> Tensor
881
+
882
+ Airy function :math:`\text{Ai}\left(\text{input}\right)`.
883
+
884
+ """ + r"""
885
+ Args:
886
+ {input}
887
+
888
+ Keyword args:
889
+ {out}
890
+ """.format(**common_args))
891
+
892
+ bessel_j0 = _add_docstr(_special.special_bessel_j0,
893
+ r"""
894
+ bessel_j0(input, *, out=None) -> Tensor
895
+
896
+ Bessel function of the first kind of order :math:`0`.
897
+
898
+ """ + r"""
899
+ Args:
900
+ {input}
901
+
902
+ Keyword args:
903
+ {out}
904
+ """.format(**common_args))
905
+
906
+ bessel_j1 = _add_docstr(_special.special_bessel_j1,
907
+ r"""
908
+ bessel_j1(input, *, out=None) -> Tensor
909
+
910
+ Bessel function of the first kind of order :math:`1`.
911
+
912
+ """ + r"""
913
+ Args:
914
+ {input}
915
+
916
+ Keyword args:
917
+ {out}
918
+ """.format(**common_args))
919
+
920
+ bessel_y0 = _add_docstr(_special.special_bessel_y0,
921
+ r"""
922
+ bessel_y0(input, *, out=None) -> Tensor
923
+
924
+ Bessel function of the second kind of order :math:`0`.
925
+
926
+ """ + r"""
927
+ Args:
928
+ {input}
929
+
930
+ Keyword args:
931
+ {out}
932
+ """.format(**common_args))
933
+
934
+ bessel_y1 = _add_docstr(_special.special_bessel_y1,
935
+ r"""
936
+ bessel_y1(input, *, out=None) -> Tensor
937
+
938
+ Bessel function of the second kind of order :math:`1`.
939
+
940
+ """ + r"""
941
+ Args:
942
+ {input}
943
+
944
+ Keyword args:
945
+ {out}
946
+ """.format(**common_args))
947
+
948
+ chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t,
949
+ r"""
950
+ chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
951
+
952
+ Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`.
953
+
954
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
955
+ is returned. If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion:
956
+
957
+ .. math::
958
+ T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})
959
+
960
+ is evaluated. Otherwise, the explicit trigonometric formula:
961
+
962
+ .. math::
963
+ T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x))
964
+
965
+ is evaluated.
966
+
967
+ """ + r"""
968
+ Args:
969
+ {input}
970
+ n (Tensor): Degree of the polynomial.
971
+
972
+ Keyword args:
973
+ {out}
974
+ """.format(**common_args))
975
+
976
+ chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u,
977
+ r"""
978
+ chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
979
+
980
+ Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`.
981
+
982
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`,
983
+ :math:`2 \times \text{input}` is returned. If :math:`n < 6` or
984
+ :math:`|\text{input}| > 1`, the recursion:
985
+
986
+ .. math::
987
+ U_{n + 1}(\text{input}) = 2 \times \text{input} \times U_{n}(\text{input}) - U_{n - 1}(\text{input})
988
+
989
+ is evaluated. Otherwise, the explicit trigonometric formula:
990
+
991
+ .. math::
992
+ \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))}
993
+
994
+ is evaluated.
995
+
996
+ """ + r"""
997
+ Args:
998
+ {input}
999
+ n (Tensor): Degree of the polynomial.
1000
+
1001
+ Keyword args:
1002
+ {out}
1003
+ """.format(**common_args))
1004
+
1005
+ chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v,
1006
+ r"""
1007
+ chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
1008
+
1009
+ Chebyshev polynomial of the third kind :math:`V_{n}(\text{input})`.
1010
+
1011
+ """ + r"""
1012
+ Args:
1013
+ {input}
1014
+ n (Tensor): Degree of the polynomial.
1015
+
1016
+ Keyword args:
1017
+ {out}
1018
+ """.format(**common_args))
1019
+
1020
+ chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w,
1021
+ r"""
1022
+ chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
1023
+
1024
+ Chebyshev polynomial of the fourth kind :math:`W_{n}(\text{input})`.
1025
+
1026
+ """ + r"""
1027
+ Args:
1028
+ {input}
1029
+ n (Tensor): Degree of the polynomial.
1030
+
1031
+ Keyword args:
1032
+ {out}
1033
+ """.format(**common_args))
1034
+
1035
+ hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
1036
+ r"""
1037
+ hermite_polynomial_h(input, n, *, out=None) -> Tensor
1038
+
1039
+ Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`.
1040
+
1041
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
1042
+ is returned. Otherwise, the recursion:
1043
+
1044
+ .. math::
1045
+ H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input})
1046
+
1047
+ is evaluated.
1048
+
1049
+ """ + r"""
1050
+ Args:
1051
+ {input}
1052
+ n (Tensor): Degree of the polynomial.
1053
+
1054
+ Keyword args:
1055
+ {out}
1056
+ """.format(**common_args))
1057
+
1058
+ hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
1059
+ r"""
1060
+ hermite_polynomial_he(input, n, *, out=None) -> Tensor
1061
+
1062
+ Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`.
1063
+
1064
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
1065
+ is returned. Otherwise, the recursion:
1066
+
1067
+ .. math::
1068
+ He_{n + 1}(\text{input}) = \text{input} \times He_{n}(\text{input}) - n \times He_{n - 1}(\text{input})
1069
+
1070
+ is evaluated.
1071
+
1072
+ """ + r"""
1073
+ Args:
1074
+ {input}
1075
+ n (Tensor): Degree of the polynomial.
1076
+
1077
+ Keyword args:
1078
+ {out}
1079
+ """.format(**common_args))
1080
+
1081
+ laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l,
1082
+ r"""
1083
+ laguerre_polynomial_l(input, n, *, out=None) -> Tensor
1084
+
1085
+ Laguerre polynomial :math:`L_{n}(\text{input})`.
1086
+
1087
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
1088
+ is returned. Otherwise, the recursion:
1089
+
1090
+ .. math::
1091
+ (n + 1) \times L_{n + 1}(\text{input}) = (2n + 1 - \text{input}) \times L_{n}(\text{input}) - n \times L_{n - 1}(\text{input})
1092
+
1093
+ is evaluated.
1094
+
1095
+ """ + r"""
1096
+ Args:
1097
+ {input}
1098
+ n (Tensor): Degree of the polynomial.
1099
+
1100
+ Keyword args:
1101
+ {out}
1102
+ """.format(**common_args))
1103
+
1104
+ legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p,
1105
+ r"""
1106
+ legendre_polynomial_p(input, n, *, out=None) -> Tensor
1107
+
1108
+ Legendre polynomial :math:`P_{n}(\text{input})`.
1109
+
1110
+ If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
1111
+ is returned. Otherwise, the recursion:
1112
+
1113
+ .. math::
1114
+ (n + 1) \times P_{n + 1}(\text{input}) = (2n + 1) \times \text{input} \times P_{n}(\text{input}) - n \times P_{n - 1}(\text{input})
1115
+
1116
+ is evaluated.
1117
+
1118
+ """ + r"""
1119
+ Args:
1120
+ {input}
1121
+ n (Tensor): Degree of the polynomial.
1122
+
1123
+ Keyword args:
1124
+ {out}
1125
+ """.format(**common_args))
1126
+
1127
+ modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0,
1128
+ r"""
1129
+ modified_bessel_i0(input, *, out=None) -> Tensor
1130
+
1131
+ Modified Bessel function of the first kind of order :math:`0`.
1132
+
1133
+ """ + r"""
1134
+ Args:
1135
+ {input}
1136
+
1137
+ Keyword args:
1138
+ {out}
1139
+ """.format(**common_args))
1140
+
1141
+ modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1,
1142
+ r"""
1143
+ modified_bessel_i1(input, *, out=None) -> Tensor
1144
+
1145
+ Modified Bessel function of the first kind of order :math:`1`.
1146
+
1147
+ """ + r"""
1148
+ Args:
1149
+ {input}
1150
+
1151
+ Keyword args:
1152
+ {out}
1153
+ """.format(**common_args))
1154
+
1155
+ modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0,
1156
+ r"""
1157
+ modified_bessel_k0(input, *, out=None) -> Tensor
1158
+
1159
+ Modified Bessel function of the second kind of order :math:`0`.
1160
+
1161
+ """ + r"""
1162
+ Args:
1163
+ {input}
1164
+
1165
+ Keyword args:
1166
+ {out}
1167
+ """.format(**common_args))
1168
+
1169
+ modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1,
1170
+ r"""
1171
+ modified_bessel_k1(input, *, out=None) -> Tensor
1172
+
1173
+ Modified Bessel function of the second kind of order :math:`1`.
1174
+
1175
+ """ + r"""
1176
+ Args:
1177
+ {input}
1178
+
1179
+ Keyword args:
1180
+ {out}
1181
+ """.format(**common_args))
1182
+
1183
+ scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0,
1184
+ r"""
1185
+ scaled_modified_bessel_k0(input, *, out=None) -> Tensor
1186
+
1187
+ Scaled modified Bessel function of the second kind of order :math:`0`.
1188
+
1189
+ """ + r"""
1190
+ Args:
1191
+ {input}
1192
+
1193
+ Keyword args:
1194
+ {out}
1195
+ """.format(**common_args))
1196
+
1197
+ scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1,
1198
+ r"""
1199
+ scaled_modified_bessel_k1(input, *, out=None) -> Tensor
1200
+
1201
+ Scaled modified Bessel function of the second kind of order :math:`1`.
1202
+
1203
+ """ + r"""
1204
+ Args:
1205
+ {input}
1206
+
1207
+ Keyword args:
1208
+ {out}
1209
+ """.format(**common_args))
1210
+
1211
+ shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t,
1212
+ r"""
1213
+ shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
1214
+
1215
+ Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`.
1216
+
1217
+ """ + r"""
1218
+ Args:
1219
+ {input}
1220
+ n (Tensor): Degree of the polynomial.
1221
+
1222
+ Keyword args:
1223
+ {out}
1224
+ """.format(**common_args))
1225
+
1226
+ shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u,
1227
+ r"""
1228
+ shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
1229
+
1230
+ Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`.
1231
+
1232
+ """ + r"""
1233
+ Args:
1234
+ {input}
1235
+ n (Tensor): Degree of the polynomial.
1236
+
1237
+ Keyword args:
1238
+ {out}
1239
+ """.format(**common_args))
1240
+
1241
+ shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v,
1242
+ r"""
1243
+ shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
1244
+
1245
+ Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.
1246
+
1247
+ """ + r"""
1248
+ Args:
1249
+ {input}
1250
+ n (Tensor): Degree of the polynomial.
1251
+
1252
+ Keyword args:
1253
+ {out}
1254
+ """.format(**common_args))
1255
+
1256
+ shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w,
1257
+ r"""
1258
+ shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
1259
+
1260
+ Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.
1261
+
1262
+ """ + r"""
1263
+ Args:
1264
+ {input}
1265
+ n (Tensor): Degree of the polynomial.
1266
+
1267
+ Keyword args:
1268
+ {out}
1269
+ """.format(**common_args))
1270
+
1271
+ spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0,
1272
+ r"""
1273
+ spherical_bessel_j0(input, *, out=None) -> Tensor
1274
+
1275
+ Spherical Bessel function of the first kind of order :math:`0`.
1276
+
1277
+ """ + r"""
1278
+ Args:
1279
+ {input}
1280
+
1281
+ Keyword args:
1282
+ {out}
1283
+ """.format(**common_args))
env-llmeval/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (28.4 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/testing/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from torch._C import FileCheck as FileCheck
2
+ from ._comparison import assert_allclose, assert_close as assert_close
3
+ from ._creation import make_tensor as make_tensor
env-llmeval/lib/python3.10/site-packages/torch/testing/_comparison.py ADDED
@@ -0,0 +1,1572 @@
1
+ import abc
2
+ import cmath
3
+ import collections.abc
4
+ import contextlib
5
+ import warnings
6
+ from typing import (
7
+ Any,
8
+ Callable,
9
+ Collection,
10
+ Dict,
11
+ List,
12
+ NoReturn,
13
+ Optional,
14
+ Sequence,
15
+ Tuple,
16
+ Type,
17
+ Union,
18
+ )
19
+
20
+ import torch
21
+
22
+ try:
23
+ import numpy as np
24
+
25
+ NUMPY_AVAILABLE = True
26
+ except ModuleNotFoundError:
27
+ NUMPY_AVAILABLE = False
28
+
29
+
30
+ class ErrorMeta(Exception):
31
+ """Internal testing exception that makes that carries error metadata."""
32
+
33
+ def __init__(
34
+ self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
35
+ ) -> None:
36
+ super().__init__(
37
+ "If you are a user and see this message during normal operation "
38
+ "please file an issue at https://github.com/pytorch/pytorch/issues. "
39
+ "If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` "
40
+ "for user facing errors."
41
+ )
42
+ self.type = type
43
+ self.msg = msg
44
+ self.id = id
45
+
46
+ def to_error(
47
+ self, msg: Optional[Union[str, Callable[[str], str]]] = None
48
+ ) -> Exception:
49
+ if not isinstance(msg, str):
50
+ generated_msg = self.msg
51
+ if self.id:
52
+ generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}"
53
+
54
+ msg = msg(generated_msg) if callable(msg) else generated_msg
55
+
56
+ return self.type(msg)
57
+
58
+
59
+ # Some analysis of tolerance by logging tests from test_torch.py can be found in
60
+ # https://github.com/pytorch/pytorch/pull/32538.
61
+ # {dtype: (rtol, atol)}
62
+ _DTYPE_PRECISIONS = {
63
+ torch.float16: (0.001, 1e-5),
64
+ torch.bfloat16: (0.016, 1e-5),
65
+ torch.float32: (1.3e-6, 1e-5),
66
+ torch.float64: (1e-7, 1e-7),
67
+ torch.complex32: (0.001, 1e-5),
68
+ torch.complex64: (1.3e-6, 1e-5),
69
+ torch.complex128: (1e-7, 1e-7),
70
+ }
71
+ # The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in
72
+ # their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values`
73
+ _DTYPE_PRECISIONS.update(
74
+ {
75
+ dtype: _DTYPE_PRECISIONS[torch.float32]
76
+ for dtype in (
77
+ torch.quint8,
78
+ torch.quint2x4,
79
+ torch.quint4x2,
80
+ torch.qint8,
81
+ torch.qint32,
82
+ )
83
+ }
84
+ )
85
+
86
+
87
+ def default_tolerances(
88
+ *inputs: Union[torch.Tensor, torch.dtype],
89
+ dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None,
90
+ ) -> Tuple[float, float]:
91
+ """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype.
92
+
93
+ See :func:`assert_close` for a table of the default tolerance for each dtype.
94
+
95
+ Returns:
96
+ (Tuple[float, float]): Loosest tolerances of all input dtypes.
97
+ """
98
+ dtypes = []
99
+ for input in inputs:
100
+ if isinstance(input, torch.Tensor):
101
+ dtypes.append(input.dtype)
102
+ elif isinstance(input, torch.dtype):
103
+ dtypes.append(input)
104
+ else:
105
+ raise TypeError(
106
+ f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead."
107
+ )
108
+ dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS
109
+ rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes])
110
+ return max(rtols), max(atols)
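A minimal usage sketch for default_tolerances (illustrative, not part of the committed file): mixing dtypes resolves to the loosest pair from the table above, so float16 together with float64 yields the float16 tolerances.

import torch
from torch.testing._comparison import default_tolerances

# loosest of float16 (0.001, 1e-5) and float64 (1e-7, 1e-7)
rtol, atol = default_tolerances(torch.float16, torch.float64)
assert (rtol, atol) == (0.001, 1e-5)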
111
+
112
+
113
+ def get_tolerances(
114
+ *inputs: Union[torch.Tensor, torch.dtype],
115
+ rtol: Optional[float],
116
+ atol: Optional[float],
117
+ id: Tuple[Any, ...] = (),
118
+ ) -> Tuple[float, float]:
119
+ """Gets absolute and relative to be used for numeric comparisons.
120
+
121
+ If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of
122
+ :func:`default_tolerances` is used.
123
+
124
+ Raises:
125
+ ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified.
126
+
127
+ Returns:
128
+ (Tuple[float, float]): Valid absolute and relative tolerances.
129
+ """
130
+ if (rtol is None) ^ (atol is None):
131
+ # We require both tolerances to be either omitted or specified, because specifying only one might lead to surprising
132
+ # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0.
133
+ raise ErrorMeta(
134
+ ValueError,
135
+ f"Both 'rtol' and 'atol' must be either specified or omitted, "
136
+ f"but got no {'rtol' if rtol is None else 'atol'}.",
137
+ id=id,
138
+ )
139
+ elif rtol is not None and atol is not None:
140
+ return rtol, atol
141
+ else:
142
+ return default_tolerances(*inputs)
143
+
144
+
145
+ def _make_mismatch_msg(
146
+ *,
147
+ default_identifier: str,
148
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
149
+ extra: Optional[str] = None,
150
+ abs_diff: float,
151
+ abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
152
+ atol: float,
153
+ rel_diff: float,
154
+ rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
155
+ rtol: float,
156
+ ) -> str:
157
+ """Makes a mismatch error message for numeric values.
158
+
159
+ Args:
160
+ default_identifier (str): Default description of the compared values, e.g. "Tensor-likes".
161
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides
162
+ ``default_identifier``. Can be passed as callable in which case it will be called with
163
+ ``default_identifier`` to create the description at runtime.
164
+ extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics.
165
+ abs_diff (float): Absolute difference.
166
+ abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference.
167
+ atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are
168
+ ``> 0``.
169
+ rel_diff (float): Relative difference.
170
+ rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference.
171
+ rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are
172
+ ``> 0``.
173
+ """
174
+ equality = rtol == 0 and atol == 0
175
+
176
+ def make_diff_msg(
177
+ *,
178
+ type: str,
179
+ diff: float,
180
+ idx: Optional[Union[int, Tuple[int, ...]]],
181
+ tol: float,
182
+ ) -> str:
183
+ if idx is None:
184
+ msg = f"{type.title()} difference: {diff}"
185
+ else:
186
+ msg = f"Greatest {type} difference: {diff} at index {idx}"
187
+ if not equality:
188
+ msg += f" (up to {tol} allowed)"
189
+ return msg + "\n"
190
+
191
+ if identifier is None:
192
+ identifier = default_identifier
193
+ elif callable(identifier):
194
+ identifier = identifier(default_identifier)
195
+
196
+ msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n"
197
+
198
+ if extra:
199
+ msg += f"{extra.strip()}\n"
200
+
201
+ msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol)
202
+ msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol)
203
+
204
+ return msg.strip()
205
+
206
+
207
+ def make_scalar_mismatch_msg(
208
+ actual: Union[bool, int, float, complex],
209
+ expected: Union[bool, int, float, complex],
210
+ *,
211
+ rtol: float,
212
+ atol: float,
213
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
214
+ ) -> str:
215
+ """Makes a mismatch error message for scalars.
216
+
217
+ Args:
218
+ actual (Union[bool, int, float, complex]): Actual scalar.
219
+ expected (Union[bool, int, float, complex]): Expected scalar.
220
+ rtol (float): Relative tolerance.
221
+ atol (float): Absolute tolerance.
222
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed
223
+ as callable in which case it will be called by the default value to create the description at runtime.
224
+ Defaults to "Scalars".
225
+ """
226
+ abs_diff = abs(actual - expected)
227
+ rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected)
228
+ return _make_mismatch_msg(
229
+ default_identifier="Scalars",
230
+ identifier=identifier,
231
+ extra=f"Expected {expected} but got {actual}.",
232
+ abs_diff=abs_diff,
233
+ atol=atol,
234
+ rel_diff=rel_diff,
235
+ rtol=rtol,
236
+ )
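A short usage sketch for the scalar message helper above (illustrative only, not part of the file): it renders the header, the expected/actual line, and the absolute and relative differences with the allowed tolerances.

from torch.testing._comparison import make_scalar_mismatch_msg

msg = make_scalar_mismatch_msg(1.0, 1.5, rtol=1.3e-6, atol=1e-5)
print(msg)  # "Scalars are not close!" followed by the mismatch statistics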
237
+
238
+
239
+ def make_tensor_mismatch_msg(
240
+ actual: torch.Tensor,
241
+ expected: torch.Tensor,
242
+ matches: torch.Tensor,
243
+ *,
244
+ rtol: float,
245
+ atol: float,
246
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
247
+ ):
248
+ """Makes a mismatch error message for tensors.
249
+
250
+ Args:
251
+ actual (torch.Tensor): Actual tensor.
252
+ expected (torch.Tensor): Expected tensor.
253
+ matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the
254
+ location of matches.
255
+ rtol (float): Relative tolerance.
256
+ atol (float): Absolute tolerance.
257
+ identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed
258
+ as callable in which case it will be called by the default value to create the description at runtime.
259
+ Defaults to "Tensor-likes".
260
+ """
261
+
262
+ def unravel_flat_index(flat_index: int) -> Tuple[int, ...]:
263
+ if not matches.shape:
264
+ return ()
265
+
266
+ inverse_index = []
267
+ for size in matches.shape[::-1]:
268
+ div, mod = divmod(flat_index, size)
269
+ flat_index = div
270
+ inverse_index.append(mod)
271
+
272
+ return tuple(inverse_index[::-1])
273
+
274
+ number_of_elements = matches.numel()
275
+ total_mismatches = number_of_elements - int(torch.sum(matches))
276
+ extra = (
277
+ f"Mismatched elements: {total_mismatches} / {number_of_elements} "
278
+ f"({total_mismatches / number_of_elements:.1%})"
279
+ )
280
+
281
+ actual_flat = actual.flatten()
282
+ expected_flat = expected.flatten()
283
+ matches_flat = matches.flatten()
284
+
285
+ if not actual.dtype.is_floating_point and not actual.dtype.is_complex:
286
+ # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid
287
+ # overflow
288
+ actual_flat = actual_flat.to(torch.int64)
289
+ expected_flat = expected_flat.to(torch.int64)
290
+
291
+ abs_diff = torch.abs(actual_flat - expected_flat)
292
+ # Ensure that only mismatches are used for the max_abs_diff computation
293
+ abs_diff[matches_flat] = 0
294
+ max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0)
295
+
296
+ rel_diff = abs_diff / torch.abs(expected_flat)
297
+ # Ensure that only mismatches are used for the max_rel_diff computation
298
+ rel_diff[matches_flat] = 0
299
+ max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0)
300
+ return _make_mismatch_msg(
301
+ default_identifier="Tensor-likes",
302
+ identifier=identifier,
303
+ extra=extra,
304
+ abs_diff=max_abs_diff.item(),
305
+ abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)),
306
+ atol=atol,
307
+ rel_diff=max_rel_diff.item(),
308
+ rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)),
309
+ rtol=rtol,
310
+ )
311
+
312
+
313
+ class UnsupportedInputs(Exception): # noqa: B903
314
+ """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs."""
315
+
316
+
317
+ class Pair(abc.ABC):
318
+ """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`.
319
+
320
+ Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison.
321
+
322
+ Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the
323
+ super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to
324
+ handle the inputs and the next pair type will be tried.
325
+
326
+ All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can
327
+ be used to automatically handle overwriting the message with a user supplied one and id handling.
328
+ """
329
+
330
+ def __init__(
331
+ self,
332
+ actual: Any,
333
+ expected: Any,
334
+ *,
335
+ id: Tuple[Any, ...] = (),
336
+ **unknown_parameters: Any,
337
+ ) -> None:
338
+ self.actual = actual
339
+ self.expected = expected
340
+ self.id = id
341
+ self._unknown_parameters = unknown_parameters
342
+
343
+ @staticmethod
344
+ def _inputs_not_supported() -> NoReturn:
345
+ raise UnsupportedInputs()
346
+
347
+ @staticmethod
348
+ def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]):
349
+ """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise."""
350
+ if not all(isinstance(input, cls) for input in inputs):
351
+ Pair._inputs_not_supported()
352
+
353
+ def _fail(
354
+ self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
355
+ ) -> NoReturn:
356
+ """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id.
357
+
358
+ .. warning::
359
+
360
+ If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id``
361
+ explicitly.
362
+ """
363
+ raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id)
364
+
365
+ @abc.abstractmethod
366
+ def compare(self) -> None:
367
+ """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch."""
368
+
369
+ def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]:
370
+ """Returns extra information that will be included in the representation.
371
+
372
+ Should be overwritten by all subclasses that use additional options. The representation of the object will only
373
+ be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of
374
+ key-value-pairs or attribute names.
375
+ """
376
+ return []
377
+
378
+ def __repr__(self) -> str:
379
+ head = f"{type(self).__name__}("
380
+ tail = ")"
381
+ body = [
382
+ f" {name}={value!s},"
383
+ for name, value in [
384
+ ("id", self.id),
385
+ ("actual", self.actual),
386
+ ("expected", self.expected),
387
+ *[
388
+ (extra, getattr(self, extra)) if isinstance(extra, str) else extra
389
+ for extra in self.extra_repr()
390
+ ],
391
+ ]
392
+ ]
393
+ return "\n".join((head, *body, *tail))
394
+
395
+
396
+ class ObjectPair(Pair):
397
+ """Pair for any type of inputs that will be compared with the `==` operator.
398
+
399
+ .. note::
400
+
401
+ Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs
402
+ couldn't handle the inputs.
403
+
404
+ """
405
+
406
+ def compare(self) -> None:
407
+ try:
408
+ equal = self.actual == self.expected
409
+ except Exception as error:
410
+ # We are not using `self._raise_error_meta` here since we need the exception chaining
411
+ raise ErrorMeta(
412
+ ValueError,
413
+ f"{self.actual} == {self.expected} failed with:\n{error}.",
414
+ id=self.id,
415
+ ) from error
416
+
417
+ if not equal:
418
+ self._fail(AssertionError, f"{self.actual} != {self.expected}")
419
+
420
+
421
+ class NonePair(Pair):
422
+ """Pair for ``None`` inputs."""
423
+
424
+ def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
425
+ if not (actual is None or expected is None):
426
+ self._inputs_not_supported()
427
+
428
+ super().__init__(actual, expected, **other_parameters)
429
+
430
+ def compare(self) -> None:
431
+ if not (self.actual is None and self.expected is None):
432
+ self._fail(
433
+ AssertionError, f"None mismatch: {self.actual} is not {self.expected}"
434
+ )
435
+
436
+
437
+ class BooleanPair(Pair):
438
+ """Pair for :class:`bool` inputs.
439
+
440
+ .. note::
441
+
442
+ If ``numpy`` is available, also handles :class:`numpy.bool_` inputs.
443
+
444
+ """
445
+
446
+ def __init__(
447
+ self,
448
+ actual: Any,
449
+ expected: Any,
450
+ *,
451
+ id: Tuple[Any, ...],
452
+ **other_parameters: Any,
453
+ ) -> None:
454
+ actual, expected = self._process_inputs(actual, expected, id=id)
455
+ super().__init__(actual, expected, **other_parameters)
456
+
457
+ @property
458
+ def _supported_types(self) -> Tuple[Type, ...]:
459
+ cls: List[Type] = [bool]
460
+ if NUMPY_AVAILABLE:
461
+ cls.append(np.bool_)
462
+ return tuple(cls)
463
+
464
+ def _process_inputs(
465
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
466
+ ) -> Tuple[bool, bool]:
467
+ self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
468
+ actual, expected = (
469
+ self._to_bool(bool_like, id=id) for bool_like in (actual, expected)
470
+ )
471
+ return actual, expected
472
+
473
+ def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool:
474
+ if isinstance(bool_like, bool):
475
+ return bool_like
476
+ elif isinstance(bool_like, np.bool_):
477
+ return bool_like.item()
478
+ else:
479
+ raise ErrorMeta(
480
+ TypeError, f"Unknown boolean type {type(bool_like)}.", id=id
481
+ )
482
+
483
+ def compare(self) -> None:
484
+ if self.actual is not self.expected:
485
+ self._fail(
486
+ AssertionError,
487
+ f"Booleans mismatch: {self.actual} is not {self.expected}",
488
+ )
489
+
490
+
491
+ class NumberPair(Pair):
492
+ """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs.
493
+
494
+ .. note::
495
+
496
+ If ``numpy`` is available, also handles :class:`numpy.number` inputs.
497
+
498
+ Kwargs:
499
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
500
+ values based on the type are selected with the below table.
501
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
502
+ values based on the type are selected with the below table.
503
+ equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
504
+ check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``.
505
+
506
+ The following table displays correspondence between Python number type and the ``torch.dtype``'s. See
507
+ :func:`assert_close` for the corresponding tolerances.
508
+
509
+ +------------------+-------------------------------+
510
+ | ``type`` | corresponding ``torch.dtype`` |
511
+ +==================+===============================+
512
+ | :class:`int` | :attr:`~torch.int64` |
513
+ +------------------+-------------------------------+
514
+ | :class:`float` | :attr:`~torch.float64` |
515
+ +------------------+-------------------------------+
516
+ | :class:`complex` | :attr:`~torch.complex128` |
517
+ +------------------+-------------------------------+
518
+ """
519
+
520
+ _TYPE_TO_DTYPE = {
521
+ int: torch.int64,
522
+ float: torch.float64,
523
+ complex: torch.complex128,
524
+ }
525
+ _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys())
526
+
527
+ def __init__(
528
+ self,
529
+ actual: Any,
530
+ expected: Any,
531
+ *,
532
+ id: Tuple[Any, ...] = (),
533
+ rtol: Optional[float] = None,
534
+ atol: Optional[float] = None,
535
+ equal_nan: bool = False,
536
+ check_dtype: bool = False,
537
+ **other_parameters: Any,
538
+ ) -> None:
539
+ actual, expected = self._process_inputs(actual, expected, id=id)
540
+ super().__init__(actual, expected, id=id, **other_parameters)
541
+
542
+ self.rtol, self.atol = get_tolerances(
543
+ *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)],
544
+ rtol=rtol,
545
+ atol=atol,
546
+ id=id,
547
+ )
548
+ self.equal_nan = equal_nan
549
+ self.check_dtype = check_dtype
550
+
551
+ @property
552
+ def _supported_types(self) -> Tuple[Type, ...]:
553
+ cls = list(self._NUMBER_TYPES)
554
+ if NUMPY_AVAILABLE:
555
+ cls.append(np.number)
556
+ return tuple(cls)
557
+
558
+ def _process_inputs(
559
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
560
+ ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]:
561
+ self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
562
+ actual, expected = (
563
+ self._to_number(number_like, id=id) for number_like in (actual, expected)
564
+ )
565
+ return actual, expected
566
+
567
+ def _to_number(
568
+ self, number_like: Any, *, id: Tuple[Any, ...]
569
+ ) -> Union[int, float, complex]:
570
+ if NUMPY_AVAILABLE and isinstance(number_like, np.number):
571
+ return number_like.item()
572
+ elif isinstance(number_like, self._NUMBER_TYPES):
573
+ return number_like # type: ignore[return-value]
574
+ else:
575
+ raise ErrorMeta(
576
+ TypeError, f"Unknown number type {type(number_like)}.", id=id
577
+ )
578
+
579
+ def compare(self) -> None:
580
+ if self.check_dtype and type(self.actual) is not type(self.expected):
581
+ self._fail(
582
+ AssertionError,
583
+ f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.",
584
+ )
585
+
586
+ if self.actual == self.expected:
587
+ return
588
+
589
+ if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected):
590
+ return
591
+
592
+ abs_diff = abs(self.actual - self.expected)
593
+ tolerance = self.atol + self.rtol * abs(self.expected)
594
+
595
+ if cmath.isfinite(abs_diff) and abs_diff <= tolerance:
596
+ return
597
+
598
+ self._fail(
599
+ AssertionError,
600
+ make_scalar_mismatch_msg(
601
+ self.actual, self.expected, rtol=self.rtol, atol=self.atol
602
+ ),
603
+ )
604
+
605
+ def extra_repr(self) -> Sequence[str]:
606
+ return (
607
+ "rtol",
608
+ "atol",
609
+ "equal_nan",
610
+ "check_dtype",
611
+ )
612
+
613
+
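A short worked example of the scalar closeness check in ``NumberPair.compare``: Python ``float`` inputs map to ``torch.float64``, whose defaults (see the tolerance table in ``assert_close``) are ``rtol=1e-7`` and ``atol=1e-7``. A minimal sketch:

import torch

# tolerance = atol + rtol * |expected| = 1e-7 + 1e-7 * 1.0 = 2e-7
torch.testing.assert_close(1.0 + 1e-7, 1.0)    # |diff| ~ 1e-7 <= 2e-7, passes
# torch.testing.assert_close(1.0 + 1e-6, 1.0)  # |diff| ~ 1e-6 >  2e-7, would raise AssertionError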
614
+ class TensorLikePair(Pair):
615
+ """Pair for :class:`torch.Tensor`-like inputs.
616
+
617
+ Kwargs:
618
+ allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types are allowed. Otherwise type equality is required.
619
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
620
+ values based on the type are selected. See :func:`assert_close` for details.
621
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
622
+ values based on the type are selected. See :func:`assert_close` for details.
623
+ equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
624
+ check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
625
+ :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
626
+ :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
627
+ check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
628
+ check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
629
+ :func:`torch.promote_types`) before being compared.
630
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
631
+ check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
632
+ compared.
633
+ check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
634
+ """
635
+
636
+ def __init__(
637
+ self,
638
+ actual: Any,
639
+ expected: Any,
640
+ *,
641
+ id: Tuple[Any, ...] = (),
642
+ allow_subclasses: bool = True,
643
+ rtol: Optional[float] = None,
644
+ atol: Optional[float] = None,
645
+ equal_nan: bool = False,
646
+ check_device: bool = True,
647
+ check_dtype: bool = True,
648
+ check_layout: bool = True,
649
+ check_stride: bool = False,
650
+ **other_parameters: Any,
651
+ ):
652
+ actual, expected = self._process_inputs(
653
+ actual, expected, id=id, allow_subclasses=allow_subclasses
654
+ )
655
+ super().__init__(actual, expected, id=id, **other_parameters)
656
+
657
+ self.rtol, self.atol = get_tolerances(
658
+ actual, expected, rtol=rtol, atol=atol, id=self.id
659
+ )
660
+ self.equal_nan = equal_nan
661
+ self.check_device = check_device
662
+ self.check_dtype = check_dtype
663
+ self.check_layout = check_layout
664
+ self.check_stride = check_stride
665
+
666
+ def _process_inputs(
667
+ self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool
668
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
669
+ directly_related = isinstance(actual, type(expected)) or isinstance(
670
+ expected, type(actual)
671
+ )
672
+ if not directly_related:
673
+ self._inputs_not_supported()
674
+
675
+ if not allow_subclasses and type(actual) is not type(expected):
676
+ self._inputs_not_supported()
677
+
678
+ actual, expected = (self._to_tensor(input) for input in (actual, expected))
679
+ for tensor in (actual, expected):
680
+ self._check_supported(tensor, id=id)
681
+ return actual, expected
682
+
683
+ def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
684
+ if isinstance(tensor_like, torch.Tensor):
685
+ return tensor_like
686
+
687
+ try:
688
+ return torch.as_tensor(tensor_like)
689
+ except Exception:
690
+ self._inputs_not_supported()
691
+
692
+ def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None:
693
+ if tensor.layout not in {
694
+ torch.strided,
695
+ torch.sparse_coo,
696
+ torch.sparse_csr,
697
+ torch.sparse_csc,
698
+ torch.sparse_bsr,
699
+ torch.sparse_bsc,
700
+ }:
701
+ raise ErrorMeta(
702
+ ValueError, f"Unsupported tensor layout {tensor.layout}", id=id
703
+ )
704
+
705
+ def compare(self) -> None:
706
+ actual, expected = self.actual, self.expected
707
+
708
+ self._compare_attributes(actual, expected)
709
+ if any(input.device.type == "meta" for input in (actual, expected)):
710
+ return
711
+
712
+ actual, expected = self._equalize_attributes(actual, expected)
713
+ self._compare_values(actual, expected)
714
+
715
+ def _compare_attributes(
716
+ self,
717
+ actual: torch.Tensor,
718
+ expected: torch.Tensor,
719
+ ) -> None:
720
+ """Checks if the attributes of two tensors match.
721
+
722
+ Always checks
723
+
724
+ - the :attr:`~torch.Tensor.shape`,
725
+ - whether both inputs are quantized or not,
726
+ - and if they use the same quantization scheme.
727
+
728
+ Checks for
729
+
730
+ - :attr:`~torch.Tensor.layout`,
731
+ - :meth:`~torch.Tensor.stride`,
732
+ - :attr:`~torch.Tensor.device`, and
733
+ - :attr:`~torch.Tensor.dtype`
734
+
735
+ are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
736
+ """
737
+
738
+ def raise_mismatch_error(
739
+ attribute_name: str, actual_value: Any, expected_value: Any
740
+ ) -> NoReturn:
741
+ self._fail(
742
+ AssertionError,
743
+ f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
744
+ )
745
+
746
+ if actual.shape != expected.shape:
747
+ raise_mismatch_error("shape", actual.shape, expected.shape)
748
+
749
+ if actual.is_quantized != expected.is_quantized:
750
+ raise_mismatch_error(
751
+ "is_quantized", actual.is_quantized, expected.is_quantized
752
+ )
753
+ elif actual.is_quantized and actual.qscheme() != expected.qscheme():
754
+ raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())
755
+
756
+ if actual.layout != expected.layout:
757
+ if self.check_layout:
758
+ raise_mismatch_error("layout", actual.layout, expected.layout)
759
+ elif (
760
+ actual.layout == torch.strided
761
+ and self.check_stride
762
+ and actual.stride() != expected.stride()
763
+ ):
764
+ raise_mismatch_error("stride()", actual.stride(), expected.stride())
765
+
766
+ if self.check_device and actual.device != expected.device:
767
+ raise_mismatch_error("device", actual.device, expected.device)
768
+
769
+ if self.check_dtype and actual.dtype != expected.dtype:
770
+ raise_mismatch_error("dtype", actual.dtype, expected.dtype)
771
+
772
+ def _equalize_attributes(
773
+ self, actual: torch.Tensor, expected: torch.Tensor
774
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
775
+ """Equalizes some attributes of two tensors for value comparison.
776
+
777
+ If ``actual`` and ``expected`` are ...
778
+
779
+ - ... not on the same :attr:`~torch.Tensor.device`, they are moved to CPU memory.
780
+ - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
781
+ :func:`torch.promote_types`).
782
+ - ... not of the same ``layout``, they are converted to strided tensors.
783
+
784
+ Args:
785
+ actual (Tensor): Actual tensor.
786
+ expected (Tensor): Expected tensor.
787
+
788
+ Returns:
789
+ (Tuple[Tensor, Tensor]): Equalized tensors.
790
+ """
791
+ # The comparison logic uses operators currently not supported by the MPS backends.
792
+ # See https://github.com/pytorch/pytorch/issues/77144 for details.
793
+ # TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
794
+ if actual.is_mps or expected.is_mps: # type: ignore[attr-defined]
795
+ actual = actual.cpu()
796
+ expected = expected.cpu()
797
+
798
+ if actual.device != expected.device:
799
+ actual = actual.cpu()
800
+ expected = expected.cpu()
801
+
802
+ if actual.dtype != expected.dtype:
803
+ dtype = torch.promote_types(actual.dtype, expected.dtype)
804
+ actual = actual.to(dtype)
805
+ expected = expected.to(dtype)
806
+
807
+ if actual.layout != expected.layout:
808
+ # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
809
+ actual = actual.to_dense() if actual.layout != torch.strided else actual
810
+ expected = (
811
+ expected.to_dense() if expected.layout != torch.strided else expected
812
+ )
813
+
814
+ return actual, expected
815
+
816
+ def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
817
+ if actual.is_quantized:
818
+ compare_fn = self._compare_quantized_values
819
+ elif actual.is_sparse:
820
+ compare_fn = self._compare_sparse_coo_values
821
+ elif actual.layout in {
822
+ torch.sparse_csr,
823
+ torch.sparse_csc,
824
+ torch.sparse_bsr,
825
+ torch.sparse_bsc,
826
+ }:
827
+ compare_fn = self._compare_sparse_compressed_values
828
+ else:
829
+ compare_fn = self._compare_regular_values_close
830
+
831
+ compare_fn(
832
+ actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan
833
+ )
834
+
835
+ def _compare_quantized_values(
836
+ self,
837
+ actual: torch.Tensor,
838
+ expected: torch.Tensor,
839
+ *,
840
+ rtol: float,
841
+ atol: float,
842
+ equal_nan: bool,
843
+ ) -> None:
844
+ """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.
845
+
846
+ .. note::
847
+
848
+ A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
849
+ the individual quantization parameters for closeness and the integer representation for equality can be
850
+ found in https://github.com/pytorch/pytorch/issues/68548.
851
+ """
852
+ return self._compare_regular_values_close(
853
+ actual.dequantize(),
854
+ expected.dequantize(),
855
+ rtol=rtol,
856
+ atol=atol,
857
+ equal_nan=equal_nan,
858
+ identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
859
+ )
860
+
861
+ def _compare_sparse_coo_values(
862
+ self,
863
+ actual: torch.Tensor,
864
+ expected: torch.Tensor,
865
+ *,
866
+ rtol: float,
867
+ atol: float,
868
+ equal_nan: bool,
869
+ ) -> None:
870
+ """Compares sparse COO tensors by comparing
871
+
872
+ - the number of sparse dimensions,
873
+ - the number of non-zero elements (nnz) for equality,
874
+ - the indices for equality, and
875
+ - the values for closeness.
876
+ """
877
+ if actual.sparse_dim() != expected.sparse_dim():
878
+ self._fail(
879
+ AssertionError,
880
+ (
881
+ f"The number of sparse dimensions in sparse COO tensors does not match: "
882
+ f"{actual.sparse_dim()} != {expected.sparse_dim()}"
883
+ ),
884
+ )
885
+
886
+ if actual._nnz() != expected._nnz():
887
+ self._fail(
888
+ AssertionError,
889
+ (
890
+ f"The number of specified values in sparse COO tensors does not match: "
891
+ f"{actual._nnz()} != {expected._nnz()}"
892
+ ),
893
+ )
894
+
895
+ self._compare_regular_values_equal(
896
+ actual._indices(),
897
+ expected._indices(),
898
+ identifier="Sparse COO indices",
899
+ )
900
+ self._compare_regular_values_close(
901
+ actual._values(),
902
+ expected._values(),
903
+ rtol=rtol,
904
+ atol=atol,
905
+ equal_nan=equal_nan,
906
+ identifier="Sparse COO values",
907
+ )
908
+
909
+ def _compare_sparse_compressed_values(
910
+ self,
911
+ actual: torch.Tensor,
912
+ expected: torch.Tensor,
913
+ *,
914
+ rtol: float,
915
+ atol: float,
916
+ equal_nan: bool,
917
+ ) -> None:
918
+ """Compares sparse compressed tensors by comparing
919
+
920
+ - the number of non-zero elements (nnz) for equality,
921
+ - the plain indices for equality,
922
+ - the compressed indices for equality, and
923
+ - the values for closeness.
924
+ """
925
+ format_name, compressed_indices_method, plain_indices_method = {
926
+ torch.sparse_csr: (
927
+ "CSR",
928
+ torch.Tensor.crow_indices,
929
+ torch.Tensor.col_indices,
930
+ ),
931
+ torch.sparse_csc: (
932
+ "CSC",
933
+ torch.Tensor.ccol_indices,
934
+ torch.Tensor.row_indices,
935
+ ),
936
+ torch.sparse_bsr: (
937
+ "BSR",
938
+ torch.Tensor.crow_indices,
939
+ torch.Tensor.col_indices,
940
+ ),
941
+ torch.sparse_bsc: (
942
+ "BSC",
943
+ torch.Tensor.ccol_indices,
944
+ torch.Tensor.row_indices,
945
+ ),
946
+ }[actual.layout]
947
+
948
+ if actual._nnz() != expected._nnz():
949
+ self._fail(
950
+ AssertionError,
951
+ (
952
+ f"The number of specified values in sparse {format_name} tensors does not match: "
953
+ f"{actual._nnz()} != {expected._nnz()}"
954
+ ),
955
+ )
956
+
957
+ # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_
958
+ # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it
959
+ # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will
960
+ # fail.
961
+ actual_compressed_indices = compressed_indices_method(actual)
962
+ expected_compressed_indices = compressed_indices_method(expected)
963
+ indices_dtype = torch.promote_types(
964
+ actual_compressed_indices.dtype, expected_compressed_indices.dtype
965
+ )
966
+
967
+ self._compare_regular_values_equal(
968
+ actual_compressed_indices.to(indices_dtype),
969
+ expected_compressed_indices.to(indices_dtype),
970
+ identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
971
+ )
972
+ self._compare_regular_values_equal(
973
+ plain_indices_method(actual).to(indices_dtype),
974
+ plain_indices_method(expected).to(indices_dtype),
975
+ identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
976
+ )
977
+ self._compare_regular_values_close(
978
+ actual.values(),
979
+ expected.values(),
980
+ rtol=rtol,
981
+ atol=atol,
982
+ equal_nan=equal_nan,
983
+ identifier=f"Sparse {format_name} values",
984
+ )
985
+
986
+ def _compare_regular_values_equal(
987
+ self,
988
+ actual: torch.Tensor,
989
+ expected: torch.Tensor,
990
+ *,
991
+ equal_nan: bool = False,
992
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
993
+ ) -> None:
994
+ """Checks if the values of two tensors are equal."""
995
+ self._compare_regular_values_close(
996
+ actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier
997
+ )
998
+
999
+ def _compare_regular_values_close(
1000
+ self,
1001
+ actual: torch.Tensor,
1002
+ expected: torch.Tensor,
1003
+ *,
1004
+ rtol: float,
1005
+ atol: float,
1006
+ equal_nan: bool,
1007
+ identifier: Optional[Union[str, Callable[[str], str]]] = None,
1008
+ ) -> None:
1009
+ """Checks if the values of two tensors are close up to a desired tolerance."""
1010
+ matches = torch.isclose(
1011
+ actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan
1012
+ )
1013
+ if torch.all(matches):
1014
+ return
1015
+
1016
+ if actual.shape == torch.Size([]):
1017
+ msg = make_scalar_mismatch_msg(
1018
+ actual.item(),
1019
+ expected.item(),
1020
+ rtol=rtol,
1021
+ atol=atol,
1022
+ identifier=identifier,
1023
+ )
1024
+ else:
1025
+ msg = make_tensor_mismatch_msg(
1026
+ actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier
1027
+ )
1028
+ self._fail(AssertionError, msg)
1029
+
1030
+ def extra_repr(self) -> Sequence[str]:
1031
+ return (
1032
+ "rtol",
1033
+ "atol",
1034
+ "equal_nan",
1035
+ "check_device",
1036
+ "check_dtype",
1037
+ "check_layout",
1038
+ "check_stride",
1039
+ )
1040
+
1041
+
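A small sketch of the attribute equalization above: when a ``check_*`` flag is disabled, the corresponding attributes are equalized before the values are compared instead of raising:

import torch

a = torch.tensor([1.0, 2.0], dtype=torch.float32)
b = torch.tensor([1.0, 2.0], dtype=torch.float64)

# With check_dtype=False both tensors are promoted to float64 before comparison.
torch.testing.assert_close(a, b, check_dtype=False)

# With check_layout=False a sparse COO tensor is densified before comparison.
torch.testing.assert_close(b.to_sparse(), b, check_layout=False)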
1042
+ def originate_pairs(
1043
+ actual: Any,
1044
+ expected: Any,
1045
+ *,
1046
+ pair_types: Sequence[Type[Pair]],
1047
+ sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
1048
+ mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
1049
+ id: Tuple[Any, ...] = (),
1050
+ **options: Any,
1051
+ ) -> List[Pair]:
1052
+ """Originates pairs from the individual inputs.
1053
+
1054
+ ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
1055
+ :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them.
1056
+
1057
+ Args:
1058
+ actual (Any): Actual input.
1059
+ expected (Any): Expected input.
1060
+ pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs.
1061
+ First successful pair will be used.
1062
+ sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
1063
+ mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
1064
+ id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message.
1065
+ **options (Any): Options passed to each pair during construction.
1066
+
1067
+ Raises:
1068
+ ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their
1069
+ length does not match.
1070
+ ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of
1071
+ keys do not match.
1072
+ ErrorMeta: With :class:`TypeError`, if no pair is able to handle the inputs.
1073
+ ErrorMeta: With any expected exception that happens during the construction of a pair.
1074
+
1075
+ Returns:
1076
+ (List[Pair]): Originated pairs.
1077
+ """
1078
+ # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
1079
+ # "a" == "a"[0][0]...
1080
+ if (
1081
+ isinstance(actual, sequence_types)
1082
+ and not isinstance(actual, str)
1083
+ and isinstance(expected, sequence_types)
1084
+ and not isinstance(expected, str)
1085
+ ):
1086
+ actual_len = len(actual)
1087
+ expected_len = len(expected)
1088
+ if actual_len != expected_len:
1089
+ raise ErrorMeta(
1090
+ AssertionError,
1091
+ f"The length of the sequences mismatch: {actual_len} != {expected_len}",
1092
+ id=id,
1093
+ )
1094
+
1095
+ pairs = []
1096
+ for idx in range(actual_len):
1097
+ pairs.extend(
1098
+ originate_pairs(
1099
+ actual[idx],
1100
+ expected[idx],
1101
+ pair_types=pair_types,
1102
+ sequence_types=sequence_types,
1103
+ mapping_types=mapping_types,
1104
+ id=(*id, idx),
1105
+ **options,
1106
+ )
1107
+ )
1108
+ return pairs
1109
+
1110
+ elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
1111
+ actual_keys = set(actual.keys())
1112
+ expected_keys = set(expected.keys())
1113
+ if actual_keys != expected_keys:
1114
+ missing_keys = expected_keys - actual_keys
1115
+ additional_keys = actual_keys - expected_keys
1116
+ raise ErrorMeta(
1117
+ AssertionError,
1118
+ (
1119
+ f"The keys of the mappings do not match:\n"
1120
+ f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
1121
+ f"Additional keys in the actual mapping: {sorted(additional_keys)}"
1122
+ ),
1123
+ id=id,
1124
+ )
1125
+
1126
+ keys: Collection = actual_keys
1127
+ # Since the origination aborts after the first failure, we try to be deterministic
1128
+ with contextlib.suppress(Exception):
1129
+ keys = sorted(keys)
1130
+
1131
+ pairs = []
1132
+ for key in keys:
1133
+ pairs.extend(
1134
+ originate_pairs(
1135
+ actual[key],
1136
+ expected[key],
1137
+ pair_types=pair_types,
1138
+ sequence_types=sequence_types,
1139
+ mapping_types=mapping_types,
1140
+ id=(*id, key),
1141
+ **options,
1142
+ )
1143
+ )
1144
+ return pairs
1145
+
1146
+ else:
1147
+ for pair_type in pair_types:
1148
+ try:
1149
+ return [pair_type(actual, expected, id=id, **options)]
1150
+ # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the
1151
+ # inputs. Thus, we try the next pair type.
1152
+ except UnsupportedInputs:
1153
+ continue
1154
+ # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This
1155
+ # is only in a separate branch, because the one below would also except it.
1156
+ except ErrorMeta:
1157
+ raise
1158
+ # Raising any other exception during origination is unexpected and will give some extra information about
1159
+ # what happened. If applicable, the exception should be expected in the future.
1160
+ except Exception as error:
1161
+ raise RuntimeError(
1162
+ f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
1163
+ f"{type(actual).__name__}(): {actual}\n\n"
1164
+ f"and\n\n"
1165
+ f"{type(expected).__name__}(): {expected}\n\n"
1166
+ f"resulted in the unexpected exception above. "
1167
+ f"If you are a user and see this message during normal operation "
1168
+ "please file an issue at https://github.com/pytorch/pytorch/issues. "
1169
+ "If you are a developer and working on the comparison functions, "
1170
+ "please except the previous error and raise an expressive `ErrorMeta` instead."
1171
+ ) from error
1172
+ else:
1173
+ raise ErrorMeta(
1174
+ TypeError,
1175
+ f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
1176
+ id=id,
1177
+ )
1178
+
1179
+
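A minimal sketch of the fallback described above, using the private ``torch.testing._comparison`` module directly: each type in ``pair_types`` is tried in order, the first constructor that does not raise ``UnsupportedInputs`` wins, and sequences are recursed into with the index appended to the ``id``:

from torch.testing._comparison import BooleanPair, NumberPair, originate_pairs

pairs = originate_pairs(
    [1.0, True],
    [1.0, True],
    pair_types=(BooleanPair, NumberPair),
)
# BooleanPair rejects the floats at index 0, so a NumberPair is used for them,
# while the booleans at index 1 are handled by BooleanPair.
print([type(pair).__name__ for pair in pairs])  # ['NumberPair', 'BooleanPair']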
1180
+ def not_close_error_metas(
1181
+ actual: Any,
1182
+ expected: Any,
1183
+ *,
1184
+ pair_types: Sequence[Type[Pair]] = (ObjectPair,),
1185
+ sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
1186
+ mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
1187
+ **options: Any,
1188
+ ) -> List[ErrorMeta]:
1189
+ """Asserts that inputs are equal.
1190
+
1191
+ ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
1192
+ :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them.
1193
+
1194
+ Args:
1195
+ actual (Any): Actual input.
1196
+ expected (Any): Expected input.
1197
+ pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the
1198
+ inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`.
1199
+ sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
1200
+ mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
1201
+ **options (Any): Options passed to each pair during construction.
1202
+ """
1203
+ # Hide this function from `pytest`'s traceback
1204
+ __tracebackhide__ = True
1205
+
1206
+ try:
1207
+ pairs = originate_pairs(
1208
+ actual,
1209
+ expected,
1210
+ pair_types=pair_types,
1211
+ sequence_types=sequence_types,
1212
+ mapping_types=mapping_types,
1213
+ **options,
1214
+ )
1215
+ except ErrorMeta as error_meta:
1216
+ # Explicitly raising from None to hide the internal traceback
1217
+ raise error_meta.to_error() from None
1218
+
1219
+ error_metas: List[ErrorMeta] = []
1220
+ for pair in pairs:
1221
+ try:
1222
+ pair.compare()
1223
+ except ErrorMeta as error_meta:
1224
+ error_metas.append(error_meta)
1225
+ # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information
1226
+ # about what happened. If applicable, the exception should be expected in the future.
1227
+ except Exception as error:
1228
+ raise RuntimeError(
1229
+ f"Comparing\n\n"
1230
+ f"{pair}\n\n"
1231
+ f"resulted in the unexpected exception above. "
1232
+ f"If you are a user and see this message during normal operation "
1233
+ "please file an issue at https://github.com/pytorch/pytorch/issues. "
1234
+ "If you are a developer and working on the comparison functions, "
1235
+ "please except the previous error and raise an expressive `ErrorMeta` instead."
1236
+ ) from error
1237
+
1238
+ # [ErrorMeta Cycles]
1239
+ # ErrorMeta objects in this list capture
1240
+ # tracebacks that refer to the frame of this function.
1241
+ # The local variable `error_metas` refers to the error meta
1242
+ # objects, creating a reference cycle. Frames in the traceback
1243
+ # would not get freed until cycle collection, leaking cuda memory in tests.
1244
+ # We break the cycle by removing the reference to the error_meta objects
1245
+ # from this frame as it returns.
1246
+ error_metas = [error_metas]
1247
+ return error_metas.pop()
1248
+
1249
+
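Unlike ``assert_close`` below, ``not_close_error_metas`` collects the failures instead of raising on the first one. A minimal sketch using the private module:

import torch
from torch.testing._comparison import TensorLikePair, not_close_error_metas

error_metas = not_close_error_metas(
    torch.tensor([1.0, 2.0]),
    torch.tensor([1.0, 2.5]),
    pair_types=(TensorLikePair,),
    rtol=0,
    atol=0,
)
for error_meta in error_metas:
    print(error_meta.to_error())  # AssertionError describing the mismatch at index (1,)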
1250
+ def assert_close(
1251
+ actual: Any,
1252
+ expected: Any,
1253
+ *,
1254
+ allow_subclasses: bool = True,
1255
+ rtol: Optional[float] = None,
1256
+ atol: Optional[float] = None,
1257
+ equal_nan: bool = False,
1258
+ check_device: bool = True,
1259
+ check_dtype: bool = True,
1260
+ check_layout: bool = True,
1261
+ check_stride: bool = False,
1262
+ msg: Optional[Union[str, Callable[[str], str]]] = None,
1263
+ ):
1264
+ r"""Asserts that ``actual`` and ``expected`` are close.
1265
+
1266
+ If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if
1267
+
1268
+ .. math::
1269
+
1270
+ \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert
1271
+
1272
+ Non-finite values (``-inf`` and ``inf``) are considered close if and only if they are equal. ``NaN``'s are
1273
+ only considered equal to each other if ``equal_nan`` is ``True``.
1274
+
1275
+ In addition, they are only considered close if they have the same
1276
+
1277
+ - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
1278
+ - ``dtype`` (if ``check_dtype`` is ``True``),
1279
+ - ``layout`` (if ``check_layout`` is ``True``), and
1280
+ - stride (if ``check_stride`` is ``True``).
1281
+
1282
+ If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.
1283
+
1284
+ If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
1285
+ checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
1286
+ or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
1287
+ are always checked for equality whereas the values are checked for closeness according to the definition above.
1288
+
1289
+ If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
1290
+ :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
1291
+ definition above.
1292
+
1293
+ ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
1294
+ :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
1295
+ have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
1296
+ or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
1297
+ their elements are considered close according to the above definition.
1298
+
1299
+ .. note::
1300
+
1301
+ Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
1302
+ :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
1303
+ Python scalars of different types can be checked, but require ``check_dtype=False``.
1304
+
1305
+ Args:
1306
+ actual (Any): Actual input.
1307
+ expected (Any): Expected input.
1308
+ allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
1309
+ are allowed. Otherwise type equality is required.
1310
+ rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
1311
+ values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
1312
+ atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
1313
+ values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
1314
+ equal_nan (bool): If ``True``, two ``NaN`` values will be considered equal.
1315
+ check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
1316
+ :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
1317
+ :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
1318
+ check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
1319
+ check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
1320
+ :func:`torch.promote_types`) before being compared.
1321
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
1322
+ check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
1323
+ compared.
1324
+ check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
1325
+ msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
1326
+ the comparison. Can also be passed as a callable in which case it will be called with the generated message and
1327
+ should return the new message.
1328
+
1329
+ Raises:
1330
+ ValueError: If no :class:`torch.Tensor` can be constructed from an input.
1331
+ ValueError: If only ``rtol`` or ``atol`` is specified.
1332
+ AssertionError: If corresponding inputs are not Python scalars and are not directly related.
1333
+ AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
1334
+ different types.
1335
+ AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match.
1336
+ AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match.
1337
+ AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
1338
+ AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
1339
+ :attr:`~torch.Tensor.layout`.
1340
+ AssertionError: If only one of corresponding tensors is quantized.
1341
+ AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
1342
+ AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
1343
+ :attr:`~torch.Tensor.device`.
1344
+ AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
1345
+ AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
1346
+ AssertionError: If the values of corresponding tensors are not close according to the definition above.
1347
+
1348
+ The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
1349
+ ``dtype``'s, the maximum of both tolerances is used.
1350
+
1351
+ +---------------------------+------------+----------+
1352
+ | ``dtype`` | ``rtol`` | ``atol`` |
1353
+ +===========================+============+==========+
1354
+ | :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` |
1355
+ +---------------------------+------------+----------+
1356
+ | :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` |
1357
+ +---------------------------+------------+----------+
1358
+ | :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` |
1359
+ +---------------------------+------------+----------+
1360
+ | :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` |
1361
+ +---------------------------+------------+----------+
1362
+ | :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` |
1363
+ +---------------------------+------------+----------+
1364
+ | :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` |
1365
+ +---------------------------+------------+----------+
1366
+ | :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` |
1367
+ +---------------------------+------------+----------+
1368
+ | :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` |
1369
+ +---------------------------+------------+----------+
1370
+ | :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` |
1371
+ +---------------------------+------------+----------+
1372
+ | :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` |
1373
+ +---------------------------+------------+----------+
1374
+ | :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` |
1375
+ +---------------------------+------------+----------+
1376
+ | :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` |
1377
+ +---------------------------+------------+----------+
1378
+ | other | ``0.0`` | ``0.0`` |
1379
+ +---------------------------+------------+----------+
1380
+
1381
+ .. note::
1382
+
1383
+ :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
1384
+ to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
1385
+ define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:
1386
+
1387
+ >>> import functools
1388
+ >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
1389
+ >>> assert_equal(1e-9, 1e-10)
1390
+ Traceback (most recent call last):
1391
+ ...
1392
+ AssertionError: Scalars are not equal!
1393
+ <BLANKLINE>
1394
+ Expected 1e-10 but got 1e-09.
1395
+ Absolute difference: 9.000000000000001e-10
1396
+ Relative difference: 9.0
1397
+
1398
+ Examples:
1399
+ >>> # tensor to tensor comparison
1400
+ >>> expected = torch.tensor([1e0, 1e-1, 1e-2])
1401
+ >>> actual = torch.acos(torch.cos(expected))
1402
+ >>> torch.testing.assert_close(actual, expected)
1403
+
1404
+ >>> # scalar to scalar comparison
1405
+ >>> import math
1406
+ >>> expected = math.sqrt(2.0)
1407
+ >>> actual = 2.0 / math.sqrt(2.0)
1408
+ >>> torch.testing.assert_close(actual, expected)
1409
+
1410
+ >>> # numpy array to numpy array comparison
1411
+ >>> import numpy as np
1412
+ >>> expected = np.array([1e0, 1e-1, 1e-2])
1413
+ >>> actual = np.arccos(np.cos(expected))
1414
+ >>> torch.testing.assert_close(actual, expected)
1415
+
1416
+ >>> # sequence to sequence comparison
1417
+ >>> import numpy as np
1418
+ >>> # The types of the sequences do not have to match. They only have to have the same
1419
+ >>> # length and their elements have to match.
1420
+ >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
1421
+ >>> actual = tuple(expected)
1422
+ >>> torch.testing.assert_close(actual, expected)
1423
+
1424
+ >>> # mapping to mapping comparison
1425
+ >>> from collections import OrderedDict
1426
+ >>> import numpy as np
1427
+ >>> foo = torch.tensor(1.0)
1428
+ >>> bar = 2.0
1429
+ >>> baz = np.array(3.0)
1430
+ >>> # The types and a possible ordering of mappings do not have to match. They only
1431
+ >>> # have to have the same set of keys and their elements have to match.
1432
+ >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
1433
+ >>> actual = {"baz": baz, "bar": bar, "foo": foo}
1434
+ >>> torch.testing.assert_close(actual, expected)
1435
+
1436
+ >>> expected = torch.tensor([1.0, 2.0, 3.0])
1437
+ >>> actual = expected.clone()
1438
+ >>> # By default, directly related instances can be compared
1439
+ >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
1440
+ >>> # This check can be made more strict with allow_subclasses=False
1441
+ >>> torch.testing.assert_close(
1442
+ ... torch.nn.Parameter(actual), expected, allow_subclasses=False
1443
+ ... )
1444
+ Traceback (most recent call last):
1445
+ ...
1446
+ TypeError: No comparison pair was able to handle inputs of type
1447
+ <class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
1448
+ >>> # If the inputs are not directly related, they are never considered close
1449
+ >>> torch.testing.assert_close(actual.numpy(), expected)
1450
+ Traceback (most recent call last):
1451
+ ...
1452
+ TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'>
1453
+ and <class 'torch.Tensor'>.
1454
+ >>> # Exceptions to these rules are Python scalars. They can be checked regardless of
1455
+ >>> # their type if check_dtype=False.
1456
+ >>> torch.testing.assert_close(1.0, 1, check_dtype=False)
1457
+
1458
+ >>> # NaN != NaN by default.
1459
+ >>> expected = torch.tensor(float("Nan"))
1460
+ >>> actual = expected.clone()
1461
+ >>> torch.testing.assert_close(actual, expected)
1462
+ Traceback (most recent call last):
1463
+ ...
1464
+ AssertionError: Scalars are not close!
1465
+ <BLANKLINE>
1466
+ Expected nan but got nan.
1467
+ Absolute difference: nan (up to 1e-05 allowed)
1468
+ Relative difference: nan (up to 1.3e-06 allowed)
1469
+ >>> torch.testing.assert_close(actual, expected, equal_nan=True)
1470
+
1471
+ >>> expected = torch.tensor([1.0, 2.0, 3.0])
1472
+ >>> actual = torch.tensor([1.0, 4.0, 5.0])
1473
+ >>> # The default error message can be overwritten.
1474
+ >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!")
1475
+ Traceback (most recent call last):
1476
+ ...
1477
+ AssertionError: Argh, the tensors are not close!
1478
+ >>> # If msg is a callable, it can be used to augment the generated message with
1479
+ >>> # extra information
1480
+ >>> torch.testing.assert_close(
1481
+ ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
1482
+ ... )
1483
+ Traceback (most recent call last):
1484
+ ...
1485
+ AssertionError: Header
1486
+ <BLANKLINE>
1487
+ Tensor-likes are not close!
1488
+ <BLANKLINE>
1489
+ Mismatched elements: 2 / 3 (66.7%)
1490
+ Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
1491
+ Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
1492
+ <BLANKLINE>
1493
+ Footer
1494
+ """
1495
+ # Hide this function from `pytest`'s traceback
1496
+ __tracebackhide__ = True
1497
+
1498
+ error_metas = not_close_error_metas(
1499
+ actual,
1500
+ expected,
1501
+ pair_types=(
1502
+ NonePair,
1503
+ BooleanPair,
1504
+ NumberPair,
1505
+ TensorLikePair,
1506
+ ),
1507
+ allow_subclasses=allow_subclasses,
1508
+ rtol=rtol,
1509
+ atol=atol,
1510
+ equal_nan=equal_nan,
1511
+ check_device=check_device,
1512
+ check_dtype=check_dtype,
1513
+ check_layout=check_layout,
1514
+ check_stride=check_stride,
1515
+ msg=msg,
1516
+ )
1517
+
1518
+ if error_metas:
1519
+ # TODO: compose all metas into one AssertionError
1520
+ raise error_metas[0].to_error(msg)
1521
+
1522
+
1523
+ def assert_allclose(
1524
+ actual: Any,
1525
+ expected: Any,
1526
+ rtol: Optional[float] = None,
1527
+ atol: Optional[float] = None,
1528
+ equal_nan: bool = True,
1529
+ msg: str = "",
1530
+ ) -> None:
1531
+ """
1532
+ .. warning::
1533
+
1534
+ :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release.
1535
+ Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions
1536
+ `here <https://github.com/pytorch/pytorch/issues/61844>`_.
1537
+ """
1538
+ warnings.warn(
1539
+ "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
1540
+ "Please use `torch.testing.assert_close()` instead. "
1541
+ "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
1542
+ FutureWarning,
1543
+ stacklevel=2,
1544
+ )
1545
+
1546
+ if not isinstance(actual, torch.Tensor):
1547
+ actual = torch.tensor(actual)
1548
+ if not isinstance(expected, torch.Tensor):
1549
+ expected = torch.tensor(expected, dtype=actual.dtype)
1550
+
1551
+ if rtol is None and atol is None:
1552
+ rtol, atol = default_tolerances(
1553
+ actual,
1554
+ expected,
1555
+ dtype_precisions={
1556
+ torch.float16: (1e-3, 1e-3),
1557
+ torch.float32: (1e-4, 1e-5),
1558
+ torch.float64: (1e-5, 1e-8),
1559
+ },
1560
+ )
1561
+
1562
+ torch.testing.assert_close(
1563
+ actual,
1564
+ expected,
1565
+ rtol=rtol,
1566
+ atol=atol,
1567
+ equal_nan=equal_nan,
1568
+ check_device=True,
1569
+ check_dtype=False,
1570
+ check_stride=False,
1571
+ msg=msg or None,
1572
+ )
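A migration sketch for the deprecation above: ``assert_allclose`` used legacy per-dtype tolerances and different defaults (``equal_nan=True``, no dtype check), so a drop-in ``assert_close`` call has to spell those out (exact tolerance parity would additionally require passing the legacy ``rtol``/``atol`` values):

import torch

actual = torch.tensor([1.0, float("nan")])
expected = torch.tensor([1.0, float("nan")])

# deprecated:
# torch.testing.assert_allclose(actual, expected)

# replacement with the legacy defaults made explicit:
torch.testing.assert_close(actual, expected, equal_nan=True, check_dtype=False)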
env-llmeval/lib/python3.10/site-packages/torch/testing/_creation.py ADDED
@@ -0,0 +1,253 @@
1
+ """
2
+ This module contains tensor creation utilities.
3
+ """
4
+
5
+ import collections.abc
6
+ import math
7
+ import warnings
8
+ from typing import cast, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+
12
+ _INTEGRAL_TYPES = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
13
+ _FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
14
+ _FLOATING_8BIT_TYPES = [torch.float8_e4m3fn, torch.float8_e5m2]
15
+ _COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128]
16
+ _BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES]
17
+ _FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES]
18
+
19
+
20
+ def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor:
21
+ # uniform_ requires to-from <= std::numeric_limits<scalar_t>::max()
22
+ # Work around this by scaling the range before and after the PRNG
23
+ if high - low >= torch.finfo(t.dtype).max:
24
+ return t.uniform_(low / 2, high / 2).mul_(2)
25
+ else:
26
+ return t.uniform_(low, high)
27
+
28
+
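A concrete instance of the overflow workaround in ``_uniform_random_``: for ``torch.float16`` the range below exceeds ``torch.finfo(torch.float16).max`` (65504), so the helper samples from the halved range and doubles the result:

import torch
from torch.testing._creation import _uniform_random_

t = torch.empty(4, dtype=torch.float16)
# high - low == 120000 > 65504, so this is equivalent to t.uniform_(-30000, 30000).mul_(2)
_uniform_random_(t, -60000.0, 60000.0)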
29
+ def make_tensor(
30
+ *shape: Union[int, torch.Size, List[int], Tuple[int, ...]],
31
+ dtype: torch.dtype,
32
+ device: Union[str, torch.device],
33
+ low: Optional[float] = None,
34
+ high: Optional[float] = None,
35
+ requires_grad: bool = False,
36
+ noncontiguous: bool = False,
37
+ exclude_zero: bool = False,
38
+ memory_format: Optional[torch.memory_format] = None,
39
+ ) -> torch.Tensor:
40
+ r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with
41
+ values uniformly drawn from ``[low, high)``.
42
+
43
+ If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable
44
+ finite values then they are clamped to the lowest or highest representable finite value, respectively.
45
+ If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`,
46
+ which depend on :attr:`dtype`.
47
+
48
+ +---------------------------+------------+----------+
49
+ | ``dtype`` | ``low`` | ``high`` |
50
+ +===========================+============+==========+
51
+ | boolean type | ``0`` | ``2`` |
52
+ +---------------------------+------------+----------+
53
+ | unsigned integral type | ``0`` | ``10`` |
54
+ +---------------------------+------------+----------+
55
+ | signed integral types | ``-9`` | ``10`` |
56
+ +---------------------------+------------+----------+
57
+ | floating types | ``-9`` | ``9`` |
58
+ +---------------------------+------------+----------+
59
+ | complex types | ``-9`` | ``9`` |
60
+ +---------------------------+------------+----------+
61
+
62
+ Args:
63
+ shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor.
64
+ dtype (:class:`torch.dtype`): The data type of the returned tensor.
65
+ device (Union[str, torch.device]): The device of the returned tensor.
66
+ low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is
67
+ clamped to the least representable finite value of the given dtype. When ``None`` (default),
68
+ this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
69
+ high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is
70
+ clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value
71
+ is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
72
+
73
+ .. deprecated:: 2.1
74
+
75
+ Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated
76
+ since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead.
77
+
78
+ requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``.
79
+ noncontiguous (Optional[bool]): If ``True``, the returned tensor will be noncontiguous. This argument is
80
+ ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``.
81
+ exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value
82
+ depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating
83
+ point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the
84
+ :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number
85
+ whose real and imaginary parts are both the smallest positive normal number representable by the complex
86
+ type. Default ``False``.
87
+ memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive
88
+ with ``noncontiguous``.
89
+
90
+ Raises:
91
+ ValueError: If ``requires_grad=True`` is passed for integral ``dtype``.
92
+ ValueError: If ``low >= high``.
93
+ ValueError: If either :attr:`low` or :attr:`high` is ``nan``.
94
+ ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed.
95
+ TypeError: If :attr:`dtype` isn't supported by this function.
96
+
97
+ Examples:
98
+ >>> # xdoctest: +SKIP
99
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
100
+ >>> from torch.testing import make_tensor
101
+ >>> # Creates a float tensor with values in [-1, 1)
102
+ >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1)
103
+ >>> # xdoctest: +SKIP
104
+ tensor([ 0.1205, 0.2282, -0.6380])
105
+ >>> # Creates a bool tensor on CUDA
106
+ >>> make_tensor((2, 2), device='cuda', dtype=torch.bool)
107
+ tensor([[False, False],
108
+ [False, True]], device='cuda:0')
109
+ """
110
+
111
+ def modify_low_high(
112
+ low: Optional[float],
113
+ high: Optional[float],
114
+ *,
115
+ lowest_inclusive: float,
116
+ highest_exclusive: float,
117
+ default_low: float,
118
+ default_high: float,
119
+ ) -> Tuple[float, float]:
120
+ """
121
+ Modifies (and raises ValueError when appropriate) the ``low`` and ``high`` values given by the user
122
+ if required.
123
+ """
124
+
125
+ def clamp(a: float, l: float, h: float) -> float:
126
+ return min(max(a, l), h)
127
+
128
+ low = low if low is not None else default_low
129
+ high = high if high is not None else default_high
130
+
131
+ if any(isinstance(value, float) and math.isnan(value) for value in [low, high]):
132
+ raise ValueError(
133
+ f"`low` and `high` cannot be NaN, but got {low=} and {high=}"
134
+ )
135
+ elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES:
136
+ warnings.warn(
137
+ "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types "
138
+ "is deprecated since 2.1 and will be removed in 2.3. "
139
+ "Use torch.full(...) instead.",
140
+ FutureWarning,
141
+ )
142
+ elif low >= high:
143
+ raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}")
144
+ elif high < lowest_inclusive or low >= highest_exclusive:
145
+ raise ValueError(
146
+ f"The value interval specified by `low` and `high` is [{low}, {high}), "
147
+ f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})"
148
+ )
149
+
150
+ low = clamp(low, lowest_inclusive, highest_exclusive)
151
+ high = clamp(high, lowest_inclusive, highest_exclusive)
152
+
153
+ if dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
154
+ # 1. `low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval
155
+ # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of
156
+ # `torch.randint` is exclusive, and thus we need to ceil here as well.
157
+ return math.ceil(low), math.ceil(high)
158
+
159
+ return low, high
160
+
161
+ if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence):
162
+ shape = shape[0] # type: ignore[assignment]
163
+ shape = cast(Tuple[int, ...], tuple(shape))
164
+
165
+ if noncontiguous and memory_format is not None:
166
+ raise ValueError(
167
+ f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, "
168
+ f"but got {noncontiguous=} and {memory_format=}"
169
+ )
170
+
171
+ if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
172
+ raise ValueError(
173
+ f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}"
174
+ )
175
+
176
+ if dtype is torch.bool:
177
+ low, high = cast(
178
+ Tuple[int, int],
179
+ modify_low_high(
180
+ low,
181
+ high,
182
+ lowest_inclusive=0,
183
+ highest_exclusive=2,
184
+ default_low=0,
185
+ default_high=2,
186
+ ),
187
+ )
188
+ result = torch.randint(low, high, shape, device=device, dtype=dtype)
189
+ elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
190
+ low, high = cast(
191
+ Tuple[int, int],
192
+ modify_low_high(
193
+ low,
194
+ high,
195
+ lowest_inclusive=torch.iinfo(dtype).min,
196
+ highest_exclusive=torch.iinfo(dtype).max
197
+ # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint`
198
+ # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot
199
+ # sample 2**63 - 1, i.e. the maximum value of `torch.int64` and we need to account for that here.
200
+ + (1 if dtype is not torch.int64 else 0),
201
+ # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`,
202
+ # _after_ we use the default value, we don't need to special case it here
203
+ default_low=-9,
204
+ default_high=10,
205
+ ),
206
+ )
207
+ result = torch.randint(low, high, shape, device=device, dtype=dtype)
208
+ elif dtype in _FLOATING_OR_COMPLEX_TYPES:
209
+ low, high = modify_low_high(
210
+ low,
211
+ high,
212
+ lowest_inclusive=torch.finfo(dtype).min,
213
+ highest_exclusive=torch.finfo(dtype).max,
214
+ default_low=-9,
215
+ default_high=9,
216
+ )
217
+ result = torch.empty(shape, device=device, dtype=dtype)
218
+ _uniform_random_(
219
+ torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high
220
+ )
221
+ elif dtype in _FLOATING_8BIT_TYPES:
222
+ low, high = modify_low_high(
223
+ low,
224
+ high,
225
+ lowest_inclusive=torch.finfo(dtype).min,
226
+ highest_exclusive=torch.finfo(dtype).max,
227
+ default_low=-9,
228
+ default_high=9,
229
+ )
230
+ result = torch.empty(shape, device=device, dtype=torch.float32)
231
+ _uniform_random_(result, low, high)
232
+ result = result.to(dtype)
233
+ else:
234
+ raise TypeError(
235
+ f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()."
236
+ " To request support, file an issue at: https://github.com/pytorch/pytorch/issues"
237
+ )
238
+
239
+ if noncontiguous and result.numel() > 1:
240
+ result = torch.repeat_interleave(result, 2, dim=-1)
241
+ result = result[..., ::2]
242
+ elif memory_format is not None:
243
+ result = result.clone(memory_format=memory_format)
244
+
245
+ if exclude_zero:
246
+ result[result == 0] = (
247
+ 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny
248
+ )
249
+
250
+ if dtype in _FLOATING_OR_COMPLEX_TYPES:
251
+ result.requires_grad = requires_grad
252
+
253
+ return result
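A short sketch exercising the ``noncontiguous`` and ``exclude_zero`` paths described in the docstring above:

import torch
from torch.testing import make_tensor

t = make_tensor((4, 4), dtype=torch.float32, device="cpu", noncontiguous=True, exclude_zero=True)
assert not t.is_contiguous()  # built via repeat_interleave + slicing
assert (t != 0).all()         # any zeros were replaced with torch.finfo(torch.float32).tiny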
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py ADDED
@@ -0,0 +1,367 @@
1
+ import torch
2
+ from torch.testing._internal.common_utils import TEST_WITH_ROCM
3
+
4
+
5
+ class AutocastTestLists:
6
+ def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
7
+ input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
8
+
9
+ hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
10
+ torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
11
+ torch.randn((n, n), device=dev, dtype=torch.float32),)
12
+
13
+ weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
14
+ torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
15
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
16
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
17
+
18
+ # returns args as a tuple
19
+ return input + hx + weights
20
+
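A sketch (not the actual ``test/test_cuda.py`` code) of how an entry from the lists below, e.g. ``("mm", mat0_fp32 + mat1_fp32)``, could be exercised under autocast on a CUDA device; ops in the fp16 list are expected to produce half-precision outputs:

import torch

dev = "cuda"
mat0_fp32 = (torch.randn((8, 8), dtype=torch.float32, device=dev),)
mat1_fp32 = (torch.randn((8, 8), dtype=torch.float32, device=dev),)

op, args = "mm", mat0_fp32 + mat1_fp32
with torch.autocast(device_type="cuda", dtype=torch.float16):
    out = getattr(torch, op)(*args)
assert out.dtype is torch.float16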
21
+ # Supplies ops and arguments for test_autocast_* in test/test_cuda.py
22
+ def __init__(self, dev):
23
+ super().__init__()
24
+ n = 8
25
+ # Utility arguments, created as one-element tuples
26
+ pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
27
+ pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
28
+ pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
29
+ mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
30
+ mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
31
+ mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
32
+
33
+ dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
34
+ conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
35
+ torch.randn(dimset, dtype=torch.float32, device=dev))
36
+ for dimset in dimsets]
37
+ bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
38
+ element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
39
+ pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
40
+ pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
41
+ mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
42
+ mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
43
+ mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
44
+ mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
45
+
46
+ # The lists below organize ops that autocast needs to test.
47
+ # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
48
+ # Each op is associated with a tuple of valid arguments.
49
+ # In addition, cudnn conv ops are not supported on ROCm and hence will
50
+ # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list.
51
+
52
+ # Some ops implement built-in type promotion. These don't need autocasting,
53
+ # but autocasting relies on their promotion, so we include tests to double-check.
54
+ self.torch_expect_builtin_promote = [
55
+ ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
56
+ ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
57
+ ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
58
+ ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
59
+ ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
60
+ ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
61
+ ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
62
+ ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
63
+ ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
64
+ ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
65
+ ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
66
+ ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
67
+ ]
68
+ self.methods_expect_builtin_promote = [
69
+ ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
70
+ ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
71
+ ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
72
+ ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
73
+ ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
74
+ ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
75
+ ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
76
+ ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
77
+ ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
78
+ ]
79
+
80
+ # The remaining lists organize ops that autocast treats explicitly.
81
+ self.torch_fp16 = [
82
+ # deprecated _convolution
83
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
84
+ (0, 0), 1, False, True, True)),
85
+ # the current _convolution
86
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
87
+ (0, 0), 1, False, True, True, True)),
88
+ ("conv1d", conv_args_fp32[0]),
89
+ ("conv2d", conv_args_fp32[1]),
90
+ ("conv3d", conv_args_fp32[2]),
91
+ ("conv_tbc", conv_args_fp32[0] + bias_fp32),
92
+ ("conv_transpose1d", conv_args_fp32[0]),
93
+ ("conv_transpose2d", conv_args_fp32[1]),
94
+ ("conv_transpose3d", conv_args_fp32[2]),
95
+ ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
96
+ ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
97
+ ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
98
+ (1, 1), 1, False, True, True), TEST_WITH_ROCM),
99
+ ("prelu", pointwise0_fp32 + element0_fp32),
100
+ ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
101
+ ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
102
+ ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
103
+ ("matmul", mat0_fp32 + mat1_fp32),
104
+ ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
105
+ ("mm", mat0_fp32 + mat1_fp32),
106
+ ("mv", mat0_fp32 + pointwise0_fp32),
107
+ ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
108
+ ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
109
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
110
+ ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
111
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
112
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
113
+ ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
114
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
115
+ # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
116
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
117
+ # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
118
+ ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
119
+ ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
120
+ ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
121
+ ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
122
+ ]
123
+ self.torch_fp32 = [
124
+ ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
125
+ ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
126
+ ("cosh", pointwise0_fp16),
127
+ ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
128
+ ("exp", pointwise0_fp16),
129
+ ("expm1", pointwise0_fp16),
130
+ ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
131
+ ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
132
+ ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
133
+ ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
134
+ ("reciprocal", pointwise0_fp16),
135
+ ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
136
+ ("sinh", pointwise0_fp16),
137
+ ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
138
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
139
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
140
+ # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
141
+ ("softmax", pointwise0_fp16 + (0,)),
142
+ ("log_softmax", pointwise0_fp16 + (0,)),
143
+ ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
144
+ ("group_norm", mat0_fp16 + (1,)),
145
+ ("norm", pointwise0_fp16),
146
+ ("norm", pointwise0_fp16, {"dim": 0}),
147
+ # these need magma
148
+ # ("norm", mat0_fp16, {"p": "nuc"}),
149
+ # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
150
+ ("norm", pointwise0_fp16, {"p": 1}),
151
+ ("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
152
+ ("cosine_similarity", mat0_fp16 + mat1_fp16),
153
+ ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
154
+ ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
155
+ torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
156
+ torch.tensor([1], device=dev, dtype=torch.int))),
157
+ ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
158
+ ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
159
+ ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
160
+ ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
161
+ ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
162
+ ("cumprod", pointwise0_fp16 + (0,)),
163
+ ("cumsum", pointwise0_fp16 + (0,)),
164
+ ("dist", pointwise0_fp16 + pointwise1_fp16),
165
+ ("pdist", mat0_fp16),
166
+ ("cdist", mat0_fp16 + mat1_fp16),
167
+ ("prod", pointwise0_fp16),
168
+ ("prod", pointwise0_fp16 + (0,)),
169
+ ("renorm", mat0_fp16 + (2, 0, 1.0)),
170
+ ("sum", pointwise0_fp16),
171
+ ("sum", mat0_fp16 + (1,)),
172
+ ("logsumexp", mat0_fp16 + (1,)),
173
+ ]
174
+ self.torch_need_autocast_promote = [
175
+ ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
176
+ ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
177
+ ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
178
+ ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
179
+ torch.randn((1, 2), dtype=torch.float32, device=dev),
180
+ torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
181
+ torch.randn((1,), dtype=torch.float32, device=dev))),
182
+ ("cross", (torch.randn(3, dtype=torch.float32, device=dev),
183
+ torch.randn(3, dtype=torch.float16, device=dev))),
184
+ ("dot", pointwise0_fp16 + pointwise1_fp32),
185
+ ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
186
+ torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
187
+ 0, 0, False)),
188
+ ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
189
+ torch.randn(1, device=dev, dtype=torch.float16))),
190
+ ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
191
+ torch.randn(1, device=dev, dtype=torch.float32))),
192
+ ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
193
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
194
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
195
+ 0,
196
+ torch.randint(0, 2, (2, 2, 2), device=dev),
197
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
198
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
199
+ 0,
200
+ torch.randint(0, 2, (2, 2, 2), device=dev),
201
+ torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
202
+ ]
203
+ self.nn_fp16 = [
204
+ ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
205
+ ]
206
+ self.nn_fp32 = [
207
+ ("softplus", pointwise0_fp16),
208
+ ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
209
+ torch.zeros((n,), device=dev, dtype=torch.long))),
210
+ ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
211
+ torch.zeros((n, n, n), device=dev, dtype=torch.long))),
212
+ ("l1_loss", mat0_fp16 + mat1_fp16),
213
+ ("smooth_l1_loss", mat0_fp16 + mat1_fp16),
214
+ ("mse_loss", mat0_fp16 + mat1_fp16),
215
+ ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
216
+ ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
217
+ ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
218
+ ]
219
+ self.linalg_fp16 = [
220
+ ("linalg_vecdot", mat0_fp32 + mat0_fp32),
221
+ ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
222
+ ]
223
+ self.methods_fp16 = [
224
+ ("__matmul__", mat0_fp32 + mat1_fp32)
225
+ ]
226
+ self.methods_fp32 = [
227
+ ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
228
+ ]
229
+ self.banned = [
230
+ ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
231
+ torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
232
+ ]
233
+
234
+ class AutocastCPUTestLists:
235
+ # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
236
+ def __init__(self, dev):
237
+ super().__init__()
238
+ n = 8
239
+ # Utility arguments, created as one-element tuples
240
+ pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
241
+ pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
242
+ pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
243
+ mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
244
+ mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
245
+ mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
246
+
247
+ pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
248
+ pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
249
+
250
+ dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))
251
+
252
+ dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
253
+ for dimset in dummy_dimsets]
254
+
255
+ dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
256
+ conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),
257
+ torch.randn(dimset, dtype=torch.bfloat16, device=dev))
258
+ for dimset in dimsets]
259
+ conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
260
+ torch.randn(dimset, dtype=torch.float32, device=dev))
261
+ for dimset in dimsets]
262
+
263
+ bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
264
+ element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
265
+ pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
266
+ pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
267
+ mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
268
+ mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
269
+ mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
270
+ mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
271
+
272
+ dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),)
273
+ for dimset in dummy_dimsets]
274
+ # The lists below organize ops that autocast needs to test.
275
+ # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
276
+ # Each op is associated with a tuple of valid arguments.
277
+
278
+ # Some ops implement built-in type promotion. These don't need autocasting,
279
+ # but autocasting relies on their promotion, so we include tests to double-check.
280
+ self.torch_expect_builtin_promote = [
281
+ ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
282
+ ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
283
+ ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
284
+ ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
285
+ ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
286
+ ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
287
+ ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
288
+ ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
289
+ ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
290
+ ]
291
+
292
+ self.methods_expect_builtin_promote = [
293
+ ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
294
+ ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
295
+ ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
296
+ ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
297
+ ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
298
+ ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
299
+ ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
300
+ ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
301
+ ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
302
+ ]
303
+ # The remaining lists organize ops that autocast treats explicitly.
304
+ self.torch_16 = [
305
+ ("conv1d", conv_args_fp32[0]),
306
+ ("conv2d", conv_args_fp32[1]),
307
+ ("conv3d", conv_args_fp32[2]),
308
+ ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
309
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
310
+ ("mm", mat0_fp32 + mat1_fp32),
311
+ ("matmul", mat0_fp32 + mat1_fp32),
312
+ ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
313
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
314
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
315
+ ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
316
+ ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
317
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
318
+ ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
319
+ torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
320
+ torch.randn(5, device=dev, dtype=torch.float32),
321
+ 0)),
322
+ ("conv_transpose1d", conv_args_fp32[0]),
323
+ ("conv_transpose2d", conv_args_fp32[1]),
324
+ ("conv_transpose3d", conv_args_fp32[2]),
325
+ ("prelu", pointwise0_fp32 + element0_fp32),
326
+ ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
327
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
328
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
329
+ n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32),
330
+ torch.randn((3 * n), device=dev, dtype=torch.float32),
331
+ torch.randn((n, n), device=dev, dtype=torch.float32),
332
+ torch.randn((n), device=dev, dtype=torch.float32))),
333
+ ]
334
+ self.torch_fp32 = [
335
+ ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
336
+ ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16),
337
+ torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16),
338
+ torch.tensor([1], device=dev, dtype=torch.int))),
339
+ ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)),
340
+ ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)),
341
+ ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16),
342
+ ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
343
+ ]
344
+ self.nn_16 = [
345
+ ("linear", mat0_fp32 + mat1_fp32, {}),
346
+ ]
347
+ self.nn_fp32 = [
348
+ ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
349
+ ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
350
+ (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
351
+ ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
352
+ ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),
353
+ torch.zeros((n,), device=dev, dtype=torch.long))),
354
+ ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16),
355
+ torch.zeros((n, n, n), device=dev, dtype=torch.long))),
356
+ ("l1_loss", mat0_bf16 + mat1_bf16),
357
+ ("smooth_l1_loss", mat0_bf16 + mat1_bf16),
358
+ ("mse_loss", mat0_bf16 + mat1_bf16),
359
+ ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
360
+ ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
361
+ ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
362
+ ("huber_loss", mat0_bf16 + mat1_bf16),
363
+ ]
364
+ self.torch_need_autocast_promote = [
365
+ ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
366
+ ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
367
+ ]
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py ADDED
@@ -0,0 +1,630 @@
1
+ import torch
2
+ from functools import partial
3
+ from torch.testing import make_tensor
4
+ from torch.testing._internal.opinfo.core import (
5
+ OpInfo,
6
+ SampleInput,
7
+ )
8
+ from torch.testing._internal.common_dtype import all_types_and
9
+ import numpy as np
10
+
11
+ # Note: [autograd.Function db]
12
+ #
13
+ # This is a collection of autograd.Function test cases written as OpInfos
14
+ # so they can easily be consumed by OpInfo-based tests to check if a subsystem
15
+ # supports autograd.Function.
16
+ #
17
+ # Axes:
18
+ # - saves {output, input, intermediate, non-tensor}
19
+ # - {inputs, output} x {single tensor, tensors, arbitrary objects}
20
+ # - Uses {mark_dirty, mark_non_differentiable, once_differentiable}
21
+
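+ # As a rough illustration of how this database is consumed (a sketch only; the
+ # class and test names below are just an example of the OpInfo pattern):
+ #
+ #     from torch.testing._internal.common_device_type import ops
+ #
+ #     class TestAutogradFunction(TestCase):
+ #         @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
+ #         def test_sample_runs(self, device, dtype, op):
+ #             for sample in op.sample_inputs(device, dtype, requires_grad=True):
+ #                 op.op(sample.input, *sample.args)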
22
+
23
+ def to_numpy(tensor):
24
+ return tensor.cpu().numpy()
25
+
26
+
27
+ class NumpyCube(torch.autograd.Function):
28
+ @staticmethod
29
+ def forward(input):
30
+ input_np = to_numpy(input)
31
+ dinput = torch.tensor(3 * input_np ** 2, device=input.device)
32
+ return torch.tensor(input_np ** 3, device=input.device), dinput
33
+
34
+ @staticmethod
35
+ def setup_context(ctx, inputs, output):
36
+ ctx.save_for_backward(inputs[0], output[1])
37
+ ctx.save_for_forward(inputs[0], output[1])
38
+
39
+ @staticmethod
40
+ def backward(ctx, grad_output, grad_saved):
41
+ input, dinput = ctx.saved_tensors
42
+ return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)
43
+
44
+ @staticmethod
45
+ def vmap(info, in_dims, input):
46
+ result = NumpyCube.apply(input)
47
+ return result, (in_dims[0], in_dims[0])
48
+
49
+ @staticmethod
50
+ def jvp(ctx, input_tangent):
51
+ input, dinput = ctx.saved_tensors
52
+ return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
53
+
54
+
55
+ class CubeGenVmap(torch.autograd.Function):
56
+ generate_vmap_rule = True
57
+
58
+ @staticmethod
59
+ def forward(x):
60
+ return x ** 3, 3 * x ** 2
61
+
62
+ @staticmethod
63
+ def setup_context(ctx, inputs, outputs):
64
+ ctx.save_for_backward(inputs[0], outputs[1])
65
+ ctx.save_for_forward(inputs[0], outputs[1])
66
+
67
+ @staticmethod
68
+ def backward(ctx, grad_output, grad_saved):
69
+ input, dinput = ctx.saved_tensors
70
+ result = grad_output * dinput + 6 * dinput
71
+ return result
72
+
73
+ @staticmethod
74
+ def jvp(ctx, input_tangent):
75
+ input, dinput = ctx.saved_tensors
76
+ return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
77
+
78
+
79
+ def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
80
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
81
+ yield SampleInput(make_arg(1, low=0.8, high=2), args=())
82
+
83
+
84
+ class NumpyCubeNotComposable(torch.autograd.Function):
85
+ @staticmethod
86
+ def forward(input):
87
+ input_np = to_numpy(input)
88
+ return torch.tensor(input_np ** 3, device=input.device), input_np
89
+
90
+ @staticmethod
91
+ def setup_context(ctx, inputs, output):
92
+ _, input_np = output
93
+ ctx.input_np = input_np
94
+ ctx.device = inputs[0].device
95
+
96
+ @staticmethod
97
+ @torch.autograd.function.once_differentiable
98
+ def backward(ctx, grad_output, grad_saved):
99
+ result_np = 3 * (ctx.input_np ** 2)
100
+ return torch.tensor(result_np, device=ctx.device)
101
+
102
+
103
+ class NumpyMul(torch.autograd.Function):
104
+ @staticmethod
105
+ def forward(x, y):
106
+ return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
107
+
108
+ @staticmethod
109
+ def setup_context(ctx, inputs, output):
110
+ ctx.save_for_backward(*inputs)
111
+ ctx.save_for_forward(*inputs)
112
+
113
+ @staticmethod
114
+ def backward(ctx, grad_output):
115
+ x, y = ctx.saved_tensors
116
+ gx = None
117
+ if ctx.needs_input_grad[0]:
118
+ gx = NumpyMul.apply(grad_output, y)
119
+ gy = None
120
+ if ctx.needs_input_grad[1]:
121
+ gy = NumpyMul.apply(grad_output, x)
122
+ return gx, gy
123
+
124
+ @staticmethod
125
+ def vmap(info, in_dims, x, y):
126
+ x_bdim, y_bdim = in_dims
127
+ x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
128
+ y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
129
+ result = NumpyMul.apply(x, y)
130
+ result = result.movedim(-1, 0)
131
+ return result, 0
132
+
133
+ @staticmethod
134
+ def jvp(ctx, x_tangent, y_tangent):
135
+ x, y = ctx.saved_tensors
136
+ return x_tangent * y + y_tangent * x
137
+
138
+ def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
139
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
140
+ # Broadcasting
141
+ yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
142
+
143
+
144
+ class MulGenVmap(torch.autograd.Function):
145
+ generate_vmap_rule = True
146
+
147
+ @staticmethod
148
+ def forward(x, y):
149
+ return x * y
150
+
151
+ @staticmethod
152
+ def setup_context(ctx, inputs, outputs):
153
+ ctx.save_for_backward(*inputs)
154
+ ctx.save_for_forward(*inputs)
155
+
156
+ @staticmethod
157
+ def backward(ctx, grad_output):
158
+ x, y = ctx.saved_tensors
159
+ gx = None
160
+ if ctx.needs_input_grad[0]:
161
+ gx = MulGenVmap.apply(grad_output, y)
162
+ gy = None
163
+ if ctx.needs_input_grad[1]:
164
+ gy = MulGenVmap.apply(grad_output, x)
165
+ return gx, gy
166
+
167
+ @staticmethod
168
+ def jvp(ctx, x_tangent, y_tangent):
169
+ x, y = ctx.saved_tensors
170
+ return x_tangent * y + y_tangent * x
171
+
172
+
173
+ class NumpyExp_(torch.autograd.Function):
174
+ @staticmethod
175
+ def forward(x):
176
+ x_np = to_numpy(x)
177
+ np.exp(x_np, x_np)
178
+ return x
179
+
180
+ @staticmethod
181
+ def setup_context(ctx, inputs, output):
182
+ x, = inputs
183
+ ctx.mark_dirty(x)
184
+ ctx.save_for_backward(output)
185
+ ctx.save_for_forward(output)
186
+
187
+ @staticmethod
188
+ def backward(ctx, grad_output):
189
+ output, = ctx.saved_tensors
190
+ return NumpyMul.apply(grad_output, output)
191
+
192
+ @staticmethod
193
+ def vmap(info, in_dims, x):
194
+ NumpyExp_.apply(x)
195
+ return x, in_dims[0]
196
+
197
+ @staticmethod
198
+ def jvp(ctx, x_tangent):
199
+ # Doesn't call numpy operations because I didn't want to write NumpyMul_
200
+ output, = ctx.saved_tensors
201
+ x_tangent.mul_(output)
202
+ return x_tangent
203
+
204
+ class NumpySort(torch.autograd.Function):
205
+ @staticmethod
206
+ def forward(x, dim):
207
+ device = x.device
208
+ x = to_numpy(x)
209
+ ind = np.argsort(x, axis=dim)
210
+ ind_inv = np.argsort(ind, axis=dim)
211
+ result = np.take_along_axis(x, ind, axis=dim)
212
+ return (
213
+ torch.tensor(x, device=device),
214
+ torch.tensor(ind, device=device),
215
+ torch.tensor(ind_inv, device=device),
216
+ )
217
+
218
+ @staticmethod
219
+ def setup_context(ctx, inputs, output):
220
+ x, dim = inputs
221
+ _, ind, ind_inv = output
222
+ ctx.mark_non_differentiable(ind, ind_inv)
223
+ ctx.save_for_backward(ind, ind_inv)
224
+ ctx.save_for_forward(ind, ind_inv)
225
+ ctx.dim = dim
226
+
227
+ @staticmethod
228
+ def backward(ctx, grad_output, _0, _1):
229
+ ind, ind_inv = ctx.saved_tensors
230
+ return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None
231
+
232
+ @staticmethod
233
+ def vmap(info, in_dims, x, dim):
234
+ x_bdim, _ = in_dims
235
+ x = x.movedim(x_bdim, 0)
236
+ # wrap dim
237
+ dim = dim if dim >= 0 else dim + x.dim() - 1
238
+ return NumpySort.apply(x, dim + 1), (0, 0, 0)
239
+
240
+ @staticmethod
241
+ def jvp(ctx, x_tangent, _):
242
+ ind, ind_inv = ctx.saved_tensors
243
+ return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
244
+
245
+ class SortGenVmap(torch.autograd.Function):
246
+ generate_vmap_rule = True
247
+
248
+ @staticmethod
249
+ def forward(x, dim):
250
+ device = x.device
251
+ ind = torch.argsort(x, dim=dim)
252
+ ind_inv = torch.argsort(ind, axis=dim)
253
+ result = torch.take_along_dim(x, ind, dim=dim)
254
+ return result, ind, ind_inv
255
+
256
+ @staticmethod
257
+ def setup_context(ctx, inputs, outputs):
258
+ x, dim = inputs
259
+ _, ind, ind_inv = outputs
260
+ ctx.mark_non_differentiable(ind, ind_inv)
261
+ ctx.save_for_backward(ind, ind_inv)
262
+ ctx.save_for_forward(ind, ind_inv)
263
+ ctx.dim = dim
264
+
265
+ @staticmethod
266
+ def backward(ctx, grad_output, _0, _1):
267
+ ind, ind_inv = ctx.saved_tensors
268
+ return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None
269
+
270
+ @staticmethod
271
+ def jvp(ctx, x_tangent, _):
272
+ ind, ind_inv = ctx.saved_tensors
273
+ return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
274
+
275
+
276
+ def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
277
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
278
+ yield SampleInput(make_arg(3, 5), args=(1,))
279
+
280
+
281
+ def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
282
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
283
+ tensor = make_arg(3, 5)
284
+ dim = 1
285
+ _, ind, ind_inv = NumpySort.apply(tensor, 1)
286
+ yield SampleInput(tensor, args=(ind, ind_inv, dim))
287
+
288
+
289
+ class NumpyTake(torch.autograd.Function):
290
+ @staticmethod
291
+ def forward(x, ind, ind_inv, dim):
292
+ device = x.device
293
+ x = to_numpy(x)
294
+ ind = to_numpy(ind)
295
+ return torch.tensor(np.take_along_axis(x, ind, dim), device=device)
296
+
297
+ @staticmethod
298
+ def setup_context(ctx, inputs, output):
299
+ x, ind, ind_inv, dim = inputs
300
+ ctx.save_for_backward(ind, ind_inv)
301
+ ctx.save_for_forward(ind, ind_inv)
302
+ ctx.dim = dim
303
+
304
+ @staticmethod
305
+ def backward(ctx, grad_output):
306
+ ind, ind_inv = ctx.saved_tensors
307
+ result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
308
+ return result, None, None, None
309
+
310
+ @staticmethod
311
+ def vmap(info, in_dims, x, ind, ind_inv, dim):
312
+ x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims
313
+
314
+ # wrap dim
315
+ logical_dim = x.dim() if x_bdim is None else x_bdim - 1
316
+ dim = dim if dim >= 0 else dim + logical_dim
317
+
318
+ def expand_bdim(x, x_bdim):
319
+ if x_bdim is None:
320
+ return x.expand(info.batch_size, *x.shape)
321
+ return x.movedim(x_bdim, 0)
322
+
323
+ x = expand_bdim(x, x_bdim)
324
+ ind = expand_bdim(ind, ind_bdim)
325
+ ind_inv = expand_bdim(ind_inv, ind_inv_bdim)
326
+
327
+ return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0
328
+
329
+ @staticmethod
330
+ def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
331
+ assert ind_tangent is None
332
+ assert ind_inv_tangent is None
333
+ ind, ind_inv = ctx.saved_tensors
334
+ return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
335
+
336
+ class TakeGenVmap(torch.autograd.Function):
337
+ generate_vmap_rule = True
338
+
339
+ @staticmethod
340
+ def forward(x, ind, ind_inv, dim):
341
+ return torch.take_along_dim(x, ind, dim)
342
+
343
+ @staticmethod
344
+ def setup_context(ctx, inputs, outputs):
345
+ x, ind, ind_inv, dim = inputs
346
+ ctx.save_for_backward(ind, ind_inv)
347
+ ctx.save_for_forward(ind, ind_inv)
348
+ ctx.dim = dim
349
+
350
+ @staticmethod
351
+ def backward(ctx, grad_output):
352
+ ind, ind_inv = ctx.saved_tensors
353
+ result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
354
+ return result, None, None, None
355
+
356
+ @staticmethod
357
+ def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
358
+ ind, ind_inv = ctx.saved_tensors
359
+ return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
360
+
361
+ class Select(torch.autograd.Function):
362
+ @staticmethod
363
+ def forward(x, idx):
364
+ return x[idx]
365
+
366
+ @staticmethod
367
+ def setup_context(ctx, inputs, output):
368
+ x, idx = inputs
369
+ ctx.x_shape = x.shape
370
+ ctx.idx = idx
371
+
372
+ @staticmethod
373
+ def backward(ctx, grad_output):
374
+ result = grad_output.new_zeros(ctx.x_shape)
375
+ result[ctx.idx] = grad_output
376
+ return result, None
377
+
378
+ @staticmethod
379
+ def vmap(info, in_dims, x, idx):
380
+ x_bdim, _ = in_dims
381
+ x = x.movedim(x_bdim, 1)
382
+ return Select.apply(x, idx), 0
383
+
384
+ @staticmethod
385
+ def jvp(ctx, x_tangent, _):
386
+ return Select.apply(x_tangent, ctx.idx)
387
+
388
+ class SelectGenVmap(torch.autograd.Function):
389
+ generate_vmap_rule = True
390
+
391
+ @staticmethod
392
+ def forward(x, idx):
393
+ return x[idx]
394
+
395
+ @staticmethod
396
+ def setup_context(ctx, inputs, outputs):
397
+ x, idx = inputs
398
+ ctx.x_shape = x.shape
399
+ ctx.idx = idx
400
+
401
+ @staticmethod
402
+ def backward(ctx, grad_output):
403
+ result = grad_output.new_zeros(ctx.x_shape)
404
+ result[ctx.idx] = grad_output
405
+ return result, None
406
+
407
+ @staticmethod
408
+ def jvp(ctx, x_tangent, _):
409
+ return SelectGenVmap.apply(x_tangent, ctx.idx)
410
+
411
+
412
+ def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
413
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
414
+ yield SampleInput(make_arg(3, 5), args=(2,))
415
+
416
+ class ScaleGradGenVmap(torch.autograd.Function):
417
+ generate_vmap_rule = True
418
+ scale = 3.14
419
+
420
+ @staticmethod
421
+ def forward(x):
422
+ return x.clone()
423
+
424
+ @staticmethod
425
+ def setup_context(ctx, inputs, outputs):
426
+ pass
427
+
428
+ @staticmethod
429
+ def backward(ctx, grad_output):
430
+ return grad_output * ScaleGradGenVmap.scale
431
+
432
+ @staticmethod
433
+ def jvp(ctx, x_tangent):
434
+ return x_tangent * ScaleGradGenVmap.scale
435
+
436
+ class ZeroGradientsGenVmap(torch.autograd.Function):
437
+ generate_vmap_rule = True
438
+
439
+ @staticmethod
440
+ def forward(x, y):
441
+ return x.clone(), y.clone()
442
+
443
+ @staticmethod
444
+ def setup_context(ctx, inputs, outputs):
445
+ pass
446
+
447
+ @staticmethod
448
+ def backward(ctx, gx, gy):
449
+ # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
450
+ # Also intentionally not None.
451
+ return (
452
+ # Intentionally too-large gradient
453
+ torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
454
+ torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
455
+ )
456
+
457
+ @staticmethod
458
+ def jvp(ctx, gx, gy):
459
+ # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
460
+ # Also intentionally not None.
461
+ return (
462
+ torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
463
+ torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
464
+ )
465
+
466
+
467
+ def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
468
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
469
+ yield SampleInput(make_arg(3, 5))
470
+
471
+
472
+ class ForwardHasDefaultArgs(torch.autograd.Function):
473
+ @staticmethod
474
+ def forward(x, idx=(2,)):
475
+ return x[idx]
476
+
477
+ @staticmethod
478
+ def setup_context(ctx, inputs, output):
479
+ x, idx = inputs
480
+ ctx.x_shape = x.shape
481
+ ctx.idx = idx
482
+
483
+ @staticmethod
484
+ def backward(ctx, grad_output):
485
+ result = grad_output.new_zeros(ctx.x_shape)
486
+ result[ctx.idx] = grad_output
487
+ return result, None
488
+
489
+ @staticmethod
490
+ def vmap(info, in_dims, x, idx):
491
+ x_bdim, _ = in_dims
492
+ x = x.movedim(x_bdim, 1)
493
+ return ForwardHasDefaultArgs.apply(x, idx), 0
494
+
495
+ @staticmethod
496
+ def jvp(ctx, x_tangent, _):
497
+ return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
498
+
499
+
500
+ autograd_function_db = [
501
+ OpInfo(
502
+ 'NumpyCubeAutogradFunction',
503
+ op=NumpyCube.apply,
504
+ supports_forward_ad=True,
505
+ supports_fwgrad_bwgrad=True,
506
+ sample_inputs_func=sample_inputs_numpy_cube,
507
+ dtypes=all_types_and(torch.bool, torch.half),
508
+ supports_out=False,
509
+ ),
510
+ OpInfo(
511
+ 'NumpyExpMarkDirtyAutogradFunction',
512
+ op=lambda x: NumpyExp_.apply(x.clone()),
513
+ inplace_variant=NumpyExp_.apply,
514
+ supports_forward_ad=True,
515
+ supports_fwgrad_bwgrad=True,
516
+ sample_inputs_func=sample_inputs_numpy_cube,
517
+ dtypes=all_types_and(torch.bool, torch.half),
518
+ supports_out=False,
519
+ ),
520
+ OpInfo(
521
+ 'NumpyMulAutogradFunction',
522
+ op=NumpyMul.apply,
523
+ supports_forward_ad=True,
524
+ supports_fwgrad_bwgrad=True,
525
+ sample_inputs_func=sample_inputs_numpy_mul,
526
+ dtypes=all_types_and(torch.bool, torch.half),
527
+ supports_out=False,
528
+ ),
529
+ OpInfo(
530
+ 'NumpyCubeNotComposableAutogradFunction',
531
+ op=lambda x: NumpyCubeNotComposable.apply(x)[0],
532
+ supports_forward_ad=False,
533
+ supports_fwgrad_bwgrad=False,
534
+ sample_inputs_func=sample_inputs_numpy_cube,
535
+ dtypes=all_types_and(torch.bool, torch.half),
536
+ supports_out=False,
537
+ ),
538
+ OpInfo(
539
+ 'NumpySortAutogradFunction',
540
+ op=NumpySort.apply,
541
+ supports_forward_ad=False,
542
+ supports_fwgrad_bwgrad=False,
543
+ sample_inputs_func=sample_inputs_numpy_sort,
544
+ dtypes=all_types_and(torch.bool, torch.half),
545
+ supports_out=False,
546
+ gradcheck_wrapper=lambda y, ind: y,
547
+ ),
548
+ OpInfo(
549
+ 'NumpyTakeAutogradFunction',
550
+ op=NumpyTake.apply,
551
+ supports_forward_ad=False,
552
+ supports_fwgrad_bwgrad=False,
553
+ sample_inputs_func=sample_inputs_numpy_take,
554
+ dtypes=all_types_and(torch.bool, torch.half),
555
+ supports_out=False,
556
+ ),
557
+ OpInfo(
558
+ 'SelectAutogradFunction',
559
+ op=Select.apply,
560
+ supports_forward_ad=True,
561
+ supports_fwgrad_bwgrad=True,
562
+ sample_inputs_func=sample_inputs_select,
563
+ dtypes=all_types_and(torch.bool, torch.half),
564
+ supports_out=False,
565
+ ),
566
+ OpInfo(
567
+ 'CubeGenVmapAutogradFunction',
568
+ op=CubeGenVmap.apply,
569
+ supports_forward_ad=True,
570
+ supports_fwgrad_bwgrad=True,
571
+ sample_inputs_func=sample_inputs_numpy_cube,
572
+ dtypes=all_types_and(torch.bool, torch.half),
573
+ supports_out=False,
574
+ ),
575
+ OpInfo(
576
+ 'MulGenVmapAutogradFunction',
577
+ op=MulGenVmap.apply,
578
+ supports_forward_ad=True,
579
+ supports_fwgrad_bwgrad=True,
580
+ sample_inputs_func=sample_inputs_numpy_mul,
581
+ dtypes=all_types_and(torch.bool, torch.half),
582
+ supports_out=False,
583
+ ),
584
+ OpInfo(
585
+ 'SortGenVmapAutogradFunction',
586
+ op=SortGenVmap.apply,
587
+ supports_forward_ad=True,
588
+ supports_fwgrad_bwgrad=True,
589
+ sample_inputs_func=sample_inputs_numpy_sort,
590
+ dtypes=all_types_and(torch.bool, torch.half),
591
+ supports_out=False,
592
+ gradcheck_wrapper=lambda y, ind: y,
593
+ ),
594
+ OpInfo(
595
+ 'SelectGenVmapAutogradFunction',
596
+ op=SelectGenVmap.apply,
597
+ supports_forward_ad=True,
598
+ supports_fwgrad_bwgrad=True,
599
+ sample_inputs_func=sample_inputs_select,
600
+ dtypes=all_types_and(torch.bool, torch.half),
601
+ supports_out=False,
602
+ ),
603
+ OpInfo(
604
+ 'ScaleGradGenVmapAutogradFunction',
605
+ op=ScaleGradGenVmap.apply,
606
+ supports_forward_ad=True,
607
+ supports_fwgrad_bwgrad=True,
608
+ sample_inputs_func=sample_inputs_numpy_cube,
609
+ dtypes=all_types_and(torch.bool, torch.half),
610
+ supports_out=False,
611
+ ),
612
+ OpInfo(
613
+ 'ZeroGradientsGenVmapAutogradFunction',
614
+ op=ZeroGradientsGenVmap.apply,
615
+ supports_forward_ad=True,
616
+ supports_fwgrad_bwgrad=True,
617
+ sample_inputs_func=sample_inputs_numpy_mul,
618
+ dtypes=all_types_and(torch.bool, torch.half),
619
+ supports_out=False,
620
+ ),
621
+ OpInfo(
622
+ 'ForwardHasDefaultArgsAutogradFunction',
623
+ op=ForwardHasDefaultArgs.apply,
624
+ supports_forward_ad=True,
625
+ supports_fwgrad_bwgrad=True,
626
+ sample_inputs_func=sample_inputs_forward_default_args,
627
+ dtypes=all_types_and(torch.bool, torch.half),
628
+ supports_out=False,
629
+ ),
630
+ ]
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py ADDED
@@ -0,0 +1,163 @@
1
+ import os
2
+ import re
3
+ import sys
4
+ from typing import List
5
+
6
+ __all__ = [
7
+ "check_code_for_cuda_kernel_launches",
8
+ "check_cuda_kernel_launches",
9
+ ]
10
+
11
+ # FILES TO EXCLUDE (match is done with suffix using `endswith`)
12
+ # You wouldn't drive without a seatbelt, though, so why would you
13
+ # launch a kernel without some safety? Use this as a quick workaround
14
+ # for a problem with the checker, fix the checker, then de-exclude
15
+ # the files in question.
16
+ exclude_files: List[str] = []
17
+
18
+ # Without using a C++ AST we can't 100% detect kernel launches, so we
19
+ # model them as having the pattern "<<<parameters>>>(arguments);"
20
+ # We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
21
+ # the next statement.
22
+ #
23
+ # We model the next statement as ending at the next `}` or `;`.
24
+ # If we see `}` then a clause ended (bad); if we see a semi-colon then
25
+ # we expect the launch check just before it.
26
+ #
27
+ # Since the kernel launch can include lambda statements, it's important
28
+ # to find the correct end-paren of the kernel launch. Doing this with
29
+ # pure regex requires recursive regex, which aren't part of the Python
30
+ # standard library. To avoid an additional dependency, we build a prefix
31
+ # regex that finds the start of a kernel launch, use a paren-matching
32
+ # algorithm to find the end of the launch, and then another regex to
33
+ # determine if a launch check is present.
34
+
35
+ # Finds potential starts of kernel launches
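+ # Concretely, the "safe" pattern this checker wants to see in .cu/.cuh sources
+ # is a launch immediately followed by the check macro as the next statement
+ # (kernel and argument names below are illustrative):
+ #
+ #     my_kernel<<<blocks, threads, 0, stream>>>(arg0, arg1);
+ #     C10_CUDA_KERNEL_LAUNCH_CHECK();
+ #
+ # If the statement following the launch is not C10_CUDA_KERNEL_LAUNCH_CHECK();
+ # (or the enclosing clause ends first), the launch is counted as unsafe.
+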
36
+ kernel_launch_start = re.compile(
37
+ r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
38
+ )
39
+
40
+ # This pattern should start at the character after the final paren of the
41
+ # kernel launch. It returns a match if the launch check is not the next statement
42
+ has_check = re.compile(
43
+ r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
44
+ )
45
+
46
+ def find_matching_paren(s: str, startpos: int) -> int:
47
+ """Given a string "prefix (unknown number of characters) suffix"
48
+ and the position of the first `(` returns the index of the character
49
+ 1 past the `)`, accounting for paren nesting
50
+ """
51
+ opening = 0
52
+ for i, c in enumerate(s[startpos:]):
53
+ if c == '(':
54
+ opening += 1
55
+ elif c == ')':
56
+ opening -= 1
57
+ if opening == 0:
58
+ return startpos + i + 1
59
+
60
+ raise IndexError("Closing parens not found!")
61
+
62
+
63
+ def should_exclude_file(filename) -> bool:
64
+ for exclude_suffix in exclude_files:
65
+ if filename.endswith(exclude_suffix):
66
+ return True
67
+ return False
68
+
69
+
70
+ def check_code_for_cuda_kernel_launches(code, filename=None):
71
+ """Checks code for CUDA kernel launches without cuda error checks.
72
+
73
+ Args:
74
+ filename - Filename of file containing the code. Used only for display
75
+ purposes, so you can put anything here.
76
+ code - The code to check
77
+
78
+ Returns:
79
+ The number of unsafe kernel launches in the code
80
+ """
81
+ if filename is None:
82
+ filename = "##Python Function Call##"
83
+
84
+ # We break the code apart and put it back together to add
85
+ # helpful line numberings for identifying problem areas
86
+ code = enumerate(code.split("\n")) # Split by line breaks
87
+ code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines
88
+ code = '\n'.join(code) # Put it back together
89
+
90
+ num_launches_without_checks = 0
91
+ for m in kernel_launch_start.finditer(code):
92
+ end_paren = find_matching_paren(code, m.end() - 1)
93
+ if has_check.match(code, end_paren):
94
+ num_launches_without_checks += 1
95
+ context = code[m.start():end_paren + 1]
96
+ print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}", file=sys.stderr)
97
+
98
+ return num_launches_without_checks
99
+
100
+
101
+ def check_file(filename):
102
+ """Checks a file for CUDA kernel launches without cuda error checks
103
+
104
+ Args:
105
+ filename - File to check
106
+
107
+ Returns:
108
+ The number of unsafe kernel launches in the file
109
+ """
110
+ if not (filename.endswith((".cu", ".cuh"))):
111
+ return 0
112
+ if should_exclude_file(filename):
113
+ return 0
114
+ with open(filename) as fo:
115
+ contents = fo.read()
116
+ unsafeCount = check_code_for_cuda_kernel_launches(contents, filename)
117
+ return unsafeCount
118
+
119
+
120
+ def check_cuda_kernel_launches():
121
+ """Checks all pytorch code for CUDA kernel launches without cuda error checks
122
+
123
+ Returns:
124
+ The number of unsafe kernel launches in the codebase
125
+ """
126
+ torch_dir = os.path.dirname(os.path.realpath(__file__))
127
+ torch_dir = os.path.dirname(torch_dir) # Go up to parent torch
128
+ torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2
129
+
130
+ kernels_without_checks = 0
131
+ files_without_checks = []
132
+ for root, dirnames, filenames in os.walk(torch_dir):
133
+ # `$BASE/build` and `$BASE/torch/include` are generated
134
+ # so we don't want to flag their contents
135
+ if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"):
136
+ # Curtail search by modifying dirnames and filenames in place
137
+ # Yes, this is the way to do this, see `help(os.walk)`
138
+ dirnames[:] = []
139
+ continue
140
+
141
+ for x in filenames:
142
+ filename = os.path.join(root, x)
143
+ file_result = check_file(filename)
144
+ if file_result > 0:
145
+ kernels_without_checks += file_result
146
+ files_without_checks.append(filename)
147
+
148
+ if kernels_without_checks > 0:
149
+ count_str = f"Found {kernels_without_checks} instances in " \
150
+ f"{len(files_without_checks)} files where kernel " \
151
+ "launches didn't have checks."
152
+ print(count_str, file=sys.stderr)
153
+ print("Files without checks:", file=sys.stderr)
154
+ for x in files_without_checks:
155
+ print(f"\t{x}", file=sys.stderr)
156
+ print(count_str, file=sys.stderr)
157
+
158
+ return kernels_without_checks
159
+
160
+
161
+ if __name__ == "__main__":
162
+ unsafe_launches = check_cuda_kernel_launches()
163
+ sys.exit(0 if unsafe_launches == 0 else 1)
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py ADDED
@@ -0,0 +1,247 @@
1
+ r"""This file is allowed to initialize CUDA context when imported."""
2
+
3
+ import functools
4
+ import torch
5
+ import torch.cuda
6
+ from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS
7
+ import inspect
8
+ import contextlib
9
+
10
+
11
+ CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()
12
+
13
+
14
+ TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
15
+ CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None
16
+ # note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
17
+ if TEST_WITH_ROCM:
18
+ TEST_CUDNN = LazyVal(lambda: TEST_CUDA)
19
+ else:
20
+ TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE)))
21
+
22
+ TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0)
23
+
24
+ SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3))
25
+ SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0))
26
+ SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0))
27
+ SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5))
28
+ SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
29
+ SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))
30
+
31
+ PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and (not TEST_WITH_ROCM) and (not IS_WINDOWS) and SM80OrLater)
32
+ PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM)
33
+ # This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate
34
+ PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION)
35
+
36
+ PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM
37
+
38
+ if TEST_NUMBA:
39
+ try:
40
+ import numba.cuda
41
+ TEST_NUMBA_CUDA = numba.cuda.is_available()
42
+ except Exception as e:
43
+ TEST_NUMBA_CUDA = False
44
+ TEST_NUMBA = False
45
+ else:
46
+ TEST_NUMBA_CUDA = False
47
+
48
+ # Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
49
+ # RNG have been initialized.
50
+ __cuda_ctx_rng_initialized = False
51
+
52
+
53
+ # after this call, CUDA context and RNG must have been initialized on each GPU
54
+ def initialize_cuda_context_rng():
55
+ global __cuda_ctx_rng_initialized
56
+ assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
57
+ if not __cuda_ctx_rng_initialized:
58
+ # initialize cuda context and rng for memory tests
59
+ for i in range(torch.cuda.device_count()):
60
+ torch.randn(1, device=f"cuda:{i}")
61
+ __cuda_ctx_rng_initialized = True
62
+
63
+
64
+ # Test whether hardware TF32 math mode enabled. It is enabled only on:
65
+ # - CUDA >= 11
66
+ # - arch >= Ampere
67
+ def tf32_is_not_fp32():
68
+ if not torch.cuda.is_available() or torch.version.cuda is None:
69
+ return False
70
+ if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
71
+ return False
72
+ if int(torch.version.cuda.split('.')[0]) < 11:
73
+ return False
74
+ return True
75
+
76
+
77
+ @contextlib.contextmanager
78
+ def tf32_off():
79
+ old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
80
+ try:
81
+ torch.backends.cuda.matmul.allow_tf32 = False
82
+ with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
83
+ yield
84
+ finally:
85
+ torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
86
+
87
+
88
+ @contextlib.contextmanager
89
+ def tf32_on(self, tf32_precision=1e-5):
90
+ old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
91
+ old_precision = self.precision
92
+ try:
93
+ torch.backends.cuda.matmul.allow_tf32 = True
94
+ self.precision = tf32_precision
95
+ with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
96
+ yield
97
+ finally:
98
+ torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul
99
+ self.precision = old_precision
100
+
101
+
102
+ # This is a wrapper that wraps a test to run this test twice, one with
103
+ # allow_tf32=True, another with allow_tf32=False. When running with
104
+ # allow_tf32=True, it will use reduced precision as specified by the
105
+ # argument. For example:
106
+ # @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
107
+ # @tf32_on_and_off(0.005)
108
+ # def test_matmul(self, device, dtype):
109
+ # a = ...; b = ...;
110
+ # c = torch.matmul(a, b)
111
+ # self.assertEqual(c, expected)
112
+ # In the above example, when testing torch.float32 and torch.complex64 on CUDA
113
+ # on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at
114
+ # TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced
115
+ # precision to check values.
116
+ #
117
+ # This decorator can be used for function with or without device/dtype, such as
118
+ # @tf32_on_and_off(0.005)
119
+ # def test_my_op(self)
120
+ # @tf32_on_and_off(0.005)
121
+ # def test_my_op(self, device)
122
+ # @tf32_on_and_off(0.005)
123
+ # def test_my_op(self, device, dtype)
124
+ # @tf32_on_and_off(0.005)
125
+ # def test_my_op(self, dtype)
126
+ # if neither device nor dtype is specified, it will check if the system has ampere device
127
+ # if device is specified, it will check if device is cuda
128
+ # if dtype is specified, it will check if dtype is float32 or complex64
129
+ # tf32 and fp32 are different only when all the three checks pass
130
+ def tf32_on_and_off(tf32_precision=1e-5):
131
+ def with_tf32_disabled(self, function_call):
132
+ with tf32_off():
133
+ function_call()
134
+
135
+ def with_tf32_enabled(self, function_call):
136
+ with tf32_on(self, tf32_precision):
137
+ function_call()
138
+
139
+ def wrapper(f):
140
+ params = inspect.signature(f).parameters
141
+ arg_names = tuple(params.keys())
142
+
143
+ @functools.wraps(f)
144
+ def wrapped(*args, **kwargs):
145
+ for k, v in zip(arg_names, args):
146
+ kwargs[k] = v
147
+ cond = tf32_is_not_fp32()
148
+ if 'device' in kwargs:
149
+ cond = cond and (torch.device(kwargs['device']).type == 'cuda')
150
+ if 'dtype' in kwargs:
151
+ cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
152
+ if cond:
153
+ with_tf32_disabled(kwargs['self'], lambda: f(**kwargs))
154
+ with_tf32_enabled(kwargs['self'], lambda: f(**kwargs))
155
+ else:
156
+ f(**kwargs)
157
+
158
+ return wrapped
159
+ return wrapper
160
+
161
+
162
+ # This is a wrapper that wraps a test to run it with TF32 turned off.
163
+ # This wrapper is designed to be used when a test uses matmul or convolutions
164
+ # but the purpose of that test is not testing matmul or convolutions.
165
+ # Disabling TF32 will enforce torch.float tensors to be always computed
166
+ # at full precision.
167
+ def with_tf32_off(f):
168
+ @functools.wraps(f)
169
+ def wrapped(*args, **kwargs):
170
+ with tf32_off():
171
+ return f(*args, **kwargs)
172
+
173
+ return wrapped
174
+
175
+ def _get_magma_version():
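+ # E.g., a usage sketch (the test name below is made up):
+ #
+ #     @with_tf32_off
+ #     def test_op_that_happens_to_use_matmul(self, device):
+ #         ...
+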
176
+ if 'Magma' not in torch.__config__.show():
177
+ return (0, 0)
178
+ position = torch.__config__.show().find('Magma ')
179
+ version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0]
180
+ return tuple(int(x) for x in version_str.split("."))
181
+
182
+ def _get_torch_cuda_version():
183
+ if torch.version.cuda is None:
184
+ return (0, 0)
185
+ cuda_version = str(torch.version.cuda)
186
+ return tuple(int(x) for x in cuda_version.split("."))
187
+
188
+ def _get_torch_rocm_version():
189
+ if not TEST_WITH_ROCM:
190
+ return (0, 0)
191
+ rocm_version = str(torch.version.hip)
192
+ rocm_version = rocm_version.split("-")[0] # ignore git sha
193
+ return tuple(int(x) for x in rocm_version.split("."))
194
+
195
+ def _check_cusparse_generic_available():
196
+ return not TEST_WITH_ROCM
197
+
198
+ def _check_hipsparse_generic_available():
199
+ if not TEST_WITH_ROCM:
200
+ return False
201
+
202
+ rocm_version = str(torch.version.hip)
203
+ rocm_version = rocm_version.split("-")[0] # ignore git sha
204
+ rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
205
+ return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1))
206
+
207
+
208
+ TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available()
209
+ TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available()
210
+
211
+ # Shared by test_cuda.py and test_multigpu.py
212
+ def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
213
+ # Create a module+optimizer that will use scaling, and a control module+optimizer
214
+ # that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
215
+ mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
216
+ mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
217
+ with torch.no_grad():
218
+ for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
219
+ s.copy_(c)
220
+
221
+ kwargs = {"lr": 1.0}
222
+ if optimizer_kwargs is not None:
223
+ kwargs.update(optimizer_kwargs)
224
+ opt_control = optimizer_ctor(mod_control.parameters(), **kwargs)
225
+ opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs)
226
+
227
+ return mod_control, mod_scaling, opt_control, opt_scaling
228
+
229
+
230
+ def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
231
+ data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
232
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
233
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
234
+ (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
235
+
236
+ loss_fn = torch.nn.MSELoss().cuda()
237
+
238
+ skip_iter = 2
239
+
240
+ return _create_scaling_models_optimizers(
241
+ device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs,
242
+ ) + (data, loss_fn, skip_iter)
243
+
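+ # For context, a rough sketch of how the scaling case above is consumed
+ # (loop structure is illustrative; the real tests also run the control
+ # module/optimizer and force a skipped step at `skip_iter`):
+ #
+ #     mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = \
+ #         _create_scaling_case()
+ #     scaler = torch.cuda.amp.GradScaler()
+ #     for i, (input, target) in enumerate(data):
+ #         opt_scaling.zero_grad()
+ #         with torch.autocast('cuda'):
+ #             loss = loss_fn(mod_scaling(input), target)
+ #         scaler.scale(loss).backward()
+ #         scaler.step(opt_scaling)
+ #         scaler.update()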
244
+
245
+ # Importing this module should NOT eagerly initialize CUDA
246
+ if not CUDA_ALREADY_INITIALIZED_ON_IMPORT:
247
+ assert not torch.cuda.is_initialized()
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py ADDED
@@ -0,0 +1,1513 @@
1
+ import copy
2
+ import gc
3
+ import inspect
4
+ import runpy
5
+ import sys
6
+ import threading
7
+ from collections import namedtuple
8
+ from enum import Enum
9
+ from functools import wraps, partial
10
+ from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set
11
+ import unittest
12
+ import os
13
+ import torch
14
+ from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \
15
+ skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \
16
+ IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, \
17
+ _TestParametrizer, compose_parametrize_fns, dtype_name, \
18
+ TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \
19
+ get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE
20
+ from torch.testing._internal.common_cuda import _get_torch_cuda_version, \
21
+ TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version
22
+ from torch.testing._internal.common_dtype import get_all_dtypes
23
+
24
+ try:
25
+ import psutil # type: ignore[import]
26
+ HAS_PSUTIL = True
27
+ except ImportError:
28
+ HAS_PSUTIL = False
29
+
30
+ # Note [Writing Test Templates]
31
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
32
+ #
33
+ # This note was written shortly after the PyTorch 1.9 release.
34
+ # If you notice it's out-of-date or think it could be improved then please
35
+ # file an issue.
36
+ #
37
+ # PyTorch has its own framework for instantiating test templates. That is, for
38
+ # taking test classes that look similar to unittest or pytest
39
+ # compatible test classes and optionally doing the following:
40
+ #
41
+ # - instantiating a version of the test class for each available device type
42
+ # (often the CPU, CUDA, and META device types)
43
+ # - further instantiating a version of each test that's always specialized
44
+ # on the test class's device type, and optionally specialized further
45
+ # on datatypes or operators
46
+ #
47
+ # This functionality is similar to pytest's parametrize functionality
48
+ # (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable
49
+ # additional logic that specializes the instantiated test classes for their
50
+ # device types (see CPUTestBase and CUDATestBase below), supports a variety
51
+ # of composable decorators that allow for test filtering and setting
52
+ # tolerances, and allows tests parametrized by operators to instantiate
53
+ # only the subset of device type x dtype that operator supports.
54
+ #
55
+ # This framework was built to make it easier to write tests that run on
56
+ # multiple device types, multiple datatypes (dtypes), and for multiple
57
+ # operators. It's also useful for controlling which tests are run. For example,
58
+ # only tests that use a CUDA device can be run on platforms with CUDA.
59
+ # Let's dive in with an example to get an idea for how it works:
60
+ #
61
+ # --------------------------------------------------------
62
+ # A template class (looks like a regular unittest TestCase)
63
+ # class TestClassFoo(TestCase):
64
+ #
65
+ # # A template test that can be specialized with a device
66
+ # # NOTE: this test case is not runnable by unittest or pytest because it
67
+ # # accepts an extra positional argument, "device", that they do not understand
68
+ # def test_bar(self, device):
69
+ # pass
70
+ #
71
+ # # Function that instantiates a template class and its tests
72
+ # instantiate_device_type_tests(TestClassFoo, globals())
73
+ # --------------------------------------------------------
74
+ #
75
+ # In the above code example we see a template class and a single test template
76
+ # that can be instantiated with a device. The function
77
+ # instantiate_device_type_tests(), called at file scope, instantiates
78
+ # new test classes, one per available device type, and new tests in those
79
+ # classes from these templates. It actually does this by removing
80
+ # the class TestClassFoo and replacing it with classes like TestClassFooCPU
81
+ # and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
82
+ # and CUDATestBase respectively. Additional device types, like XLA,
83
+ # (see https://github.com/pytorch/xla) can further extend the set of
84
+ # instantiated test classes to create classes like TestClassFooXLA.
85
+ #
86
+ # The test template, test_bar(), is also instantiated. In this case the template
87
+ # is only specialized on a device, so (depending on the available device
88
+ # types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
89
+ # in TestClassFooCUDA. We can think of the instantiated test classes as
90
+ # looking like this:
91
+ #
92
+ # --------------------------------------------------------
93
+ # # An instantiated test class for the CPU device type
94
+ # class TestClassFooCPU(CPUTestBase):
95
+ #
96
+ # # An instantiated test that calls the template with the string representation
97
+ # # of a device from the test class's device type
98
+ # def test_bar_cpu(self):
99
+ # test_bar(self, 'cpu')
100
+ #
101
+ # # An instantiated test class for the CUDA device type
102
+ # class TestClassFooCUDA(CUDATestBase):
103
+ #
104
+ # # An instantiated test that calls the template with the string representation
105
+ # # of a device from the test class's device type
106
+ # def test_bar_cuda(self):
107
+ # test_bar(self, 'cuda:0')
108
+ # --------------------------------------------------------
109
+ #
110
+ # These instantiated test classes ARE discoverable and runnable by both
111
+ # unittest and pytest. One thing that may be confusing, however, is that
112
+ # attempting to run "test_bar" will not work, despite it appearing in the
113
+ # original template code. This is because "test_bar" is no longer discoverable
114
+ # after instantiate_device_type_tests() runs, as the above snippet shows.
115
+ # Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both
116
+ # can be run with the option "-k test_bar".
117
+ #
118
+ # Removing the template class and adding the instantiated classes requires
119
+ # passing "globals()" to instantiate_device_type_tests(), because it
120
+ # edits the file's Python objects.
121
+ #
122
+ # As mentioned, tests can be additionally parametrized on dtypes or
123
+ # operators. Datatype parametrization uses the @dtypes decorator and
124
+ # requires a test template like this:
125
+ #
126
+ # --------------------------------------------------------
127
+ # # A template test that can be specialized with a device and a datatype (dtype)
128
+ # @dtypes(torch.float32, torch.int64)
129
+ # def test_car(self, device, dtype):
130
+ # pass
131
+ # --------------------------------------------------------
132
+ #
133
+ # If the CPU and CUDA device types are available this test would be
134
+ # instantiated as 4 tests that cover the cross-product of the two dtypes
135
+ # and two device types:
136
+ #
137
+ # - test_car_cpu_float32
138
+ # - test_car_cpu_int64
139
+ # - test_car_cuda_float32
140
+ # - test_car_cuda_int64
141
+ #
142
+ # The dtype is passed as a torch.dtype object.
143
+ #
144
+ # Tests parametrized on operators (actually on OpInfos, more on that in a
145
+ # moment...) use the @ops decorator and require a test template like this:
146
+ # --------------------------------------------------------
147
+ # # A template test that can be specialized with a device, dtype, and OpInfo
148
+ # @ops(op_db)
149
+ # def test_car(self, device, dtype, op):
150
+ # pass
151
+ # --------------------------------------------------------
152
+ #
153
+ # See the documentation for the @ops decorator below for additional details
154
+ # on how to use it and see the note [OpInfos] in
155
+ # common_methods_invocations.py for more details on OpInfos.
156
+ #
157
+ # A test parametrized over the entire "op_db", which contains hundreds of
158
+ # OpInfos, will likely have hundreds or thousands of instantiations. The
159
+ # test will be instantiated on the cross-product of device types, operators,
160
+ # and the dtypes the operator supports on that device type. The instantiated
161
+ # tests will have names like:
162
+ #
163
+ # - test_car_add_cpu_float32
164
+ # - test_car_sub_cuda_int64
165
+ #
166
+ # The first instantiated test calls the original test_car() with the OpInfo
167
+ # for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
168
+ # and the dtype torch.float32 for its "dtype" argument. The second instantiated
169
+ # test calls the test_car() with the OpInfo for torch.sub, a CUDA device string
170
+ # like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
171
+ # torch.int64 for its "dtype" argument.
172
+ #
173
+ # In addition to parametrizing over device, dtype, and ops via OpInfos, the
174
+ # @parametrize decorator is supported for arbitrary parametrizations:
175
+ # --------------------------------------------------------
176
+ # # A template test that can be specialized with a device, dtype, and value for x
177
+ # @parametrize("x", range(5))
178
+ # def test_car(self, device, dtype, x):
179
+ # pass
180
+ # --------------------------------------------------------
181
+ #
182
+ # See the documentation for @parametrize in common_utils.py for additional details
183
+ # on this. Note that the instantiate_device_type_tests() function will handle
184
+ # such parametrizations; there is no need to additionally call
185
+ # instantiate_parametrized_tests().
186
+ #
187
+ # Clever test filtering can be very useful when working with parametrized
188
+ # tests. "-k test_car" would run every instantiated variant of the test_car()
189
+ # test template, and "-k test_car_add" runs every variant instantiated with
190
+ # torch.add.
191
+ #
192
+ # It is important to use the passed device and dtype as appropriate. Use
193
+ # helper functions like make_tensor() that require explicitly specifying
194
+ # the device and dtype so they're not forgotten.
195
+ #
196
+ # Test templates can use a variety of composable decorators to specify
197
+ # additional options and requirements, some are listed here:
198
+ #
199
+ # - @deviceCountAtLeast(<minimum number of devices to run test with>)
200
+ # Passes a list of strings representing all available devices of
201
+ # the test class's device type as the test template's "device" argument.
202
+ # If there are fewer devices than the value passed to the decorator
203
+ # the test is skipped.
204
+ # - @dtypes(<list of tuples of dtypes>)
205
+ # In addition to accepting multiple dtypes, the @dtypes decorator
206
+ # can accept a sequence of tuple pairs of dtypes. The test template
207
+ # will be called with each tuple for its "dtype" argument.
208
+ # - @onlyNativeDeviceTypes
209
+ # Skips the test if the device is not a native device type (currently CPU, CUDA, Meta)
210
+ # - @onlyCPU
211
+ # Skips the test if the device is not a CPU device
212
+ # - @onlyCUDA
213
+ # Skips the test if the device is not a CUDA device
214
+ # - @onlyMPS
215
+ # Skips the test if the device is not an MPS device
216
+ # - @skipCPUIfNoLapack
217
+ # Skips the test if the device is a CPU device and LAPACK is not installed
218
+ # - @skipCPUIfNoMkl
219
+ # Skips the test if the device is a CPU device and MKL is not installed
220
+ # - @skipCUDAIfNoMagma
221
+ # Skips the test if the device is a CUDA device and MAGMA is not installed
222
+ # - @skipCUDAIfRocm
223
+ # Skips the test if the device is a CUDA device and ROCm is being used
224
+
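+ # Editor's note: an illustrative sketch (not part of the original file) that combines
+ # several of the decorators listed above on one test template. TestExample and
+ # test_qux are hypothetical names.
+ #
+ # class TestExample(TestCase):
+ #     @onlyCUDA
+ #     @skipCUDAIfNoMagma
+ #     @dtypes(torch.float32, torch.float64)
+ #     def test_qux(self, device, dtype):
+ #         t = torch.ones(2, 2, device=device, dtype=dtype)
+ #         self.assertEqual(t.sum(), 4)
+ #
+ # instantiate_device_type_tests(TestExample, globals())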
225
+
226
+ # Note [Adding a Device Type]
227
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
228
+ #
229
+ # To add a device type:
230
+ #
231
+ # (1) Create a new "TestBase" extending DeviceTypeTestBase.
232
+ # See CPUTestBase and CUDATestBase below.
233
+ # (2) Define the "device_type" attribute of the base to be the
234
+ # appropriate string.
235
+ # (3) Add logic to this file that appends your base class to
236
+ # device_type_test_bases when your device type is available.
237
+ # (4) (Optional) Write setUpClass/tearDownClass class methods that
238
+ # instantiate dependencies (see MAGMA in CUDATestBase).
239
+ # (5) (Optional) Override the "instantiate_test" method for total
240
+ # control over how your class creates tests.
241
+ #
242
+ # setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
243
+ # they are run. This makes it useful for initializing devices and dependencies.
244
+
245
+
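+ # Editor's note: a minimal sketch (not part of the original file) of steps (1)-(3) above
+ # for a hypothetical 'foo' backend; FooTestBase and the availability check are assumptions.
+ #
+ # class FooTestBase(DeviceTypeTestBase):
+ #     device_type = 'foo'
+ #
+ #     @classmethod
+ #     def setUpClass(cls):
+ #         # Optional: initialize the backend / probe dependencies here (step 4).
+ #         pass
+ #
+ # # Step (3) would then append FooTestBase to device_type_test_bases whenever
+ # # the 'foo' backend reports itself as available.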
246
+ # Note [Overriding methods in generic tests]
247
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
248
+ #
249
+ # Device generic tests look a lot like normal test classes, but they differ
250
+ # from ordinary classes in some important ways. In particular, overriding
251
+ # methods in generic tests doesn't work quite the way you expect.
252
+ #
253
+ # class TestFooDeviceType(TestCase):
254
+ # # Intention is to override
255
+ # def assertEqual(self, x, y):
256
+ # # This DOESN'T WORK!
257
+ # super().assertEqual(x, y)
258
+ #
259
+ # If you try to run this code, you'll get an error saying that TestFooDeviceType
260
+ # is not in scope. This is because after instantiating our classes, we delete
261
+ # it from the parent scope. Instead, you need to hardcode a direct invocation
262
+ # of the desired subclass call, e.g.,
263
+ #
264
+ # class TestFooDeviceType(TestCase):
265
+ # # Intention is to override
266
+ # def assertEqual(self, x, y):
267
+ # TestCase.assertEqual(self, x, y)
268
+ #
269
+ # However, a less error-prone way of customizing the behavior of TestCase
270
+ # is to either (1) add your functionality to TestCase and make it toggled
271
+ # by a class attribute, or (2) create your own subclass of TestCase, and
272
+ # then inherit from it for your generic test.
273
+
274
+
275
+ def _dtype_test_suffix(dtypes):
276
+ """ Returns the test suffix for a dtype, sequence of dtypes, or None. """
277
+ if isinstance(dtypes, (list, tuple)):
278
+ if len(dtypes) == 0:
279
+ return ''
280
+ return '_' + '_'.join(dtype_name(d) for d in dtypes)
281
+ elif dtypes:
282
+ return f'_{dtype_name(dtypes)}'
283
+ else:
284
+ return ''
285
+
286
+
287
+ def _update_param_kwargs(param_kwargs, name, value):
288
+ """ Adds a kwarg with the specified name and value to the param_kwargs dict. """
289
+ # Make name plural (e.g. devices / dtypes) if the value is composite.
290
+ plural_name = f'{name}s'
291
+
292
+ # Clear out old entries of the arg if any.
293
+ if name in param_kwargs:
294
+ del param_kwargs[name]
295
+ if plural_name in param_kwargs:
296
+ del param_kwargs[plural_name]
297
+
298
+ if isinstance(value, (list, tuple)):
299
+ param_kwargs[plural_name] = value
300
+ elif value is not None:
301
+ param_kwargs[name] = value
302
+
303
+ # Leave param_kwargs as-is when value is None.
304
+
305
+
306
+ class DeviceTypeTestBase(TestCase):
307
+ device_type: str = 'generic_device_type'
308
+
309
+ # Flag to disable test suite early due to unrecoverable error such as CUDA error.
310
+ _stop_test_suite = False
311
+
312
+ # Precision is a thread-local setting since it may be overridden per test
313
+ _tls = threading.local()
314
+ _tls.precision = TestCase._precision
315
+ _tls.rel_tol = TestCase._rel_tol
316
+
317
+ @property
318
+ def precision(self):
319
+ return self._tls.precision
320
+
321
+ @precision.setter
322
+ def precision(self, prec):
323
+ self._tls.precision = prec
324
+
325
+ @property
326
+ def rel_tol(self):
327
+ return self._tls.rel_tol
328
+
329
+ @rel_tol.setter
330
+ def rel_tol(self, prec):
331
+ self._tls.rel_tol = prec
332
+
333
+ # Returns a string representing the device that single device tests should use.
334
+ # Note: single device tests use this device exclusively.
335
+ @classmethod
336
+ def get_primary_device(cls):
337
+ return cls.device_type
338
+
339
+ @classmethod
340
+ def _init_and_get_primary_device(cls):
341
+ try:
342
+ return cls.get_primary_device()
343
+ except Exception:
344
+ # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available
345
+ # until setUpClass() sets it. Call that manually here if needed.
346
+ if hasattr(cls, 'setUpClass'):
347
+ cls.setUpClass()
348
+ return cls.get_primary_device()
349
+
350
+ # Returns a list of strings representing all available devices of this
351
+ # device type. The primary device must be the first string in the list
352
+ # and the list must contain no duplicates.
353
+ # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
354
+ # mechanism of acquiring all available devices.
355
+ @classmethod
356
+ def get_all_devices(cls):
357
+ return [cls.get_primary_device()]
358
+
359
+ # Returns the dtypes the test has requested.
360
+ # Prefers device-specific dtype specifications over generic ones.
361
+ @classmethod
362
+ def _get_dtypes(cls, test):
363
+ if not hasattr(test, 'dtypes'):
364
+ return None
365
+
366
+ default_dtypes = test.dtypes.get('all')
367
+ msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
368
+ assert default_dtypes is not None, msg
369
+
370
+ return test.dtypes.get(cls.device_type, default_dtypes)
371
+
372
+ def _get_precision_override(self, test, dtype):
373
+ if not hasattr(test, 'precision_overrides'):
374
+ return self.precision
375
+ return test.precision_overrides.get(dtype, self.precision)
376
+
377
+ def _get_tolerance_override(self, test, dtype):
378
+ if not hasattr(test, 'tolerance_overrides'):
379
+ return self.precision, self.rel_tol
380
+ return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))
381
+
382
+ def _apply_precision_override_for_test(self, test, param_kwargs):
383
+ dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None
384
+ dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype
385
+ if dtype:
386
+ self.precision = self._get_precision_override(test, dtype)
387
+ self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)
388
+
389
+ # Creates device-specific tests.
390
+ @classmethod
391
+ def instantiate_test(cls, name, test, *, generic_cls=None):
392
+
393
+ def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []):
394
+ # Add the device param kwarg if the test needs device or devices.
395
+ param_kwargs = {} if param_kwargs is None else param_kwargs
396
+ test_sig_params = inspect.signature(test).parameters
397
+ if 'device' in test_sig_params or 'devices' in test_sig_params:
398
+ device_arg: str = cls._init_and_get_primary_device()
399
+ if hasattr(test, 'num_required_devices'):
400
+ device_arg = cls.get_all_devices()
401
+ _update_param_kwargs(param_kwargs, 'device', device_arg)
402
+
403
+ # Apply decorators based on param kwargs.
404
+ for decorator in decorator_fn(param_kwargs):
405
+ test = decorator(test)
406
+
407
+ # Constructs the test
408
+ @wraps(test)
409
+ def instantiated_test(self, param_kwargs=param_kwargs):
410
+ # Sets precision and runs test
411
+ # Note: precision is reset after the test is run
412
+ guard_precision = self.precision
413
+ guard_rel_tol = self.rel_tol
414
+ try:
415
+ self._apply_precision_override_for_test(test, param_kwargs)
416
+ result = test(self, **param_kwargs)
417
+ except RuntimeError as rte:
418
+ # check if rte should stop entire test suite.
419
+ self._stop_test_suite = self._should_stop_test_suite()
420
+ # Check if test has been decorated with `@expectedFailure`
421
+ # Using `__unittest_expecting_failure__` attribute, see
422
+ # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
423
+ # In that case, make it fail with "unexpected success" by suppressing exception
424
+ if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite:
425
+ import sys
426
+ print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr)
427
+ return
428
+ # raise the runtime error as is for the test suite to record.
429
+ raise rte
430
+ finally:
431
+ self.precision = guard_precision
432
+ self.rel_tol = guard_rel_tol
433
+
434
+ return result
435
+
436
+ assert not hasattr(cls, name), f"Redefinition of test {name}"
437
+ setattr(cls, name, instantiated_test)
438
+
439
+ def default_parametrize_fn(test, generic_cls, device_cls):
440
+ # By default, no parametrization is needed.
441
+ yield (test, '', {}, lambda _: [])
442
+
443
+ # Parametrization decorators set the parametrize_fn attribute on the test.
444
+ parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)
445
+
446
+ # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
447
+ dtypes = cls._get_dtypes(test)
448
+ if dtypes is not None:
449
+
450
+ def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
451
+ for dtype in dtypes:
452
+ param_kwargs: Dict[str, Any] = {}
453
+ _update_param_kwargs(param_kwargs, "dtype", dtype)
454
+
455
+ # Note that an empty test suffix is set here so that the dtype can be appended
456
+ # later after the device.
457
+ yield (test, '', param_kwargs, lambda _: [])
458
+
459
+ parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)
460
+
461
+ # Instantiate the parametrized tests.
462
+ for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls): # noqa: B020
463
+ test_suffix = '' if test_suffix == '' else '_' + test_suffix
464
+ device_suffix = '_' + cls.device_type
465
+
466
+ # Note: device and dtype suffix placement
467
+ # Special handling here to place dtype(s) after device according to test name convention.
468
+ dtype_kwarg = None
469
+ if 'dtype' in param_kwargs or 'dtypes' in param_kwargs:
470
+ dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype']
471
+ test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}'
472
+
473
+ instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs,
474
+ decorator_fn=decorator_fn)
475
+
476
+ def run(self, result=None):
477
+ super().run(result=result)
478
+ # Early terminate test if _stop_test_suite is set.
479
+ if self._stop_test_suite:
480
+ result.stop()
481
+
482
+
483
+ class CPUTestBase(DeviceTypeTestBase):
484
+ device_type = 'cpu'
485
+
486
+ # No critical error should stop CPU test suite
487
+ def _should_stop_test_suite(self):
488
+ return False
489
+
490
+ class CUDATestBase(DeviceTypeTestBase):
491
+ device_type = 'cuda'
492
+ _do_cuda_memory_leak_check = True
493
+ _do_cuda_non_default_stream = True
494
+ primary_device: ClassVar[str]
495
+ cudnn_version: ClassVar[Any]
496
+ no_magma: ClassVar[bool]
497
+ no_cudnn: ClassVar[bool]
498
+
499
+ def has_cudnn(self):
500
+ return not self.no_cudnn
501
+
502
+ @classmethod
503
+ def get_primary_device(cls):
504
+ return cls.primary_device
505
+
506
+ @classmethod
507
+ def get_all_devices(cls):
508
+ primary_device_idx = int(cls.get_primary_device().split(':')[1])
509
+ num_devices = torch.cuda.device_count()
510
+
511
+ prim_device = cls.get_primary_device()
512
+ cuda_str = 'cuda:{0}'
513
+ non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
514
+ return [prim_device] + non_primary_devices
515
+
516
+ @classmethod
517
+ def setUpClass(cls):
518
+ # has_magma shows up after cuda is initialized
519
+ t = torch.ones(1).cuda()
520
+ cls.no_magma = not torch.cuda.has_magma
521
+
522
+ # Determines if cuDNN is available and its version
523
+ cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t)
524
+ cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()
525
+
526
+ # Acquires the current device as the primary (test) device
527
+ cls.primary_device = f'cuda:{torch.cuda.current_device()}'
528
+
529
+ # See Note [Lazy Tensor tests in device agnostic testing]
530
+ lazy_ts_backend_init = False
531
+ class LazyTestBase(DeviceTypeTestBase):
532
+ device_type = 'lazy'
533
+
534
+ def _should_stop_test_suite(self):
535
+ return False
536
+
537
+ @classmethod
538
+ def setUpClass(cls):
539
+ import torch._lazy
540
+ import torch._lazy.metrics
541
+ import torch._lazy.ts_backend
542
+ global lazy_ts_backend_init
543
+ if not lazy_ts_backend_init:
544
+ # Need to connect the TS backend to lazy key before running tests
545
+ torch._lazy.ts_backend.init()
546
+ lazy_ts_backend_init = True
547
+
548
+ class MPSTestBase(DeviceTypeTestBase):
549
+ device_type = 'mps'
550
+ primary_device: ClassVar[str]
551
+
552
+ @classmethod
553
+ def get_primary_device(cls):
554
+ return cls.primary_device
555
+
556
+ @classmethod
557
+ def get_all_devices(cls):
558
+ # currently only one device is supported on MPS backend
559
+ prim_device = cls.get_primary_device()
560
+ return [prim_device]
561
+
562
+ @classmethod
563
+ def setUpClass(cls):
564
+ cls.primary_device = 'mps:0'
565
+
566
+ def _should_stop_test_suite(self):
567
+ return False
568
+
569
+ class PrivateUse1TestBase(DeviceTypeTestBase):
570
+ primary_device: ClassVar[str]
571
+ device_mod = None
572
+ device_type = 'privateuse1'
573
+
574
+ @classmethod
575
+ def get_primary_device(cls):
576
+ return cls.primary_device
577
+
578
+ @classmethod
579
+ def get_all_devices(cls):
580
+ primary_device_idx = int(cls.get_primary_device().split(':')[1])
581
+ num_devices = cls.device_mod.device_count()
582
+ prim_device = cls.get_primary_device()
583
+ device_str = f'{cls.device_type}:{{0}}'
584
+ non_primary_devices = [device_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
585
+ return [prim_device] + non_primary_devices
586
+
587
+ @classmethod
588
+ def setUpClass(cls):
589
+ cls.device_type = torch._C._get_privateuse1_backend_name()
590
+ cls.device_mod = getattr(torch, cls.device_type, None)
591
+ assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register
592
+ a module by `torch._register_device_module`.'''
593
+ cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}'
594
+
595
+ # Adds available device-type-specific test base classes
596
+ def get_device_type_test_bases():
597
+ # set type to List[Any] due to mypy list-of-union issue:
598
+ # https://github.com/python/mypy/issues/3351
599
+ test_bases: List[Any] = list()
600
+
601
+ if IS_SANDCASTLE or IS_FBCODE:
602
+ if IS_REMOTE_GPU:
603
+ # Skip if sanitizer is enabled
604
+ if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN:
605
+ test_bases.append(CUDATestBase)
606
+ else:
607
+ test_bases.append(CPUTestBase)
608
+ else:
609
+ test_bases.append(CPUTestBase)
610
+ if torch.cuda.is_available():
611
+ test_bases.append(CUDATestBase)
612
+ device_type = torch._C._get_privateuse1_backend_name()
613
+ device_mod = getattr(torch, device_type, None)
614
+ if hasattr(device_mod, "is_available") and device_mod.is_available():
615
+ test_bases.append(PrivateUse1TestBase)
616
+ # Disable MPS testing in generic device testing temporarily while we're
617
+ # ramping up support.
618
+ # elif torch.backends.mps.is_available():
619
+ # test_bases.append(MPSTestBase)
620
+
621
+ return test_bases
622
+
623
+ device_type_test_bases = get_device_type_test_bases()
624
+
625
+
626
+ def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
627
+ # device type cannot appear in both except_for and only_for
628
+ intersect = set(except_for if except_for else []) & set(only_for if only_for else [])
629
+ assert not intersect, f"device ({intersect}) appeared in both except_for and only_for"
630
+
631
+ if except_for:
632
+ device_type_test_bases = filter(
633
+ lambda x: x.device_type not in except_for, device_type_test_bases)
634
+ if only_for:
635
+ device_type_test_bases = filter(
636
+ lambda x: x.device_type in only_for, device_type_test_bases)
637
+
638
+ return list(device_type_test_bases)
639
+
640
+
641
+ # Note [How to extend DeviceTypeTestBase to add new test device]
642
+ # The following logic optionally allows downstream projects like pytorch/xla to
643
+ # add more test devices.
644
+ # Instructions:
645
+ # - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
646
+ # - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
647
+ # a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
648
+ # `instantiate_test` method.
649
+ # - DO NOT import common_device_type inside the file.
650
+ # `runpy.run_path` with `globals()` already properly sets up the context so that
651
+ # `DeviceTypeTestBase` is already available.
652
+ # - Set a top-level variable `TEST_CLASS` equal to your new class.
653
+ # E.g. TEST_CLASS = XLATestBase
654
+ # - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path
655
+ # to this file. Multiple paths can be separated by `:`.
656
+ # See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
657
+ _TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
658
+ if _TORCH_TEST_DEVICES:
659
+ for path in _TORCH_TEST_DEVICES.split(':'):
660
+ # runpy (a stdlib module) lacks annotations
661
+ mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value]
662
+ device_type_test_bases.append(mod['TEST_CLASS'])
663
+
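+ # Editor's note: an illustrative sketch (not part of the original file) of what a
+ # downstream TORCH_TEST_DEVICES file might contain, following the instructions above.
+ # The class name and file contents are assumptions; see pytorch/xla for a real example.
+ #
+ # # contents of my_backend_test_base.py (note: do NOT import common_device_type here;
+ # # DeviceTypeTestBase is injected via init_globals by runpy.run_path)
+ # class MyBackendTestBase(DeviceTypeTestBase):
+ #     device_type = 'mybackend'
+ #
+ # TEST_CLASS = MyBackendTestBase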
664
+
665
+ PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'
666
+
667
+ PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR'
668
+ PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR'
669
+
670
+
671
+ # Adds 'instantiated' device-specific test cases to the given scope.
672
+ # The tests in these test cases are derived from the generic tests in
673
+ # generic_test_class. This function should be used instead of
674
+ # instantiate_parametrized_tests() if the test class contains
675
+ # device-specific tests (NB: this supports additional @parametrize usage).
676
+ #
677
+ # See note "Writing Test Templates"
678
+ def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False):
679
+ # Removes the generic test class from its enclosing scope so its tests
680
+ # are not discoverable.
681
+ del scope[generic_test_class.__name__]
682
+
683
+ # Creates an 'empty' version of the generic_test_class
684
+ # Note: we don't inherit from the generic_test_class directly because
685
+ # that would add its tests to our test classes and they would be
686
+ # discovered (despite not being runnable). Inherited methods also
687
+ # can't be removed later, and we can't rely on load_tests because
688
+ # pytest doesn't support it (as of this writing).
689
+ empty_name = generic_test_class.__name__ + "_base"
690
+ empty_class = type(empty_name, generic_test_class.__bases__, {})
691
+
692
+ # Acquires member names
693
+ # See Note [Overriding methods in generic tests]
694
+ generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
695
+ generic_tests = [x for x in generic_members if x.startswith('test')]
696
+
697
+ # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy`
698
+ test_bases = device_type_test_bases.copy()
699
+ if allow_mps and TEST_MPS and MPSTestBase not in test_bases:
700
+ test_bases.append(MPSTestBase)
701
+ # Filter out the device types based on user inputs
702
+ desired_device_type_test_bases = filter_desired_device_types(test_bases, except_for, only_for)
703
+ if include_lazy:
704
+ # Note [Lazy Tensor tests in device agnostic testing]
705
+ # Right now, test_view_ops.py runs with LazyTensor.
706
+ # We don't want to opt every device-agnostic test into using the lazy device,
707
+ # because many of them will fail.
708
+ # So instead, the only way to opt a specific device-agnostic test file into
709
+ # lazy tensor testing is with include_lazy=True
710
+ if IS_FBCODE:
711
+ print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr)
712
+ else:
713
+ desired_device_type_test_bases.append(LazyTestBase)
714
+
715
+ def split_if_not_empty(x: str):
716
+ return x.split(",") if len(x) != 0 else []
717
+
718
+ # Filter out the device types based on environment variables if available
719
+ # Usage:
720
+ # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
721
+ # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
722
+ env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, ''))
723
+ env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, ''))
724
+
725
+ desired_device_type_test_bases = filter_desired_device_types(desired_device_type_test_bases,
726
+ env_except_for, env_only_for)
727
+
728
+
729
+ # Creates device-specific test cases
730
+ for base in desired_device_type_test_bases:
731
+ class_name = generic_test_class.__name__ + base.device_type.upper()
732
+
733
+ # type set to Any and suppressed due to unsupported runtime class creation:
734
+ # https://github.com/python/mypy/wiki/Unsupported-Python-Features
735
+ device_type_test_class: Any = type(class_name, (base, empty_class), {})
736
+
737
+ for name in generic_members:
738
+ if name in generic_tests: # Instantiates test member
739
+ test = getattr(generic_test_class, name)
740
+ # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
741
+ sig = inspect.signature(device_type_test_class.instantiate_test)
742
+ if len(sig.parameters) == 3:
743
+ # Instantiates the device-specific tests
744
+ device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
745
+ else:
746
+ device_type_test_class.instantiate_test(name, copy.deepcopy(test))
747
+ else: # Ports non-test member
748
+ assert name not in device_type_test_class.__dict__, f"Redefinition of directly defined member {name}"
749
+ nontest = getattr(generic_test_class, name)
750
+ setattr(device_type_test_class, name, nontest)
751
+
752
+ # Mimics defining the instantiated class in the caller's file
753
+ # by setting its module to the given class's and adding
754
+ # the module to the given scope.
755
+ # This lets the instantiated class be discovered by unittest.
756
+ device_type_test_class.__module__ = generic_test_class.__module__
757
+ scope[class_name] = device_type_test_class
758
+
759
+
760
+ # Category of dtypes to run an OpInfo-based test for
761
+ # Example use: @ops(dtype=OpDTypes.supported)
762
+ #
763
+ # There are six categories:
764
+ # - supported: Every dtype supported by the operator. Use for exhaustive
765
+ # testing of all dtypes.
766
+ # - unsupported: Run tests on dtypes not supported by the operator. e.g. for
767
+ # testing the operator raises an error and doesn't crash.
768
+ # - supported_backward: Every dtype supported by the operator's backward pass.
769
+ # - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
770
+ # - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
771
+ # operator supports in both forward and backward.
772
+ # - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
773
+ # when this is selected.
774
+ class OpDTypes(Enum):
775
+ supported = 0 # Test all supported dtypes (default)
776
+ unsupported = 1 # Test only unsupported dtypes
777
+ supported_backward = 2 # Test all supported backward dtypes
778
+ unsupported_backward = 3 # Test only unsupported backward dtypes
779
+ any_one = 4 # Test precisely one supported dtype
780
+ none = 5 # Instantiate no dtype variants (no dtype kwarg needed)
781
+ any_common_cpu_cuda_one = 6 # Test precisely one supported dtype that is common to both cuda and cpu
782
+
783
+
784
+ # Arbitrary order
785
+ ANY_DTYPE_ORDER = (
786
+ torch.float32,
787
+ torch.float64,
788
+ torch.complex64,
789
+ torch.complex128,
790
+ torch.float16,
791
+ torch.bfloat16,
792
+ torch.long,
793
+ torch.int32,
794
+ torch.int16,
795
+ torch.int8,
796
+ torch.uint8,
797
+ torch.bool
798
+ )
799
+
800
+ def _serialize_sample(sample_input):
801
+ # NB: For OpInfos, SampleInput.summary() prints in a cleaner way.
802
+ if getattr(sample_input, "summary", None) is not None:
803
+ return sample_input.summary()
804
+ return str(sample_input)
805
+
806
+ # Decorator that defines the OpInfos a test template should be instantiated for.
807
+ #
808
+ # Example usage:
809
+ #
810
+ # @ops(unary_ufuncs)
811
+ # def test_numerics(self, device, dtype, op):
812
+ # <test_code>
813
+ #
814
+ # This will instantiate variants of test_numerics for each given OpInfo,
815
+ # on each device the OpInfo's operator supports, and for every dtype supported by
816
+ # that operator. There are a few caveats to the dtype rule, explained below.
817
+ #
818
+ # The @ops decorator can accept two
819
+ # additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
820
+ # then the test variants are instantiated for those dtypes, regardless of
821
+ # what the operator supports. If given "allowed_dtypes" then test variants
822
+ # are instantiated only for the intersection of allowed_dtypes and the dtypes
823
+ # they would otherwise be instantiated with. That is, allowed_dtypes composes
824
+ # with the options listed above and below.
825
+ #
826
+ # The "dtypes" argument can also accept additional values (see OpDTypes above):
827
+ # OpDTypes.supported - the test is instantiated for all dtypes the operator
828
+ # supports
829
+ # OpDTypes.unsupported - the test is instantiated for all dtypes the operator
830
+ # doesn't support
831
+ # OpDTypes.supported_backward - the test is instantiated for all dtypes the
832
+ # operator's gradient formula supports
833
+ # OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
834
+ # operator's gradient formula doesn't support
835
+ # OpDTypes.any_one - the test is instantiated for one dtype the
836
+ # operator supports. The dtype supports forward and backward if possible.
837
+ # OpDTypes.none - the test is instantiated without any dtype. The test signature
838
+ # should not include a dtype kwarg in this case.
839
+ #
840
+ # These options allow tests to have considerable control over the dtypes
841
+ # they're instantiated for.
842
+
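+ # Editor's note: illustrative sketches (not part of the original file) of the "dtypes"
+ # and "allowed_dtypes" arguments described above; op_db and the test bodies are placeholders.
+ #
+ # @ops(op_db, allowed_dtypes=(torch.float32,))
+ # def test_float32_only(self, device, dtype, op):
+ #     ...
+ #
+ # @ops(op_db, dtypes=OpDTypes.none)
+ # def test_no_dtype(self, device, op):   # no dtype kwarg when OpDTypes.none is used
+ #     ...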
843
+ class ops(_TestParametrizer):
844
+ def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
845
+ allowed_dtypes: Optional[Sequence[torch.dtype]] = None):
846
+ self.op_list = list(op_list)
847
+ self.opinfo_dtypes = dtypes
848
+ self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
849
+
850
+ def _parametrize_test(self, test, generic_cls, device_cls):
851
+ """ Parameterizes the given test function across each op and its associated dtypes. """
852
+ if device_cls is None:
853
+ raise RuntimeError('The @ops decorator is only intended to be used in a device-specific '
854
+ 'context; use it with instantiate_device_type_tests() instead of '
855
+ 'instantiate_parametrized_tests()')
856
+
857
+ op = check_exhausted_iterator = object()
858
+ for op in self.op_list:
859
+ # Determine the set of dtypes to use.
860
+ dtypes: Union[Set[torch.dtype], Set[None]]
861
+ if isinstance(self.opinfo_dtypes, Sequence):
862
+ dtypes = set(self.opinfo_dtypes)
863
+ elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
864
+ dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type))
865
+ elif self.opinfo_dtypes == OpDTypes.supported_backward:
866
+ dtypes = op.supported_backward_dtypes(device_cls.device_type)
867
+ elif self.opinfo_dtypes == OpDTypes.unsupported:
868
+ dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
869
+ elif self.opinfo_dtypes == OpDTypes.supported:
870
+ dtypes = op.supported_dtypes(device_cls.device_type)
871
+ elif self.opinfo_dtypes == OpDTypes.any_one:
872
+ # Tries to pick a dtype that supports both forward and backward
873
+ supported = op.supported_dtypes(device_cls.device_type)
874
+ supported_backward = op.supported_backward_dtypes(device_cls.device_type)
875
+ supported_both = supported.intersection(supported_backward)
876
+ dtype_set = supported_both if len(supported_both) > 0 else supported
877
+ for dtype in ANY_DTYPE_ORDER:
878
+ if dtype in dtype_set:
879
+ dtypes = {dtype}
880
+ break
881
+ else:
882
+ dtypes = {}
883
+ elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
884
+ # Tries to pick a dtype that supports both CPU and CUDA
885
+ supported = op.dtypes.intersection(op.dtypesIfCUDA)
886
+ if supported:
887
+ dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)}
888
+ else:
889
+ dtypes = {}
890
+
891
+ elif self.opinfo_dtypes == OpDTypes.none:
892
+ dtypes = {None}
893
+ else:
894
+ raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")
895
+
896
+ if self.allowed_dtypes is not None:
897
+ dtypes = dtypes.intersection(self.allowed_dtypes)
898
+
899
+ # Construct the test name; device / dtype parts are handled outside.
900
+ # See [Note: device and dtype suffix placement]
901
+ test_name = op.formatted_name
902
+
903
+ for dtype in dtypes:
904
+ # Construct parameter kwargs to pass to the test.
905
+ param_kwargs = {'op': op}
906
+ _update_param_kwargs(param_kwargs, 'dtype', dtype)
907
+
908
+ # NOTE: test_wrapper exists because we don't want to apply
909
+ # op-specific decorators to the original test.
910
+ # Test-specific decorators are applied to the original test,
911
+ # however.
912
+ try:
913
+ @wraps(test)
914
+ def test_wrapper(*args, **kwargs):
915
+ try:
916
+ return test(*args, **kwargs)
917
+ except unittest.SkipTest as e:
918
+ raise e
919
+ except Exception as e:
920
+ tracked_input = get_tracked_input()
921
+ if PRINT_REPRO_ON_FAILURE and tracked_input is not None:
922
+ raise Exception(
923
+ f"Caused by {tracked_input.type_desc} "
924
+ f"at index {tracked_input.index}: "
925
+ f"{_serialize_sample(tracked_input.val)}") from e
926
+ raise e
927
+ finally:
928
+ clear_tracked_input()
929
+
930
+ # Initialize info for the last input seen. This is useful for tracking
931
+ # down which inputs caused a test failure. Note that TrackedInputIter is
932
+ # responsible for managing this.
933
+ test.tracked_input = None
934
+
935
+ decorator_fn = partial(op.get_decorators, generic_cls.__name__,
936
+ test.__name__, device_cls.device_type, dtype)
937
+
938
+ yield (test_wrapper, test_name, param_kwargs, decorator_fn)
939
+ except Exception as ex:
940
+ # Provides an error message for debugging before rethrowing the exception
941
+ print(f"Failed to instantiate {test_name} for op {op.name}!")
942
+ raise ex
943
+ if op is check_exhausted_iterator:
944
+ raise ValueError('An empty op_list was passed to @ops. '
945
+ 'Note that this may result from reuse of a generator.')
946
+
947
+ # Decorator that skips a test if the given condition is true.
948
+ # Notes:
949
+ # (1) Skip conditions stack.
950
+ # (2) Skip conditions can be bools or strings. If a string the
951
+ # test base must have defined the corresponding attribute to be False
952
+ # for the test to run. If you want to use a string argument you should
953
+ # probably define a new decorator instead (see below).
954
+ # (3) Prefer the existing decorators to defining the 'device_type' kwarg.
955
+ class skipIf:
956
+
957
+ def __init__(self, dep, reason, device_type=None):
958
+ self.dep = dep
959
+ self.reason = reason
960
+ self.device_type = device_type
961
+
962
+ def __call__(self, fn):
963
+
964
+ @wraps(fn)
965
+ def dep_fn(slf, *args, **kwargs):
966
+ if self.device_type is None or self.device_type == slf.device_type:
967
+ if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep):
968
+ raise unittest.SkipTest(self.reason)
969
+
970
+ return fn(slf, *args, **kwargs)
971
+ return dep_fn
972
+
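+ # Editor's note: an illustrative sketch (not part of the original file) of the two
+ # condition forms described in the notes above: a bool dep evaluated eagerly, and a
+ # string dep looked up as an attribute on the test base at run time.
+ #
+ # @skipIf(IS_WINDOWS, "not supported on Windows")                        # bool condition
+ # @skipIf('no_magma', "no MAGMA library detected", device_type='cuda')   # attribute name
+ # def test_example(self, device):
+ #     ...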
973
+
974
+ # Skips a test on CPU if the condition is true.
975
+ class skipCPUIf(skipIf):
976
+
977
+ def __init__(self, dep, reason):
978
+ super().__init__(dep, reason, device_type='cpu')
979
+
980
+
981
+ # Skips a test on CUDA if the condition is true.
982
+ class skipCUDAIf(skipIf):
983
+
984
+ def __init__(self, dep, reason):
985
+ super().__init__(dep, reason, device_type='cuda')
986
+
987
+ # Skips a test on Lazy if the condition is true.
988
+ class skipLazyIf(skipIf):
989
+
990
+ def __init__(self, dep, reason):
991
+ super().__init__(dep, reason, device_type='lazy')
992
+
993
+ # Skips a test on Meta if the condition is true.
994
+ class skipMetaIf(skipIf):
995
+
996
+ def __init__(self, dep, reason):
997
+ super().__init__(dep, reason, device_type='meta')
998
+
999
+ # Skips a test on MPS if the condition is true.
1000
+ class skipMPSIf(skipIf):
1001
+
1002
+ def __init__(self, dep, reason):
1003
+ super().__init__(dep, reason, device_type='mps')
1004
+
1005
+ # Skips a test on XLA if the condition is true.
1006
+ class skipXLAIf(skipIf):
1007
+
1008
+ def __init__(self, dep, reason):
1009
+ super().__init__(dep, reason, device_type='xla')
1010
+
1011
+ class skipPRIVATEUSE1If(skipIf):
1012
+
1013
+ def __init__(self, dep, reason):
1014
+ device_type = torch._C._get_privateuse1_backend_name()
1015
+ super().__init__(dep, reason, device_type=device_type)
1016
+
1017
+ def _has_sufficient_memory(device, size):
1018
+ if torch.device(device).type == 'cuda':
1019
+ if not torch.cuda.is_available():
1020
+ return False
1021
+ gc.collect()
1022
+ torch.cuda.empty_cache()
1023
+ # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
1024
+ if device == 'cuda':
1025
+ device = 'cuda:0'
1026
+ return torch.cuda.memory.mem_get_info(device)[0] >= size
1027
+
1028
+ if device == 'xla':
1029
+ raise unittest.SkipTest('TODO: Memory availability checks for XLA?')
1030
+
1031
+ if device != 'cpu':
1032
+ raise unittest.SkipTest('Unknown device type')
1033
+
1034
+ # CPU
1035
+ if not HAS_PSUTIL:
1036
+ raise unittest.SkipTest('Need psutil to determine if memory is sufficient')
1037
+
1038
+ # The sanitizers have significant memory overheads
1039
+ if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
1040
+ effective_size = size * 10
1041
+ else:
1042
+ effective_size = size
1043
+
1044
+ if psutil.virtual_memory().available < effective_size:
1045
+ gc.collect()
1046
+ return psutil.virtual_memory().available >= effective_size
1047
+
1048
+
1049
+ def largeTensorTest(size, device=None):
1050
+ """Skip test if the device has insufficient memory to run the test
1051
+
1052
+ size may be a number of bytes, a string of the form "N GB", or a callable
1053
+
1054
+ If the test is a device generic test, available memory on the primary device will be checked.
1055
+ It can also be overridden by the optional `device=` argument.
1056
+ In other tests, the `device=` argument needs to be specified.
1057
+ """
1058
+ if isinstance(size, str):
1059
+ assert size.endswith(('GB', 'gb')), "only bytes or GB supported"
1060
+ size = 1024 ** 3 * int(size[:-2])
1061
+
1062
+ def inner(fn):
1063
+ @wraps(fn)
1064
+ def dep_fn(self, *args, **kwargs):
1065
+ size_bytes = size(self, *args, **kwargs) if callable(size) else size
1066
+ _device = device if device is not None else self.get_primary_device()
1067
+ if not _has_sufficient_memory(_device, size_bytes):
1068
+ raise unittest.SkipTest(f'Insufficient {_device} memory')
1069
+
1070
+ return fn(self, *args, **kwargs)
1071
+ return dep_fn
1072
+ return inner
1073
+
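+ # Editor's note: an illustrative sketch (not part of the original file) of the size
+ # forms accepted above; the test names and sizes are placeholders.
+ #
+ # @largeTensorTest('12GB')                     # string form, parsed as 12 * 1024 ** 3 bytes
+ # def test_big(self, device):
+ #     ...
+ #
+ # @largeTensorTest(2 ** 33, device='cpu')      # raw byte count with an explicit device
+ # def test_big_cpu(self):
+ #     ...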
1074
+
1075
+ class expectedFailure:
1076
+
1077
+ def __init__(self, device_type):
1078
+ self.device_type = device_type
1079
+
1080
+ def __call__(self, fn):
1081
+
1082
+ @wraps(fn)
1083
+ def efail_fn(slf, *args, **kwargs):
1084
+ if self.device_type is None or self.device_type == slf.device_type:
1085
+ try:
1086
+ fn(slf, *args, **kwargs)
1087
+ except Exception:
1088
+ return
1089
+ else:
1090
+ slf.fail('expected test to fail, but it passed')
1091
+
1092
+ return fn(slf, *args, **kwargs)
1093
+ return efail_fn
1094
+
1095
+
1096
+ class onlyOn:
1097
+
1098
+ def __init__(self, device_type):
1099
+ self.device_type = device_type
1100
+
1101
+ def __call__(self, fn):
1102
+
1103
+ @wraps(fn)
1104
+ def only_fn(slf, *args, **kwargs):
1105
+ if self.device_type != slf.device_type:
1106
+ reason = f"Only runs on {self.device_type}"
1107
+ raise unittest.SkipTest(reason)
1108
+
1109
+ return fn(slf, *args, **kwargs)
1110
+
1111
+ return only_fn
1112
+
1113
+
1114
+ # Decorator that provides all available devices of the device type to the test
1115
+ # as a list of strings instead of providing a single device string.
1116
+ # Skips the test if the number of available devices of the variant's device
1117
+ # type is less than the 'num_required_devices' arg.
1118
+ class deviceCountAtLeast:
1119
+
1120
+ def __init__(self, num_required_devices):
1121
+ self.num_required_devices = num_required_devices
1122
+
1123
+ def __call__(self, fn):
1124
+ assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}"
1125
+ fn.num_required_devices = self.num_required_devices
1126
+
1127
+ @wraps(fn)
1128
+ def multi_fn(slf, devices, *args, **kwargs):
1129
+ if len(devices) < self.num_required_devices:
1130
+ reason = f"fewer than {self.num_required_devices} devices detected"
1131
+ raise unittest.SkipTest(reason)
1132
+
1133
+ return fn(slf, devices, *args, **kwargs)
1134
+
1135
+ return multi_fn
1136
+
1137
+ # Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
1138
+ def onlyNativeDeviceTypes(fn):
1139
+ @wraps(fn)
1140
+ def only_fn(self, *args, **kwargs):
1141
+ if self.device_type not in NATIVE_DEVICES:
1142
+ reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}"
1143
+ raise unittest.SkipTest(reason)
1144
+
1145
+ return fn(self, *args, **kwargs)
1146
+
1147
+ return only_fn
1148
+
1149
+ # Specifies per-dtype precision overrides.
1150
+ # Ex.
1151
+ #
1152
+ # @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
1153
+ # @dtypes(torch.half, torch.float, torch.double)
1154
+ # def test_X(self, device, dtype):
1155
+ # ...
1156
+ #
1157
+ # When the test is instantiated its class's precision will be set to the
1158
+ # corresponding override, if it exists.
1159
+ # self.precision can be accessed directly, and it also controls the behavior of
1160
+ # functions like self.assertEqual().
1161
+ #
1162
+ # Note that self.precision is a scalar value, so if you require multiple
1163
+ # precisions (or are working with multiple dtypes) they should be specified
1164
+ # explicitly and computed using self.precision (e.g.
1165
+ # self.precision *2, max(1, self.precision)).
1166
+ class precisionOverride:
1167
+
1168
+ def __init__(self, d):
1169
+ assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
1170
+ for dtype in d.keys():
1171
+ assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}"
1172
+
1173
+ self.d = d
1174
+
1175
+ def __call__(self, fn):
1176
+ fn.precision_overrides = self.d
1177
+ return fn
1178
+
1179
+ # Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
1180
+ # precisionOverride.
1181
+ # Ex.
1182
+ #
1183
+ # @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3},
1184
+ # torch.double : tol(atol=1e-4, rtol=0)})
1185
+ # @dtypes(torch.half, torch.float, torch.double)
1186
+ # def test_X(self, device, dtype):
1187
+ # ...
1188
+ #
1189
+ # When the test is instantiated its class's tolerance will be set to the
1190
+ # corresponding override, if it exists.
1191
+ # self.rtol and self.precision can be accessed directly, and they also control
1192
+ # the behavior of functions like self.assertEqual().
1193
+ #
1194
+ # The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
1195
+ # atol = 1e-4 and rtol = 0 for torch.double.
1196
+ tol = namedtuple('tol', ['atol', 'rtol'])
1197
+
1198
+ class toleranceOverride:
1199
+ def __init__(self, d):
1200
+ assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
1201
+ for dtype, prec in d.items():
1202
+ assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}"
1203
+ assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"
1204
+
1205
+ self.d = d
1206
+
1207
+ def __call__(self, fn):
1208
+ fn.tolerance_overrides = self.d
1209
+ return fn
1210
+
1211
+ # Decorator that instantiates a variant of the test for each given dtype.
1212
+ # Notes:
1213
+ # (1) Tests that accept the dtype argument MUST use this decorator.
1214
+ # (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
1215
+ # or dtypesIfCUDA.
1216
+ # (3) Can accept an iterable of dtypes or an iterable of tuples
1217
+ # of dtypes.
1218
+ # Examples:
1219
+ # @dtypes(torch.float32, torch.float64)
1220
+ # @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
1221
+ class dtypes:
1222
+
1223
+ def __init__(self, *args, device_type="all"):
1224
+ if len(args) > 0 and isinstance(args[0], (list, tuple)):
1225
+ for arg in args:
1226
+ assert isinstance(arg, (list, tuple)), \
1227
+ "When one dtype variant is a tuple or list, " \
1228
+ "all dtype variants must be. " \
1229
+ f"Received non-list non-tuple dtype {str(arg)}"
1230
+ assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}"
1231
+ else:
1232
+ assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}"
1233
+
1234
+ self.args = args
1235
+ self.device_type = device_type
1236
+
1237
+ def __call__(self, fn):
1238
+ d = getattr(fn, 'dtypes', {})
1239
+ assert self.device_type not in d, f"dtypes redefinition for {self.device_type}"
1240
+ d[self.device_type] = self.args
1241
+ fn.dtypes = d
1242
+ return fn
1243
+
1244
+
1245
+ # Overrides specified dtypes on the CPU.
1246
+ class dtypesIfCPU(dtypes):
1247
+
1248
+ def __init__(self, *args):
1249
+ super().__init__(*args, device_type='cpu')
1250
+
1251
+
1252
+ # Overrides specified dtypes on CUDA.
1253
+ class dtypesIfCUDA(dtypes):
1254
+
1255
+ def __init__(self, *args):
1256
+ super().__init__(*args, device_type='cuda')
1257
+
1258
+ class dtypesIfMPS(dtypes):
1259
+
1260
+ def __init__(self, *args):
1261
+ super().__init__(*args, device_type='mps')
1262
+
1263
+ class dtypesIfPRIVATEUSE1(dtypes):
1264
+
1265
+ def __init__(self, *args):
1266
+ super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name())
1267
+
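For reference, a minimal usage sketch of the dtype and precision decorators defined above (the test class, method, and tolerance below are illustrative assumptions, not part of this file): a device-generic test runs in float32/float64 everywhere, adds float16 on CUDA, and loosens the comparison precision for float16.

    import torch
    from torch.testing._internal.common_device_type import (
        dtypes, dtypesIfCUDA, instantiate_device_type_tests, precisionOverride,
    )
    from torch.testing._internal.common_utils import TestCase, run_tests

    class TestArithmeticExample(TestCase):
        @precisionOverride({torch.float16: 1e-2})
        @dtypesIfCUDA(torch.float16, torch.float32, torch.float64)
        @dtypes(torch.float32, torch.float64)
        def test_add(self, device, dtype):
            a = torch.ones(4, device=device, dtype=dtype)
            self.assertEqual(a + a, 2 * a)

    # Generates per-device test classes, one test variant per selected dtype.
    instantiate_device_type_tests(TestArithmeticExample, globals())

    if __name__ == "__main__":
        run_tests()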
1268
+ def onlyCPU(fn):
1269
+ return onlyOn('cpu')(fn)
1270
+
1271
+
1272
+ def onlyCUDA(fn):
1273
+ return onlyOn('cuda')(fn)
1274
+
1275
+
1276
+ def onlyMPS(fn):
1277
+ return onlyOn('mps')(fn)
1278
+
1279
+ def onlyPRIVATEUSE1(fn):
1280
+ device_type = torch._C._get_privateuse1_backend_name()
1281
+ device_mod = getattr(torch, device_type, None)
1282
+ if device_mod is None:
1283
+ reason = f"Skip as torch has no module of {device_type}"
1284
+ return unittest.skip(reason)(fn)
1285
+ return onlyOn(device_type)(fn)
1286
+
1287
+ def onlyCUDAAndPRIVATEUSE1(fn):
1288
+ @wraps(fn)
1289
+ def only_fn(self, *args, **kwargs):
1290
+ if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()):
1291
+ reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}"
1292
+ raise unittest.SkipTest(reason)
1293
+
1294
+ return fn(self, *args, **kwargs)
1295
+
1296
+ return only_fn
1297
+
1298
+ def disablecuDNN(fn):
1299
+
1300
+ @wraps(fn)
1301
+ def disable_cudnn(self, *args, **kwargs):
1302
+ if self.device_type == 'cuda' and self.has_cudnn():
1303
+ with torch.backends.cudnn.flags(enabled=False):
1304
+ return fn(self, *args, **kwargs)
1305
+ return fn(self, *args, **kwargs)
1306
+
1307
+ return disable_cudnn
1308
+
1309
+ def disableMkldnn(fn):
1310
+
1311
+ @wraps(fn)
1312
+ def disable_mkldnn(self, *args, **kwargs):
1313
+ if torch.backends.mkldnn.is_available():
1314
+ with torch.backends.mkldnn.flags(enabled=False):
1315
+ return fn(self, *args, **kwargs)
1316
+ return fn(self, *args, **kwargs)
1317
+
1318
+ return disable_mkldnn
1319
+
1320
+
1321
+ def expectedFailureCUDA(fn):
1322
+ return expectedFailure('cuda')(fn)
1323
+
1324
+ def expectedFailureMeta(fn):
1325
+ return skipIfTorchDynamo()(expectedFailure('meta')(fn))
1326
+
1327
+ def expectedFailureXLA(fn):
1328
+ return expectedFailure('xla')(fn)
1329
+
1330
+ # Skips a test on CPU if LAPACK is not available.
1331
+ def skipCPUIfNoLapack(fn):
1332
+ return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)
1333
+
1334
+
1335
+ # Skips a test on CPU if FFT is not available.
1336
+ def skipCPUIfNoFFT(fn):
1337
+ return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn)
1338
+
1339
+
1340
+ # Skips a test on CPU if MKL is not available.
1341
+ def skipCPUIfNoMkl(fn):
1342
+ return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)
1343
+
1344
+
1345
+ # Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows).
1346
+ def skipCPUIfNoMklSparse(fn):
1347
+ return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn)
1348
+
1349
+
1350
+ # Skips a test on CPU if mkldnn is not available.
1351
+ def skipCPUIfNoMkldnn(fn):
1352
+ return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn)
1353
+
1354
+
1355
+ # Skips a test on CUDA if MAGMA is not available.
1356
+ def skipCUDAIfNoMagma(fn):
1357
+ return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn))
1358
+
1359
+ def has_cusolver():
1360
+ return not TEST_WITH_ROCM
1361
+
1362
+ def has_hipsolver():
1363
+ rocm_version = _get_torch_rocm_version()
1364
+ # hipSOLVER is disabled on ROCM < 5.3
1365
+ return rocm_version >= (5, 3)
1366
+
1367
+ # Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available
1368
+ def skipCUDAIfNoCusolver(fn):
1369
+ return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn)
1370
+
1371
+
1372
+ # Skips a test if both cuSOLVER and MAGMA are not available
1373
+ def skipCUDAIfNoMagmaAndNoCusolver(fn):
1374
+ if has_cusolver():
1375
+ return fn
1376
+ else:
1377
+ # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
1378
+ return skipCUDAIfNoMagma(fn)
1379
+
1380
+ # Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available
1381
+ def skipCUDAIfNoMagmaAndNoLinalgsolver(fn):
1382
+ if has_cusolver() or has_hipsolver():
1383
+ return fn
1384
+ else:
1385
+ # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
1386
+ return skipCUDAIfNoMagma(fn)
1387
+
1388
+ # Skips a test on CUDA when using ROCm.
1389
+ def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"):
1390
+ def dec_fn(fn):
1391
+ reason = f"skipCUDAIfRocm: {msg}"
1392
+ return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn)
1393
+ if func:
1394
+ return dec_fn(func)
1395
+ return dec_fn
1396
+
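Because skipCUDAIfRocm takes an optional func, it can be applied either bare or with a custom message; a small sketch (the class, test names, and message are placeholders):

    from torch.testing._internal.common_device_type import skipCUDAIfRocm

    class _RocmSkipExamples:
        @skipCUDAIfRocm                       # bare form, default message
        def test_plain(self, device):
            pass

        @skipCUDAIfRocm(msg="hypothetical: flaky reduction on ROCm")
        def test_with_message(self, device):  # parenthesized form, custom message
            pass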
1397
+ # Skips a test on CUDA when not using ROCm.
1398
+ def skipCUDAIfNotRocm(fn):
1399
+ return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn)
1400
+
1401
+ # Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
1402
+ def skipCUDAIfRocmVersionLessThan(version=None):
1403
+
1404
+ def dec_fn(fn):
1405
+ @wraps(fn)
1406
+ def wrap_fn(self, *args, **kwargs):
1407
+ if self.device_type == 'cuda':
1408
+ if not TEST_WITH_ROCM:
1409
+ reason = "ROCm not available"
1410
+ raise unittest.SkipTest(reason)
1411
+ rocm_version_tuple = _get_torch_rocm_version()
1412
+ if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
1413
+ reason = f"ROCm {rocm_version_tuple} is available but {version} required"
1414
+ raise unittest.SkipTest(reason)
1415
+
1416
+ return fn(self, *args, **kwargs)
1417
+
1418
+ return wrap_fn
1419
+ return dec_fn
1420
+
1421
+ # Skips a test on CUDA if MIOpen NHWC support is not enabled.
1422
+ def skipCUDAIfNotMiopenSuggestNHWC(fn):
1423
+ return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn)
1424
+
1425
+ # Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s.
1426
+ def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None):
1427
+ def dec_fn(fn):
1428
+ @wraps(fn)
1429
+ def wrap_fn(self, *args, **kwargs):
1430
+ version = _get_torch_cuda_version()
1431
+ if version == (0, 0): # cpu or rocm
1432
+ return fn(self, *args, **kwargs)
1433
+ if version in (versions or []):
1434
+ reason = f"test skipped for CUDA version {version}"
1435
+ raise unittest.SkipTest(reason)
1436
+ return fn(self, *args, **kwargs)
1437
+
1438
+ return wrap_fn
1439
+ return dec_fn
1440
+
1441
+ # Skips a test for CUDA versions less than specified, given in the form of [major, minor].
1442
+ def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None):
1443
+ def dec_fn(fn):
1444
+ @wraps(fn)
1445
+ def wrap_fn(self, *args, **kwargs):
1446
+ version = _get_torch_cuda_version()
1447
+ if version == (0, 0): # cpu or rocm
1448
+ return fn(self, *args, **kwargs)
1449
+ if version < versions:
1450
+ reason = f"test skipped for CUDA versions < {versions}, found {version}"
1451
+ raise unittest.SkipTest(reason)
1452
+ return fn(self, *args, **kwargs)
1453
+
1454
+ return wrap_fn
1455
+ return dec_fn
1456
+
1457
+ # Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
1458
+ def skipCUDAIfCudnnVersionLessThan(version=0):
1459
+
1460
+ def dec_fn(fn):
1461
+ @wraps(fn)
1462
+ def wrap_fn(self, *args, **kwargs):
1463
+ if self.device_type == 'cuda':
1464
+ if self.no_cudnn:
1465
+ reason = "cuDNN not available"
1466
+ raise unittest.SkipTest(reason)
1467
+ if self.cudnn_version is None or self.cudnn_version < version:
1468
+ reason = f"cuDNN version {self.cudnn_version} is available but {version} required"
1469
+ raise unittest.SkipTest(reason)
1470
+
1471
+ return fn(self, *args, **kwargs)
1472
+
1473
+ return wrap_fn
1474
+ return dec_fn
1475
+
1476
+ # Skips a test on CUDA if cuSparse generic API is not available
1477
+ def skipCUDAIfNoCusparseGeneric(fn):
1478
+ return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn)
1479
+
1480
+ def skipCUDAIfNoHipsparseGeneric(fn):
1481
+ return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn)
1482
+
1483
+ def skipCUDAIfNoSparseGeneric(fn):
1484
+ return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn)
1485
+
1486
+ def skipCUDAIfNoCudnn(fn):
1487
+ return skipCUDAIfCudnnVersionLessThan(0)(fn)
1488
+
1489
+ def skipCUDAIfMiopen(fn):
1490
+ return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)
1491
+
1492
+ def skipCUDAIfNoMiopen(fn):
1493
+ return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn))
1494
+
1495
+ def skipLazy(fn):
1496
+ return skipLazyIf(True, "test doesn't work with lazy tensors")(fn)
1497
+
1498
+ def skipMeta(fn):
1499
+ return skipMetaIf(True, "test doesn't work with meta tensors")(fn)
1500
+
1501
+ def skipXLA(fn):
1502
+ return skipXLAIf(True, "Marked as skipped for XLA")(fn)
1503
+
1504
+ def skipMPS(fn):
1505
+ return skipMPSIf(True, "test doesn't work on MPS backend")(fn)
1506
+
1507
+ def skipPRIVATEUSE1(fn):
1508
+ return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn)
1509
+
1510
+ # TODO: the "all" in the name hasn't been true for quite some time, since we also have, for example, XLA and MPS now.
1511
+ # This should probably enumerate all available device type test base classes.
1512
+ def get_all_device_types() -> List[str]:
1513
+ return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py ADDED
@@ -0,0 +1,109 @@
1
+ # Owner(s): ["oncall: distributed"]
2
+
3
+ from typing import Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+
9
+ class UnitModule(nn.Module):
10
+ def __init__(self, device: torch.device):
11
+ super().__init__()
12
+ self.l1 = nn.Linear(100, 100, device=device)
13
+ self.seq = nn.Sequential(
14
+ nn.ReLU(),
15
+ nn.Linear(100, 100, device=device),
16
+ nn.ReLU(),
17
+ )
18
+ self.l2 = nn.Linear(100, 100, device=device)
19
+
20
+ def forward(self, x):
21
+ return self.l2(self.seq(self.l1(x)))
22
+
23
+
24
+ class CompositeModel(nn.Module):
25
+ def __init__(self, device: torch.device):
26
+ super().__init__()
27
+ self.l1 = nn.Linear(100, 100, device=device)
28
+ self.u1 = UnitModule(device)
29
+ self.u2 = UnitModule(device)
30
+ self.l2 = nn.Linear(100, 100, device=device)
31
+
32
+ def forward(self, x):
33
+ return self.l2(self.u2(self.u1(self.l1(x))))
34
+
35
+
36
+ class UnitParamModule(nn.Module):
37
+ def __init__(self, device: torch.device):
38
+ super().__init__()
39
+ self.l = nn.Linear(100, 100, device=device)
40
+ self.seq = nn.Sequential(
41
+ nn.ReLU(),
42
+ nn.Linear(100, 100, device=device),
43
+ nn.ReLU(),
44
+ )
45
+ self.p = nn.Parameter(torch.randn((100, 100), device=device))
46
+
47
+ def forward(self, x):
48
+ return torch.mm(self.seq(self.l(x)), self.p)
49
+
50
+
51
+ class CompositeParamModel(nn.Module):
52
+ def __init__(self, device: torch.device):
53
+ super().__init__()
54
+ self.l = nn.Linear(100, 100, device=device)
55
+ self.u1 = UnitModule(device)
56
+ self.u2 = UnitModule(device)
57
+ self.p = nn.Parameter(torch.randn((100, 100), device=device))
58
+ self.register_buffer(
59
+ "buffer", torch.randn((100, 100), device=device), persistent=True
60
+ )
61
+
62
+ def forward(self, x):
63
+ a = self.u2(self.u1(self.l(x)))
64
+ b = self.p
65
+ return torch.mm(a, b)
66
+
67
+
68
+ class FakeSequential(nn.Module):
69
+ # Define this class to achieve a desired nested wrapping using the module
70
+ # wrap policy with `nn.Sequential`
71
+ def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
72
+ super().__init__()
73
+ self._module_sequence = list(modules)
74
+
75
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
76
+ for module in self._module_sequence:
77
+ x = module(x)
78
+ return x
79
+
80
+
81
+ class NestedSequentialModel(nn.Module):
82
+ def __init__(self, device: torch.device) -> None:
83
+ super().__init__()
84
+ # This nested structure exercises traversal order to catch differences
85
+ # between valid traversals (e.g. BFS and DFS variations).
86
+ self.seq1 = nn.Sequential(
87
+ nn.Linear(1, 1, device=device),
88
+ FakeSequential(
89
+ nn.Linear(1, 1, device=device),
90
+ nn.ReLU(),
91
+ FakeSequential(
92
+ nn.Linear(1, 1, device=device),
93
+ ),
94
+ nn.ReLU(),
95
+ ),
96
+ nn.Linear(1, 2, device=device),
97
+ )
98
+ self.lin = nn.Linear(2, 2, device=device)
99
+ self.seq2 = nn.Sequential(
100
+ nn.ReLU(),
101
+ nn.Linear(2, 3, device=device),
102
+ FakeSequential(
103
+ nn.Linear(3, 2, bias=False, device=device),
104
+ nn.Linear(2, 4, bias=False, device=device),
105
+ ),
106
+ )
107
+
108
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
109
+ return self.seq2(self.lin(self.seq1(x)))
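A brief smoke-test sketch of how these composable-API test models are typically driven (the CPU device and batch size of 8 are assumptions): build the model, run a forward pass, and backpropagate a scalar loss.

    import torch
    from torch.testing._internal.common_dist_composable import CompositeParamModel

    def _smoke_test_composite_param_model() -> None:
        device = torch.device("cpu")
        model = CompositeParamModel(device)
        x = torch.randn(8, 100, device=device)   # the test models use 100-d features
        out = model(x)                            # (8, 100) @ (100, 100) -> (8, 100)
        out.sum().backward()                      # every parameter should receive a grad
        assert all(p.grad is not None for p in model.parameters())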
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py ADDED
@@ -0,0 +1,1255 @@
1
+ import faulthandler
2
+ import logging
3
+ import multiprocessing
4
+ import os
5
+ import queue
6
+ import subprocess
7
+ import sys
8
+ import tempfile
9
+ import threading
10
+ import time
11
+ import traceback
12
+ import types
13
+ import unittest
14
+ from contextlib import contextmanager
15
+ from dataclasses import dataclass
16
+ from datetime import timedelta
17
+ from enum import Enum
18
+ from functools import partial, reduce, wraps
19
+ from io import StringIO
20
+ from typing import Dict, NamedTuple, Optional, Union
21
+ from unittest.mock import patch
22
+
23
+ import torch
24
+ import torch._dynamo.test_case
25
+ import torch.cuda.nccl
26
+ import torch.distributed as c10d
27
+ import torch.nn as nn
28
+ from torch.testing._internal.common_utils import (
29
+ FILE_SCHEMA,
30
+ find_free_port,
31
+ IS_SANDCASTLE,
32
+ retry_on_connect_failures,
33
+ skip_but_pass_in_sandcastle,
34
+ skip_but_pass_in_sandcastle_if,
35
+ TEST_WITH_ROCM,
36
+ TEST_WITH_TSAN,
37
+ TestCase,
38
+ )
39
+ from torch.testing._internal.distributed.multi_threaded_pg import (
40
+ _install_threaded_pg,
41
+ _uninstall_threaded_pg,
42
+ ProcessLocalGroup,
43
+ )
44
+
45
+ logging.basicConfig(level=logging.INFO)
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
+ class TestSkip(NamedTuple):
50
+ exit_code: int
51
+ message: str
52
+
53
+
54
+ TEST_SKIPS = {
55
+ "backend_unavailable": TestSkip(
56
+ 72, "Skipped because distributed backend is not available."
57
+ ),
58
+ "small_worldsize": TestSkip(73, "Skipped due to small world size."),
59
+ "odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
60
+ "no_cuda": TestSkip(74, "CUDA is not available."),
61
+ "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
62
+ "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
63
+ "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
64
+ "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
65
+ "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
66
+ "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
67
+ "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
68
+ "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
69
+ "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
70
+ "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
71
+ "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
72
+ "generic": TestSkip(
73
+ 86, "Test skipped at subprocess level, look at subprocess log for skip reason"
74
+ ),
75
+ "importerror": TestSkip(88, "Test skipped due to missing import"),
76
+ }
77
+
78
+
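The exit codes above form a small skip protocol: a subprocess exits with a reserved code, and the parent test process later maps that code back to a unittest skip. A hypothetical helper (not part of this file) illustrating the pattern:

    import sys
    from functools import wraps

    import torch.distributed as dist
    from torch.testing._internal.common_distributed import TEST_SKIPS

    def skip_if_no_nccl(func):
        # Signal the skip from inside the subprocess via the reserved exit code;
        # MultiProcessTestCase._check_return_codes turns it back into a skip.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if not dist.is_nccl_available():
                sys.exit(TEST_SKIPS["nccl"].exit_code)
            return func(*args, **kwargs)
        return wrapper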
79
+ @dataclass
80
+ class DistTestCases:
81
+ # Backends that do not support a specific collective
82
+ skip_collective = {}
83
+ skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
84
+ skip_collective["reduce"] = set()
85
+ skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
86
+ skip_collective["cpu barrier"] = {"nccl", "ucc"}
87
+
88
+ # Sets showing that something is implemented
89
+ backend_feature = {}
90
+ backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
91
+ backend_feature["cuda"] = {"nccl", "gloo", "ucc"}
92
+ backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
93
+ backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
94
+ backend_feature["plugin"] = set()
95
+
96
+
97
+ def skip_if_no_gpu(func):
98
+ """Skips if the world size exceeds the number of GPUs, ensuring that if the
99
+ test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""
100
+
101
+ @wraps(func)
102
+ def wrapper(*args, **kwargs):
103
+ if not torch.cuda.is_available():
104
+ sys.exit(TEST_SKIPS["no_cuda"].exit_code)
105
+ world_size = int(os.environ["WORLD_SIZE"])
106
+ if torch.cuda.device_count() < world_size:
107
+ sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)
108
+
109
+ return func(*args, **kwargs)
110
+
111
+ return wrapper
112
+
113
+
114
+ def skip_if_small_worldsize(func):
115
+ @wraps(func)
116
+ def wrapper(*args, **kwargs):
117
+ if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
118
+ sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
119
+
120
+ return func(*args, **kwargs)
121
+
122
+ return wrapper
123
+
124
+
125
+ def skip_if_odd_worldsize(func):
126
+ @wraps(func)
127
+ def wrapper(*args, **kwargs):
128
+ if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1:
129
+ sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)
130
+
131
+ return func(*args, **kwargs)
132
+
133
+ return wrapper
134
+
135
+
136
+ def require_n_gpus_for_nccl_backend(n, backend):
137
+ def decorator(func):
138
+ @wraps(func)
139
+ def wrapper(*args, **kwargs):
140
+ if backend == "nccl" and torch.cuda.device_count() < n:
141
+ sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
142
+ else:
143
+ return func(*args, **kwargs)
144
+
145
+ return wrapper
146
+
147
+ return decorator
148
+
149
+
150
+ def import_transformers_or_skip():
151
+ def decorator(func):
152
+ @wraps(func)
153
+ def wrapper(*args, **kwargs):
154
+ try:
155
+ from transformers import ( # noqa: F401
156
+ AutoModelForMaskedLM,
157
+ BertConfig,
158
+ )
159
+
160
+ return func(*args, **kwargs)
161
+ except ImportError:
162
+ sys.exit(TEST_SKIPS["importerror"].exit_code)
163
+
164
+ return wrapper
165
+
166
+ return decorator
167
+
168
+
169
+ def skip_if_lt_x_gpu(x):
170
+ def decorator(func):
171
+ @wraps(func)
172
+ def wrapper(*args, **kwargs):
173
+ if torch.cuda.is_available() and torch.cuda.device_count() >= x:
174
+ return func(*args, **kwargs)
175
+ sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
176
+
177
+ return wrapper
178
+
179
+ return decorator
180
+
181
+
182
+ # This decorator helps avoid initializing CUDA while testing other backends.
183
+ def nccl_skip_if_lt_x_gpu(backend, x):
184
+ def decorator(func):
185
+ @wraps(func)
186
+ def wrapper(*args, **kwargs):
187
+ if backend != "nccl":
188
+ return func(*args, **kwargs)
189
+ if torch.cuda.is_available() and torch.cuda.device_count() >= x:
190
+ return func(*args, **kwargs)
191
+ sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
192
+
193
+ return wrapper
194
+
195
+ return decorator
196
+
197
+
198
+ def verify_ddp_error_logged(model_DDP, err_substr):
199
+ # Verify error was logged in ddp_logging_data.
200
+ ddp_logging_data = model_DDP._get_ddp_logging_data()
201
+ assert "iteration" in ddp_logging_data
202
+ assert "has_error" in ddp_logging_data
203
+ assert "error" in ddp_logging_data
204
+ logging_err = ddp_logging_data["error"]
205
+ # Remove C++ stacktrace if needed.
206
+ actual = (
207
+ err_substr
208
+ if err_substr.find("\nException raised from ") == -1
209
+ else err_substr.split("\nException raised from ")[0]
210
+ )
211
+ assert (
212
+ actual in logging_err
213
+ ), f"Did not find expected {actual} in ddp logging data error: {logging_err}"
214
+
215
+
216
+ def with_nccl_blocking_wait(func):
217
+ """
218
+ Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
219
+ this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
220
+ the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
221
+ TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
222
+ """
223
+
224
+ @wraps(func)
225
+ def wrapper(*args, **kwargs):
226
+ # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING
227
+ try:
228
+ cached_nccl_async_error_handling: Union[str, None] = os.environ[
229
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING"
230
+ ]
231
+ del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"]
232
+ except KeyError:
233
+ # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset
234
+ cached_nccl_async_error_handling = None
235
+
236
+ # Save val of TORCH_NCCL_BLOCKING_WAIT and set it.
237
+ try:
238
+ cached_nccl_blocking_wait: Union[str, None] = os.environ[
239
+ "TORCH_NCCL_BLOCKING_WAIT"
240
+ ]
241
+ except KeyError:
242
+ cached_nccl_blocking_wait = None
243
+ finally:
244
+ os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
245
+
246
+ try:
247
+ ret = func(*args, **kwargs)
248
+ return ret
249
+ finally:
250
+ # restore old values.
251
+ if cached_nccl_async_error_handling is not None:
252
+ os.environ[
253
+ "TORCH_NCCL_ASYNC_ERROR_HANDLING"
254
+ ] = cached_nccl_async_error_handling
255
+
256
+ if cached_nccl_blocking_wait is not None:
257
+ os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait
258
+
259
+ return wrapper
260
+
261
+
262
+ def with_dist_debug_levels(levels):
263
+ """
264
+ Runs a test for each distributed debug level specified in levels.
265
+ """
266
+
267
+ def decorator(func):
268
+ @wraps(func)
269
+ def wrapper(*args, **kwargs):
270
+ old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
271
+ for level in levels:
272
+ os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
273
+ c10d.set_debug_level_from_env()
274
+ ret = func(*args, **kwargs)
275
+ c10d.barrier()
276
+ if old_level is not None:
277
+ os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
278
+ # Only returns test return for last test, but since these are
279
+ # unittests the return value is not really used and earlier tests
280
+ # would've raised had they failed.
281
+ return ret
282
+
283
+ return wrapper
284
+
285
+ return decorator
286
+
287
+
288
+ def requires_gloo():
289
+ return skip_but_pass_in_sandcastle_if(
290
+ not c10d.is_gloo_available(),
291
+ "c10d was not compiled with the Gloo backend",
292
+ )
293
+
294
+
295
+ def requires_nccl_version(version, msg):
296
+ if not c10d.is_nccl_available():
297
+ return skip_but_pass_in_sandcastle(
298
+ "c10d was not compiled with the NCCL backend",
299
+ )
300
+ else:
301
+ return skip_but_pass_in_sandcastle_if(
302
+ torch.cuda.nccl.version() < version,
303
+ "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format(
304
+ version, torch.cuda.nccl.version(), msg
305
+ ),
306
+ )
307
+
308
+
309
+ def requires_nccl():
310
+ return skip_but_pass_in_sandcastle_if(
311
+ not c10d.is_nccl_available(),
312
+ "c10d was not compiled with the NCCL backend",
313
+ )
314
+
315
+ def requires_ucc():
316
+ return skip_but_pass_in_sandcastle_if(
317
+ not c10d.is_ucc_available(),
318
+ "c10d was not compiled with the UCC backend",
319
+ )
320
+
321
+ def requires_mpi():
322
+ return skip_but_pass_in_sandcastle_if(
323
+ not c10d.is_mpi_available(),
324
+ "c10d was not compiled with the MPI backend",
325
+ )
326
+
327
+
328
+ def skip_if_rocm(func):
329
+ """Skips a test for ROCm"""
330
+ func.skip_if_rocm = True
331
+
332
+ @wraps(func)
333
+ def wrapper(*args, **kwargs):
334
+ if not TEST_WITH_ROCM:
335
+ return func(*args, **kwargs)
336
+ sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)
337
+
338
+ return wrapper
339
+
340
+
341
+ def skip_if_win32():
342
+ return skip_but_pass_in_sandcastle_if(
343
+ sys.platform == "win32",
344
+ "This unit test case is not supported on Windows platform",
345
+ )
346
+
347
+
348
+ @retry_on_connect_failures
349
+ def create_tcp_store(
350
+ addr="localhost",
351
+ world_size=1,
352
+ is_master=True,
353
+ timeout=timedelta(minutes=5),
354
+ wait_for_workers=True,
355
+ jit_class=False,
356
+ use_libuv=False
357
+ ):
358
+ """
359
+ Creates a TCP store. Retries if the chosen port is already in use.
360
+ """
361
+ port = find_free_port()
362
+ if jit_class:
363
+ timeout_millisecond = int(timeout / timedelta(milliseconds=1))
364
+ return torch.classes.dist_c10d.TCPStore(
365
+ addr, port, world_size, is_master, timeout_millisecond
366
+ )
367
+ else:
368
+ return c10d.TCPStore(
369
+ addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv
370
+ )
371
+
372
+
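A brief usage sketch for create_tcp_store (the single-rank Gloo group here is an assumption): the store replaces MASTER_ADDR/MASTER_PORT-based rendezvous.

    import torch.distributed as c10d
    from torch.testing._internal.common_distributed import create_tcp_store

    def _example_init_pg_with_tcp_store() -> None:
        store = create_tcp_store(world_size=1)
        c10d.init_process_group("gloo", store=store, rank=0, world_size=1)
        try:
            pass  # run the collective(s) under test here
        finally:
            c10d.destroy_process_group()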
373
+ if TEST_WITH_TSAN:
374
+ # TSAN runs much slower.
375
+ TIMEOUT_DEFAULT = 500
376
+ else:
377
+ TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300'))
378
+ TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}
379
+
380
+
381
+ # https://github.com/pytorch/pytorch/issues/75665
382
+ if TEST_WITH_ROCM:
383
+ TIMEOUT_OVERRIDE["test_join_kwargs"] = 200
384
+
385
+
386
+ def create_device(interface=None):
387
+ if sys.platform == "win32" or interface is None:
388
+ return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
389
+ else:
390
+ return c10d.ProcessGroupGloo.create_device(interface=interface)
391
+
392
+
393
+ def get_timeout(test_id) -> int:
394
+ return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT)
395
+
396
+
397
+ @contextmanager
398
+ def captured_output():
399
+ new_out, new_err = StringIO(), StringIO()
400
+ old_out, old_err = sys.stdout, sys.stderr
401
+ try:
402
+ sys.stdout, sys.stderr = new_out, new_err
403
+ yield sys.stdout, sys.stderr
404
+ finally:
405
+ sys.stdout, sys.stderr = old_out, old_err
406
+
407
+
408
+ def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
409
+ """
410
+ Generate a number of basic test cases for sparse reduction.
411
+ These cover tensors with a varying number of sparse dimensions and a varying
412
+ number of dense dimensions. The only reduction operation we support is sum.
413
+ """
414
+
415
+ def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
416
+ # First sparse dimension is [0..rank].
417
+ # Subsequent dimensions are always 0, so we know there is
418
+ # a non-empty intersection between any two sparse tensors.
419
+ indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
420
+ shape = [world_size] + [2 for _ in range(dense_dims)]
421
+ for _ in range(sparse_dims - 1):
422
+ indices = torch.cat((indices, torch.zeros(1, rank + 1)))
423
+ shape.append(world_size)
424
+ values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
425
+ return torch.sparse_coo_tensor(indices, values, shape)
426
+
427
+ def compute_sum(fn, world_size: int):
428
+ return reduce(
429
+ lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]
430
+ )
431
+
432
+ return [
433
+ (
434
+ [
435
+ fn(num_inputs * rank + i, num_inputs * world_size)
436
+ for i in range(num_inputs)
437
+ ],
438
+ [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
439
+ )
440
+ for fn in [
441
+ partial(generate, sparse_dims=1),
442
+ partial(generate, sparse_dims=2),
443
+ partial(generate, sparse_dims=3),
444
+ partial(generate, dense_dims=1),
445
+ partial(generate, dense_dims=2),
446
+ partial(generate, dense_dims=3),
447
+ ]
448
+ ]
449
+
450
+
451
+ # HELPER FOR MULTIGPU TESTS
452
+ def init_multigpu_helper(world_size: int, backend: str):
453
+ """Multi-GPU tests are designed to simulate multiple nodes, each with multiple
454
+ GPUs. The NCCL backend requires an equal number of GPUs in each process.
455
+ On a single node, all visible GPUs are divided evenly
456
+ into subsets, and each process uses only its subset.
457
+ """
458
+ nGPUs = torch.cuda.device_count()
459
+ visible_devices = range(nGPUs)
460
+
461
+ # If the rank is less than or equal to the number of available GPUs,
462
+ # then each rank can be mapped to a corresponding GPU.
463
+ nGPUs_per_process = 1
464
+ if world_size > nGPUs:
465
+ nGPUs_per_process = nGPUs // world_size
466
+ rank_to_GPU = {
467
+ i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process])
468
+ for i in range(world_size)
469
+ }
470
+ return rank_to_GPU
471
+
472
+
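As a worked example (the 8-GPU node is hypothetical): with the default world size of 4 and 8 visible devices, nGPUs_per_process stays 1, so the helper pins each rank to a single GPU.

    # Hypothetical node with 8 visible GPUs, world_size=4:
    # init_multigpu_helper(world_size=4, backend="nccl")
    # -> {0: [0], 1: [1], 2: [2], 3: [3]}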
473
+ tmp_dir: Optional[tempfile.TemporaryDirectory] = None
474
+
475
+
476
+ def initialize_temp_directories(init_method: Optional[str] = None) -> None:
477
+ global tmp_dir
478
+ tmp_dir = tempfile.TemporaryDirectory()
479
+ os.environ["TEMP_DIR"] = tmp_dir.name
480
+ os.mkdir(os.path.join(tmp_dir.name, "barrier"))
481
+ os.mkdir(os.path.join(tmp_dir.name, "test_dir"))
482
+ init_dir_path = os.path.join(tmp_dir.name, "init_dir")
483
+ os.mkdir(init_dir_path)
484
+ # Set init method if specified.
485
+ if init_method is not None:
486
+ os.environ["INIT_METHOD"] = init_method
487
+ else:
488
+ os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
489
+ init_dir_path, "shared_init_file"
490
+ )
491
+
492
+
493
+ def cleanup_temp_dir() -> None:
494
+ if tmp_dir is not None:
495
+ tmp_dir.cleanup()
496
+
497
+
498
+ # Most tests operate with this worldsize
499
+ DEFAULT_WORLD_SIZE = 4
500
+
501
+ # [How does MultiProcessTestCase work?]
502
+ # Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
503
+ # default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
504
+ # example which inherits from this class. Its `setUp()` method calls into
505
+ # `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
506
+ # subprocesses. During the spawn, the main process passes the test name to
507
+ # subprocesses, and the name is acquired from self.id(). The subprocesses
508
+ # then use the provided test function name to retrieve the function attribute
509
+ # from the test instance and run it. The main process simply waits for all
510
+ # subprocesses to join.
511
+
512
+
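To make the flow described above concrete, here is a minimal, hypothetical subclass (the world size of 2, NCCL backend, and all_reduce body are illustrative assumptions, not part of this file):

    import torch
    import torch.distributed as dist
    from torch.testing._internal.common_distributed import (
        MultiProcessTestCase, skip_if_lt_x_gpu,
    )
    from torch.testing._internal.common_utils import run_tests

    class MyDistTest(MultiProcessTestCase):
        @property
        def world_size(self) -> int:
            return 2

        def setUp(self):
            super().setUp()
            self._spawn_processes()          # spawns world_size subprocesses

        @skip_if_lt_x_gpu(2)
        def test_allreduce(self):
            store = dist.FileStore(self.file_name, self.world_size)
            dist.init_process_group("nccl", store=store, rank=self.rank,
                                    world_size=self.world_size)
            t = torch.ones(1, device=f"cuda:{self.rank}")
            dist.all_reduce(t)
            self.assertEqual(t.item(), float(self.world_size))
            dist.destroy_process_group()

    if __name__ == "__main__":
        run_tests()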
513
+ class MultiProcessTestCase(TestCase):
514
+ MAIN_PROCESS_RANK = -1
515
+ # This exit code is used to indicate that the test code had an error and
516
+ # exited abnormally. There are certain tests that might use sys.exit() to
517
+ # simulate failures and in those cases, we can't have an exit code of 0,
518
+ # but we still want to ensure we didn't run into any other errors.
519
+ TEST_ERROR_EXIT_CODE = 10
520
+
521
+ # do not early terminate for distributed tests.
522
+ def _should_stop_test_suite(self) -> bool:
523
+ return False
524
+
525
+ @property
526
+ def world_size(self) -> int:
527
+ return DEFAULT_WORLD_SIZE
528
+
529
+ def join_or_run(self, fn):
530
+ @wraps(fn)
531
+ def wrapper(self):
532
+ if self.rank == self.MAIN_PROCESS_RANK:
533
+ self._join_processes(fn)
534
+ else:
535
+ fn()
536
+
537
+ return types.MethodType(wrapper, self)
538
+
539
+ # The main process spawns N subprocesses that run the test.
540
+ # Constructor patches current instance test method to
541
+ # assume the role of the main process and join its subprocesses,
542
+ # or run the underlying test function.
543
+ def __init__(self, method_name: str = "runTest") -> None:
544
+ super().__init__(method_name)
545
+ fn = getattr(self, method_name)
546
+ setattr(self, method_name, self.join_or_run(fn))
547
+
548
+ def setUp(self) -> None:
549
+ super().setUp()
550
+ self.skip_return_code_checks = [] # type: ignore[var-annotated]
551
+ self.processes = [] # type: ignore[var-annotated]
552
+ self.rank = self.MAIN_PROCESS_RANK
553
+ self.file_name = tempfile.NamedTemporaryFile(delete=False).name
554
+ # pid to pipe consisting of error message from process.
555
+ self.pid_to_pipe = {} # type: ignore[var-annotated]
556
+
557
+ def tearDown(self) -> None:
558
+ super().tearDown()
559
+ for p in self.processes:
560
+ p.terminate()
561
+ # Each Process instance holds a few open file descriptors. The unittest
562
+ # runner creates a new TestCase instance for each test method and keeps
563
+ # it alive until the end of the entire suite. We must thus reset the
564
+ # processes to prevent an effective file descriptor leak.
565
+ self.processes = []
566
+
567
+ def _current_test_name(self) -> str:
568
+ # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
569
+ return self.id().split(".")[-1]
570
+
571
+ def _start_processes(self, proc) -> None:
572
+ self.processes = []
573
+ for rank in range(int(self.world_size)):
574
+ parent_conn, child_conn = torch.multiprocessing.Pipe()
575
+ process = proc(
576
+ target=self.__class__._run,
577
+ name="process " + str(rank),
578
+ args=(rank, self._current_test_name(), self.file_name, child_conn),
579
+ )
580
+ process.start()
581
+ logger.info("Started process %s with pid %s", rank, process.pid)
582
+ self.pid_to_pipe[process.pid] = parent_conn
583
+ self.processes.append(process)
584
+
585
+ def _spawn_processes(self) -> None:
586
+ proc = torch.multiprocessing.get_context("spawn").Process
587
+ self._start_processes(proc)
588
+
589
+ class Event(Enum):
590
+ GET_TRACEBACK = 1
591
+
592
+ @staticmethod
593
+ def _event_listener(parent_pipe, signal_pipe, rank: int):
594
+ logger.info("Starting event listener thread for rank %s", rank)
595
+ while True:
596
+ ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])
597
+
598
+ if parent_pipe in ready_pipes:
599
+
600
+ if parent_pipe.closed:
601
+ logger.info(
602
+ "Pipe closed for process %s, stopping event listener thread", rank
603
+ )
604
+ return
605
+
606
+ event = parent_pipe.recv()
607
+ logger.info("Received event %s on process %s", event, rank)
608
+
609
+ if event == MultiProcessTestCase.Event.GET_TRACEBACK:
610
+ # Return traceback to the parent process.
611
+ with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
612
+ faulthandler.dump_traceback(tmp_file)
613
+ # Flush buffers and seek to read from the beginning
614
+ tmp_file.flush()
615
+ tmp_file.seek(0)
616
+ parent_pipe.send(tmp_file.read())
617
+
618
+ logger.info("Process %s sent traceback", rank)
619
+
620
+ if signal_pipe in ready_pipes:
621
+ return
622
+
623
+ @classmethod
624
+ def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
625
+ self = cls(test_name)
626
+ self.rank = rank
627
+ self.file_name = file_name
628
+ self.run_test(test_name, parent_pipe)
629
+
630
+ def run_test(self, test_name: str, parent_pipe) -> None:
631
+ # Start event listener thread.
632
+ signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
633
+ event_listener_thread = threading.Thread(
634
+ target=MultiProcessTestCase._event_listener,
635
+ args=(parent_pipe, signal_recv_pipe, self.rank),
636
+ daemon=True,
637
+ )
638
+ event_listener_thread.start()
639
+ if sys.platform != "win32" and sys.platform != "darwin":
640
+ # Register signal handler to dump stack traces on FATALs.
641
+ # Windows and MacOS do not support the signal handlers.
642
+ torch._C._set_print_stack_traces_on_fatal_signal(True)
643
+ # Show full C++ stacktraces when a Python error originating from C++ is raised.
644
+ os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
645
+
646
+ # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
647
+ # We're retrieving a corresponding test and executing it.
648
+ try:
649
+ getattr(self, test_name)()
650
+ except unittest.SkipTest as se:
651
+ logger.info(
652
+ "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
653
+ )
654
+ sys.exit(TEST_SKIPS["generic"].exit_code)
655
+ except Exception as e:
656
+ logger.error(
657
+ "Caught exception: \n%s exiting "
658
+ "process %s with exit code: %s",
659
+ traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE
660
+ )
661
+ # Send error to parent process.
662
+ parent_pipe.send(traceback.format_exc())
663
+ sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
664
+ finally:
665
+ if signal_send_pipe is not None:
666
+ signal_send_pipe.send(None)
667
+
668
+ assert event_listener_thread is not None
669
+ event_listener_thread.join()
670
+ # Close pipe after done with test.
671
+ parent_pipe.close()
672
+
673
+ def _get_timedout_process_traceback(self) -> None:
674
+ pipes = []
675
+ for i, process in enumerate(self.processes):
676
+ if process.exitcode is None:
677
+ pipe = self.pid_to_pipe[process.pid]
678
+ try:
679
+ pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
680
+ pipes.append((i, pipe))
681
+ except ConnectionError as e:
682
+ logger.error(
683
+ "Encountered error while trying to get traceback for process %s: %s", i, e
684
+ )
685
+
686
+ # Wait for results.
687
+ for rank, pipe in pipes:
688
+ try:
689
+ # Wait for traceback
690
+ if pipe.poll(5):
691
+ if pipe.closed:
692
+ logger.info(
693
+ "Pipe closed for process %s, cannot retrieve traceback", rank
694
+ )
695
+ continue
696
+
697
+ traceback = pipe.recv()
698
+ logger.error(
699
+ "Process %s timed out with traceback: \n\n%s", rank, traceback
700
+ )
701
+ else:
702
+ logger.error(
703
+ "Could not retrieve traceback for timed out process: %s", rank
704
+ )
705
+ except ConnectionError as e:
706
+ logger.error(
707
+ "Encountered error while trying to get traceback for process %s: %s", rank, e
708
+ )
709
+
710
+ def _join_processes(self, fn) -> None:
711
+ timeout = get_timeout(self.id())
712
+ start_time = time.time()
713
+ subprocess_error = False
714
+ try:
715
+ while True:
716
+ # check to see if any subprocess exited with an error early.
717
+ for (i, p) in enumerate(self.processes):
718
+ # This is the exit code processes exit with if they
719
+ # encountered an exception.
720
+ if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
721
+ print(
722
+ f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
723
+ )
724
+ active_children = torch.multiprocessing.active_children()
725
+ for ac in active_children:
726
+ ac.terminate()
727
+ subprocess_error = True
728
+ break
729
+ if subprocess_error:
730
+ break
731
+ # All processes have joined cleanly if they all have a valid exitcode.
732
+ if all(p.exitcode is not None for p in self.processes):
733
+ break
734
+ # Check if we should time out the test. If so, we terminate each process.
735
+ elapsed = time.time() - start_time
736
+ if elapsed > timeout:
737
+ self._get_timedout_process_traceback()
738
+ print(
739
+ f"Timing out after {timeout} seconds and killing subprocesses."
740
+ )
741
+ for p in self.processes:
742
+ p.terminate()
743
+ break
744
+ # Sleep to avoid excessive busy polling.
745
+ time.sleep(0.1)
746
+
747
+ elapsed_time = time.time() - start_time
748
+
749
+ if fn in self.skip_return_code_checks:
750
+ self._check_no_test_errors(elapsed_time)
751
+ else:
752
+ self._check_return_codes(elapsed_time)
753
+ finally:
754
+ # Close all pipes
755
+ for pipe in self.pid_to_pipe.values():
756
+ pipe.close()
757
+
758
+ def _check_no_test_errors(self, elapsed_time) -> None:
759
+ """
760
+ Checks that we didn't have any errors thrown in the child processes.
761
+ """
762
+ for i, p in enumerate(self.processes):
763
+ if p.exitcode is None:
764
+ raise RuntimeError(
765
+ f"Process {i} timed out after {elapsed_time} seconds"
766
+ )
767
+ self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)
768
+
769
+ def _check_return_codes(self, elapsed_time) -> None:
770
+ """
771
+ Checks that the return codes of all spawned processes match, and skips
772
+ tests if they returned a return code indicating a skipping condition.
773
+ """
774
+ # If no processes are spawned, there is nothing to check.
775
+ if not self.processes:
776
+ logger.warning("Note: no subprocesses were spawned, test was likely skipped.")
777
+ return
778
+
779
+ first_process = self.processes[0]
780
+ # first, we check if there are errors in actual processes
781
+ # (via TEST_ERROR_EXIT CODE), and raise an exception for those.
782
+ # the reason we do this is to attempt to raise a more helpful error
783
+ # message than "Process x terminated/timed out"
784
+ # TODO: we should pipe the exception of the failed subprocess here.
785
+ # Currently, the actual exception is displayed as a logging output.
786
+ errored_processes = [
787
+ (i, p)
788
+ for i, p in enumerate(self.processes)
789
+ if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
790
+ ]
791
+ if errored_processes:
792
+ error = ""
793
+ for i, process in errored_processes:
794
+ # Get error from pipe.
795
+ error_message = self.pid_to_pipe[process.pid].recv()
796
+ error += (
797
+ "Process {} exited with error code {} and exception:\n{}\n".format(
798
+ i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message
799
+ )
800
+ )
801
+
802
+ raise RuntimeError(error)
803
+ # If no process exited uncleanly, we check for timeouts, and then ensure
804
+ # each process exited cleanly.
805
+ for i, p in enumerate(self.processes):
806
+ if p.exitcode is None:
807
+ raise RuntimeError(
808
+ f"Process {i} terminated or timed out after {elapsed_time} seconds"
809
+ )
810
+ self.assertEqual(
811
+ p.exitcode,
812
+ first_process.exitcode,
813
+ msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format(
814
+ i, first_process.exitcode, p.exitcode
815
+ ),
816
+ )
817
+ for skip in TEST_SKIPS.values():
818
+ if first_process.exitcode == skip.exit_code:
819
+ if IS_SANDCASTLE:
820
+ # Don't use unittest.skip to skip the test on sandcastle
821
+ # since it creates tasks for skipped tests assuming there
822
+ # is some follow-up needed. Instead just "pass" the test
823
+ # with an appropriate message.
824
+ logger.info(
825
+ "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
826
+ )
827
+ return
828
+ else:
829
+ raise unittest.SkipTest(skip.message)
830
+ self.assertEqual(
831
+ first_process.exitcode,
832
+ 0,
833
+ msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
834
+ )
835
+
836
+ @property
837
+ def is_master(self) -> bool:
838
+ return self.rank == 0
839
+
840
+
841
+ # Cannot use functools.cache as it requires python 3.9
842
+ EFA_PROBE_RESULT = None
843
+
844
+
845
+ def has_efa() -> bool:
846
+ """
847
+ If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
848
+ Libfabric EFA interfaces and EFA software components installed,
849
+ see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
850
+ """
851
+ global EFA_PROBE_RESULT
852
+ if EFA_PROBE_RESULT is not None:
853
+ return EFA_PROBE_RESULT
854
+
855
+ try:
856
+ EFA_PROBE_RESULT = (
857
+ subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0
858
+ )
859
+ except FileNotFoundError:
860
+ EFA_PROBE_RESULT = False
861
+ return EFA_PROBE_RESULT
862
+
863
+
864
+ def tp_transports():
865
+ """
866
+ If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
867
+ 'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
868
+ uses InfiniBand transport, so we exclude it from tensorpipe transports,
869
+ see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
870
+ """
871
+ return ["shm", "uv"] if has_efa() else None
872
+
873
+
874
+ def spawn_threads_and_init_comms(
875
+ func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
876
+ ):
877
+ """
878
+ Wrapper to use with a test method
879
+ """
880
+ if func is None:
881
+ return partial(
882
+ spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
883
+ )
884
+
885
+
886
+ def _run_test_method_with_multi_threads(world_size, callback):
887
+ world = _install_threaded_pg()
888
+ global_store = c10d.HashStore()
889
+
890
+ def world_is_valid():
891
+ return world == c10d.distributed_c10d._world
892
+
893
+ def worker(rank, world_pg, store):
894
+ c10d.init_process_group(
895
+ backend="threaded", rank=rank, world_size=world_size, store=store
896
+ )
897
+ try:
898
+ callback()
899
+ except BaseException as ex:
900
+ # Exceptions are handled in MultiThreadedTestCase
901
+ MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
902
+ ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
903
+ finally:
904
+ if world_is_valid():
905
+ c10d.destroy_process_group()
906
+
907
+ threads = []
908
+ for rank in range(world_size):
909
+ t = threading.Thread(target=worker, args=(rank, world, global_store))
910
+ t.start()
911
+ threads.append(t)
912
+
913
+ return threads
914
+
915
+
916
+ @wraps(func)
917
+ def wrapper(self, *args, **kwargs):
918
+ # TODO: get test name from kwargs
919
+ threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs))
920
+ # join and error handling
921
+ MultiThreadedTestCase._join_threads(threads, func)
922
+
923
+ return wrapper
924
+
925
+
926
+ class MultiThreadedTestCase(TestCase):
927
+ """
928
+ Test runner that runs all tests in a single process, using multiple threads
929
+ backed by the threaded (in-process) process group.
930
+
931
+ Each test spawns world_size threads and run the test method in each thread.
932
+
933
+ Difference from regular MultiProcess test runner:
934
+ Must explicitly define setUp and call self._spawn_threads() to run the tests.
935
+ Cannot use setUp / tearDown (must use perThreadSetUp / perThreadTearDown)
936
+ to set up / tear down each thread when running each test.
937
+ No global state possible
938
+ How bad of a limitation is this?
939
+ """
940
+ exception_queue = queue.Queue()
941
+
942
+ MAIN_THREAD_RANK = -1
943
+
944
+ def join_or_run(self, fn):
945
+ @wraps(fn)
946
+ def wrapper(self):
947
+ if self.rank == self.MAIN_THREAD_RANK:
948
+ self._join_threads(self.threads, fn)
949
+ else:
950
+ fn()
951
+
952
+ return types.MethodType(wrapper, self)
953
+
954
+ def __init__(self, method_name: str = "runTest") -> None:
955
+ super().__init__(method_name)
956
+ test_fn = getattr(self, method_name, None)
957
+ setattr(self, method_name, self.join_or_run(test_fn))
958
+
959
+ def perThreadSetUp(self):
960
+ # super().setUp() # TestCase.setUp() calls torch.manual_seed()
961
+ pass
962
+
963
+ def perThreadTearDown(self):
964
+ pass
965
+
966
+ def setUp(self) -> None:
967
+ """
968
+ setUp only sets things up in the main thread; if you want to configure things
969
+ in the spawned threads, use perThreadSetUp
970
+ """
971
+ super().setUp()
972
+ self.rank = self.MAIN_THREAD_RANK
973
+ self.threads = []
974
+ # Show full C++ stacktraces when a Python error originating from C++ is raised.
975
+ os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
976
+
977
+ def tearDown(self):
978
+ """
979
+ tearDown only tears things down in the main thread; if you want to configure things
980
+ in the spawned threads, use perThreadTearDown
981
+ """
982
+ super().tearDown()
983
+ self.threads = []
984
+
985
+ def _spawn_threads(self):
986
+ """
987
+ Spawn threads and run the test; use this method in the setUp of your TestCase.
988
+ """
989
+ test_name = self._current_test_name
990
+ # for each test case, we need to create thread local world, and a global store
991
+ world = _install_threaded_pg()
992
+ self.__class__.global_store = c10d.HashStore()
993
+
994
+ def world_is_valid():
995
+ return world == c10d.distributed_c10d._world
996
+
997
+ if not world_is_valid():
998
+ raise RuntimeError("Invalid world")
999
+
1000
+ for rank in range(self.world_size):
1001
+ t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size))
1002
+ t.start()
1003
+ self.threads.append(t)
1004
+
1005
+ @classmethod
1006
+ def _run(cls, test_name, rank, world_size):
1007
+ self = cls(test_name)
1008
+ self.rank = rank
1009
+
1010
+ # precision/rel_tol is a thread-local setting; since it may be overridden per test, every
1011
+ # thread needs to start from the same value. This matters for op db tests, which rely on
1012
+ # these states being set, e.g. when using instantiate_device_type_tests().
1013
+ # TODO: figure out a better way to do this
1014
+ if hasattr(self, "_tls"):
1015
+ self._tls = threading.local()
1016
+ self._tls.precision = TestCase._precision
1017
+ self._tls.rel_tol = TestCase._rel_tol
1018
+
1019
+ self.run_test_with_threaded_pg(test_name, rank, world_size)
1020
+
1021
+ def run_test_with_threaded_pg(self, test_name, rank, world_size):
1022
+ """
1023
+ Run the current test associated with `test_name` using the threaded process group.
1024
+ """
1025
+ c10d.init_process_group(
1026
+ backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store
1027
+ )
1028
+ self.perThreadSetUp()
1029
+
1030
+ try:
1031
+ getattr(self, test_name)()
1032
+ except BaseException as ex:
1033
+ self.exception_queue.put((rank, sys.exc_info()))
1034
+ ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
1035
+ finally:
1036
+ c10d.destroy_process_group()
1037
+ self.perThreadTearDown()
1038
+
1039
+
1040
+ @classmethod
1041
+ def _join_threads(cls, threads, fn):
1042
+ timeout = TIMEOUT_DEFAULT
1043
+ try:
1044
+ for idx, thread in enumerate(threads):
1045
+ thread.join(max(0, timeout))
1046
+ if thread.is_alive():
1047
+ MultiThreadedTestCase.exception_queue.put(
1048
+ (
1049
+ idx,
1050
+ (
1051
+ TimeoutError,
1052
+ TimeoutError(
1053
+ f"Rank failed to join in under {timeout} seconds"
1054
+ ),
1055
+ None,
1056
+ ),
1057
+ )
1058
+ )
1059
+ ProcessLocalGroup.reset()
1060
+ failed_ranks = []
1061
+ while not cls.exception_queue.empty():
1062
+ failure = cls.exception_queue.get()
1063
+ failed_ranks.append(failure)
1064
+ finally:
1065
+ _uninstall_threaded_pg()
1066
+
1067
+ cls._check_return_codes(failed_ranks, timeout, fn)
1068
+
1069
+ @classmethod
1070
+ def _check_return_codes(cls, failed_ranks, timeout, fn):
1071
+ # Print based on exceptions raised from threads
1072
+ # SkipTest: print info for each thread
1073
+ # TimeoutError: raise RuntimeError for any timed out thread
1074
+ # Normal Exception: print error for each thread that raises exception
1075
+ # and raise a RuntimeError
1076
+ error_msg = ""
1077
+ skip_code = -1
1078
+ for rank, exc_info in failed_ranks:
1079
+ exc = exc_info[1]
1080
+ if isinstance(exc, unittest.SkipTest):
1081
+ logger.info(
1082
+ "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
1083
+ )
1084
+ if skip_code < 0:
1085
+ skip_code = TEST_SKIPS["generic"].exit_code
1086
+ elif isinstance(exc, TimeoutError):
1087
+ msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
1088
+ logger.error(msg)
1089
+ raise RuntimeError(msg)
1090
+ elif isinstance(exc, Exception):
1091
+ msg = "".join(traceback.format_exception(*exc_info))
1092
+ logger.error(
1093
+ "Caught exception: \n%s exiting thread %s", msg, rank
1094
+ )
1095
+ error_msg += (
1096
+ f"Thread {rank} exited with exception:\n{msg}\n"
1097
+ )
1098
+ elif isinstance(exc, SystemExit):
1099
+ if type(exc.code) == int and skip_code < 0:
1100
+ skip_code = exc.code
1101
+
1102
+ # check exceptions
1103
+ if len(error_msg) > 0:
1104
+ raise RuntimeError(error_msg)
1105
+ # check skip
1106
+ if skip_code > 0:
1107
+ for skip in TEST_SKIPS.values():
1108
+ if skip_code == skip.exit_code:
1109
+ if IS_SANDCASTLE:
1110
+ # "pass" the test with an appropriate message.
1111
+ logger.info(
1112
+ "Skipping %s on sandcastle for the following reason: %s", fn, skip.message
1113
+ )
1114
+ return
1115
+ else:
1116
+ raise unittest.SkipTest(skip.message)
1117
+
1118
+ @property
1119
+ def world_size(self) -> int:
1120
+ return DEFAULT_WORLD_SIZE
1121
+
1122
+ @property
1123
+ def _current_test_name(self) -> str:
1124
+ # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
1125
+ return self.id().split(".")[-1]
1126
+
1127
+ def assertEqualOnRank(self, x, y, msg=None, *, rank=0):
1128
+ """
1129
+ The reason we have this util function instead of
1130
+ self.assertEqual is that all threads share one CPU RNG,
1131
+ so the assertion result is only reliable on rank 0.
1132
+ """
1133
+ if self.rank == rank:
1134
+ self.assertEqual(x, y, msg)
1135
+
1136
+ def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0):
1137
+ if self.rank == rank:
1138
+ self.assertNotEqual(x, y)
1139
+
1140
+
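A minimal, hypothetical sketch of the threaded runner described in MultiThreadedTestCase above (the world size of 2 and the all_reduce body are illustrative assumptions):

    import torch
    import torch.distributed as dist
    from torch.testing._internal.common_distributed import MultiThreadedTestCase
    from torch.testing._internal.common_utils import run_tests

    class MyThreadedTest(MultiThreadedTestCase):
        @property
        def world_size(self) -> int:
            return 2

        def setUp(self):
            super().setUp()
            self._spawn_threads()            # threads, not processes

        def test_all_reduce(self):
            # The framework initializes the "threaded" process group per rank.
            t = torch.ones(1)
            dist.all_reduce(t)
            self.assertEqualOnRank(t.item(), float(self.world_size))

    if __name__ == "__main__":
        run_tests()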
1141
+ class SaveForwardInputsModule(nn.Module):
1142
+ def __init__(
1143
+ self,
1144
+ forward_inputs: Dict[nn.Module, torch.Tensor],
1145
+ cast_forward_inputs: bool,
1146
+ ) -> None:
1147
+ super().__init__()
1148
+ self.l = nn.Linear(100, 100)
1149
+ self.forward_inputs = forward_inputs
1150
+ self.cast_forward_inputs = cast_forward_inputs
1151
+
1152
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1153
+ self.forward_inputs[self] = x
1154
+ return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x)
1155
+
1156
+
1157
+ class SaveForwardInputsModel(nn.Module):
1158
+ def __init__(
1159
+ self,
1160
+ forward_inputs: Dict[nn.Module, torch.Tensor],
1161
+ cast_forward_inputs: bool,
1162
+ ) -> None:
1163
+ super().__init__()
1164
+ self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
1165
+ self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
1166
+ self.forward_inputs = forward_inputs
1167
+
1168
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1169
+ self.forward_inputs[self] = x
1170
+ return self.c2(self.c1(x))
1171
+
1172
+ @contextmanager
1173
+ def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True):
1174
+ # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase,
1175
+ # Just manually implement the most important part of the dynamo behavior to reset/clear.
1176
+ torch.cuda.set_device(rank)
1177
+ os.environ['MASTER_ADDR'] = 'localhost'
1178
+ os.environ['MASTER_PORT'] = '6789'
1179
+ if init_pg:
1180
+ c10d.init_process_group("nccl", rank=rank, world_size=world_size)
1181
+ torch._dynamo.reset()
1182
+ torch._dynamo.utils.counters.clear()
1183
+ try:
1184
+ yield
1185
+ finally:
1186
+ torch._dynamo.reset()
1187
+ torch._dynamo.utils.counters.clear()
1188
+ if init_pg:
1189
+ c10d.destroy_process_group()
1190
+
1191
+
1192
+ class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase):
1193
+ """
1194
+ Test harness for single-process dynamo distributed tests,
1195
+ initializes dist process group.
1196
+
1197
+ Prefer this for simple tests, as it's easier to debug.
1198
+ """
1199
+
1200
+ @classmethod
1201
+ def setUpClass(cls):
1202
+ super().setUpClass()
1203
+ # _exit_stack is set up in TestCase
1204
+ cls._exit_stack.enter_context(
1205
+ patch.dict(
1206
+ os.environ,
1207
+ {
1208
+ "MASTER_ADDR": "localhost",
1209
+ "MASTER_PORT": "12355",
1210
+ },
1211
+ )
1212
+ )
1213
+ cls.rank = 0
1214
+ cls.device = f"cuda:{cls.rank}"
1215
+ cls.device_ids = None if "cuda" in cls.device else [cls.rank]
1216
+ c10d.init_process_group("nccl", rank=cls.rank, world_size=1)
1217
+
1218
+ @classmethod
1219
+ def tearDownClass(cls):
1220
+ c10d.destroy_process_group()
1221
+ super().tearDownClass()
1222
+
1223
+
1224
+ class DynamoDistributedMultiProcTestCase(MultiProcessTestCase):
1225
+ """
1226
+ Use this for tests that actually run on multiple GPUs.
1227
+
1228
+ Decorate tests with @skip_if_lt_x_gpu(ngpu)
1229
+
1230
+ Note: MultiProcTestCase spawns processes per test and is slow.
1231
+ Prefer MultiThreadedTestCase for most tests; use this one
1232
+ sparingly, e.g. for integration tests.
1233
+ """
1234
+ def setUp(self):
1235
+ super().setUp()
1236
+ self._spawn_processes()
1237
+
1238
+ def tearDown(self):
1239
+ super().tearDown()
1240
+ try:
1241
+ os.remove(self.file_name)
1242
+ except OSError:
1243
+ pass
1244
+
1245
+ @property
1246
+ def world_size(self) -> int:
1247
+ return torch.cuda.device_count()
1248
+
1249
+ @classmethod
1250
+ def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None:
1251
+ # The rest is copypasta from MultiProcessTestCase._run
1252
+ self = cls(test_name)
1253
+ self.rank = rank
1254
+ self.file_name = file_name
1255
+ self.run_test(test_name, parent_pipe)
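For orientation, a minimal sketch of how a test might combine this harness with _dynamo_dist_per_rank_init above. The test body, tensor shapes, and decorator usage are illustrative assumptions, not part of the file shown in this diff:

    import torch
    import torch.distributed as dist
    from torch.testing._internal.common_distributed import skip_if_lt_x_gpu

    class MyCompiledDDPTest(DynamoDistributedMultiProcTestCase):
        @skip_if_lt_x_gpu(2)
        def test_compiled_matmul_then_allreduce(self):
            # Each spawned process enters here with its own self.rank.
            with _dynamo_dist_per_rank_init(self.rank, self.world_size):
                @torch.compile
                def fn(x):
                    return (x @ x.t()).relu()

                t = torch.ones(4, 4, device=f"cuda:{self.rank}")
                out = fn(t)
                dist.all_reduce(out)
                # ones @ ones.t() gives 4.0 everywhere; all-reduce sums it over ranks.
                self.assertEqual(out, torch.full_like(out, 4.0 * self.world_size))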
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py ADDED
@@ -0,0 +1,131 @@
1
+ from typing import List
2
+
3
+ import torch
4
+
5
+
6
+ # Functions and classes for describing the dtypes a function supports
7
+ # NOTE: these helpers should correspond to PyTorch's C++ dispatch macros
8
+
9
+ # Verifies each given dtype is a torch.dtype
10
+ def _validate_dtypes(*dtypes):
11
+ for dtype in dtypes:
12
+ assert isinstance(dtype, torch.dtype)
13
+ return dtypes
14
+
15
+ # class for tuples corresponding to a PyTorch dispatch macro
16
+ class _dispatch_dtypes(tuple):
17
+ def __add__(self, other):
18
+ assert isinstance(other, tuple)
19
+ return _dispatch_dtypes(tuple.__add__(self, other))
20
+
21
+ _empty_types = _dispatch_dtypes(())
22
+ def empty_types():
23
+ return _empty_types
24
+
25
+ _floating_types = _dispatch_dtypes((torch.float32, torch.float64))
26
+ def floating_types():
27
+ return _floating_types
28
+
29
+ _floating_types_and_half = _floating_types + (torch.half,)
30
+ def floating_types_and_half():
31
+ return _floating_types_and_half
32
+
33
+ def floating_types_and(*dtypes):
34
+ return _floating_types + _validate_dtypes(*dtypes)
35
+
36
+ _floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble)
37
+ def floating_and_complex_types():
38
+ return _floating_and_complex_types
39
+
40
+ def floating_and_complex_types_and(*dtypes):
41
+ return _floating_and_complex_types + _validate_dtypes(*dtypes)
42
+
43
+ _double_types = _dispatch_dtypes((torch.float64, torch.complex128))
44
+ def double_types():
45
+ return _double_types
46
+
47
+ _integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64))
48
+ def integral_types():
49
+ return _integral_types
50
+
51
+ def integral_types_and(*dtypes):
52
+ return _integral_types + _validate_dtypes(*dtypes)
53
+
54
+ _all_types = _floating_types + _integral_types
55
+ def all_types():
56
+ return _all_types
57
+
58
+ def all_types_and(*dtypes):
59
+ return _all_types + _validate_dtypes(*dtypes)
60
+
61
+ _complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble))
62
+ def complex_types():
63
+ return _complex_types
64
+
65
+ def complex_types_and(*dtypes):
66
+ return _complex_types + _validate_dtypes(*dtypes)
67
+
68
+ _all_types_and_complex = _all_types + _complex_types
69
+ def all_types_and_complex():
70
+ return _all_types_and_complex
71
+
72
+ def all_types_and_complex_and(*dtypes):
73
+ return _all_types_and_complex + _validate_dtypes(*dtypes)
74
+
75
+ _all_types_and_half = _all_types + (torch.half,)
76
+ def all_types_and_half():
77
+ return _all_types_and_half
78
+
79
+ def custom_types(*dtypes):
80
+ """Create a list of arbitrary dtypes"""
81
+ return _empty_types + _validate_dtypes(*dtypes)
82
+
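As a quick illustration of how these dispatch-style helpers compose (a sketch; it only assumes the helpers above are importable from this module):

    import torch
    from torch.testing._internal.common_dtype import (
        floating_types_and,
        all_types_and_complex_and,
    )

    # Each helper returns a _dispatch_dtypes tuple, so extra dtypes simply extend it.
    print(floating_types_and(torch.half, torch.bfloat16))
    # -> (torch.float32, torch.float64, torch.float16, torch.bfloat16)
    print(torch.bool in all_types_and_complex_and(torch.bool, torch.half))
    # -> True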
83
+ # The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro
84
+
85
+ # See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
86
+ def get_all_dtypes(include_half=True,
87
+ include_bfloat16=True,
88
+ include_bool=True,
89
+ include_complex=True,
90
+ include_complex32=False,
91
+ include_qint=False,
92
+ ) -> List[torch.dtype]:
93
+ dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16)
94
+ if include_bool:
95
+ dtypes.append(torch.bool)
96
+ if include_complex:
97
+ dtypes += get_all_complex_dtypes(include_complex32)
98
+ if include_qint:
99
+ dtypes += get_all_qint_dtypes()
100
+ return dtypes
101
+
102
+ def get_all_math_dtypes(device) -> List[torch.dtype]:
103
+ return get_all_int_dtypes() + get_all_fp_dtypes(include_half=device.startswith('cuda'),
104
+ include_bfloat16=False) + get_all_complex_dtypes()
105
+
106
+ def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]:
107
+ return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128]
108
+
109
+
110
+ def get_all_int_dtypes() -> List[torch.dtype]:
111
+ return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
112
+
113
+
114
+ def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
115
+ dtypes = [torch.float32, torch.float64]
116
+ if include_half:
117
+ dtypes.append(torch.float16)
118
+ if include_bfloat16:
119
+ dtypes.append(torch.bfloat16)
120
+ return dtypes
121
+
122
+
123
+ def get_all_qint_dtypes() -> List[torch.dtype]:
124
+ return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]
125
+
126
+
127
+ float_to_corresponding_complex_type_map = {
128
+ torch.float16: torch.complex32,
129
+ torch.float32: torch.complex64,
130
+ torch.float64: torch.complex128,
131
+ }
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py ADDED
@@ -0,0 +1,1219 @@
1
+ # Owner(s): ["oncall: distributed"]
2
+
3
+ import itertools
4
+ import os
5
+ import re
6
+ import sys
7
+ from abc import ABC, abstractmethod
8
+ from contextlib import nullcontext
9
+ from copy import deepcopy
10
+ from enum import auto, Enum
11
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
12
+ from unittest import mock
13
+
14
+ import torch
15
+ import torch.distributed as dist
16
+ import torch.nn as nn
17
+ from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP
18
+ from torch.distributed.fsdp._common_utils import TrainingState
19
+ from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES
20
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
21
+ BackwardPrefetch,
22
+ MixedPrecision,
23
+ ShardingStrategy,
24
+ )
25
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
26
+ from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap
27
+ from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
28
+ from torch.nn.parallel.distributed import DistributedDataParallel as DDP
29
+ from torch.testing._internal.common_distributed import (
30
+ MultiProcessTestCase,
31
+ MultiThreadedTestCase,
32
+ TEST_SKIPS,
33
+ )
34
+ from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms
35
+
36
+
37
+ class FSDPInitMode(Enum):
38
+ # No FSDP wrapping
39
+ NO_FSDP = auto()
40
+ # FSDP recursive wrapping
41
+ RECURSIVE = auto()
42
+ # TODO: FSDP non-recursive wrapping
43
+ # NONRECURSIVE = auto()
44
+
45
+
46
+ class CUDAInitMode(Enum):
47
+ # Move model to CUDA before passing to the FSDP constructor
48
+ CUDA_BEFORE = auto()
49
+ # Move model to CUDA after passing to the FSDP constructor
50
+ CUDA_AFTER = auto()
51
+ # Keep on CPU
52
+ CUDA_NEVER = auto()
53
+
54
+
55
+ class FSDPTestModel(nn.Module, ABC):
56
+ """This defines the interface expected from all models used commonly for
57
+ FSDP unit tests."""
58
+
59
+ @abstractmethod
60
+ def get_input(self, device) -> Tuple[torch.Tensor, ...]:
61
+ """Returns an input for the model as as tuple."""
62
+ ...
63
+
64
+ @abstractmethod
65
+ def get_loss(self, input, output) -> torch.Tensor:
66
+ """Returns the loss given the input and output."""
67
+ ...
68
+
69
+ @abstractmethod
70
+ def run_backward(self, loss) -> None:
71
+ """Runs the backward pass (e.g. including ``loss.backward()``)."""
72
+ ...
73
+
74
+ @staticmethod
75
+ @abstractmethod
76
+ def init(
77
+ group: dist.ProcessGroup,
78
+ fsdp_init_mode: FSDPInitMode,
79
+ *init_args: Any,
80
+ cuda_init_mode: CUDAInitMode,
81
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
82
+ deterministic: bool = False,
83
+ **init_kwargs: Any,
84
+ ) -> nn.Module:
85
+ """Initializes an instance of this model."""
86
+ ...
87
+
88
+
89
+ def _assert_module_states(
90
+ model: nn.Module,
91
+ process_group: dist.ProcessGroup,
92
+ assert_fn: Callable,
93
+ ):
94
+ """
95
+ All-gathers module states across ranks and calls ``assert_fn`` on each pair
96
+ of corresponding states from rank 0 and a nonzero rank. For example, if
97
+ ``assert_fn`` is ``self.assertEqual()``, then this checks that all module
98
+ states are equal across ranks.
99
+ """
100
+ # Include names for debugging convenience
101
+ named_module_states = [
102
+ (param_name, param.detach().cpu())
103
+ for param_name, param in model.named_parameters()
104
+ ]
105
+ named_module_states += [
106
+ (buffer_name, buffer.detach().cpu())
107
+ for buffer_name, buffer in model.named_buffers()
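A small sketch of how these capture modules might be exercised directly; the input shape is an arbitrary choice for illustration, and the snippet relies only on the imports already present in this file:

    # Hypothetical check: run a forward pass and inspect what each module recorded.
    forward_inputs: Dict[nn.Module, torch.Tensor] = {}
    model = SaveForwardInputsModel(forward_inputs, cast_forward_inputs=False)
    x = torch.zeros(2, 100)
    model(x).sum().backward()
    # The root model and both submodules saved the tensors they received.
    assert set(forward_inputs.keys()) == {model, model.c1, model.c2}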
108
+ ]
109
+ world_size = dist.get_world_size(process_group)
110
+ olist = [None for _ in range(world_size)]
111
+ dist.all_gather_object(olist, named_module_states, group=process_group)
112
+ rank0_states = olist[0]
113
+ for state in olist[1:]:
114
+ for (_, p1), (_, p2) in zip(rank0_states, state):
115
+ assert_fn(p1, p2)
116
+
117
+
118
+ def _zero_model(
119
+ model: nn.Module,
120
+ zero_buffers: bool = False,
121
+ summon_full=True,
122
+ ):
123
+ """Zeros the parameters and optionally buffers of ``model`` in place."""
124
+ ctx = FSDP.summon_full_params(model) if summon_full else nullcontext()
125
+ with ctx:
126
+ for param in model.parameters():
127
+ with torch.no_grad():
128
+ param.zero_()
129
+ if zero_buffers:
130
+ for buffer in model.buffers():
131
+ with torch.no_grad():
132
+ buffer.zero_()
133
+
134
+
135
+ def _get_state_dict(model, cpu_offload=False, half=False):
136
+ if not cpu_offload:
137
+ model = model.cuda()
138
+ if half:
139
+ model.half()
140
+
141
+ return model.state_dict()
142
+
143
+
144
+ def subtest_name(test_name_mapping, *args):
145
+ return "_".join(
146
+ [test_name_mapping[str(s)] if s is not None else "none" for s in args]
147
+ )
148
+
149
+
150
+ def _broadcast_state_dict(rank, state_dict):
151
+ # For non-FSDP roots, some parts of the model state on rank 0 may
152
+ # not be on CPU, so we move everything to CPU to avoid issues like:
153
+ # https://github.com/pytorch/pytorch/issues/77113.
154
+ for param_name, param in state_dict.items():
155
+ if param.device != torch.device("cpu"):
156
+ state_dict[param_name] = param.cpu()
157
+
158
+ olist = [state_dict if rank == 0 else None]
159
+ dist.broadcast_object_list(olist)
160
+ state_dict = olist[0]
161
+ # Ensure that the state is on CUDA
162
+ for param_name in state_dict.keys():
163
+ state_dict[param_name] = state_dict[param_name].cuda()
164
+ return state_dict
165
+
166
+
167
+ def get_full_params(model: nn.Module, recurse: bool = True):
168
+ """
169
+ Returns the full unsharded parameters of ``model``. Any FSDP-managed
170
+ parameters offloaded to CPU are moved to GPU in the returned list.
171
+
172
+ Args:
173
+ recurse (bool): If ``False``, only unshards the parameters directly owned by
174
+ ``model``; if ``True``, recurses through the module hierarchy
175
+ rooted at ``model``.
176
+ """
177
+ with FSDP.summon_full_params(model, recurse=recurse):
178
+ return deepcopy(list(model.parameters()))
179
+
180
+
181
+ def _maybe_cuda(model: nn.Module, move_to_cuda: bool):
182
+ return model.cuda() if move_to_cuda else model
183
+
184
+
185
+ def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
186
+ return model if not wrap_fsdp else FSDP(model, *args, **kwargs)
187
+
188
+
189
+ class DummyProcessGroup:
190
+ def __init__(self, rank: int, size: int):
191
+ self._rank = rank
192
+ self._size = size
193
+
194
+ def rank(self) -> int:
195
+ return self._rank
196
+
197
+ def size(self) -> int:
198
+ return self._size
199
+
200
+ def allreduce(self, *args, **kwargs):
201
+ dist_wait = mock.Mock()
202
+
203
+ def get_future():
204
+ future = torch.futures.Future()
205
+ future.set_result(1)
206
+ return future
207
+
208
+ dist_wait.get_future = get_future
209
+ return dist_wait
210
+
211
+
212
+ class TransformerWithSharedParams(FSDPTestModel):
213
+ def __init__(
214
+ self,
215
+ group: dist.ProcessGroup,
216
+ cuda_init_mode: CUDAInitMode,
217
+ add_bn: bool,
218
+ deterministic: bool,
219
+ ):
220
+ super().__init__()
221
+ self.rank = group.rank()
222
+ self.world_size = group.size()
223
+ if deterministic:
224
+ torch.manual_seed(0)
225
+ d_vocab = 23
226
+ d_model = 16
227
+
228
+ self.embed_tokens = nn.Embedding(d_vocab, d_model)
229
+ self.transformer = nn.Transformer(
230
+ d_model=d_model,
231
+ num_encoder_layers=2,
232
+ num_decoder_layers=2,
233
+ dim_feedforward=8,
234
+ dropout=0.1,
235
+ )
236
+ self.output_proj = nn.Linear(d_model, d_vocab)
237
+
238
+ # share the embedding and output projection weights
239
+ self.output_proj.weight = self.embed_tokens.weight
240
+ self.register_buffer(
241
+ "vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
242
+ )
243
+ self.register_buffer(
244
+ "long_buffer",
245
+ torch.zeros_like(self.vocab_bias, dtype=torch.long),
246
+ ) # type: ignore[arg-type]
247
+
248
+ self.bs = 2
249
+ self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
250
+ if cuda_init_mode == CUDAInitMode.CUDA_BEFORE:
251
+ self = self.cuda()
252
+ if deterministic:
253
+ self.eval()
254
+
255
+ def get_input(self, device):
256
+ torch.manual_seed(1 + self.rank) # keep everything deterministic
257
+ src = torch.arange(12, device=device).view(6, self.bs) # T x B
258
+ tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B
259
+ return (src, tgt)
260
+
261
+ def forward(self, src_ids, tgt_ids):
262
+ src = self.embed_tokens(src_ids)
263
+ src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator]
264
+ tgt = self.embed_tokens(tgt_ids)
265
+ tgt = self.bn(tgt)
266
+ x = self.transformer(src, tgt)
267
+ return self.output_proj(x)
268
+
269
+ def get_loss(self, input, output):
270
+ _, tgt = input
271
+ return nn.functional.cross_entropy(
272
+ output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
273
+ )
274
+
275
+ def run_backward(self, loss):
276
+ loss.backward()
277
+
278
+ @staticmethod
279
+ def init(
280
+ group: dist.ProcessGroup,
281
+ fsdp_init_mode: FSDPInitMode,
282
+ cuda_init_mode: CUDAInitMode,
283
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
284
+ deterministic: bool = False,
285
+ add_bn: bool = True,
286
+ ) -> Union[nn.Module, FSDP]:
287
+ """
288
+ Initializes a :class:`TransformerWithSharedParams` instance.
289
+
290
+ Args:
291
+ fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
292
+ any modules with FSDP. If ``RECURSIVE``, then wraps with
293
+ top-level FSDP. By default, the top-level FSDP uses the
294
+ ``ModuleWrapPolicy`` for encoder and decoder layers, but a
295
+ different auto wrap policy may be specified via
296
+ ``fsdp_kwargs``.
297
+ cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
298
+ fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
299
+ forwarded to the FSDP constructor.
300
+ deterministic (bool): Whether to make the model deterministic
301
+ across constructions.
302
+ add_bn (bool): Whether to include batch norm in the model.
303
+ """
304
+
305
+ if fsdp_kwargs is None:
306
+ fsdp_kwargs = {}
307
+ if fsdp_init_mode == FSDPInitMode.NO_FSDP:
308
+ if isinstance(group, tuple):
309
+ pg = group[0]
310
+ else:
311
+ pg = group
312
+ return TransformerWithSharedParams(
313
+ pg, cuda_init_mode, add_bn, deterministic
314
+ )
315
+ elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
316
+ # Default to the `ModuleWrapPolicy`
317
+ if "auto_wrap_policy" not in fsdp_kwargs:
318
+ auto_wrap_policy = ModuleWrapPolicy(
319
+ {
320
+ TransformerEncoderLayer,
321
+ TransformerDecoderLayer,
322
+ }
323
+ )
324
+ else:
325
+ auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy")
326
+
327
+ if (
328
+ "sharding_strategy" in fsdp_kwargs
329
+ and fsdp_kwargs["sharding_strategy"]
330
+ in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2}
331
+ and not isinstance(group, tuple)
332
+ ):
333
+ fsdp_pg = None
334
+ else:
335
+ fsdp_pg = group
336
+
337
+ if isinstance(group, tuple):
338
+ tformer_pg = group[0]
339
+ else:
340
+ tformer_pg = group
341
+
342
+ m = TransformerWithSharedParams(
343
+ tformer_pg, cuda_init_mode, add_bn, deterministic
344
+ )
345
+ fsdp_model = FSDP(
346
+ m,
347
+ fsdp_pg,
348
+ auto_wrap_policy=auto_wrap_policy,
349
+ **fsdp_kwargs,
350
+ )
351
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
352
+ fsdp_model = fsdp_model.cuda()
353
+ return fsdp_model
354
+ raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
355
+
356
+ def get_ignored_modules(self):
357
+ return [self.transformer]
358
+
359
+
360
+ class NestedWrappedModule(FSDPTestModel):
361
+ def __init__(
362
+ self,
363
+ group: dist.ProcessGroup,
364
+ wrap_fsdp: bool,
365
+ cuda_init_mode: CUDAInitMode,
366
+ deterministic: bool,
367
+ **fsdp_kwargs,
368
+ ):
369
+ super().__init__()
370
+ self.rank = group.rank()
371
+ self.world_size = group.size()
372
+ move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
373
+
374
+ def _maybe_wrap(layer):
375
+ if wrap_fsdp:
376
+ return FSDP(layer, group, **fsdp_kwargs)
377
+ return layer
378
+
379
+ if deterministic:
380
+ torch.manual_seed(0)
381
+ self.module = nn.Sequential(
382
+ _maybe_cuda(nn.Linear(8, 4), move_to_cuda),
383
+ _maybe_wrap(
384
+ nn.Sequential(
385
+ _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
386
+ _maybe_cuda(nn.Linear(16, 16), move_to_cuda),
387
+ ),
388
+ ),
389
+ _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)),
390
+ _maybe_cuda(nn.Linear(4, 8), move_to_cuda),
391
+ )
392
+
393
+ def get_input(self, device):
394
+ torch.manual_seed(1 + self.rank) # keep everything deterministic
395
+ return (torch.rand(4, 8, device=device),)
396
+
397
+ def forward(self, x):
398
+ return self.module(x)
399
+
400
+ def get_loss(self, input, output):
401
+ loss = output.sum()
402
+ return loss
403
+
404
+ def run_backward(self, loss):
405
+ loss.backward()
406
+
407
+ @staticmethod
408
+ def init(
409
+ group: dist.ProcessGroup,
410
+ fsdp_init_mode: FSDPInitMode,
411
+ cuda_init_mode: CUDAInitMode,
412
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
413
+ deterministic: bool = False,
414
+ ) -> nn.Module:
415
+ """
416
+ Initializes a :class:`NestedWrappedModule` instance.
417
+
418
+ Args:
419
+ fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
420
+ any modules with FSDP. If ``RECURSIVE``, then wraps some nested
421
+ modules with FSDP but not the top-level module. The model may
422
+ later be wrapped with a top-level FSDP external to this method
423
+ if desired.
424
+ cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
425
+ fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
426
+ forwarded to the FSDP constructor.
427
+ deterministic (bool): Whether to make the model deterministic
428
+ across constructions.
429
+ """
430
+ if fsdp_kwargs is None:
431
+ fsdp_kwargs = {}
432
+ if fsdp_init_mode == FSDPInitMode.NO_FSDP:
433
+ return NestedWrappedModule(
434
+ group,
435
+ wrap_fsdp=False,
436
+ cuda_init_mode=cuda_init_mode,
437
+ deterministic=deterministic,
438
+ )
439
+ elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
440
+ # Does not wrap with top-level FSDP
441
+ fsdp_model = NestedWrappedModule(
442
+ group,
443
+ wrap_fsdp=True,
444
+ cuda_init_mode=cuda_init_mode,
445
+ deterministic=deterministic,
446
+ **fsdp_kwargs,
447
+ )
448
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
449
+ fsdp_model = fsdp_model.cuda()
450
+ return fsdp_model
451
+ raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
452
+
453
+
454
+ class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
455
+ @staticmethod
456
+ def init(
457
+ group: dist.ProcessGroup,
458
+ fsdp_init_mode: FSDPInitMode,
459
+ cuda_init_mode: CUDAInitMode,
460
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
461
+ deterministic: bool = False,
462
+ ):
463
+ """
464
+ Initializes a :class:`NestedWrappedModule` instance, but unlike
465
+ :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
466
+ wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
467
+ policy.
468
+ """
469
+ super_ = super(AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule)
470
+ model = super_.init(
471
+ group=group,
472
+ fsdp_init_mode=FSDPInitMode.NO_FSDP,
473
+ cuda_init_mode=cuda_init_mode,
474
+ fsdp_kwargs=fsdp_kwargs,
475
+ deterministic=deterministic,
476
+ )
477
+ if fsdp_init_mode == FSDPInitMode.NO_FSDP:
478
+ return model
479
+ elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
480
+ fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
481
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
482
+ fsdp_model = fsdp_model.cuda()
483
+ return fsdp_model
484
+
485
+
486
+ class NonUniformReqGradNWM(NestedWrappedModule):
487
+ def __init__(
488
+ self,
489
+ group: dist.ProcessGroup,
490
+ wrap_fsdp: bool,
491
+ cuda_init_mode: CUDAInitMode,
492
+ deterministic: bool,
493
+ **fsdp_kwargs,
494
+ ):
495
+ super(NestedWrappedModule, self).__init__()
496
+ # This `__init__` only differs from `NestedWrappedModule.__init__` in that
497
+ # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential`
498
+ # container. This arrangement results in all elements of the last two parameters
499
+ # residing on a single rank. Freezing all parameters except those two allows us
500
+ # to verify that `ShardedGradScaler` accommodates situations where some ranks
501
+ # have no (non-zero sized) parameter shards.
502
+ self.rank = group.rank()
503
+ self.world_size = group.size()
504
+ move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
505
+
506
+ def _maybe_wrap(layer):
507
+ if wrap_fsdp:
508
+ return FSDP(layer, group, **fsdp_kwargs)
509
+ return layer
510
+
511
+ if deterministic:
512
+ torch.manual_seed(0)
513
+ self.module = nn.Sequential(
514
+ _maybe_cuda(nn.Linear(8, 4), move_to_cuda),
515
+ _maybe_wrap(
516
+ nn.Sequential(
517
+ _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
518
+ _maybe_cuda(nn.Linear(16, 16), move_to_cuda),
519
+ ),
520
+ ),
521
+ _maybe_wrap(
522
+ nn.Sequential(
523
+ _maybe_cuda(nn.Linear(16, 4), move_to_cuda),
524
+ _maybe_cuda(nn.Linear(4, 8), move_to_cuda),
525
+ ),
526
+ ),
527
+ )
528
+
529
+ @staticmethod
530
+ def _set_nonuniform_req_grad(model, req_grad_mask) -> None:
531
+ for n, p in model.named_parameters():
532
+ if not re.match(req_grad_mask, n):
533
+ p.requires_grad_(False)
534
+
535
+ @staticmethod
536
+ def init(
537
+ group: dist.ProcessGroup,
538
+ fsdp_init_mode: FSDPInitMode,
539
+ cuda_init_mode: CUDAInitMode,
540
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
541
+ deterministic: bool = False,
542
+ ):
543
+ """
544
+ Initializes a :class:`NestedWrappedModule` instance, but unlike
545
+ :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential`
546
+ container to enable the desired non-uniform ``requires_grad``
547
+ ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP``
548
+ init modes, freezes all parameters except the last two to validate
549
+ ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in
550
+ FSDP ``use_orig_params=True`` mode.
551
+ """
552
+ # The parameters that should remain unfrozen are in `module.2.1`. The regex
553
+ # pattern below matches the relevant parameter names both with and without
554
+ # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present.
555
+ req_grad_pattern = re.compile(r"module\.2.*\.1.*")
556
+ if fsdp_init_mode == FSDPInitMode.NO_FSDP:
557
+ ddp_model = NonUniformReqGradNWM(
558
+ group,
559
+ wrap_fsdp=False,
560
+ cuda_init_mode=cuda_init_mode,
561
+ deterministic=deterministic,
562
+ )
563
+ NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern)
564
+ return ddp_model
565
+ elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
566
+ if fsdp_kwargs is None:
567
+ fsdp_kwargs = {}
568
+ fsdp_model = NonUniformReqGradNWM(
569
+ group,
570
+ wrap_fsdp=True,
571
+ cuda_init_mode=cuda_init_mode,
572
+ deterministic=deterministic,
573
+ **fsdp_kwargs,
574
+ )
575
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
576
+ fsdp_model = fsdp_model.cuda()
577
+ NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern)
578
+ return fsdp_model
579
+ raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
580
+
581
+
582
+ class ModuleWithDelay(FSDPTestModel):
583
+ """This class wraps a :class:`FSDPTestModel` to optionally add a delay
584
+ after computing the loss and/or before the gradient reduction."""
585
+
586
+ def __init__(
587
+ self,
588
+ module: nn.Module,
589
+ delay_after_loss_ms: int,
590
+ delay_before_reduction_ms: int,
591
+ ):
592
+ super().__init__()
593
+ self.delay_after_loss_ms = delay_after_loss_ms
594
+ self.delay_before_reduction_ms = delay_before_reduction_ms
595
+ self.module = module
596
+
597
+ def get_input(self, device):
598
+ return self.module.get_input(device)
599
+
600
+ def forward(self, x):
601
+ return self.module(x)
602
+
603
+ def get_loss(self, input, output):
604
+ loss = self.module.get_loss(input, output)
605
+ if self.delay_after_loss_ms > 0:
606
+ torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
607
+ return loss
608
+
609
+ def run_backward(self, loss):
610
+ orig_reduce_scatter = torch.distributed.reduce_scatter_tensor
611
+
612
+ def _delayed_reduce_scatter(*args, **kwargs):
613
+ if self.delay_before_reduction_ms > 0:
614
+ torch.cuda._sleep(
615
+ int(self.delay_before_reduction_ms * get_cycles_per_ms())
616
+ )
617
+ return orig_reduce_scatter(*args, **kwargs)
618
+
619
+ with mock.patch(
620
+ "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter
621
+ ):
622
+ self.module.run_backward(loss)
623
+
624
+ @staticmethod
625
+ def init(
626
+ module_class: Type[FSDPTestModel],
627
+ *model_args: Any,
628
+ delay_after_loss_ms: int,
629
+ delay_before_reduction_ms: int,
630
+ **model_kwargs: Any,
631
+ ):
632
+ """
633
+ Args:
634
+ module_class (Type[FSDPTestModel]): Wrapped module class to which
635
+ to add delays.
636
+ model_args: Positional arguments forwarded to the ``module_class``
637
+ ``init()``.
638
+ delay_after_loss_ms (int): Delay after computing the loss/before
639
+ the optimizer step (in ms).
640
+ delay_before_reduction_ms (int): Delay before reduce-scattering
641
+ gradients (in ms).
642
+ model_kwargs: Keyword arguments forwarded to the ``module_class``
643
+ ``init()``.
644
+ """
645
+ return ModuleWithDelay(
646
+ module_class.init(*model_args, **model_kwargs),
647
+ delay_after_loss_ms,
648
+ delay_before_reduction_ms,
649
+ )
650
+
651
+
652
+ class NestedWrappedModuleWithDelay(ModuleWithDelay):
653
+ @staticmethod
654
+ def init(
655
+ group: dist.ProcessGroup,
656
+ fsdp_init_mode: FSDPInitMode,
657
+ cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER,
658
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
659
+ deterministic: bool = False,
660
+ delay_after_loss_ms: int = 0,
661
+ delay_before_reduction_ms: int = 0,
662
+ ):
663
+ return super(NestedWrappedModuleWithDelay, NestedWrappedModuleWithDelay).init(
664
+ NestedWrappedModule,
665
+ group=group,
666
+ fsdp_init_mode=fsdp_init_mode,
667
+ cuda_init_mode=cuda_init_mode,
668
+ fsdp_kwargs=fsdp_kwargs,
669
+ deterministic=deterministic,
670
+ delay_after_loss_ms=delay_after_loss_ms,
671
+ delay_before_reduction_ms=delay_before_reduction_ms,
672
+ )
673
+
674
+
675
+ class DummyDDP(nn.Module):
676
+ def __init__(self, module):
677
+ super().__init__()
678
+ self.module = module
679
+
680
+ def forward(self, *args, **kwargs):
681
+ return self.module(*args, **kwargs)
682
+
683
+
684
+ class MixtureOfExperts(NestedWrappedModule):
685
+ def __init__(
686
+ self,
687
+ group: dist.ProcessGroup,
688
+ wrap_fsdp: bool,
689
+ cuda_init_mode: CUDAInitMode,
690
+ delay_before_free_ms: int,
691
+ deterministic: bool,
692
+ **fsdp_kwargs,
693
+ ):
694
+ super().__init__(
695
+ group=group,
696
+ wrap_fsdp=wrap_fsdp,
697
+ cuda_init_mode=cuda_init_mode,
698
+ deterministic=deterministic,
699
+ )
700
+ self.group = group
701
+ self.delay_before_free_ms = delay_before_free_ms
702
+ self.wrap_fsdp = wrap_fsdp
703
+ self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
704
+ if deterministic:
705
+ # Give each rank different expert parameters
706
+ torch.manual_seed(42 + self.rank)
707
+ d_expert = 23
708
+ d_shared = 12
709
+ d_input = 8
710
+ expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)
711
+
712
+ self.num_expert_params = sum([p.numel() for p in expert.parameters()])
713
+ for p in expert.parameters():
714
+ p.expert = True # type: ignore[attr-defined]
715
+
716
+ if deterministic:
717
+ # Keep all other parameters the same across ranks
718
+ torch.manual_seed(0)
719
+
720
+ shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda)
721
+
722
+ if wrap_fsdp:
723
+ # we create a process group of size 1 for the expert params
724
+ expert_group = torch.distributed.new_group(
725
+ [group.rank()]
726
+ ) # world size 1 means no shard
727
+ expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment]
728
+ shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment]
729
+
730
+ self.module = nn.Sequential(
731
+ _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda),
732
+ shared,
733
+ expert,
734
+ _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda),
735
+ )
736
+
737
+ def forward(self, x):
738
+ if self.delay_before_free_ms > 0:
739
+ expert = self.module[2]
740
+ if isinstance(expert, FSDP):
741
+ orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
742
+
743
+ def _delayed_reshard(*args, **kwargs):
744
+ torch.cuda._sleep(
745
+ int(self.delay_before_free_ms * get_cycles_per_ms())
746
+ )
747
+ return orig_reshard(*args, **kwargs)
748
+
749
+ # This patch covers any `import torch..._reshard` uses.
750
+ with mock.patch(
751
+ "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
752
+ ):
753
+ return self.module(x)
754
+
755
+ return self.module(x)
756
+
757
+ def run_backward(self, loss):
758
+ loss.backward()
759
+ # Manually reduce gradients if not wrapped in FullyShardedDataParallel
760
+ if not self.wrap_fsdp:
761
+ with torch.no_grad():
762
+ for p in self.parameters():
763
+ if hasattr(p, "expert"):
764
+ continue # these params don't need grad reduction
765
+ p.grad.div_(self.world_size)
766
+ torch.distributed.all_reduce(p.grad, group=self.group)
767
+
768
+ @staticmethod
769
+ def init(
770
+ group: dist.ProcessGroup,
771
+ fsdp_init_mode: FSDPInitMode,
772
+ cuda_init_mode: CUDAInitMode,
773
+ fsdp_kwargs: Optional[Dict[str, Any]] = None,
774
+ deterministic: bool = False,
775
+ delay_before_free_ms: int = 0,
776
+ ):
777
+ """
778
+ Initializes a :class:`MixtureOfExperts` instance.
779
+
780
+ Args:
781
+ fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
782
+ any modules with FSDP. If ``RECURSIVE``, then wraps some nested
783
+ modules with FSDP, including the expert and shared layers, but
784
+ not the top-level module. The model may later be wrapped with a
785
+ top-level FSDP external to this method if desired.
786
+ cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
787
+ fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
788
+ forwarded to the FSDP constructor.
789
+ deterministic (bool): Whether to make the model deterministic
790
+ across constructions.
791
+ delay_before_free_ms (int): Delay before resharding expert
792
+ parameters in the forward pass (in ms).
793
+ """
794
+ if fsdp_kwargs is None:
795
+ fsdp_kwargs = {}
796
+ if fsdp_init_mode == FSDPInitMode.NO_FSDP:
797
+ return MixtureOfExperts(
798
+ group,
799
+ wrap_fsdp=False,
800
+ cuda_init_mode=cuda_init_mode,
801
+ delay_before_free_ms=delay_before_free_ms,
802
+ deterministic=deterministic,
803
+ )
804
+ elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
805
+ # Does not wrap with top-level FSDP
806
+ fsdp_model = MixtureOfExperts(
807
+ group,
808
+ wrap_fsdp=True,
809
+ cuda_init_mode=cuda_init_mode,
810
+ delay_before_free_ms=delay_before_free_ms,
811
+ deterministic=deterministic,
812
+ **fsdp_kwargs,
813
+ )
814
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
815
+ fsdp_model = fsdp_model.cuda()
816
+ return fsdp_model
817
+ raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
818
+
819
+
820
+ def run_subtests(
821
+ cls_inst,
822
+ subtest_config: Dict[str, List[Any]],
823
+ test_fn: Callable,
824
+ *test_args,
825
+ **test_kwargs: Any,
826
+ ):
827
+ """
828
+ Runs a test function given by ``test_fn`` as a subtest according to the
829
+ configurations specified by ``subtest_config``. This amortizes the
830
+ costly setup overhead (including process spawn and initializing the
831
+ process group) over the subtests.
832
+
833
+ Args:
834
+ subtest_config (Dict[str, List[Any]]): A mapping from subtest
835
+ keyword argument name to a list of its possible values.
836
+ test_fn (Callable): A callable that runs the actual test.
837
+ test_args: Positional arguments to pass to ``test_fn``.
838
+ test_kwargs: Keyword arguments to pass to ``test_fn``.
839
+ """
840
+ # Convert the config mapping to a list to have a fixed order
841
+ subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
842
+ subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
843
+ subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
844
+ for values in itertools.product(*subtest_config_values):
845
+ # Map keyword to chosen value
846
+ subtest_kwargs = dict(zip(subtest_config_keys, values))
847
+ with cls_inst.subTest(**subtest_kwargs):
848
+ test_fn(*test_args, **test_kwargs, **subtest_kwargs)
849
+ dist.barrier()
850
+
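A sketch of how a test method typically drives run_subtests; the configuration keys and the _test_impl helper are illustrative assumptions, not definitions from this file:

    # Inside an FSDPTest subclass:
    def test_sharding_strategies(self):
        self.run_subtests(
            {
                "sharding_strategy": [
                    ShardingStrategy.FULL_SHARD,
                    ShardingStrategy.SHARD_GRAD_OP,
                ],
                "use_orig_params": [False, True],
            },
            # Called once per combination of the values above, e.g.
            # self._test_impl(sharding_strategy=..., use_orig_params=...)
            self._test_impl,
        )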
851
+
852
+ class FSDPTestMultiThread(MultiThreadedTestCase):
853
+ @property
854
+ def world_size(self):
855
+ return torch.cuda.device_count() if torch.cuda.is_available() else 4
856
+
857
+ def setUp(self):
858
+ super().setUp()
859
+ self._spawn_threads()
860
+
861
+ def run_subtests(self, *args, **kwargs):
862
+ return run_subtests(self, *args, **kwargs)
863
+
864
+
865
+ class FSDPTest(MultiProcessTestCase):
866
+ def setUp(self):
867
+ super().setUp()
868
+ # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`,
869
+ # which can cause unit test flakiness:
870
+ # https://github.com/pytorch/pytorch/issues/90848
871
+ os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
872
+ self._spawn_processes()
873
+
874
+ @property
875
+ def world_size(self):
876
+ return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4
877
+
878
+ @property
879
+ def process_group(self):
880
+ return dist.distributed_c10d._get_default_group()
881
+
882
+ @property
883
+ def init_method(self):
884
+ return f"{FILE_SCHEMA}{self.file_name}"
885
+
886
+ def _check_cpu_offload(self, fsdp_model, cpu_offload):
887
+ self.assertEqual(cpu_offload, fsdp_model.cpu_offload)
888
+
889
+ def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
890
+ self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)
891
+
892
+ def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
893
+ self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)
894
+
895
+ def run_subtests(self, *args, **kwargs):
896
+ return run_subtests(self, *args, **kwargs)
897
+
898
+ @classmethod
899
+ def _run(cls, rank, test_name, file_name, pipe):
900
+ self = cls(test_name)
901
+ self.rank = rank
902
+ self.file_name = file_name
903
+
904
+ print(f"dist init r={self.rank}, world={self.world_size}")
905
+
906
+ # Specify the gloo backend to make 'init_process_group()' succeed;
907
+ # the actual tests will be skipped if there are not enough GPUs.
908
+ backend = "nccl" if torch.cuda.is_available() else "gloo"
909
+
910
+ try:
911
+ dist.init_process_group(
912
+ init_method=self.init_method,
913
+ backend=backend,
914
+ world_size=int(self.world_size),
915
+ rank=self.rank,
916
+ )
917
+ except RuntimeError as e:
918
+ if "recompile" in e.args[0]:
919
+ sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
920
+
921
+ raise
922
+
923
+ if torch.cuda.is_available() and torch.cuda.device_count():
924
+ torch.cuda.set_device(self.rank % torch.cuda.device_count())
925
+
926
+ # Execute barrier prior to running test to ensure that every process
927
+ # has finished initialization and that the following test
928
+ # immediately exiting due to a skip doesn't cause flakiness.
929
+ dist.barrier()
930
+
931
+ self.run_test(test_name, pipe)
932
+
933
+ dist.barrier()
934
+
935
+ dist.destroy_process_group()
936
+
937
+ def _train_for_several_steps(
938
+ self,
939
+ model: nn.Module,
940
+ num_steps: int,
941
+ autocast: bool,
942
+ lr: float = 0.01,
943
+ fsdp_cpu_offload: Optional[CPUOffload] = None,
944
+ save_model: bool = False,
945
+ mixed_precision: Optional[MixedPrecision] = None,
946
+ enable_sharded_grad_scaler: bool = False,
947
+ use_pure_fp16: bool = False,
948
+ sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
949
+ ):
950
+ cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params
951
+
952
+ model_device = next(model.parameters()).device
953
+ if sharded_grad_scaler_kwargs is None:
954
+ sharded_grad_scaler_kwargs = {}
955
+ sharded_grad_scaler = ShardedGradScaler(
956
+ enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs
957
+ )
958
+ # use SGD with momentum instead of Adam, since Adam is scale invariant
959
+ # and that makes it a poor choice for these tests
960
+ optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
961
+ for _ in range(num_steps):
962
+ optim.zero_grad()
963
+ with torch.cuda.amp.autocast(enabled=autocast):
964
+ # Inputs are always on CUDA regardless of CPU offloading or model.device
965
+ input = model.module.get_input(torch.device("cuda"))
966
+ if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)):
967
+ if isinstance(input, torch.Tensor):
968
+ input = input.half()
969
+ else:
970
+ input = tuple(x.half() for x in input)
971
+ output = model(*input)
972
+ # Post-forward, if CPU offloading, model params should be on CPU.
973
+ if (
974
+ cpu_offload_params
975
+ and isinstance(model, FSDP)
976
+ # If not resharding after forward, the parameters are still
977
+ # exposed as unsharded views into the GPU flat parameter
978
+ and model.sharding_strategy
979
+ not in NO_RESHARD_AFTER_FORWARD_STRATEGIES
980
+ ):
981
+ for p in model.parameters():
982
+ # Params should always be on CPU
983
+ self.assertEqual(p.device, torch.device("cpu"))
984
+
985
+ loss = model.module.get_loss(input, output).to(model_device)
986
+ loss = sharded_grad_scaler.scale(loss)
987
+
988
+ if not mixed_precision and not use_pure_fp16:
989
+ assert (
990
+ loss.dtype == torch.float32
991
+ ), "loss data type should be float32, as the original \
992
+ parameter data type is float32."
993
+ else:
994
+ if use_pure_fp16:
995
+ self.assertEqual(loss.dtype, torch.float16)
996
+ # FSDP loss is fp16, DDP AMP loss is fp32
997
+ elif isinstance(model, FSDP):
998
+ self.assertEqual(loss.dtype, mixed_precision.param_dtype)
999
+ else:
1000
+ self.assertEqual(loss.dtype, torch.float32)
1001
+ model.module.run_backward(loss)
1002
+ # Post-backward, if CPU offloading, model params should be on CPU.
1003
+ if cpu_offload_params and isinstance(model, FSDP):
1004
+ for p in model.parameters():
1005
+ # Params should always be on CPU
1006
+ self.assertEqual(p.device, torch.device("cpu"))
1007
+ # Unscale the gradients and step
1008
+ sharded_grad_scaler.step(optim)
1009
+ # Update the scale factor
1010
+ sharded_grad_scaler.update()
1011
+ # if save_model, simulate save + load.
1012
+ if save_model:
1013
+ state_dict = {k: v.clone() for k, v in model.state_dict().items()}
1014
+ # Zero params, if save/load state_dict did not work properly, this
1015
+ # would break the parity test with DDP.
1016
+ _zero_model(model)
1017
+ model.load_state_dict(state_dict)
1018
+
1019
+ if isinstance(model, FSDP):
1020
+ model._assert_state(TrainingState.IDLE)
1021
+ return loss.detach()
1022
+
1023
+ def _test_fsdp_parity(
1024
+ self,
1025
+ model_class: Type[FSDPTestModel],
1026
+ fsdp_init_mode: FSDPInitMode,
1027
+ cuda_init_mode: CUDAInitMode,
1028
+ ref_init_fn: Optional[Callable] = None,
1029
+ num_iters: int = 2,
1030
+ save_model: bool = True,
1031
+ cpu_offload: CPUOffload = CPUOffload(),
1032
+ backward_prefetch: Optional[BackwardPrefetch] = None,
1033
+ sharding_strategy: Optional[ShardingStrategy] = None,
1034
+ mixed_precision: Optional[MixedPrecision] = None,
1035
+ forward_prefetch: bool = False,
1036
+ use_orig_params: bool = False,
1037
+ enable_sharded_grad_scaler: bool = False,
1038
+ use_pure_fp16: bool = False,
1039
+ init_kwargs: Optional[Dict[str, Any]] = None,
1040
+ sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
1041
+ **fsdp_kwargs,
1042
+ ):
1043
+ """
1044
+ Tests FSDP training against a reference, which defaults to DDP but
1045
+ may be customized with ``ref_init_fn``.
1046
+
1047
+ Args:
1048
+ model_class (Type[FSDPTestModel]): A model class that inherits from
1049
+ ``FSDPTestModel``, which defines the expected interface.
1050
+ fsdp_init_mode (FSDPInitMode): The mode to initialize the
1051
+ FSDP-wrapped model. This should not be ``NO_FSDP``.
1052
+ ref_init_fn (Optional[Callable]): A callable to invoke that wraps a
1053
+ non-wrapped model to construct the reference model, where this
1054
+ wrapper should provide data parallel semantics. If ``None``,
1055
+ then the callable defaults to the DDP constructor.
1056
+ """
1057
+ assert (
1058
+ fsdp_init_mode != FSDPInitMode.NO_FSDP
1059
+ ), "Expects an FSDP init mode that wraps with FSDP"
1060
+ if init_kwargs is None:
1061
+ init_kwargs = {}
1062
+ lr = 1e-2
1063
+ rank = self.process_group.rank()
1064
+ # Establish reference behavior with DDP
1065
+ model = model_class.init(
1066
+ self.process_group,
1067
+ FSDPInitMode.NO_FSDP,
1068
+ CUDAInitMode.CUDA_BEFORE,
1069
+ deterministic=True,
1070
+ **init_kwargs,
1071
+ )
1072
+ if ref_init_fn is None:
1073
+ ref_model = DDP(model, device_ids=[rank], output_device=rank)
1074
+ else:
1075
+ ref_model = ref_init_fn(model)
1076
+ if use_pure_fp16:
1077
+ ref_model = ref_model.half()
1078
+ ref_loss = self._train_for_several_steps(
1079
+ ref_model,
1080
+ num_iters,
1081
+ autocast=mixed_precision is not None,
1082
+ lr=lr,
1083
+ fsdp_cpu_offload=cpu_offload,
1084
+ mixed_precision=mixed_precision,
1085
+ enable_sharded_grad_scaler=enable_sharded_grad_scaler,
1086
+ use_pure_fp16=use_pure_fp16,
1087
+ sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
1088
+ )
1089
+ ddp_params = list(ref_model.parameters())
1090
+ # Check against FSDP behavior
1091
+ fsdp_kwargs.update(
1092
+ {
1093
+ "cpu_offload": cpu_offload,
1094
+ "backward_prefetch": backward_prefetch,
1095
+ "sharding_strategy": sharding_strategy,
1096
+ "mixed_precision": mixed_precision,
1097
+ "forward_prefetch": forward_prefetch,
1098
+ "use_orig_params": use_orig_params,
1099
+ }
1100
+ )
1101
+ try:
1102
+ fsdp_model = model_class.init(
1103
+ self.process_group,
1104
+ fsdp_init_mode,
1105
+ cuda_init_mode,
1106
+ fsdp_kwargs,
1107
+ deterministic=True,
1108
+ **init_kwargs,
1109
+ )
1110
+ except Exception as e:
1111
+ raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e
1112
+ if not isinstance(fsdp_model, FSDP):
1113
+ # Enforce that we wrap with top-level FSDP since we are comparing
1114
+ # assuming a data parallel reference and some test models may not
1115
+ # do so in their `init()` method
1116
+ fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs)
1117
+ if use_pure_fp16:
1118
+ # Change the model parameter dtype after FSDP initialization
1119
+ fsdp_model = fsdp_model.half()
1120
+ if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
1121
+ fsdp_model = fsdp_model.cuda()
1122
+ offload_params = cpu_offload is not None and cpu_offload.offload_params
1123
+ # Offloading parameters with `CUDA_AFTER` should raise an error during
1124
+ # lazy initialization due to the parameter devices not being CPU;
1125
+ # otherwise, all parameter devices should be CPU
1126
+ expects_device_error = (
1127
+ offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER
1128
+ )
1129
+ expects_cpu_device = (
1130
+ offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER
1131
+ )
1132
+ if expects_cpu_device:
1133
+ cpu_device = torch.device("cpu")
1134
+ for param in fsdp_model.parameters():
1135
+ self.assertEqual(param.device, cpu_device)
1136
+ context = (
1137
+ self.assertRaisesRegex(
1138
+ RuntimeError,
1139
+ "An FSDP-managed module with parameter CPU offloading enabled "
1140
+ "has parameters on cuda",
1141
+ )
1142
+ if expects_device_error
1143
+ else nullcontext()
1144
+ )
1145
+ with context:
1146
+ fsdp_loss = self._train_for_several_steps(
1147
+ fsdp_model,
1148
+ num_iters,
1149
+ autocast=False,
1150
+ lr=lr,
1151
+ fsdp_cpu_offload=cpu_offload,
1152
+ save_model=save_model,
1153
+ mixed_precision=mixed_precision,
1154
+ enable_sharded_grad_scaler=enable_sharded_grad_scaler,
1155
+ use_pure_fp16=use_pure_fp16,
1156
+ sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
1157
+ )
1158
+ # No need to check for parameter and loss parity if expecting an error
1159
+ if expects_device_error:
1160
+ return
1161
+ # Check parameter devices are CPU if offloading to CPU before calling
1162
+ # `get_full_params()`, which will cast the parameters to FP32
1163
+ if offload_params:
1164
+ for param in fsdp_model.parameters():
1165
+ self.assertEqual(param.device, cpu_device)
1166
+ fsdp_loss = fsdp_loss.cuda()
1167
+ fsdp_unsharded_params = get_full_params(fsdp_model)
1168
+ # Do not check dtype since the reference DDP loss may not be the same
1169
+ # dtype as the FSDP loss in the case of mixed precision
1170
+ torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False)
1171
+ # Do not check for parameter parity if using mixed precision since (1)
1172
+ # the DDP parameters are in FP16 (from `half()`) while the FSDP
1173
+ # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs
1174
+ # the optimizer in FP16 while FSDP runs it in FP32
1175
+ # TODO: Disable checking the parameters for pure FP16 due to floating
1176
+ # point inaccuracy. Note that this means that the backward pass is not
1177
+ # checked: https://github.com/pytorch/pytorch/issues/90784
1178
+ if mixed_precision is None and not use_pure_fp16:
1179
+ self.assertEqual(
1180
+ ddp_params,
1181
+ fsdp_unsharded_params,
1182
+ exact_device=True,
1183
+ msg="FSDP did not match DDP",
1184
+ )
1185
+
1186
+
1187
+ class SkipModule(nn.Module):
1188
+ def __init__(self):
1189
+ super().__init__()
1190
+ self.lin = nn.Linear(10, 10, bias=False)
1191
+
1192
+ def forward(self, x):
1193
+ return self.lin(x)
1194
+
1195
+
1196
+ class NestedLinear(nn.Module):
1197
+ def __init__(self, fsdp_wrap):
1198
+ super().__init__()
1199
+ if fsdp_wrap:
1200
+ self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda())
1201
+ else:
1202
+ self.nested_linear = nn.Linear(10, 10, bias=False).cuda()
1203
+
1204
+ def forward(self, x):
1205
+ return self.nested_linear(x)
1206
+
1207
+
1208
+ class SkipModel(nn.Module):
1209
+ def __init__(self, double_nest):
1210
+ super().__init__()
1211
+ self.linear = nn.Linear(10, 10, bias=False).cuda()
1212
+ self.linear_skip = SkipModule().cuda()
1213
+ self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest))
1214
+
1215
+ def forward(self, x):
1216
+ x = self.linear(x)
1217
+ x = self.linear_skip(x)
1218
+ x = self.nested_linear(x)
1219
+ return x
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py ADDED
@@ -0,0 +1,321 @@
1
+ # Torch
2
+ import torch
3
+ import torch.cuda
4
+ import torch.jit
5
+ import torch.jit._logging
6
+ import torch.jit.frontend
7
+ import torch.jit.quantized
8
+
9
+ # Testing utils
10
+ from torch.testing._internal.common_dtype import floating_and_complex_types_and
11
+ from torch.testing._internal.common_utils import TestCase, \
12
+ freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors
13
+ from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
14
+
15
+ # Standard library
16
+ from itertools import chain
17
+ from typing import List, Union
18
+ from torch._C import TensorType
19
+
20
+ import io
21
+
22
+ def check_output_types(self, func, ref_outputs, args, kwargs):
23
+ graph = getattr(func, 'last_graph', None)
24
+ types = [o.type() for o in graph.outputs()]
25
+ self.assertTrue(len(types) == 1)
26
+ t = types[0]
27
+ torch._C._jit_assert_is_instance(ref_outputs, t)
28
+
29
+ # Test names in this set are only checked for a single derivative
30
+ nn_functional_single_grad = frozenset('test_nn_' + name for name in [
31
+ 'pdist',
32
+ 'multilabel_margin_loss',
33
+ 'max_unpool3d',
34
+ 'multi_margin_loss',
35
+ 'binary_cross_entropy',
36
+ 'binary_cross_entropy_size_average',
37
+ 'ctc_loss',
38
+ 'grid_sample',
39
+ ])
40
+
41
+ def check_against_reference(self, func, reference_func, output_func, args, kwargs=None,
42
+ allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False):
43
+ """Verifies a function performs identically to some reference implementation.
44
+
45
+ Commonly, this is used to verify that a JIT implementation
46
+ (func) matches the behavior of the eager implementation
47
+ (reference_func).
48
+ """
49
+ kwargs = kwargs if kwargs else {}
50
+
51
+ def allSum(vs):
52
+ if isinstance(vs, torch.Tensor):
53
+ vs = (vs,)
54
+ return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum()
55
+ for i, v in enumerate(vs)
56
+ if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16))
57
+
58
+ def clone_tensor(t, preserve_requires_grad):
59
+ require_grad = preserve_requires_grad and t.requires_grad
60
+ return t.detach().clone().requires_grad_(require_grad)
61
+
62
+ def clone_inputs(preserve_requires_grad: bool):
63
+ inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
64
+
65
+ for arg in args:
66
+ if isinstance(arg, torch.Tensor):
67
+ inputs.append(clone_tensor(arg, preserve_requires_grad))
68
+ elif is_iterable_of_tensors(arg):
69
+ inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg])
70
+ else:
71
+ inputs.append(arg)
72
+
73
+ return inputs
74
+
75
+ # Returns tensors in args that requires_grad, including tensors in TensorList args
76
+ def get_recording_tensors(args):
77
+ recording_tensors: List[torch.Tensor] = []
78
+
79
+ for arg in args:
80
+ if isinstance(arg, torch.Tensor) and arg.requires_grad:
81
+ recording_tensors.append(arg)
82
+ elif is_iterable_of_tensors(arg):
83
+ recording_tensors.extend(filter(lambda t: t.requires_grad, arg))
84
+
85
+ return recording_tensors
86
+
87
+ # test no gradients case
88
+ nograd_inputs = clone_inputs(preserve_requires_grad=False)
89
+ outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs)
90
+ with enable_profiling_mode_for_profiling_tests():
91
+ outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs)
92
+ self.assertEqual(outputs, outputs_test)
93
+
94
+ if check_types:
95
+ check_output_types(self, func, outputs_test, nograd_inputs, kwargs)
96
+
97
+ if no_grad:
98
+ # skip grad tests
99
+ return
100
+
101
+ with enable_profiling_mode_for_profiling_tests():
102
+ # test single grad case
103
+ recording_inputs = clone_inputs(preserve_requires_grad=True)
104
+ recording_tensors = get_recording_tensors(recording_inputs)
105
+ outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
106
+ grads = torch.autograd.grad(allSum(outputs), recording_tensors,
107
+ allow_unused=allow_unused)
108
+ outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
109
+ grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors,
110
+ allow_unused=allow_unused)
111
+ self.assertEqual(outputs, outputs_test)
112
+ self.assertEqual(grads, grads_test)
113
+ # test the grad grad case
114
+ if self._testMethodName in nn_functional_single_grad or no_gradgrad:
115
+ return
116
+
117
+ outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
118
+ l1 = allSum(outputs)
119
+ grads = torch.autograd.grad(l1, recording_tensors, create_graph=True,
120
+ allow_unused=allow_unused)
121
+
122
+ l2 = (allSum(grads) * l1)
123
+ grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused)
124
+ recording_inputs = clone_inputs(preserve_requires_grad=True)
125
+ recording_tensors = get_recording_tensors(recording_inputs)
126
+ outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
127
+ l1_test = allSum(outputs_test)
128
+ grads_test = torch.autograd.grad(
129
+ l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused)
130
+
131
+ l2_test = (allSum(grads_test) * l1_test)
132
+ grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused)
133
+
134
+ self.assertEqual(outputs, outputs_test)
135
+ self.assertEqual(grads, grads_test)
136
+ for g2, g2_test in zip(grads2, grads2_test):
137
+ if g2 is None and g2_test is None:
138
+ continue
139
+ self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4)
140
+
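For orientation, a minimal usage sketch of check_against_reference (illustrative only, not part of the diffed file): a scripted function is compared against its eager reference on the same inputs. It assumes `self` is a test instance that provides runAndSaveRNG and the usual assert helpers (e.g. a JitCommonTestCase subclass); check_types is disabled because a plain scripted function does not expose a last_graph attribute.

# Hypothetical sketch of how a JIT test might call check_against_reference.
def eager_fn(x, y):
    return (x + y).relu()

scripted_fn = torch.jit.script(eager_fn)

x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(3, 4, requires_grad=True)

# output_func is the identity here; it may post-process outputs before they are summed.
check_against_reference(self, scripted_fn, eager_fn, lambda out: out,
                        (x, y), check_types=False)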
141
+ class JitCommonTestCase(TestCase):
142
+ def createFunctionFromGraph(self, trace):
143
+ graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
144
+ return torch._C._create_function_from_graph("forward", graph)
145
+
146
+ def assertExportImport(self, trace, inputs):
147
+ m = self.createFunctionFromGraph(trace)
148
+ self.assertExportImportModule(m, inputs)
149
+
150
+ def assertExportImportModule(self, m, inputs):
151
+ m_import = self.getExportImportCopy(m)
152
+ a = self.runAndSaveRNG(m, inputs)
153
+ b = self.runAndSaveRNG(m_import, inputs)
154
+ self.assertEqual(a, b, "Results of original model and "
155
+ "exported/imported version of model differed")
156
+
157
+ def runAndSaveRNG(self, func, inputs, kwargs=None):
158
+ kwargs = kwargs if kwargs else {}
159
+ with freeze_rng_state():
160
+ results = func(*inputs, **kwargs)
161
+ return results
162
+
163
+ def getExportImportCopy(self, m, also_test_file=True, map_location=None):
164
+ buffer = io.BytesIO()
165
+ torch.jit.save(m, buffer)
166
+ buffer.seek(0)
167
+ imported = torch.jit.load(buffer, map_location=map_location)
168
+
169
+ if not also_test_file:
170
+ return imported
171
+
172
+ with TemporaryFileName() as fname:
173
+ torch.jit.save(imported, fname)
174
+ return torch.jit.load(fname, map_location=map_location)
175
+
176
+ def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
177
+ fusion_nodes_not_found, non_fusible_nodes_being_fused,
178
+ fusion_nodes_found, nodes_in_diff_graph):
179
+ err_msg = "\nFailure in testing nodes' autodifferentiation. "
180
+ if should_autodiff_node:
181
+ err_msg += "One or more nodes were expected to be autodiffed, " \
182
+ "but were not found in specified fusible/nonfusible " \
183
+ "DifferentiableGraph groups. \nSpecifically:"
184
+ # The node is intended to appear in a differentiable graph but doesn't
185
+ diff_nodes_missing = []
186
+ # The node is intended to appear in a differentiable graph
187
+ # outside of a fusion group but instead is in a fusion group
188
+ diff_nodes_in_fusion = []
189
+ # The node is intended to appear in a fusion group but doesn't
190
+ fusion_nodes_missing = []
191
+ # The node is intended to appear in a fusion group but instead
192
+ # is just in an outer differentiable graph
193
+ fusion_nodes_in_diff = []
194
+ for node in nodes_not_in_diff_graph:
195
+ if node in non_fusible_nodes_being_fused:
196
+ diff_nodes_in_fusion.append(node)
197
+ else:
198
+ diff_nodes_missing.append(node)
199
+ for node in fusion_nodes_not_found:
200
+ if node in nodes_in_diff_graph:
201
+ fusion_nodes_in_diff.append(node)
202
+ else:
203
+ fusion_nodes_missing.append(node)
204
+ if len(diff_nodes_missing) > 0:
205
+ err_msg += f"\n {diff_nodes_missing} were not in one of the " \
206
+ "DifferentiableGraphs when they were expected to be. " \
207
+ "Did you intend for these nodes to be autodiffed? " \
208
+ "If not, remove them from the list of nonfusible nodes."
209
+ if len(diff_nodes_in_fusion) > 0:
210
+ err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \
211
+ "when they were expected to be just in a DifferentiableGraph. If it was " \
212
+ "intended for these nodes to be in FusionGroups, reclassify these nodes as " \
213
+ "fusible nodes. If these nodes were not intended to be fused, your " \
214
+ "autodifferentiation logic might be wrong."
215
+ if len(fusion_nodes_missing) > 0:
216
+ err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \
217
+ "of the DifferentiableGraphs when they were expected to be. " \
218
+ "They were also not found in an outer DifferentiableGraph. Did you " \
219
+ "intend for these nodes to be autodifferentiated? If not, you should " \
220
+ "remove these nodes from the test's fusible nodes. Otherwise your " \
221
+ "autodifferentiation logic might be wrong."
222
+ if len(fusion_nodes_in_diff) > 0:
223
+ err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \
224
+ "of the DifferentiableGraphs when they were expected to be, " \
225
+ "instead they were found just in an outer DifferentiableGraph. " \
226
+ "Did you intend for these nodes to be fused? If not, you should " \
227
+ "move these nodes into the test's nonfusible nodes. Otherwise your " \
228
+ "autodifferentiation logic might be wrong."
229
+ else:
230
+ err_msg += "One or more nodes were not expected to be autodiffed " \
231
+ "but were found in a DifferentiableGraph or in a FusionGroup " \
232
+ "of a DifferentiableGraph. Did you intend for these nodes to be " \
233
+ "autodiffed? If so, change this test to expect autodifferentiation. " \
234
+ "\nSpecifically:"
235
+ if len(fusion_nodes_found) > 0:
236
+ err_msg += f"\n {fusion_nodes_found} were not expected to be in " \
237
+ "one of the DifferentiableGraphs, but appeared in a FusionGroup " \
238
+ "of a DifferentiableGraph. "
239
+ if len(nodes_in_diff_graph) > 0:
240
+ err_msg += f"\n {nodes_in_diff_graph} were not expected to " \
241
+ "be in one of the DifferentiableGraphs but were."
242
+ return err_msg
243
+
244
+ def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
245
+ diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
246
+ diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]
247
+
248
+ # Note: currently no tests have fusible_nodes
249
+ fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
250
+ fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]
251
+
252
+ # For any non-fusible node, it must show up in one of the DifferentiableGraphs.
253
+ nodes_in_diff_graph = []
254
+ nodes_not_in_diff_graph = []
255
+ non_fusible_nodes_being_fused = []
256
+ for node in nonfusible_nodes:
257
+ if any(g.findNode(node) is not None for g in diff_subgraphs):
258
+ nodes_in_diff_graph.append(node)
259
+ else:
260
+ nodes_not_in_diff_graph.append(node)
261
+ if any(g.findNode(node) is not None for g in fusion_subgraphs):
262
+ non_fusible_nodes_being_fused.append(node)
263
+ found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)
264
+
265
+ # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
266
+ fusion_nodes_found = []
267
+ fusion_nodes_not_found = []
268
+ for node in fusible_nodes:
269
+ if any(g.findNode(node) is not None for g in fusion_subgraphs):
270
+ fusion_nodes_found.append(node)
271
+ else:
272
+ fusion_nodes_not_found.append(node)
273
+ found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)
274
+
275
+ if should_autodiff_node is not None:
276
+ err_msg = self.autoDiffErrorMessage(should_autodiff_node,
277
+ nodes_not_in_diff_graph,
278
+ fusion_nodes_not_found,
279
+ non_fusible_nodes_being_fused,
280
+ fusion_nodes_found,
281
+ nodes_in_diff_graph)
282
+ self.assertEqual(should_autodiff_node,
283
+ found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)
284
+
285
+ def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
286
+ traced_graph, assert_propagation, constant_prop=True):
287
+ # re-propagate the input shapes provided by tracing
288
+ prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
289
+ for enable_test_mode in [True, False]:
290
+ # here we test both allowing and disallowing the substitution of complete shapes as constants;
292
+ # disallowing constants helps stress-test the partial evaluation and substitution pipeline
292
+ torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
293
+ torch._C._jit_erase_non_input_shape_information(traced_graph)
294
+ if constant_prop:
295
+ torch._C._jit_pass_constant_propagation(traced_graph)
296
+ torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
297
+ # Add sizes to default tensor type to avoid checking something out of scope
298
+ # and difficulties caused by the tracer leaving shape information in other parts of the tensor type
299
+ output = next(traced_graph.outputs()).type()
300
+
301
+ def test_type(type, actual_size):
302
+ sizes = type.symbolic_sizes()
303
+ out_type = TensorType.get().with_sizes(sizes)
304
+ actual_type = TensorType.get().with_sizes(actual_size)
305
+
306
+ # always check actual shape is a subtype of the output
307
+ self.assertTrue(actual_type.isSubtypeOf(out_type))
308
+
309
+ # and then if assertion flag is provided, check shape analysis
310
+ # is successful
311
+ if assert_propagation:
312
+ self.assertEqual(out_type.sizes(), actual_size)
313
+
314
+ if output.isSubtypeOf(torch._C.TensorType.get()):
315
+ test_type(output, out_sizes)
316
+ else:
317
+ tuple_elements = output.elements()
318
+ for i in range(len(tuple_elements)):
319
+ test_type(tuple_elements[i], out_sizes[i])
320
+
321
+ torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py ADDED
@@ -0,0 +1,384 @@
1
+ # Owner(s): ["module: unknown"]
2
+
3
+ from torch.ao.pruning import BaseSparsifier
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from torch import nn
7
+
8
+ class ImplementedSparsifier(BaseSparsifier):
9
+ def __init__(self, **kwargs):
10
+ super().__init__(defaults=kwargs)
11
+
12
+ def update_mask(self, module, **kwargs):
13
+ module.parametrizations.weight[0].mask[0] = 0
14
+ linear_state = self.state['linear1.weight']
15
+ linear_state['step_count'] = linear_state.get('step_count', 0) + 1
16
+
17
+
18
+ class MockSparseLinear(nn.Linear):
19
+ """
20
+ MockSparseLinear is used to check convert functionality.
21
+ It is the same as a normal Linear layer, except with a different type, as
22
+ well as an additional from_dense method.
23
+ """
24
+ @classmethod
25
+ def from_dense(cls, mod):
26
+ """
27
+ """
28
+ linear = cls(mod.in_features,
29
+ mod.out_features)
30
+ return linear
31
+
32
+
33
+ def rows_are_subset(subset_tensor, superset_tensor) -> bool:
34
+ """
35
+ Checks to see if all rows in subset tensor are present in the superset tensor
36
+ """
37
+ i = 0
38
+ for row in subset_tensor:
39
+ while i < len(superset_tensor):
40
+ if not torch.equal(row, superset_tensor[i]):
41
+ i += 1
42
+ else:
43
+ break
44
+ else:
45
+ return False
46
+ return True
47
+
48
+
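A small illustration of rows_are_subset (not part of the diffed file): because the superset index only moves forward, subset rows must appear in the same relative order as in the superset.

# Illustrative check of rows_are_subset semantics; row order matters because
# the scan over the superset never rewinds.
superset = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
subset = torch.tensor([[1., 2.], [5., 6.]])
assert rows_are_subset(subset, superset)               # rows present, in order
assert not rows_are_subset(subset.flip(0), superset)   # same rows, reversed order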
49
+ class SimpleLinear(nn.Module):
50
+ r"""Model with only Linear layers without biases, some wrapped in a Sequential,
51
+ some following the Sequential. Used to test basic pruned Linear-Linear fusion."""
52
+
53
+ def __init__(self):
54
+ super().__init__()
55
+ self.seq = nn.Sequential(
56
+ nn.Linear(7, 5, bias=False),
57
+ nn.Linear(5, 6, bias=False),
58
+ nn.Linear(6, 4, bias=False),
59
+ )
60
+ self.linear1 = nn.Linear(4, 4, bias=False)
61
+ self.linear2 = nn.Linear(4, 10, bias=False)
62
+
63
+ def forward(self, x):
64
+ x = self.seq(x)
65
+ x = self.linear1(x)
66
+ x = self.linear2(x)
67
+ return x
68
+
69
+
70
+ class LinearBias(nn.Module):
71
+ r"""Model with only Linear layers, alternating layers with biases,
72
+ wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion."""
73
+
74
+ def __init__(self):
75
+ super().__init__()
76
+ self.seq = nn.Sequential(
77
+ nn.Linear(7, 5, bias=True),
78
+ nn.Linear(5, 6, bias=False),
79
+ nn.Linear(6, 3, bias=True),
80
+ nn.Linear(3, 3, bias=True),
81
+ nn.Linear(3, 10, bias=False),
82
+ )
83
+
84
+ def forward(self, x):
85
+ x = self.seq(x)
86
+ return x
87
+
88
+
89
+ class LinearActivation(nn.Module):
90
+ r"""Model with only Linear layers, some with bias, some in a Sequential and some following.
91
+ Activation functions modules in between each Linear in the Sequential, and each outside layer.
92
+ Used to test pruned Linear(Bias)-Activation-Linear fusion."""
93
+
94
+ def __init__(self):
95
+ super().__init__()
96
+ self.seq = nn.Sequential(
97
+ nn.Linear(7, 5, bias=True),
98
+ nn.ReLU(),
99
+ nn.Linear(5, 6, bias=False),
100
+ nn.Tanh(),
101
+ nn.Linear(6, 4, bias=True),
102
+ )
103
+ self.linear1 = nn.Linear(4, 3, bias=True)
104
+ self.act1 = nn.ReLU()
105
+ self.linear2 = nn.Linear(3, 10, bias=False)
106
+ self.act2 = nn.Tanh()
107
+
108
+ def forward(self, x):
109
+ x = self.seq(x)
110
+ x = self.linear1(x)
111
+ x = self.act1(x)
112
+ x = self.linear2(x)
113
+ x = self.act2(x)
114
+ return x
115
+
116
+
117
+ class LinearActivationFunctional(nn.Module):
118
+ r"""Model with only Linear layers, some with bias, some in a Sequential and some following.
119
+ Activation functions modules in between each Linear in the Sequential, and functional
120
+ activationals are called in between each outside layer.
121
+ Used to test pruned Linear(Bias)-Activation-Linear fusion."""
122
+
123
+ def __init__(self):
124
+ super().__init__()
125
+ self.seq = nn.Sequential(
126
+ nn.Linear(7, 5, bias=True),
127
+ nn.ReLU(),
128
+ nn.Linear(5, 6, bias=False),
129
+ nn.ReLU(),
130
+ nn.Linear(6, 4, bias=True),
131
+ )
132
+ self.linear1 = nn.Linear(4, 3, bias=True)
133
+ self.linear2 = nn.Linear(3, 8, bias=False)
134
+ self.linear3 = nn.Linear(8, 10, bias=False)
135
+ self.act1 = nn.ReLU()
136
+
137
+ def forward(self, x):
138
+ x = self.seq(x)
139
+ x = self.linear1(x)
140
+ x = F.relu(x)
141
+ x = self.linear2(x)
142
+ x = F.relu(x)
143
+ x = self.linear3(x)
144
+ x = F.relu(x)
145
+ return x
146
+
147
+
148
+ class SimpleConv2d(nn.Module):
149
+ r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following.
150
+ Used to test pruned Conv2d-Conv2d fusion."""
151
+
152
+ def __init__(self):
153
+ super().__init__()
154
+ self.seq = nn.Sequential(
155
+ nn.Conv2d(1, 32, 3, 1, bias=False),
156
+ nn.Conv2d(32, 64, 3, 1, bias=False),
157
+ )
158
+ self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
159
+ self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)
160
+
161
+ def forward(self, x):
162
+ x = self.seq(x)
163
+ x = self.conv2d1(x)
164
+ x = self.conv2d2(x)
165
+ return x
166
+
167
+
168
+ class Conv2dBias(nn.Module):
169
+ r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside.
170
+ Used to test pruned Conv2d-Bias-Conv2d fusion."""
171
+
172
+ def __init__(self):
173
+ super().__init__()
174
+ self.seq = nn.Sequential(
175
+ nn.Conv2d(1, 32, 3, 1, bias=True),
176
+ nn.Conv2d(32, 32, 3, 1, bias=True),
177
+ nn.Conv2d(32, 64, 3, 1, bias=False),
178
+ )
179
+ self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True)
180
+ self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)
181
+
182
+ def forward(self, x):
183
+ x = self.seq(x)
184
+ x = self.conv2d1(x)
185
+ x = self.conv2d2(x)
186
+ return x
187
+
188
+
189
+ class Conv2dActivation(nn.Module):
190
+ r"""Model with only Conv2d layers, some with bias, some in a Sequential and some following.
191
+ Activation function modules in between each Sequential layer, functional activations called
192
+ in-between each outside layer.
193
+ Used to test pruned Conv2d-Bias-Activation-Conv2d fusion."""
194
+
195
+ def __init__(self):
196
+ super().__init__()
197
+ self.seq = nn.Sequential(
198
+ nn.Conv2d(1, 32, 3, 1, bias=True),
199
+ nn.ReLU(),
200
+ nn.Conv2d(32, 64, 3, 1, bias=True),
201
+ nn.Tanh(),
202
+ nn.Conv2d(64, 64, 3, 1, bias=False),
203
+ nn.ReLU(),
204
+ )
205
+ self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
206
+ self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True)
207
+
208
+ def forward(self, x):
209
+ x = self.seq(x)
210
+ x = self.conv2d1(x)
211
+ x = F.relu(x)
212
+ x = self.conv2d2(x)
213
+ x = F.hardtanh(x)
214
+ return x
215
+
216
+
217
+ class Conv2dPadBias(nn.Module):
218
+ r"""Model with only Conv2d layers, all with bias and some with padding > 0,
219
+ some in a Sequential and some following. Activation function modules in between each layer.
220
+ Used to test that bias is propagated correctly in the special case of
221
+ pruned Conv2d-Bias-(Activation)Conv2d fusion, when the second Conv2d layer has padding > 0."""
222
+
223
+ def __init__(self):
224
+ super().__init__()
225
+ self.seq = nn.Sequential(
226
+ nn.Conv2d(1, 32, 3, 1, padding=1, bias=True),
227
+ nn.ReLU(),
228
+ nn.Conv2d(32, 32, 3, 1, bias=False),
229
+ nn.ReLU(),
230
+ nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
231
+ nn.ReLU(),
232
+ nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
233
+ nn.ReLU(),
234
+ nn.Conv2d(32, 64, 3, 1, bias=True),
235
+ nn.Tanh(),
236
+ )
237
+ self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True)
238
+ self.act1 = nn.ReLU()
239
+ self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True)
240
+ self.act2 = nn.Tanh()
241
+
242
+ def forward(self, x):
243
+ x = self.seq(x)
244
+ x = self.conv2d1(x)
245
+ x = self.act1(x)
246
+ x = self.conv2d2(x)
247
+ x = self.act2(x)
248
+ return x
249
+
250
+
251
+ class Conv2dPool(nn.Module):
252
+ r"""Model with only Conv2d layers, all with bias, some in a Sequential and some following.
253
+ Activation function modules in between each layer, Pool2d modules in between each layer.
254
+ Used to test pruned Conv2d-Pool2d-Conv2d fusion."""
255
+
256
+ def __init__(self):
257
+ super().__init__()
258
+ self.seq = nn.Sequential(
259
+ nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True),
260
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
261
+ nn.ReLU(),
262
+ nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=True),
263
+ nn.Tanh(),
264
+ nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
265
+ )
266
+ self.conv2d1 = nn.Conv2d(64, 48, kernel_size=3, padding=1, bias=True)
267
+ self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
268
+ self.af1 = nn.ReLU()
269
+ self.conv2d2 = nn.Conv2d(48, 52, kernel_size=3, padding=1, bias=True)
270
+ self.conv2d3 = nn.Conv2d(52, 52, kernel_size=3, padding=1, bias=True)
271
+
272
+ def forward(self, x):
273
+ x = self.seq(x)
274
+ x = self.conv2d1(x)
275
+ x = self.maxpool(x)
276
+ x = self.af1(x)
277
+ x = self.conv2d2(x)
278
+ x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=1)
279
+ x = F.relu(x)
280
+ x = self.conv2d3(x)
281
+ return x
282
+
283
+
284
+ class Conv2dPoolFlattenFunctional(nn.Module):
285
+ r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d
286
+ and a functional Flatten followed by a Linear layer.
287
+ Activation functions and Pool2ds in between each layer also.
288
+ Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion."""
289
+
290
+ def __init__(self):
291
+ super().__init__()
292
+ self.seq = nn.Sequential(
293
+ nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
294
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
295
+ nn.ReLU(),
296
+ nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
297
+ nn.Tanh(),
298
+ nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
299
+ )
300
+ self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
301
+ self.af1 = nn.ReLU()
302
+ self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
303
+ self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
304
+ self.fc = nn.Linear(11, 13, bias=True)
305
+
306
+ def forward(self, x):
307
+ x = self.seq(x)
308
+ x = self.conv2d1(x)
309
+ x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
310
+ x = self.af1(x)
311
+ x = self.conv2d2(x)
312
+ x = self.avg_pool(x)
313
+ x = torch.flatten(x, 1) # test functional flatten
314
+ x = self.fc(x)
315
+ return x
316
+
317
+
318
+ class Conv2dPoolFlatten(nn.Module):
319
+ r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d
320
+ and a Flatten module followed by a Linear layer.
321
+ Activation functions and Pool2ds in between each layer also.
322
+ Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion."""
323
+
324
+ def __init__(self):
325
+ super().__init__()
326
+ self.seq = nn.Sequential(
327
+ nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
328
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
329
+ nn.ReLU(),
330
+ nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
331
+ nn.Tanh(),
332
+ nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
333
+ )
334
+ self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
335
+ self.af1 = nn.ReLU()
336
+ self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
337
+ self.avg_pool = nn.AdaptiveAvgPool2d((2, 2))
338
+ self.flatten = nn.Flatten()
339
+ self.fc = nn.Linear(44, 13, bias=True)
340
+
341
+ def forward(self, x):
342
+ x = self.seq(x)
343
+ x = self.conv2d1(x)
344
+ x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
345
+ x = self.af1(x)
346
+ x = self.conv2d2(x)
347
+ x = self.avg_pool(x)
348
+ x = self.flatten(x)
349
+ x = self.fc(x)
350
+ return x
351
+
352
+
353
+ class LSTMLinearModel(nn.Module):
354
+ """Container module with an encoder, a recurrent module, and a linear."""
355
+
356
+ def __init__(
357
+ self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int
358
+ ):
359
+ super().__init__()
360
+ self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers)
361
+ self.linear = nn.Linear(hidden_dim, output_dim)
362
+
363
+ def forward(self, input):
364
+ output, hidden = self.lstm(input)
365
+ decoded = self.linear(output)
366
+ return decoded, output
367
+
368
+
369
+ class LSTMLayerNormLinearModel(nn.Module):
370
+ """Container module with an LSTM, a LayerNorm, and a linear."""
371
+
372
+ def __init__(
373
+ self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int
374
+ ):
375
+ super().__init__()
376
+ self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers)
377
+ self.norm = nn.LayerNorm(hidden_dim)
378
+ self.linear = nn.Linear(hidden_dim, output_dim)
379
+
380
+ def forward(self, x):
381
+ x, state = self.lstm(x)
382
+ x = self.norm(x)
383
+ x = self.linear(x)
384
+ return x, state
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py ADDED
@@ -0,0 +1,225 @@
1
+ r"""Common utility methods for checking quantized
2
+ tensors and modules.
3
+ """
4
+ import numpy as np
5
+ import torch
6
+ from contextlib import contextmanager
7
+ from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS
8
+
9
+ supported_qengines = torch.backends.quantized.supported_engines
10
+ supported_qengines.remove('none')
11
+ # Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326
12
+ # QNNPACK is not supported on PPC
13
+ # QNNPACK throws ASAN heap-buffer-overflow error.
14
+ if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]):
15
+ supported_qengines.remove('qnnpack')
16
+
17
+ def _conv_output_shape(input_size, kernel_size, padding, stride, dilation,
18
+ output_padding=0):
19
+ """Computes the output shape given convolution parameters."""
20
+ return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1)
21
+ * (dilation - 1)) / stride) + 2 * output_padding + 1
22
+
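A worked example for the helper above (illustrative, not part of the file): with a 3x3 kernel, stride 1, and no padding or dilation, a 32-wide input yields a 30-wide output, matching floor((W + 2P - D*(K - 1) - 1) / S) + 1.

# 32-wide input, 3-wide kernel, stride 1, no padding/dilation -> 30-wide output.
assert _conv_output_shape(32, kernel_size=3, padding=0, stride=1, dilation=1) == 30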
23
+ # Quantization references
24
+ def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8):
25
+ """Quantizes a numpy array."""
26
+ if qmin is None:
27
+ qmin = np.iinfo(dtype).min
28
+ if qmax is None:
29
+ qmax = np.iinfo(dtype).max
30
+ qx = np.round(x / scale + zero_point).astype(np.int64)
31
+ qx = np.clip(qx, qmin, qmax)
32
+ qx = qx.astype(dtype)
33
+ return qx
34
+
35
+
36
+ def _dequantize(qx, scale, zero_point):
37
+ """Dequantizes a numpy array."""
38
+ x = (qx.astype(float) - zero_point) * scale
39
+ return x
40
+
41
+
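An illustrative round trip through the two reference helpers above (not part of the file): for values inside the representable range, the dequantized result differs from the original by at most half a quantization step.

x = np.array([-1.0, 0.0, 0.5, 1.0], dtype=np.float32)
scale, zero_point = 0.01, 128
qx = _quantize(x, scale, zero_point)           # uint8 codes centered at 128
x_hat = _dequantize(qx, scale, zero_point)
assert np.max(np.abs(x - x_hat)) <= scale / 2 + 1e-6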
42
+ def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8):
43
+ """Requantizes a numpy array, i.e., intermediate int32 or int16 values are
44
+ converted back to the given type."""
45
+ qx = (x * multiplier).round() + zero_point
46
+ qx = np.clip(qx, qmin, qmax).astype(qtype)
47
+ return qx
48
+
49
+ def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine):
50
+ """Calculate the dynamic quantization parameters (scale, zero_point)
51
+ according to the min and max element of the tensor"""
52
+ assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric)
53
+ if qscheme == torch.per_tensor_symmetric:
54
+ assert dtype == torch.qint8
55
+ if isinstance(X, torch.Tensor):
56
+ X = X.numpy()
57
+ if dtype == torch.qint8:
58
+ if reduce_range:
59
+ qmin, qmax = -64, 63
60
+ else:
61
+ qmin, qmax = -128, 127
62
+ else: # dtype == torch.quint8
63
+ if reduce_range:
64
+ qmin, qmax = 0, 127
65
+ else:
66
+ qmin, qmax = 0, 255
67
+ min_val = X.min()
68
+ max_val = X.max()
69
+ is_symmetric = (qscheme == torch.per_tensor_symmetric)
70
+ if min_val == max_val:
71
+ scale = 1.0
72
+ zero_point = 0
73
+ else:
74
+ if is_symmetric:
75
+ max_val = max(max_val, -min_val)
76
+ min_val = -max_val
77
+ scale = (max_val - min_val) / (qmax - qmin)
78
+ scale = max(scale, np.finfo(np.float32).eps)
79
+ zero_point = 0
80
+ else:
81
+ max_val = max(max_val, 0.0)
82
+ min_val = min(min_val, 0.0)
83
+ scale = (max_val - min_val) / (qmax - qmin)
84
+ scale = max(scale, np.finfo(np.float32).eps)
85
+ zero_point = qmin - round(min_val / scale)
86
+ zero_point = max(qmin, zero_point)
87
+ zero_point = min(qmax, zero_point)
88
+ return [float(scale), int(zero_point)]
89
+
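A numeric sanity check for the helper above (illustrative only): for non-negative quint8 data spanning [0.0, 2.55], it returns scale = (max - min) / 255, approximately 0.01, and zero_point = 0.

X = torch.tensor([0.0, 1.0, 2.55])
scale, zero_point = _calculate_dynamic_qparams(X, torch.quint8)
assert abs(scale - 0.01) < 1e-6
assert zero_point == 0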
90
+ def _calculate_dynamic_per_channel_qparams(X, dtype):
91
+ """Calculate the dynamic quantization parameters (scale, zero_point)
92
+ according to the min and max element of the tensor"""
93
+ if isinstance(X, torch.Tensor):
94
+ X = X.numpy()
95
+ qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max
96
+ n_levels = qmax - qmin
97
+ scale = np.zeros(X.shape[0], dtype=np.float64)
98
+ zero_point = np.zeros(X.shape[0], dtype=np.int64)
99
+ for i in range(zero_point.shape[0]):
100
+ min_val = X.min()
101
+ max_val = X.max()
102
+ if min_val == max_val:
103
+ scale[i] = 1.0
104
+ zero_point[i] = 0
105
+ else:
106
+ max_val = max(max_val, 0.0)
107
+ min_val = min(min_val, 0.0)
108
+ scale[i] = (max_val - min_val) / n_levels
109
+ scale[i] = max(scale[i], np.finfo(np.float32).eps)
110
+ zero_point[i] = qmin - round(min_val / scale[i])
111
+ zero_point[i] = max(qmin, zero_point[i])
112
+ zero_point[i] = min(qmax, zero_point[i])
113
+
114
+ return scale, zero_point
115
+
116
+ def _snr(x, x_hat):
117
+ """Calculates the signal to noise ratio and returns the signal and noise
118
+ power, as well as the SNR in dB.
119
+ If the input is a list/tuple this function is called recursively on each
120
+ element. The result will have the same nested structure as the inputs.
121
+
122
+ Args:
123
+ x, x_hat: Either a tensor or a nested list/tuple of tensors.
124
+ Returns:
125
+ signal, noise, SNR(in dB): Either floats or a nested list of floats
126
+ """
127
+ if isinstance(x, (list, tuple)):
128
+ assert len(x) == len(x_hat)
129
+ res = []
130
+ for idx in range(len(x)):
131
+ res.append(_snr(x[idx], x_hat[idx]))
132
+ return res
133
+ if x_hat.is_quantized:
134
+ x_hat = x_hat.dequantize()
135
+ if x.is_quantized:
136
+ x = x.dequantize()
137
+ noise = (x - x_hat).norm()
138
+ if noise == 0:
139
+ return 0.0, float('inf'), float('inf')
140
+ signal = x.norm()
141
+ snr = signal / noise
142
+ snr_db = 20 * snr.log10()
143
+ return signal, noise, snr_db
144
+
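A brief illustration of _snr's return structure (not part of the file): each call yields (signal norm, noise norm, SNR in dB); when the two inputs are identical the noise is zero and the helper returns (0.0, inf, inf) by convention.

x = torch.randn(10)
signal, noise, snr_db = _snr(x, x + 0.01 * torch.randn(10))  # finite SNR in dB
assert _snr(x, x) == (0.0, float('inf'), float('inf'))       # zero-noise convention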
145
+ @contextmanager
146
+ def override_quantized_engine(qengine):
147
+ previous = torch.backends.quantized.engine
148
+ torch.backends.quantized.engine = qengine
149
+ try:
150
+ yield
151
+ finally:
152
+ torch.backends.quantized.engine = previous
153
+
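A usage sketch for the context manager above (illustrative): the backend is switched only for the duration of the with-block and restored afterwards, even if the block raises.

if 'fbgemm' in supported_qengines:
    with override_quantized_engine('fbgemm'):
        assert torch.backends.quantized.engine == 'fbgemm'
    # the previously active engine is restored here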
154
+ @contextmanager
155
+ def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack):
156
+ try:
157
+ if qengine_is_qnnpack:
158
+ torch._C._set_default_mobile_cpu_allocator()
159
+ yield
160
+ finally:
161
+ if qengine_is_qnnpack:
162
+ torch._C._unset_default_mobile_cpu_allocator()
163
+
164
+ # TODO: Update all quantization tests to use this decorator.
165
+ # Currently for some of the tests it seems to have inconsistent params
166
+ # for fbgemm vs qnnpack.
167
+ def override_qengines(qfunction):
168
+ def test_fn(*args, **kwargs):
169
+ for qengine in supported_qengines:
170
+ with override_quantized_engine(qengine):
171
+ # qfunction should not return anything.
172
+ qfunction(*args, **kwargs)
173
+ return test_fn
174
+
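A hypothetical decorated test (not part of the file) showing what override_qengines does: the body is run once per supported engine, with torch.backends.quantized.engine set accordingly.

@override_qengines
def _check_some_quantized_op():
    # executed once for each entry in supported_qengines
    assert torch.backends.quantized.engine in supported_qengines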
175
+ def qengine_is_fbgemm():
176
+ return torch.backends.quantized.engine == 'fbgemm'
177
+ def qengine_is_qnnpack():
178
+ return torch.backends.quantized.engine == 'qnnpack'
179
+ def qengine_is_onednn():
180
+ return torch.backends.quantized.engine == 'onednn'
181
+ def qengine_is_x86():
182
+ return torch.backends.quantized.engine == 'x86'
183
+
184
+ # Helper function used to simulate per-channel fake-quant against any axis
185
+ def _permute_to_axis_zero(X, axis):
186
+ new_axis_list = list(range(X.dim()))
187
+ new_axis_list[axis] = 0
188
+ new_axis_list[0] = axis
189
+ y = X.permute(tuple(new_axis_list))
190
+ return y, new_axis_list
191
+
192
+ # Reference method for fake quantize
193
+ # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
194
+ def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
195
+ dtype = X.dtype
196
+ X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
197
+ res = torch.zeros_like(X)
198
+
199
+ for i in range(X.size()[0]):
200
+ res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
201
+ per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]
202
+
203
+ out = res.permute(tuple(permute_axis_list))
204
+ return out.to(dtype)
205
+
206
+ # Reference method for the gradient of the fake quantize operator
207
+ # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
208
+ def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
209
+ dtype = X.dtype
210
+ X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
211
+ Xq = torch.zeros_like(X)
212
+ for i in range(X.size()[0]):
213
+ Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
214
+ Xq = Xq.permute(tuple(permute_axis_list))
215
+ mask = (Xq >= quant_min) * (Xq <= quant_max)
216
+ res = torch.zeros_like(dY)
217
+ res[mask] = dY[mask]
218
+ return res.to(dtype)
219
+
220
+ def to_tensor(X, device):
221
+ if not isinstance(X, torch.Tensor):
222
+ X = torch.tensor(X)
223
+ else:
224
+ X = X.clone().detach()
225
+ return X.to(device=torch.device(device), dtype=torch.float32)
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py ADDED
@@ -0,0 +1,219 @@
1
+ import torch
2
+ from copy import deepcopy
3
+ from torch.utils._pytree import tree_map
4
+
5
+ # TODO: Move LoggingTensor here.
6
+ from torch.testing._internal.logging_tensor import LoggingTensor
7
+
8
+
9
+ # Base class for wrapper-style tensors.
10
+ class WrapperTensor(torch.Tensor):
11
+ @staticmethod
12
+ def __new__(cls, *args, **kwargs):
13
+ t, kwargs = cls.get_wrapper_properties(*args, **kwargs)
14
+ if "size" not in kwargs:
15
+ size = t.size()
16
+ else:
17
+ size = kwargs["size"]
18
+ del kwargs["size"]
19
+ if "dtype" not in kwargs:
20
+ kwargs["dtype"] = t.dtype
21
+ if "layout" not in kwargs:
22
+ kwargs["layout"] = t.layout
23
+ if "device" not in kwargs:
24
+ kwargs["device"] = t.device
25
+ if "requires_grad" not in kwargs:
26
+ kwargs["requires_grad"] = False
27
+ # Ignore memory_format and pin memory for now as I don't know how to
28
+ # safely access them on a Tensor (if possible??)
29
+
30
+ wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs)
31
+ wrapper._validate_methods()
32
+ return wrapper
33
+
34
+ @classmethod
35
+ def get_wrapper_properties(cls, *args, **kwargs):
36
+ # Should return both an example Tensor and a dictionary of kwargs
37
+ # to override any of that example Tensor's properly.
38
+ # This is very similar to the `t.new_*(args)` API
39
+ raise NotImplementedError("You need to implement get_wrapper_properties")
40
+
41
+ def _validate_methods(self):
42
+ # Skip this if not in debug mode?
43
+ # Changing these on the python side is wrong as it would not be properly reflected
44
+ # on the c++ side
45
+ # This doesn't catch attributes set in the __init__
46
+ forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"]
47
+ for el in forbidden_overrides:
48
+ if getattr(self.__class__, el) is not getattr(torch.Tensor, el):
49
+ raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the "
50
+ f"property {el} but this is not allowed as such change would "
51
+ "not be reflected to c++ callers.")
52
+
53
+
54
+ class DiagTensorBelow(WrapperTensor):
55
+ @classmethod
56
+ def get_wrapper_properties(cls, diag, requires_grad=False):
57
+ assert diag.ndim == 1
58
+ return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad}
59
+
60
+ def __init__(self, diag, requires_grad=False):
61
+ self.diag = diag
62
+
63
+ handled_ops = {}
64
+
65
+ # We disable torch function here to avoid any unwanted wrapping of the output
66
+ __torch_function__ = torch._C._disabled_torch_function_impl
67
+
68
+ @classmethod
69
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
70
+ if not all(issubclass(cls, t) for t in types):
71
+ return NotImplemented
72
+
73
+ # For everything else, call the handler:
74
+ fn = cls.handled_ops.get(func.__name__, None)
75
+ if fn:
76
+ return fn(*args, **(kwargs or {}))
77
+ else:
78
+ # Note that here, because we don't need to provide the autograd formulas
79
+ # we can have a default "fallback" that creates a plain Tensor based
80
+ # on the diag elements and calls the func again.
81
+
82
+ def unwrap(e):
83
+ return e.diag.diag() if isinstance(e, DiagTensorBelow) else e
84
+
85
+ def wrap(e):
86
+ if isinstance(e, torch.Tensor) and e.ndim == 1:
87
+ return DiagTensorBelow(e)
88
+ if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero():
89
+ return DiagTensorBelow(e.diag())
90
+ return e
91
+
92
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
93
+ return rs
94
+
95
+ def __repr__(self):
96
+ return super().__repr__(tensor_contents=f"diag={self.diag}")
97
+
98
+
99
+ class SparseTensor(WrapperTensor):
100
+ @classmethod
101
+ def get_wrapper_properties(cls, size, values, indices, requires_grad=False):
102
+ assert values.device == indices.device
103
+ return values, {"size": size, "requires_grad": requires_grad}
104
+
105
+ def __init__(self, size, values, indices, requires_grad=False):
106
+ self.values = values
107
+ self.indices = indices
108
+
109
+ def __repr__(self):
110
+ return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}")
111
+
112
+ def sparse_to_dense(self):
113
+ res = torch.zeros(self.size(), dtype=self.values.dtype)
114
+ res[self.indices.unbind(1)] = self.values
115
+ return res
116
+
117
+ @staticmethod
118
+ def from_dense(t):
119
+ indices = t.nonzero()
120
+ values = t[indices.unbind(1)]
121
+ return SparseTensor(t.size(), values, indices)
122
+
123
+ @classmethod
124
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
125
+ func_name = f"{func.__module__}.{func.__name__}"
126
+
127
+ res = cls._try_call_special_impl(func_name, args, kwargs)
128
+ if res is not NotImplemented:
129
+ return res
130
+
131
+ # Otherwise, use a default implementation that construct dense
132
+ # tensors and use that to compute values
133
+ def unwrap(e):
134
+ return e.sparse_to_dense() if isinstance(e, SparseTensor) else e
135
+
136
+ # Wrap back all Tensors into our custom class
137
+ def wrap(e):
138
+ # Check for zeros and use that to get indices
139
+ return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e
140
+
141
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
142
+ return rs
143
+
144
+ # To show how things happen later
145
+ def __rmul__(self, other):
146
+ return super().__rmul__(other)
147
+
148
+ _SPECIAL_IMPLS = {}
149
+
150
+ @classmethod
151
+ def _try_call_special_impl(cls, func, args, kwargs):
152
+ if func not in cls._SPECIAL_IMPLS:
153
+ return NotImplemented
154
+ return cls._SPECIAL_IMPLS[func](args, kwargs)
155
+
156
+
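An illustrative round trip through the SparseTensor wrapper above (not part of the file): a dense tensor is wrapped, an ordinary op is routed through the dense fallback in __torch_dispatch__, and the result converts back to the expected dense values.

dense = torch.tensor([[0., 1.], [2., 0.]])
sp = SparseTensor.from_dense(dense)
doubled = sp * 2                       # handled by the dense fallback in __torch_dispatch__
assert isinstance(doubled, SparseTensor)
assert torch.equal(doubled.sparse_to_dense(), dense * 2)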
157
+ # Example non-wrapper subclass that stores extra state.
158
+ class NonWrapperTensor(torch.Tensor):
159
+ def __new__(cls, data):
160
+ t = torch.Tensor._make_subclass(cls, data)
161
+ t.extra_state = {
162
+ 'last_func_called': None
163
+ }
164
+ return t
165
+
166
+ @classmethod
167
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
168
+ result = super().__torch_function__(func, types, args, kwargs)
169
+
170
+ if isinstance(result, cls):
171
+ # Do something with the extra state. For the example here, just store the name of the
172
+ # last function called (skip for deepcopy so the copy has the same extra state).
173
+ if func is torch.Tensor.__deepcopy__:
174
+ result.extra_state = deepcopy(args[0].extra_state)
175
+ else:
176
+ result.extra_state = {
177
+ 'last_func_called': func.__name__,
178
+ }
179
+
180
+ return result
181
+
182
+ # new_empty() must be defined for deepcopy to work
183
+ def new_empty(self, shape):
184
+ return type(self)(torch.empty(shape))
185
+
186
+
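A short illustration of the extra state carried by NonWrapperTensor (not part of the file): results produced through __torch_function__ record the name of the last function called.

t = NonWrapperTensor(torch.ones(2, 2))
result = torch.add(t, t)
assert isinstance(result, NonWrapperTensor)
assert result.extra_state['last_func_called'] == 'add'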
187
+ # Class used to store info about subclass tensors used in testing.
188
+ class SubclassInfo:
189
+
190
+ __slots__ = ['name', 'create_fn', 'closed_under_ops']
191
+
192
+ def __init__(self, name, create_fn, closed_under_ops=True):
193
+ self.name = name
194
+ self.create_fn = create_fn # create_fn(shape) -> tensor instance
195
+ self.closed_under_ops = closed_under_ops
196
+
197
+
198
+ subclass_db = {
199
+ torch.Tensor: SubclassInfo(
200
+ 'base_tensor', create_fn=torch.randn
201
+ ),
202
+ NonWrapperTensor: SubclassInfo(
203
+ 'non_wrapper_tensor',
204
+ create_fn=lambda shape: NonWrapperTensor(torch.randn(shape))
205
+ ),
206
+ LoggingTensor: SubclassInfo(
207
+ 'logging_tensor',
208
+ create_fn=lambda shape: LoggingTensor(torch.randn(shape))
209
+ ),
210
+ SparseTensor: SubclassInfo(
211
+ 'sparse_tensor',
212
+ create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu())
213
+ ),
214
+ DiagTensorBelow: SubclassInfo(
215
+ 'diag_tensor_below',
216
+ create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)),
217
+ closed_under_ops=False # sparse semantics
218
+ ),
219
+ }