applied-ai-018 committed
Commit 1583dd5 · verified · 1 Parent(s): b72c45b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/allowed_functions.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/skipfiles.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/allowed_functions.py +553 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/common.py +109 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py +184 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py +287 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py +465 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py +16 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py +35 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py +113 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py +12 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py +73 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py +170 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/decorators.py +284 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/external_utils.py +29 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/profiler.py +155 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/replay_record.py +119 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py +931 -0
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.99 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/_trace_wrapped_higher_order_op.cpython-310.pyc ADDED
Binary file (2.59 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/allowed_functions.cpython-310.pyc ADDED
Binary file (15 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_analysis.cpython-310.pyc ADDED
Binary file (7.18 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/cache_size.cpython-310.pyc ADDED
Binary file (3.34 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/compiled_autograd.cpython-310.pyc ADDED
Binary file (7.86 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/config.cpython-310.pyc ADDED
Binary file (3.58 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/current_scope_id.cpython-310.pyc ADDED
Binary file (635 Bytes)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/device_interface.cpython-310.pyc ADDED
Binary file (7.37 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/eval_frame.cpython-310.pyc ADDED
Binary file (43.1 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/funcname_cache.cpython-310.pyc ADDED
Binary file (1.44 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/guards.cpython-310.pyc ADDED
Binary file (35.6 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (655 Bytes)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/output_graph.cpython-310.pyc ADDED
Binary file (43.7 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/polyfill.cpython-310.pyc ADDED
Binary file (1.05 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/resume_execution.cpython-310.pyc ADDED
Binary file (13.2 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/skipfiles.cpython-310.pyc ADDED
Binary file (8 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/types.cpython-310.pyc ADDED
Binary file (3.49 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/allowed_functions.py ADDED
@@ -0,0 +1,553 @@
import builtins
import collections
import copy
import dataclasses
import functools
import inspect
import itertools
import math
import operator
import sys
import types
import warnings

from collections import defaultdict
from typing import Any, Callable, cast, Dict, List, Optional, Set, Union

np: Optional[types.ModuleType] = None
try:
    import numpy as np
except ModuleNotFoundError:
    pass


import torch
import torch._functorch.deprecated as deprecated_func
from torch.fx._symbolic_trace import is_fx_tracing

from . import config
from .external_utils import is_compiling
from .utils import hashable, is_safe_constant, NP_SUPPORTED_MODULES

"""
A note on allowed functions:

Dynamo consults this file to determine if a particular function/module
is allowed to appear as a node in its fx output.

If a function is disallowed, it may either be traced-through, or skipped.

Trace-through means dynamo will continue to trace the interior code for
the function/module rather than stopping at its boundary and recording it
as a node in the fx graph. Whether tracing through or allowing, the functionality
of the function/module is part of the dynamo graph. Caveat: if tracing through,
any interior operation could trigger its own graph-break.

Skips are determined by (torch/_dynamo/skipfiles.py) - see "a note on
skipfiles" there.
"""


class FunctionIdSet:
    """
    Track a set of `id()`s of objects which are either allowed or not
    allowed to go into the generated FX graph. Use to test for torch.*,
    numpy.*, builtins.*, etc.

    Support user modification to permit customization of what can be
    added to the graph and what will cause a graph break.
    """

    function_ids: Optional[Set[int]] = None
    function_names: Optional[Dict[int, str]] = None

    def __init__(self, lazy_initializer: Callable[[], Union[Dict[int, str], Set[int]]]):
        self.lazy_initializer = lazy_initializer

    def __call__(self):
        if self.function_ids is None:
            value = self.lazy_initializer()
            if isinstance(value, dict):
                self.function_ids = set(value.keys())
                self.function_names = value
            else:
                assert isinstance(value, set)
                self.function_ids = value
        return self.function_ids

    def get_name(self, idx: int, default: str):
        self()  # lazy init
        assert self.function_names is not None
        return self.function_names.get(idx, default)

    def add(self, idx: int):
        function_ids = self()  # lazy init
        function_ids.add(idx)

    def remove(self, idx: int):
        function_ids = self()
        if idx in function_ids:
            function_ids.remove(idx)

    def __contains__(self, idx: int):
        return idx in self()


@FunctionIdSet
def _disallowed_function_ids() -> Set[int]:
    remove: List[Any] = [
        True,
        False,
        None,
        collections.OrderedDict,
        copy.copy,
        copy.deepcopy,
        inspect.signature,
        math.__package__,
        torch.__builtins__,
        torch.autocast_decrement_nesting,
        torch.autocast_increment_nesting,
        torch.autograd.grad,
        torch.clear_autocast_cache,
        torch.cuda.current_device,
        torch.cuda.set_device,
        torch.distributions.constraints.is_dependent,
        torch.distributions.normal.Normal,
        torch.inference_mode,
        torch.jit.isinstance,
        torch.set_anomaly_enabled,
        torch.set_autocast_cache_enabled,
        torch.set_autocast_cpu_dtype,
        torch.set_autocast_cpu_enabled,
        torch.set_autocast_enabled,
        torch.set_autocast_gpu_dtype,
        warnings.warn,
        torch._C._dynamo.eval_frame.unsupported,
        torch.Tensor.__init__,
        torch.resize_as_,
        torch._tensor._convert,
    ]

    # extract all dtypes from torch
    dtypes = [
        obj for obj in torch.__dict__.values() if isinstance(obj, type(torch.float32))
    ]
    remove += dtypes
    storage = [
        obj
        for obj in torch.__dict__.values()
        if isinstance(obj, type(torch.FloatStorage))
    ]
    remove += storage

    # Distributed APIs don't work well with torch.compile.
    if torch.distributed.is_available():
        remove.extend(
            torch.distributed.distributed_c10d.dynamo_unsupported_distributed_c10d_ops
        )

    return {id(x) for x in remove}


# Helper function to dump the torch name rule map generated based on
# the heuristic defined in gen_allowed_objs_and_ids.
def dump_allowed_torch_name_rule_map() -> None:
    m = gen_allowed_objs_and_ids(record=True, c_binding_only=False).name_rule_map
    for k, v in m.items():
        print(f'"{k}": {v.__name__},')


@dataclasses.dataclass
class AllowedObjects:
    """
    Track the objects, object id - name pairs, and name - dynamo wrapping rule pairs
    from the heuristic defined in `gen_allowed_objs_and_ids`.
    TODO: Remove the overlap/duplication between these fields
    after allowed_functions refactor is done.
    """

    object_ids: Dict[int, str]
    ctx_mamager_classes: Set[Any]
    c_binding_in_graph_functions: Set[Any]
    non_c_binding_in_graph_functions: Set[Any]
    name_rule_map: Dict[str, Any]


def gen_allowed_objs_and_ids(record=False, c_binding_only=True) -> AllowedObjects:
    """
    Walk torch.* and get the ids of all the stuff in it
    """
    from .variables import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable

    warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
    torch_object_ids = dict()
    ctx_mamager_classes = set()
    c_binding_in_graph_functions = set()
    non_c_binding_in_graph_functions = set()
    torch_name_rule_map = dict()

    # Add obj to ctx_mamager_classes set if it's a torch context manager class.
    # This is used to generate the ctx manager class list based on heuristic.
    def heuristic_record_if_ctx_manager(obj, module, name):
        if (
            issubclass(type(obj), type)
            and hasattr(obj, "__enter__")
            and hasattr(obj, "__exit__")
        ):
            torch_name_rule_map[
                f"{module.__name__}.{name}"
            ] = TorchCtxManagerClassVariable
            ctx_mamager_classes.add(obj)

    # On some platforms, these functions were loaded as classes instead of functions.
    # To mitigate these weird cases, we need this special check.
    def is_special_functions(obj):
        return hashable(obj) and obj in {
            torch._C._cuda_isCurrentStreamCapturing,
            torch._C._graph_pool_handle,
        }

    # Add obj to c_binding_in_graph_functions set or non_c_binding_in_graph_functions set
    # if it's a torch function or method.
    # This is used to generate the in graph function list based on heuristic.
    def heuristic_record_if_in_graph_function(obj, module, name):
        try:
            if hasattr(obj, "__wrapped__"):
                obj = obj.__wrapped__
        except Exception:
            pass
        if isinstance(
            obj,
            (
                types.FunctionType,
                types.MethodType,
                types.BuiltinFunctionType,
                types.MethodDescriptorType,
                types.WrapperDescriptorType,
            ),
        ) or is_special_functions(obj):
            torch_name_rule_map[
                f"{module.__name__}.{name}"
            ] = TorchInGraphFunctionVariable
            if c_binding_only:
                if not hasattr(obj, "__code__"):
                    c_binding_in_graph_functions.add(obj)
            else:
                if hasattr(obj, "__code__"):
                    non_c_binding_in_graph_functions.add(obj)
                else:
                    c_binding_in_graph_functions.add(obj)

    def _is_allowed_module_prefix(obj):
        allowed_modules = ("torch", "math")
        # torch.nn.modules.rnn is disallowed because these modules internally
        # flatten their parameters. This flattening process will call
        # Tensor.set_ with a Storage, and Storages cannot be traced with
        # AOTAutograd; so we need to graph-break. To ensure this, we inline
        # these functions, rather than keep them opaque-ly in the graph.
        disallowed_modules = [
            "torch.optim",
            "torch.nn.modules.rnn",
            "torch._dynamo",
            "torch._C._dynamo",
            "torch._inductor",
            "torch._C.inductor",
            "torch.fx",
            "torch._C._autograd",
            "torch._C._cudart",
            "torch._C._distributed_autograd",
            "torch._C._distributed_c10d",
            "torch._C._distributed_rpc",
            "torch._C._functorch",
            "torch._C._monitor",
            "torch._C._nvtx",
            "torch._C._lazy",
            "torch._C._profiler",
            "torch.__config__",
            "torch._custom_op",
            "torch._dispatch",
            "torch._export",
            "torch._functorch.make_functional",
            "torch._functorch.compile_utils",
            "torch._functorch.partitioners",
            "torch._functorch.aot_autograd",
            "torch._functorch.compilers",
            "torch._functorch.fx_minifier",
            "torch.autograd.profiler_util",
            "torch.autograd.profiler",
            "torch._jit_internal",
            "torch._library",
            "torch._lobpcg",
            "torch._logging",
            "torch._meta_registrations",
            "torch._namedtensor_internals",
            "torch._numpy",
            "torch._sources",
            "torch._subclasses",
            "torch._tensor",
            "torch._tensor_str",
            "torch._utils",
            "torch._utils_internal",
            "torch._vmap_internals",
            "torch.compiler",
            "torch.distributed",
            "torch.export",
            "torch.hub",
            "torch.jit",
            "torch.library",
            "torch.masked.maskedtensor",
            "torch.nn.init",
            "torch.nn.modules.module",
            "torch.nn.parallel",
            "torch.nn.utils",
            "torch.multiprocessing",
            "torch.onnx",
            "torch.overrides",
            "torch.package",
            "torch.profiler",
            "torch.serialization",
            "torch.storage",
            "torch.utils",
        ]
        if config.trace_distributed:
            disallowed_modules.append("torch.distributed.")

        allowed_modules_dot = tuple([x + "." for x in allowed_modules])
        module = inspect.getmodule(obj)
        if module is None:
            return False

        mod_name = module.__name__

        if any(mod_name.startswith(m) for m in disallowed_modules):
            return False

        return mod_name in allowed_modules or mod_name.startswith(allowed_modules_dot)

    def _find_torch_objects(module):
        if any(
            module.__name__.startswith(mod_name)
            for mod_name in config.allowed_functions_module_string_ignorelist
        ):
            return
        torch_object_ids[id(module)] = module.__name__
        for name, obj in list(module.__dict__.items()):
            if id(obj) not in torch_object_ids:
                # Dynamo allows all builtins into the graph and does not attempt
                # to introspect into them. We don't want to allow instances of
                # HigherOrderOperator into the graph all the time (Dynamo needs
                # to introspect the body functions of these HigherOrderOperator
                # first, decide they are safe, and then allow them into the graph).
                # So we exclude HigherOrderOperator from being a builtin.
                import torch._ops

                if isinstance(obj, torch._ops.HigherOrderOperator):
                    continue

                # We want to trace through `grad` and `vmap`
                if obj in (
                    torch.func.grad,
                    deprecated_func.grad,
                    torch.func.vmap,
                    deprecated_func.vmap,
                    torch.nn.functional.triplet_margin_with_distance_loss,
                    torch.cond,
                ):
                    continue

                if isinstance(obj, types.ModuleType):
                    if obj.__name__.startswith("torch.") and _is_allowed_module_prefix(
                        obj
                    ):
                        torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                        _find_torch_objects(obj)
                elif _is_allowed_module_prefix(obj):
                    if record:
                        heuristic_record_if_ctx_manager(obj, module, name)
                        heuristic_record_if_in_graph_function(obj, module, name)
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                elif inspect.getmodule(obj) is None and not is_safe_constant(obj):
                    if record:
                        heuristic_record_if_ctx_manager(obj, module, name)
                        heuristic_record_if_in_graph_function(obj, module, name)
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"

    _find_torch_objects(torch)
    _find_torch_objects(math)

    if config.trace_distributed:
        from torch.distributed import _functional_collectives_impl as fci

        for f in [
            fci._all_gather_into_tensor,
            fci._all_reduce,
            fci._reduce_scatter_tensor,
            fci._all_reduce_coalesced,
            fci._all_gather_into_tensor_coalesced,
            fci._reduce_scatter_tensor_coalesced,
        ]:
            torch_object_ids[id(f)] = repr(f)

    # torch.Tensor.{fn}
    for name in dir(torch.Tensor):
        method = getattr(torch.Tensor, name)
        if isinstance(
            method, (types.MethodDescriptorType, types.WrapperDescriptorType)
        ):
            torch_object_ids[id(method)] = f"torch.Tensor.{name}"

    for idx in _disallowed_function_ids():
        if idx in torch_object_ids:
            del torch_object_ids[idx]

    for extra in (is_fx_tracing, is_compiling):
        torch_object_ids[id(extra)] = f"{extra.__module__}.{extra.__name__}"

    return AllowedObjects(
        torch_object_ids,
        ctx_mamager_classes,
        c_binding_in_graph_functions,
        non_c_binding_in_graph_functions,
        torch_name_rule_map,
    )


@FunctionIdSet
def _allowed_function_ids() -> Dict[int, str]:
    return gen_allowed_objs_and_ids().object_ids


@FunctionIdSet
def _allowed_user_defined_function_ids() -> Dict[int, str]:
    rv: Dict[int, str] = {}
    return rv


@FunctionIdSet
def _builtin_function_ids() -> Dict[int, str]:
    rv = {
        id(v): f"builtins.{k}"
        for k, v in builtins.__dict__.items()
        if not k.startswith("_") and callable(v)
    }
    rv.update(
        {
            id(v): f"operator.{k}"
            for k, v in operator.__dict__.items()
            if not k.startswith("_") and callable(v)
        }
    )
    rv.update(
        {id(v): f"functools.{v.__name__}" for v in (itertools.chain, itertools.islice)}
    )
    rv.update(
        {
            id(cast): "typing.cast",
            id(functools.reduce): "functools.reduce",
            id(copy.deepcopy): "copy.deepcopy",
        }
    )
    return rv


@FunctionIdSet
def _numpy_function_ids() -> Dict[int, str]:
    rv = dict()
    for mod in NP_SUPPORTED_MODULES:
        rv.update(
            {
                id(v): f"{mod.__name__}.{k}"
                for k, v in mod.__dict__.items()
                if callable(v)
                and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__
            }
        )
    return rv


@FunctionIdSet
def _builtin_constant_ids() -> Dict[int, str]:
    """
    Collects constant builtins by eliminating callable items.
    """
    rv = {
        id(v): f"builtins.{k}"
        for k, v in builtins.__dict__.items()
        if not k.startswith("_") and not callable(v)
    }
    return rv


_lazy_module_init: Dict[str, List[Callable[[], None]]] = defaultdict(list)


def add_module_init_func(name: str, init_func: Callable[[], None]) -> None:
    """Register a module without eagerly importing it"""
    # If the module is already imported, eagerly run init
    assert "." not in name, f"Expected a root module name, but got {name}"
    if name in sys.modules:
        init_func()

    # Module is not yet imported, delay processing until needed
    assert name not in _lazy_module_init
    _lazy_module_init[name].append(init_func)


def _maybe_init_lazy_module(obj: object) -> None:
    module = getattr(obj, "__module__", None)
    if module is None:
        return

    base_module = module.split(".")[0]
    init_funcs = _lazy_module_init.pop(base_module, None)
    if init_funcs is not None:
        for fn in init_funcs:
            fn()


def is_allowed(obj) -> bool:
    """Is this safe to trace like torch.add ?"""
    _maybe_init_lazy_module(obj)

    if id(obj) in _disallowed_function_ids:
        return False

    if id(obj) in _allowed_function_ids:
        return True

    # torch.ops is populated lazily so we don't necessarily have them in
    # _allowed_function_ids. Figure it out by testing the type instead
    # in those cases
    return isinstance(
        obj,
        (torch._ops.OpOverloadPacket, torch._ops.OpOverload, torch._ops._OpNamespace),
    )


def is_user_defined_allowed(obj) -> bool:
    _maybe_init_lazy_module(obj)
    return id(obj) in _allowed_user_defined_function_ids


def is_forbidden(obj) -> bool:
    _maybe_init_lazy_module(obj)
    return getattr(obj, "_dynamo_forbidden", False)


def torch_get_name(obj, default) -> str:
    """Convert a torch.* function to a string"""
    return _allowed_function_ids.get_name(id(obj), default)


def is_builtin_callable(obj) -> bool:
    return id(obj) in _builtin_function_ids


def is_builtin_constant(obj) -> bool:
    return id(obj) in _builtin_constant_ids


def is_numpy(obj) -> bool:
    if np is None:
        return False
    return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids
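For orientation, here is a brief sketch of how the lazily built id sets above are consulted. It assumes the file is importable as torch._dynamo.allowed_functions, which is an internal module and may move or be renamed between releases.

import torch
from torch._dynamo import allowed_functions  # internal API; path matches the file above

# torch.add is collected by gen_allowed_objs_and_ids, so it may appear as an FX graph node
print(allowed_functions.is_allowed(torch.add))             # True
# builtins such as len are tracked separately in _builtin_function_ids
print(allowed_functions.is_builtin_callable(len))          # True
# torch.autograd.grad is on the explicit disallow list, so it will not be recorded as a graph node
print(allowed_functions.is_allowed(torch.autograd.grad))   # False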
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.11 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/cudagraphs.cpython-310.pyc ADDED
Binary file (4.83 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/debugging.cpython-310.pyc ADDED
Binary file (8.66 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (15 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/inductor.cpython-310.pyc ADDED
Binary file (552 Bytes)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/onnxrt.cpython-310.pyc ADDED
Binary file (1.29 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/registry.cpython-310.pyc ADDED
Binary file (3.73 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tensorrt.cpython-310.pyc ADDED
Binary file (263 Bytes)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/torchxla.cpython-310.pyc ADDED
Binary file (1.92 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/__pycache__/tvm.cpython-310.pyc ADDED
Binary file (5.2 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/common.py ADDED
@@ -0,0 +1,109 @@
import contextlib
import functools
import logging
from unittest.mock import patch

import torch
from torch._dynamo import disable
from torch._dynamo.utils import counters, defake
from torch._functorch.aot_autograd import aot_module_simplified
from torch.utils._python_dispatch import _disable_current_modes

log = logging.getLogger(__name__)


def aot_autograd(**kwargs):
    def compiler_fn(gm: torch.fx.GraphModule, example_inputs):
        # Hack to get around circular import problems with aot_eager_decomp_partition
        if callable(kwargs.get("decompositions")):
            kwargs["decompositions"] = kwargs["decompositions"]()

        counters["aot_autograd"]["total"] += 1
        use_fallback = False

        if use_fallback:
            log.debug("Unable to use AOT Autograd because graph has mutation")
            counters["aot_autograd"]["not_ok"] += 1
            return gm

        # OK attempt to compile

        def _wrapped_bw_compiler(*args, **kwargs):
            # stop TorchDynamo from trying to compile our generated backwards pass
            return disable(disable(bw_compiler)(*args, **kwargs))

        bw_compiler = kwargs.get("bw_compiler") or kwargs["fw_compiler"]
        kwargs["bw_compiler"] = _wrapped_bw_compiler
        kwargs["inference_compiler"] = (
            kwargs.get("inference_compiler") or kwargs["fw_compiler"]
        )

        from functorch.compile import nop

        from torch._inductor.debug import enable_aot_logging

        # debug asserts slow down compile time noticeably,
        # So only default them on when the aot_eager backend is used.
        if kwargs.get("fw_compiler", None) == nop:
            patch_config = patch("functorch.compile.config.debug_assert", True)
        else:
            patch_config = contextlib.nullcontext()

        try:
            # NB: NOT cloned!
            with enable_aot_logging(), patch_config:
                cg = aot_module_simplified(gm, example_inputs, **kwargs)
                counters["aot_autograd"]["ok"] += 1
                return disable(cg)
        except Exception:
            counters["aot_autograd"]["not_ok"] += 1
            raise

    return compiler_fn


def mem_efficient_fusion_kwargs(use_decomps):
    from functorch.compile import (
        default_decompositions,
        min_cut_rematerialization_partition,
        ts_compile,
    )

    kwargs = {
        # these are taken from memory_efficient_fusion()
        "fw_compiler": ts_compile,
        "bw_compiler": ts_compile,
        "partition_fn": min_cut_rematerialization_partition,
    }

    if use_decomps:
        kwargs["decompositions"] = default_decompositions

    return kwargs


def fake_tensor_unsupported(fn):
    """
    Decorator for backends that need real inputs. We swap out fake
    tensors for zero tensors.
    """

    @functools.wraps(fn)
    def wrapper(model, inputs, **kwargs):
        with _disable_current_modes():
            inputs = list(map(defake, inputs))
            return fn(model, inputs, **kwargs)

    return wrapper


def device_from_inputs(example_inputs) -> torch.device:
    for x in example_inputs:
        if hasattr(x, "device"):
            return x.device


def dtype_from_inputs(example_inputs) -> torch.dtype:
    for x in example_inputs:
        if hasattr(x, "dtype"):
            return x.dtype
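As a usage sketch, a custom Dynamo backend would typically combine these helpers roughly as follows. The backend name my_eager and its body are illustrative only and are not part of this commit; the registry and common modules are internal APIs.

import torch
from torch._dynamo.backends.common import device_from_inputs, fake_tensor_unsupported
from torch._dynamo.backends.registry import register_backend

@register_backend           # hypothetical example backend, registered under its function name
@fake_tensor_unsupported    # swap Dynamo's fake tensors for real zero tensors before calling us
def my_eager(gm: torch.fx.GraphModule, example_inputs):
    print("compiling a graph whose inputs live on", device_from_inputs(example_inputs))
    return gm               # returning the GraphModule runs the captured graph unmodified

compiled = torch.compile(lambda x: x.sin() + 1, backend="my_eager")
compiled(torch.randn(4))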
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/cudagraphs.py ADDED
@@ -0,0 +1,184 @@
import logging
import operator
from collections import defaultdict
from typing import Set

import torch

from torch.fx import GraphModule
from torch.fx.passes.backends.cudagraphs import partition_cudagraphs
from torch.multiprocessing.reductions import StorageWeakRef
from torch.nn import Module
from torch.utils._pytree import tree_map
from .common import aot_autograd
from .registry import register_backend

log = logging.getLogger(__name__)


def cloner(t):
    if isinstance(t, torch.Tensor):
        return t.clone()
    else:
        return t


class CudaGraphModule(Module):
    gm: GraphModule
    mutated_inputs: Set[int]

    def __init__(self, gm, mutated_inputs):
        super().__init__()
        self.gm = gm
        self.mutated_inputs = mutated_inputs

    warmed_up = False

    # these are all None or all filled
    graph = None
    static_inputs = None
    static_outputs = None

    # NB: we override __call__ as we don't need any nn.Module machinery
    # and to reduce overhead
    def __call__(self, *args):
        # TODO: once we've recorded here, we'd like to replace the __call__
        # implementation with compiled bytecode that copies into static, replays
        # the cuda graph, then copies out. First condition is the hotpath,
        # needs optimizing
        if self.graph is not None:
            assert len(args) == len(self.static_inputs)
            for dst, src in zip(self.static_inputs, args):
                dst.copy_(src)
            self.graph.replay()
            for i in self.mutated_inputs:
                args[i].copy_(self.static_inputs[i])
            return tree_map(cloner, self.static_outputs)

        elif self.warmed_up:
            # record
            self.static_inputs = [x.clone() for x in args]
            self.graph = torch.cuda.CUDAGraph()
            with torch.cuda.graph(self.graph):
                self.static_outputs = self.gm(*self.static_inputs)
            # NB: recording doesn't actually run the operations, so
            # now we immediately replay the graph to serve up the result
            self.graph.replay()
            for i in self.mutated_inputs:
                args[i].copy_(self.static_inputs[i])
            return tree_map(cloner, self.static_outputs)

        else:
            # warmup
            stream = torch.cuda.Stream()
            stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(stream):
                r = self.gm(*args)
            torch.cuda.current_stream().wait_stream(stream)
            self.warmed_up = True
            return r


# Interpreter versions of these passes can be found at
# https://gist.github.com/ezyang/df2d746cac3b2c7d55c181e37c57ef23


def find_input_mutations(g):
    def meta_fk(meta):
        return meta["val"] if "val" in meta else meta["fake_result"]

    inputs = defaultdict(set)
    input_idx = 0
    mutated_inputs = set()
    for n in g.nodes:
        if n.op == "placeholder":
            inputs[StorageWeakRef(meta_fk(n.meta)._typed_storage())].add(input_idx)
            input_idx += 1
        elif n.op == "call_function":
            if n.target is operator.getitem:
                continue
            schema = n.target._schema
            for i, arg in enumerate(schema.arguments):
                if i < len(n.args):
                    argument = n.args[i]
                else:
                    if arg.name not in n.kwargs:
                        continue
                    argument = n.kwargs[arg.name]
                mut_arg = False
                if arg.alias_info:
                    if arg.alias_info.is_write:
                        mut_arg = True
                if mut_arg:
                    # TODO: not correct for args that contain tensors in a struct
                    # like list
                    mutated_inputs |= inputs[
                        StorageWeakRef(meta_fk(argument.meta)._typed_storage())
                    ]
        # TODO: error on unrecognized nodes
    return mutated_inputs


# Mutates input graph
def apply_cuda_graphs(gm):
    for n in gm.graph.nodes:
        if n.op == "call_module":
            assert not n.kwargs
            submod = gm.get_submodule(n.target)
            gm.delete_submodule(n.target)
            mutated_inputs = find_input_mutations(submod.graph)
            gm.add_submodule(n.target, CudaGraphModule(submod, mutated_inputs))
    # NB: we didn't actually change the graph, no need for recompile


def cudagraphs(model, inputs):
    model = partition_cudagraphs(model, inputs)
    apply_cuda_graphs(model)
    return model


aot_cudagraphs = aot_autograd(fw_compiler=cudagraphs, bw_compiler=cudagraphs)

# aot_cudagraphs only applies CUDA graphs to the graph. It is also helpful
# for debugging and can serve as a perf baseline.
# TODO(jansel): rename to just "cudagraphs"?
register_backend(name="cudagraphs", compiler_fn=aot_cudagraphs)


def cudagraphs_inner(model, inputs, copy_outputs=True, copy_inputs=True):
    """This isn't registered as a backend, but is used in some benchmarks"""
    assert isinstance(inputs, (list, tuple))
    if copy_inputs:
        static_inputs = [torch.zeros_like(x) for x in inputs]
    else:
        static_inputs = list(inputs)

    # warmup
    torch.cuda.synchronize()
    stream = torch.cuda.Stream()
    stream.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(stream):
        model(*inputs)
    stream.synchronize()
    torch.cuda.current_stream().wait_stream(stream)
    torch.cuda.synchronize()

    # record
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph, stream=stream):
        static_outputs = model(*static_inputs)
    if not isinstance(static_outputs, (list, tuple)):
        static_outputs = (static_outputs,)

    def run(*new_inputs):
        assert len(static_inputs) == len(new_inputs)
        if copy_inputs:
            for dst, src in zip(static_inputs, new_inputs):
                dst.copy_(src)
        graph.replay()
        if copy_outputs:
            return [x.clone() for x in static_outputs]
        else:
            return static_outputs

    return run
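A short usage sketch for cudagraphs_inner, which the docstring above notes is used by benchmarks rather than registered as a backend. It requires a CUDA device; the toy model and inputs below are placeholders, and the module path is an internal API.

import torch
from torch._dynamo.backends.cudagraphs import cudagraphs_inner  # internal module path

model = torch.nn.Linear(16, 16).cuda()
inputs = [torch.randn(8, 16, device="cuda")]

run = cudagraphs_inner(model, inputs)  # warm up, then record the model into a CUDA graph
out = run(*inputs)                     # copy into static buffers, replay the graph, clone outputs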
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/debugging.py ADDED
@@ -0,0 +1,287 @@
import dataclasses
import functools
from importlib import import_module
from typing import Any, List, Optional

from functorch.compile import min_cut_rematerialization_partition

import torch
from torch import _guards
from torch._functorch.compilers import ts_compile
from .common import aot_autograd
from .registry import register_debug_backend as register_backend

"""
This file contains TorchDynamo backends intended for debugging uses.
"""


@register_backend
def eager(gm, fake_tensor_inputs):
    return gm


@register_backend
def pre_dispatch_eager(gm, fake_tensor_inputs):
    from torch.fx.experimental.proxy_tensor import make_fx

    def runnable_gm(*args):
        return torch.fx.Interpreter(gm).run(*args)

    pre_dispatch_gm = make_fx(runnable_gm, pre_dispatch=True)(*fake_tensor_inputs)
    pre_dispatch_gm.print_readable()

    return pre_dispatch_gm


@register_backend
def eager_debug(gm, fake_tensor_inputs):
    from torch._subclasses.schema_check_mode import SchemaCheckMode

    # We could add more debugging bits here.
    # Right now, this backend can be used to check for and error on
    # custom dispatcher ops that have incorrect schemas.
    def inner(*args):
        with SchemaCheckMode():
            return torch.fx.Interpreter(gm).run(*args)

    return inner


@register_backend(name="ts")
def torchscript(gm, fake_tensor_inputs):
    return torch.jit.script(gm)


# used boxed call to discard inputs when they are no longer needed
def boxed_nop(fx_g, example_inputs):
    def run(args):
        return torch.fx.Interpreter(fx_g).boxed_run(args)

    run._boxed_call = True
    return run


# Useful for debugging purpose
# aot_eager uses AOT Autograd backend with nop compiler. It is helpful in debugging.
aot_eager = aot_autograd(
    fw_compiler=boxed_nop, partition_fn=min_cut_rematerialization_partition
)
register_backend(name="aot_eager", compiler_fn=aot_eager)

aot_eager_default_partitioner = aot_autograd(fw_compiler=boxed_nop)
register_backend(
    name="aot_eager_default_partitioner", compiler_fn=aot_eager_default_partitioner
)

# Uses TorchInductor AOT Autograd decomps and partitioner to isolate aot vs
# inductor problems.
# aot_eager_decomp_partition just replaces the inductor compiler with nop to help
# isolate inductor vs aot_eager errors
aot_eager_decomp_partition = aot_autograd(
    # these are taken from memory_efficient_fusion()
    fw_compiler=boxed_nop,
    bw_compiler=boxed_nop,
    # NB: lambda here is to delay import of inductor
    decompositions=lambda: import_module(
        "torch._inductor.compile_fx"
    ).select_decomp_table(),
    partition_fn=functools.partial(
        min_cut_rematerialization_partition, compiler="inductor"
    ),
)
register_backend(
    name="aot_eager_decomp_partition", compiler_fn=aot_eager_decomp_partition
)

# AOT Autograd with torchscript backend. Default partitioner.
# aot_ts uses torchscript backend. We can use this with both nnc and nvfuser
# by using the relevant fuser with torch.jit.fuser(...)
aot_ts = aot_autograd(fw_compiler=ts_compile)
register_backend(name="aot_ts", compiler_fn=aot_ts)

# These buggy backends are used for inducing bugs so that we can test
# our repro extraction / minifier scripts


class ReluCompileError(Exception):
    pass


class TestingOnlyCompileError(Exception):
    pass


@register_backend
def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            raise ReluCompileError()
    return gm


@register_backend
def relu_runtime_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            node.target = torch._assert
            node.args = (False, "ReluRuntimeError")
    gm.recompile()
    return gm


@register_backend
def relu_accuracy_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    for node in gm.graph.nodes:
        if node.target == torch.relu:
            node.target = torch.add
            node.args = (node.args[0], 1)
    gm.recompile()

    return gm


@register_backend
def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs):
    # Require at least one non-trivial thing in the graph,
    # see https://github.com/pytorch/pytorch/issues/102898
    for node in gm.graph.nodes:
        if node.op == "call_function":
            break
    else:
        return gm
    for t in example_inputs:
        if not t.is_leaf:
            raise TestingOnlyCompileError()
    return gm


@dataclasses.dataclass
class ExplainOutput:
    """
    This is the output of :func:`torch._dynamo.explain()`
    There is no reason to create this class directly.
    """

    graphs: List[torch.fx.GraphModule]
    graph_count: int
    graph_break_count: int
    break_reasons: List[
        Any
    ]  # Type is GraphCompileReason but doesn't matter for this purpose
    op_count: int
    ops_per_graph: Optional[List[torch.fx.Node]] = None
    out_guards: Optional[List[_guards.Guard]] = None
    compile_times: Optional[str] = None

    def __str__(self):
        output = f"Graph Count: {self.graph_count}\n"
        output += f"Graph Break Count: {self.graph_break_count}\n"
        output += f"Op Count: {self.op_count}\n"

        output += "Break Reasons:\n"
        for idx, break_reason in enumerate(self.break_reasons):
            output += f"  Break Reason {idx+1}:\n"
            output += f"    Reason: {break_reason.reason}\n"
            output += "    User Stack:\n"
            for frame_summary in break_reason.user_stack:
                output += f"      {frame_summary}\n"

        if self.ops_per_graph is not None:
            output += "Ops per Graph:\n"
            for idx, ops in enumerate(self.ops_per_graph):
                output += f"  Ops {idx+1}:\n"
                for op in ops:
                    output += f"    {op}\n"

        if self.out_guards is not None:
            output += "Out Guards:\n"
            for i, guard in enumerate(self.out_guards):
                output += f"  Guard {i+1}:\n"
                output += f"    {str(guard)}"

        if self.compile_times is not None:
            output += f"Compile Times: {self.compile_times}\n"
        return output


def _explain_graph_detail(
    gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons
):
    """
    This function is a utility which processes a torch.fx.GraphModule and
    accumulates information about its ops, graph breaks, and other details. It
    is intended to be used by the ExplainWithBackend class and
    `torch._dynamo.explain()` to provide details from Dynamo's graph capture.

    Parameters:
        gm (torch.fx.GraphModule): The GraphModule to be processed.
        graphs (list): A list that accumulates all the GraphModules processed.
        op_count (int): The total count of operations in all GraphModules processed so far.
        ops_per_graph (list): A list that accumulates the operations of each GraphModule.
        break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule.

    Returns:
        tuple: A tuple containing the processed GraphModule, the updated lists of graphs,
            operations per graph, and break reasons, and the updated operation count.
    """
    graphs.append(gm)
    ops = [node.target for node in gm.graph.nodes if node.op == "call_function"]
    op_count += len(ops)
    ops_per_graph.append(ops)
    if gm.compile_subgraph_reason.graph_break:
        break_reasons.append(gm.compile_subgraph_reason)

    return gm, graphs, op_count, ops_per_graph, break_reasons


class ExplainWithBackend:
    """
    This class is intended to be used as a backend for `torch.compile`. It is
    composable with other backends. When used in this way, it accumulates
    information about graph breaks, ops, and other info and provides a string
    representation summarizing this information.

    Attributes:
        backend (str): The name of the backend to use for optimization.
        graphs (list): A list of the graphs captured by TorchDynamo.
        op_count (int): The total number of operations in all optimized graphs.
        break_reasons (list): A list of graph break reasons with stack traces.

    Example Usage:
        def fn(x):
            x = torch.sigmoid(x)
            return x

        torch._dynamo.reset()
        eb = ExplainWithBackend("inductor")
        optimized_fn = torch.compile(fn, backend=eb)
        result = optimized_fn(torch.randn(5))
        print(eb.output())
    """

    def __init__(self, backend):
        from .registry import lookup_backend

        self.backend = lookup_backend(backend)
        self.graphs = []
        self.op_count = 0
        self.break_reasons = []

    def __call__(self, gm: torch.fx.GraphModule, example_inputs):
        gm, self.graphs, self.op_count, _, self.break_reasons = _explain_graph_detail(
            gm, self.graphs, self.op_count, [], self.break_reasons
        )
        return self.backend(gm, example_inputs)

    def output(self) -> ExplainOutput:
        graph_count = len(self.graphs)
        output = ExplainOutput(
            self.graphs,
            graph_count,
            graph_count - 1,
            self.break_reasons,
            self.op_count,
        )

        return output
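The backend names registered in this file can be passed directly to torch.compile, which is handy when bisecting whether a failure comes from Dynamo capture, AOT Autograd, or Inductor. A minimal sketch:

import torch
import torch._dynamo

def f(x):
    return torch.relu(x) * 2

# "eager" runs the captured graph as-is, "aot_eager" adds AOT Autograd with a no-op compiler,
# and "aot_eager_decomp_partition" additionally applies Inductor's decompositions and partitioner.
for backend in ("eager", "aot_eager", "aot_eager_decomp_partition"):
    torch._dynamo.reset()
    compiled = torch.compile(f, backend=backend)
    print(backend, compiled(torch.randn(4)))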
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/distributed.py ADDED
@@ -0,0 +1,465 @@
1
+ import logging
2
+ import traceback
3
+ from dataclasses import dataclass, field
4
+ from typing import Any, List, Optional
5
+
6
+ import torch
7
+ from torch import fx
8
+ from torch._dynamo.output_graph import GraphCompileReason
9
+ from torch._dynamo.utils import deepcopy_to_fake_tensor, detect_fake_mode
10
+ from torch.fx.node import Node
11
+
12
+ # Regular log messages should go through 'log'.
13
+ # ddp_graph_log is a separate artifact logger reserved for dumping graphs.
14
+ # See docs/source/logging.rst for more info.
15
+ log = logging.getLogger(__name__)
16
+ ddp_graph_log = torch._logging.getArtifactLogger(__name__, "ddp_graphs")
17
+
18
+
19
+ def args_str(args):
20
+ # a debug helper
21
+ if torch.is_tensor(args):
22
+ return f"T[{args.shape}]"
23
+ elif isinstance(args, tuple):
24
+ return f"tuple({', '.join([args_str(x) for x in args])})"
25
+ elif isinstance(args, list):
26
+ return f"list({', '.join([args_str(x) for x in args])})"
27
+ else:
28
+ return str(args)
29
+
30
+
31
+ @dataclass
32
+ class Bucket:
33
+ size: int = 0
34
+ params: List[str] = field(default_factory=list)
35
+ nodes: List[fx.Node] = field(default_factory=list)
36
+
37
+ # param_ids is just used for unit testing
38
+ param_ids: List = field(default_factory=list)
39
+
40
+ # keep track of any buckets that were extended for logging purposes
41
+ opcount_increased_to_capture_external_output: int = 0
42
+ paramsize_before_opcount_increase: int = 0
43
+
44
+
45
+ def bucket_has_external_output(bucket: Bucket) -> bool:
46
+ nodes_in_bucket = set()
47
+ # we want to iterate in reverse order, but clumsi-luckily the bucket.nodes list was already created backwards
48
+ # so we don't reverse it here
49
+ for node in bucket.nodes:
50
+ # assume node.op != output, since those are filtered in the original iteration
51
+ nodes_in_bucket.add(node)
52
+ for user in node.users:
53
+ if user not in nodes_in_bucket:
54
+ return True
55
+ return False
56
+
57
+
58
+ def pretty_print_buckets(buckets: List[Bucket], bucket_bytes_cap: int):
59
+ headers = ("Index", "Size (b)", "Param Names")
60
+ rows = []
61
+ extended_buckets = []
62
+ for idx, bucket in enumerate(reversed(buckets)):
63
+ if len(bucket.params) > 0:
64
+ rows.append((idx, bucket.size, bucket.params[0]))
65
+ for param in bucket.params[1:]:
66
+ rows.append((None, None, param))
67
+ if bucket.opcount_increased_to_capture_external_output > 0:
68
+ extended_buckets.append(
69
+ (
70
+ idx,
71
+ bucket.opcount_increased_to_capture_external_output,
72
+ bucket.size - bucket.paramsize_before_opcount_increase,
73
+ )
74
+ )
75
+
76
+ if len(rows):
77
+ log.info(
78
+ "\nDDPOptimizer used bucket cap %s and created %d buckets. Enable debug logs for detailed bucket info.",
79
+ bucket_bytes_cap,
80
+ len(buckets),
81
+ )
82
+
83
+ if len(extended_buckets):
84
+ log.warning(
85
+ "Some buckets were extended beyond their requested parameter capacities"
86
+ " in order to ensure each subgraph has an output node, required for fx graph partitioning."
87
+ " This can be the case when a subgraph would have only contained nodes performing inplace mutation,"
88
+ " and returning no logical outputs. This should not be a problem, unless it results in too few graph"
89
+ " partitions for optimal DDP performance."
90
+ )
91
+
92
+ try:
93
+ from tabulate import tabulate
94
+
95
+ log.debug(
96
+ "\nDDPOptimizer produced the following bucket assignments:\n%s",
97
+ tabulate(rows, headers=headers, tablefmt="simple_grid"),
98
+ )
99
+
100
+ if len(extended_buckets):
101
+ log.warning(
102
+ "DDPOptimizer extended these buckets to ensure per-subgraph output nodes:\n%s",
103
+ tabulate(
104
+ extended_buckets,
105
+ headers=("Index", "Extra Ops", "Extra Param Size (b)"),
106
+ tablefmt="simple_grid",
107
+ ),
108
+ )
109
+ except ImportError:
110
+ log.debug(
111
+ "Please `pip install tabulate` in order to display ddp bucket sizes and diagnostic information."
112
+ )
113
+ else:
114
+ log.debug("DDPOptimizer captured no parameters and did not split this graph.")
115
+
116
+
117
+ def has_higher_order_op(gm):
118
+ # Check if there is a higher order op in the graph
119
+ for node in gm.graph.nodes:
120
+ if node.op == "get_attr":
121
+ maybe_param = getattr(gm, node.target)
122
+ if isinstance(maybe_param, torch.fx.GraphModule):
123
+ return True
124
+ return False
125
+
126
+
127
+ class DDPOptimizer:
128
+ """Note [DDPOptimizer]
129
+ DDPOptimizer applies when dynamo compiles models wrapped in DistributedDataParallel (DDP),
130
+ breaking the dynamo graph into chunks to compile separately, with the breaks aligning to
131
+ the boundaries of gradient-allreduce buckets chosen by DDP.
132
+
133
+ Background/Motivation
134
+ - DDP uses allreduce collectives to synchronize partial gradients computed on different workers
135
+ - DDP groups gradient allreduces into 'buckets' to optimize communication efficiency of all-reduce
136
+ - Parameters grouped into buckets are assumed to be adjacent in time, so they become ready
137
+ at around the same time during backward and thus can share the same allreduce efficiently
138
+ - Allreduces must overlap with backward compute for optimal training performance
139
+ - DDP schedules allreduces using 'hooks' fired from the c++ autograd engine in pytorch, which
140
+ operates when individual grads become 'ready'
141
+ - Dynamo+AOTAutograd produces a single fused graph that runs 'atomically' from the perspective of the
142
+ autograd engine, such that all gradients become 'ready' at the same time. Hooks fire after the whole
143
+ fused backward function executes, preventing any overlap of compute and communication
144
+
145
+ Algorithm
146
+ - DDPOptimizer starts off with an FX graph traced by dynamo which represents forward. It can traverse
147
+ this graph in reverse order to determine the true order that gradients will become ready during backward.
148
+ - Parameter sizes are counted in reverse order, up to a bucket size limit, at which point a new bucket is started
149
+ and a graph break introduced
150
+ - Each of the subgraphs is compiled by the compiler provided to dynamo by the user, and then fused back together
151
+ into an outer module that is returned to the user
152
+
153
+ Notes
154
+ - It would be better to enforce (by adding an API to DDP) that the bucket splits chosen here are used by DDP,
155
+ and that DDP does not need to detect or optimize bucket order by observing execution at runtime, as it does
156
+ in eager.
157
+ - If Dynamo can't capture a whole graph for the portion of the model wrapped by DDP, this algorithm will currently
158
+ produce splits that do not necessarily align with the buckets used by DDP. This should result in performance
159
+ degradation approaching the baseline case where graph-splits are not used, but not worse.
160
+ - If the backend compiler fails to compile a single subgraph, it will execute eagerly despite the rest of the
161
+ subgraphs being compiled
162
+ - DDP has a 'parameters_and_buffers_to_ignore' field, which DDPOptimizer attempts to honor by reading markers
163
+ left by DDP on individual parameters. In cases where other transformations, such as reparameterization, are
164
+ also used, the ignore markers could be lost. If DDPOptimizer fails to ignore a parameter ignored by DDP,
165
+ it is not catastrophic but could impact performance by choosing sub-optimal bucket splits.
166
+ - DDPOptimizer always ignores all buffers, regardless of their ignore flag, since buffers do not require gradients,
167
+ and therefore aren't allreduced by DDP. (They are broadcast during forward, but this is not covered by
168
+ DDPOptimizer)
169
+
170
+ Debugging
171
+ - Generally, it is easiest to debug DDPOptimizer in a single process program, using pdb.
172
+ - In many cases, the log messages are helpful (they show bucket size assignments)-
173
+ just configure torch._dynamo.config.log_level to info or debug.
174
+ - See `benchmarks/dynamo/distributed.py` for a simple harness that will run a toy model or a torchbench model
175
+ in a single process (or with torchrun, in multiple processes)
176
+
177
+ Args:
178
+ bucket_bytes_cap (int): Controls the size of buckets, in bytes, used to determine graphbreaks. Should be
179
+ set to match the equivalent parameter on the original DDP module.
180
+
181
+ backend_compile_fn (callable): A dynamo compiler function, to be invoked to compile each subgraph.
182
+
183
+ first_bucket_cap (int): Controls the size of the first bucket. Should match DDP's first bucket cap. DDP
184
+ special-cases the first bucket size since it is sometimes optimal to start a small allreduce early.
185
+
186
+ """
187
+
188
+ def __init__(
189
+ self,
190
+ bucket_bytes_cap: int,
191
+ backend_compile_fn,
192
+ first_bucket_cap: Optional[int] = None,
193
+ ):
194
+ if first_bucket_cap is not None:
195
+ self.first_bucket_cap = first_bucket_cap
196
+ elif torch.distributed.is_available():
197
+ # this constant comes from C10D lib which is not always built
198
+ self.first_bucket_cap = torch.distributed._DEFAULT_FIRST_BUCKET_BYTES
199
+ else:
200
+ self.first_bucket_cap = bucket_bytes_cap
201
+
202
+ self.bucket_bytes_cap = bucket_bytes_cap
203
+ assert (
204
+ self.first_bucket_cap <= self.bucket_bytes_cap
205
+ ), "First bucket should be smaller/equal to other buckets to get comms warmed up ASAP"
206
+
207
+ self.backend_compile_fn = backend_compile_fn
208
+
209
+ def _ignore_parameter(self, parameter):
210
+ return hasattr(parameter, "_ddp_ignored") and parameter._ddp_ignored
211
+
212
+ def compile_fn(self, gm: fx.GraphModule, example_inputs: List[torch.Tensor]):
213
+ """
214
+ Implements graph splitting, first determining a set of buckets by counting
215
+ parameter sizes in reverse graph order, then invoking the user/backend compiler
216
+ to compile each subgraph. Finally, stitches compiled graphs into one graphmodule
217
+ and returns its callable.
218
+ """
219
+
220
+ # Today, optimize_ddp=True and keep_output_stride=False can lead to silent
221
+ # correctness issues. The problem is that ddp_optimizer works by partitioning
222
+ # the dynamo graph, sending each subgraph through aot autograd to inductor,
223
+ # and creates example inputs by eagerly interpreting each subgraph to get
224
+ # an output with the same metadata that we'd get from eager mode.
225
+ # This is a problem though, for torch._inductor.config.keep_output_stride.
226
+ # The above config can cause the outputs of the first graph to have
227
+ # **different** strides from eager, causing the inputs that we pass
228
+ # to the second graph to be wrong.
229
+ # To really fix this, we would need to faithfully ask inductor
230
+ # what the outputs to each graph it expects are.
231
+ assert torch._inductor.config.keep_output_stride, """\
232
+ Detected that you are running DDP with torch.compile, along with these two flags:
233
+ - torch._dynamo.config.optimize_ddp = True
234
+ - torch._inductor.config.keep_output_stride = False
235
+ This combination of flags is incompatible. Please set torch._dynamo.config.optimize_ddp = False,
236
+ or file a github issue."""
237
+ fake_mode = detect_fake_mode(example_inputs)
238
+ if fake_mode is None:
239
+ fake_mode = torch._subclasses.fake_tensor.FakeTensorMode()
240
+
241
+ if has_higher_order_op(gm):
242
+ # This indicates presence of a higher order op. For now, we
243
+ # have no way to break the higher order op into two buckets.
244
+ # Allowing higher order ops in the graph also requires
245
+ changes in the split_module, because the graph splitter
246
+ # currently assumes that all the args of all ops are
247
+ # tensors, but in the case of higher order ops, it could be
248
+ a graph module. As a workaround, we short-circuit compilation here.
249
+ raise NotImplementedError(
250
+ "DDPOptimizer backend: Found a higher order op in the graph. "
251
+ "This is not supported. Please turn off DDP optimizer using "
252
+ "torch._dynamo.config.optimize_ddp=False. Note that this can "
253
+ "cause performance degradation because there will be one bucket "
254
+ "for the entire Dynamo graph. Please refer to this issue - "
255
+ "https://github.com/pytorch/pytorch/issues/104674."
256
+ )
257
+
258
+ # 1: compute the partition map according to DDP bucket logic
259
+ buckets = [Bucket()] # (size, param_names)
260
+ for node in reversed(gm.graph.nodes):
261
+ if node.op in ("output", "placeholder"):
262
+ continue
263
+
264
+ if (
265
+ buckets[0].size >= self.bucket_bytes_cap
266
+ or len(buckets) == 1
267
+ and buckets[0].size >= self.first_bucket_cap
268
+ ):
269
+ if bucket_has_external_output(buckets[0]):
270
+ buckets.insert(0, Bucket())
271
+ else:
272
+ # continue building this bucket past the point of filling its parameter capacity,
273
+ # to increase chances it contains at least one node that is either a global output or
274
+ # passed as input to a subsequent graph
275
+
276
+ if buckets[0].opcount_increased_to_capture_external_output == 0:
277
+ buckets[0].paramsize_before_opcount_increase = buckets[0].size
278
+ buckets[0].opcount_increased_to_capture_external_output += 1
279
+
280
+ if node.op == "call_module":
281
+ target = gm.get_submodule(node.target)
282
+ for name, param in target.named_parameters():
283
+ if param.requires_grad and not self._ignore_parameter(param):
284
+ buckets[0].size += param.untyped_storage().nbytes()
285
+ buckets[0].params.append(f"{node.target}_{name}")
286
+ buckets[0].param_ids.append(id(param))
287
+ elif node.op == "get_attr":
288
+ maybe_param = getattr(gm, node.target)
289
+ if maybe_param.requires_grad and not self._ignore_parameter(
290
+ maybe_param
291
+ ):
292
+ buckets[0].size += maybe_param.untyped_storage().nbytes()
293
+ buckets[0].params.append(node.target)
294
+ buckets[0].param_ids.append(id(maybe_param))
295
+
296
+ # All nodes have to be mapped to a bucket, even if they don't have their own params
297
+ # Ignored params still end up in buckets, we just don't count them towards the capacity
298
+ buckets[0].nodes.append(node)
299
+
300
+ if len(buckets) > 1 and buckets[0].size == 0:
301
+ # we collected a small preamble graph with ops that don't include parameters, fuse it back
302
+ buckets[1].nodes.extend(buckets[0].nodes)
303
+ assert len(buckets[0].params) == 0, "Params should be empty if size is 0"
304
+ del buckets[0]
305
+
306
+ # stash buckets for testing/debugging purposes
307
+ self.buckets = buckets
308
+ pretty_print_buckets(buckets, self.bucket_bytes_cap)
309
+
310
+ if len(buckets) == 1:
311
+ # bypass split/fuse logic if there is only one bucket
312
+ return self.backend_compile_fn(gm, example_inputs)
313
+
314
+ # 2: partition the graphmodule according to bucket capacity
315
+ partition_map = {}
316
+ for idx, b in enumerate(buckets):
317
+ for node in b.nodes:
318
+ partition_map[node] = idx
319
+
320
+ split_gm = fx.passes.split_module.split_module(
321
+ gm, None, lambda node: partition_map[node]
322
+ )
323
+
324
+ debug_str = (
325
+ f"\n---orig graph---\n{gm.graph}\n"
326
+ + f"\n---split graph---\n{split_gm.graph}\n"
327
+ )
328
+ for name, module in split_gm.named_modules():
329
+ if "." not in name and len(name):
330
+ # only print the submod graphs, not their children
331
+ debug_str += f"\n---{name} graph---\n{module.graph}\n"
332
+ debug_str += "\n---------------\n"
333
+ ddp_graph_log.debug(debug_str)
334
+
335
+ # 3: compile each of the partitioned submodules using the user-provided compiler
336
+ class SubmodCompiler(torch.fx.interpreter.Interpreter):
337
+ def __init__(self, module, compiler):
338
+ super().__init__(module)
339
+ self.compiler = compiler
340
+
341
+ def compile_submod(self, input_mod, args, kwargs):
342
+ """
343
+ Compile the submodule,
344
+ using a wrapper to make sure its output is always a tuple,
345
+ which is required by AotAutograd based compilers
346
+ """
347
+ assert len(kwargs) == 0, "We assume only args for these modules"
348
+
349
+ class WrapperModule(torch.nn.Module):
350
+ def __init__(self, submod, unwrap_singleton_tuple):
351
+ super().__init__()
352
+ self.submod = submod
353
+ self.unwrap_singleton_tuple = unwrap_singleton_tuple
354
+
355
+ def forward(self, *args):
356
+ x = self.submod(*args)
357
+ # TODO(whc)
358
+ # for some reason the isinstance check is necessary if I split one node per submod
359
+ # - even though I supposedly wrapped the output in a tuple in those cases, the real
360
+ # compiled module was still returning a tensor
361
+ if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):
362
+ return x[0]
363
+ return x
364
+
365
+ unwrap_singleton_tuple = False
366
+ for sn in input_mod.graph.nodes:
367
+ if sn.op == "output":
368
+ if not isinstance(sn.args[0], tuple):
369
+ unwrap_singleton_tuple = True
370
+ sn.args = (sn.args,)
371
+
372
+ input_mod.recompile()
373
+ input_mod.compile_subgraph_reason = GraphCompileReason(
374
+ "DDPOptimizer intentional graph-break (See Note [DDPOptimizer])."
375
+ " Set `torch._dynamo.config.optimize_ddp = False` to disable.",
376
+ [
377
+ # it's close to useless to get a real stacktrace here, and quite verbose.
378
+ traceback.FrameSummary(__file__, 0, DDPOptimizer),
379
+ ],
380
+ )
381
+ wrapper = WrapperModule(
382
+ self.compiler(input_mod, args),
383
+ unwrap_singleton_tuple,
384
+ )
385
+ return wrapper
386
+
387
+ # Note:
388
+ #
389
+ # The way distributed works today around fake tensors can be somewhat confusing.
390
+ # Some of these codepaths are shared in both runtime, and compile time. The presence
391
+ # of a fake_mode, read off of fake tensor inputs, dictates how we will operate.
392
+ #
393
+ # A few things to keep in mind:
394
+ #
395
+ # 1) We invoke `compile_submod` with a real module. The output of that gets stored
396
+ # on the graph via `self.module.add_submodule(n.target, compiled_submod_real)`.
397
+ #
398
+ # 2) When running a call_module targeted node, if we have a fake_mode, we fakify the
399
+ # module we got from self.fetch_attr(n.target). Regardless of fake_mode, we then execute it.
400
+ #
401
+ # 3) Fake tensors should always be around during compile time.
402
+ #
403
+ # 4) Fake tensors should never be around at runtime.
404
+ #
405
+ # 5) We end up with a compilation mode that takes a real submodule and fake tensors,
406
+ # to match what aot_autograd expects. See Note: [Fake Modules and AOTAutograd]
407
+ def run_node(self, n: Node) -> Any:
408
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
409
+ new_args = []
410
+ assert fake_mode
411
+ for arg in args:
412
+ if isinstance(arg, torch.Tensor) and not isinstance(
413
+ arg, torch._subclasses.FakeTensor
414
+ ):
415
+ new_args.append(
416
+ torch._dynamo.utils.to_fake_tensor(arg, fake_mode)
417
+ )
418
+ else:
419
+ new_args.append(arg)
420
+
421
+ log.debug("run_node %s, %s got args %s", n.op, n.target, args_str(args))
422
+ assert isinstance(args, tuple)
423
+ assert isinstance(kwargs, dict)
424
+
425
+ if n.op == "call_module":
426
+ real_mod = self.fetch_attr(n.target)
427
+ if fake_mode:
428
+ curr_submod = deepcopy_to_fake_tensor(real_mod, fake_mode)
429
+ else:
430
+ curr_submod = real_mod
431
+
432
+ ddp_graph_log.debug(
433
+ "\n---%s graph---\n%s", n.target, curr_submod.graph
434
+ )
435
+
436
+ # When calling the compiler on the submod, inputs (new_args) are expected to
437
+ # be FakeTensors already since Dynamo would have made them FakeTensors in the
438
+ # non-DDP flow. However, the parameters are _not_ expected to be FakeTensors,
439
+ # since this wrapping happens during compilation
440
+ compiled_submod_real = self.compile_submod(
441
+ real_mod, new_args, kwargs
442
+ )
443
+
444
+ # We update the original (outer) graph with a call into the compiled module
445
+ # instead of the uncompiled one.
446
+ self.module.delete_submodule(n.target)
447
+ n.target = "compiled_" + n.target
448
+ self.module.add_submodule(n.target, compiled_submod_real)
449
+
450
+ # Finally, we have to produce inputs for use in compiling the next submodule,
451
+ # and these need to be FakeTensors, so we execute the module under fake_mode
452
+ with fake_mode:
453
+ return curr_submod(*new_args, **kwargs)
454
+ else:
455
+ # placeholder or output nodes don't need to get compiled, just executed
456
+ return getattr(self, n.op)(n.target, new_args, kwargs)
457
+
458
+ submod_compiler = SubmodCompiler(split_gm, self.backend_compile_fn)
459
+ submod_compiler.run(*example_inputs)
460
+ split_gm.recompile()
461
+
462
+ ddp_graph_log.debug(
463
+ "\n---final graph---\n%s\n---------------\n", split_gm.graph
464
+ )
465
+ return split_gm
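Illustrative usage sketch (not part of the diff above): DDPOptimizer is engaged implicitly when a DDP-wrapped module is compiled with torch.compile and torch._dynamo.config.optimize_ddp is left at its default of True. The model, process-group setup, and bucket_cap_mb value below are placeholder assumptions.

    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel as DDP

    def train(rank, world_size):
        dist.init_process_group("nccl", rank=rank, world_size=world_size)
        model = torch.nn.Linear(1024, 1024).to(f"cuda:{rank}")
        # DDP groups gradient allreduces into ~25MB buckets by default; DDPOptimizer
        # reads the same cap and aligns its graph breaks to those bucket boundaries.
        ddp_model = DDP(model, device_ids=[rank], bucket_cap_mb=25)
        compiled = torch.compile(ddp_model, backend="inductor")
        out = compiled(torch.randn(32, 1024, device=f"cuda:{rank}"))
        out.sum().backward()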
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/inductor.py ADDED
@@ -0,0 +1,16 @@
1
+ # mypy: ignore-errors
2
+
3
+ import sys
4
+
5
+ from torch._dynamo import register_backend
6
+
7
+
8
+ @register_backend
9
+ def inductor(*args, **kwargs):
10
+ if sys.platform == "win32":
11
+ raise RuntimeError("Windows not yet supported for inductor")
12
+
13
+ # do import here to avoid loading inductor into memory when it is not used
14
+ from torch._inductor.compile_fx import compile_fx
15
+
16
+ return compile_fx(*args, **kwargs)
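A minimal usage sketch for the backend registered above; the toy function is a placeholder, and compilation is only expected to succeed on non-Windows platforms per the check in the file.

    import torch

    @torch.compile(backend="inductor")
    def f(x):
        return torch.sin(x) + torch.cos(x)

    f(torch.randn(8))  # the first call triggers compilation via compile_fx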
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/onnxrt.py ADDED
@@ -0,0 +1,35 @@
1
+ # This backend is maintained by ONNX team. To direct issues
2
+ # to the right people, please tag related GitHub issues with `module: onnx`.
3
+ #
4
+ # Maintainers' Github IDs: wschin, thiagocrepaldi, BowenBao, abock
5
+ from torch.onnx._internal.onnxruntime import (
6
+ is_onnxrt_backend_supported,
7
+ torch_compile_backend,
8
+ )
9
+ from .registry import register_backend
10
+
11
+
12
+ def has_onnxruntime():
13
+ # FIXME(abock): update test/dynamo/test_backends.py to call is_onnxrt_backend_supported()
14
+ return is_onnxrt_backend_supported()
15
+
16
+
17
+ if is_onnxrt_backend_supported():
18
+ register_backend(name="onnxrt", compiler_fn=torch_compile_backend)
19
+ else:
20
+
21
+ def information_displaying_backend(*args, **kwargs):
22
+ raise ImportError(
23
+ "onnxrt is not registered as a backend. "
24
+ "Please make sure all dependencies such as "
25
+ "numpy, onnx, onnxscript, and onnxruntime-training are installed. "
26
+ "Suggested procedure to fix dependency problem:\n"
27
+ " (1) pip or conda install numpy onnx onnxscript onnxruntime-training.\n"
28
+ " (2) Open a new python terminal.\n"
29
+ " (3) Call the API `torch.onnx.is_onnxrt_backend_supported()`:\n"
30
+ " (4) If it returns `True`, then you can use `onnxrt` backend.\n"
31
+ " (5) If it returns `False`, please execute the package importing section in "
32
+ "torch/onnx/_internal/onnxruntime.py under pdb line-by-line to see which import fails."
33
+ )
34
+
35
+ register_backend(name="onnxrt", compiler_fn=information_displaying_backend)
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/registry.py ADDED
@@ -0,0 +1,113 @@
1
+ import functools
2
+ import sys
3
+ from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple
4
+
5
+ import torch
6
+ from torch import fx
7
+
8
+
9
+ class CompiledFn(Protocol):
10
+ def __call__(self, *args: torch.Tensor) -> Tuple[torch.Tensor, ...]:
11
+ ...
12
+
13
+
14
+ CompilerFn = Callable[[fx.GraphModule, List[torch.Tensor]], CompiledFn]
15
+
16
+ _BACKENDS: Dict[str, CompilerFn] = dict()
17
+
18
+
19
+ def register_backend(
20
+ compiler_fn: Optional[CompilerFn] = None,
21
+ name: Optional[str] = None,
22
+ tags: Sequence[str] = (),
23
+ ):
24
+ """
25
+ Decorator to add a given compiler to the registry to allow calling
26
+ `torch.compile` with string shorthand. Note: for projects not
27
+ imported by default, it might be easier to pass a function directly
28
+ as a backend and not use a string.
29
+
30
+ Args:
31
+ compiler_fn: Callable taking a FX graph and fake tensor inputs
32
+ name: Optional name, defaults to `compiler_fn.__name__`
33
+ tags: Optional set of string tags to categorize backend with
34
+ """
35
+ if compiler_fn is None:
36
+ # @register_backend(name="") syntax
37
+ return functools.partial(register_backend, name=name, tags=tags)
38
+ assert callable(compiler_fn)
39
+ name = name or compiler_fn.__name__
40
+ assert name not in _BACKENDS, f"duplicate name: {name}"
41
+ _BACKENDS[name] = compiler_fn
42
+ compiler_fn._tags = tuple(tags)
43
+ return compiler_fn
44
+
45
+
46
+ register_debug_backend = functools.partial(register_backend, tags=("debug",))
47
+ register_experimental_backend = functools.partial(
48
+ register_backend, tags=("experimental",)
49
+ )
50
+
51
+
52
+ def lookup_backend(compiler_fn):
53
+ """Expand backend strings to functions"""
54
+ if isinstance(compiler_fn, str):
55
+ if compiler_fn not in _BACKENDS:
56
+ _lazy_import()
57
+ if compiler_fn not in _BACKENDS:
58
+ _lazy_import_entry_point(compiler_fn)
59
+ if compiler_fn not in _BACKENDS:
60
+ from ..exc import InvalidBackend
61
+
62
+ raise InvalidBackend(name=compiler_fn)
63
+ compiler_fn = _BACKENDS[compiler_fn]
64
+ return compiler_fn
65
+
66
+
67
+ def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
68
+ """
69
+ Return valid strings that can be passed to:
70
+
71
+ torch.compile(..., backend="name")
72
+ """
73
+ _lazy_import()
74
+ exclude_tags = set(exclude_tags or ())
75
+ return sorted(
76
+ [
77
+ name
78
+ for name, backend in _BACKENDS.items()
79
+ if not exclude_tags.intersection(backend._tags)
80
+ ]
81
+ )
82
+
83
+
84
+ @functools.lru_cache(None)
85
+ def _lazy_import():
86
+ from .. import backends
87
+ from ..utils import import_submodule
88
+
89
+ import_submodule(backends)
90
+
91
+ from ..repro.after_dynamo import dynamo_minifier_backend
92
+
93
+ assert dynamo_minifier_backend is not None
94
+
95
+
96
+ @functools.lru_cache(None)
97
+ def _lazy_import_entry_point(backend_name: str):
98
+ from importlib.metadata import entry_points
99
+
100
+ compiler_fn = None
101
+ group_name = "torch_dynamo_backends"
102
+ if sys.version_info < (3, 10):
103
+ backend_eps = entry_points()
104
+ eps = [ep for ep in backend_eps.get(group_name, ()) if ep.name == backend_name]
105
+ if len(eps) > 0:
106
+ compiler_fn = eps[0].load()
107
+ else:
108
+ backend_eps = entry_points(group=group_name)
109
+ if backend_name in backend_eps.names:
110
+ compiler_fn = backend_eps[backend_name].load()
111
+
112
+ if compiler_fn is not None and backend_name not in list_backends(tuple()):
113
+ register_backend(compiler_fn=compiler_fn, name=backend_name)
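A minimal sketch of the registry API above: a hypothetical do-nothing backend registered under a made-up name, hidden from list_backends() by the "debug" tag, and still reachable through the string shorthand accepted by torch.compile.

    import torch
    from torch._dynamo.backends.registry import (
        register_backend, list_backends, lookup_backend,
    )

    @register_backend(name="my_null_backend", tags=("debug",))
    def my_null_backend(gm: torch.fx.GraphModule, example_inputs):
        gm.graph.print_tabular()  # inspect the captured FX graph
        return gm.forward         # run the graph eagerly, unmodified

    assert "my_null_backend" not in list_backends()        # excluded: tagged "debug"
    assert lookup_backend("my_null_backend") is my_null_backend

    fn = torch.compile(lambda x: x * 2, backend="my_null_backend")
    fn(torch.randn(3))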
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/tensorrt.py ADDED
@@ -0,0 +1,12 @@
1
+ # import torch # type: ignore[import]
2
+ # from .common import device_from_inputs, fake_tensor_unsupported # type: ignore[import]
3
+ # from .registry import register_backend # type: ignore[import]
4
+
5
+ """
6
+ Placeholder for TensorRT backend for dynamo via torch-tensorrt
7
+ """
8
+
9
+ # @register_backend
10
+ # def tensorrt(gm, example_inputs):
11
+ # import torch_tensorrt # type: ignore[import]
12
+ # pass
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/torchxla.py ADDED
@@ -0,0 +1,73 @@
1
+ import logging
2
+ import warnings
3
+
4
+ from functorch.compile import make_boxed_func
5
+
6
+ from ..backends.common import aot_autograd
7
+ from .registry import register_backend, register_experimental_backend
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
+ @register_experimental_backend
13
+ def torchxla_trivial(gm, fake_tensor_inputs):
14
+ return gm
15
+
16
+
17
+ @register_experimental_backend
18
+ def torchxla_trace_once(model, fake_tensor_inputs):
19
+ warnings.warn(
20
+ "This backend will be deprecated in 2.2, please use `openxla` backend instead"
21
+ )
22
+
23
+ return xla_backend_helper(model, fake_tensor_inputs)
24
+
25
+
26
+ @register_backend
27
+ def openxla_eval(model, fake_tensor_inputs):
28
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
29
+
30
+
31
+ def openxla_eval_boxed(model, fake_tensor_inputs):
32
+ return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
33
+
34
+
35
+ def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
36
+ try:
37
+ import torch_xla.core.dynamo_bridge as bridge
38
+ except ImportError as e:
39
+ raise ImportError(
40
+ "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
41
+ ) from e
42
+
43
+ compiled_graph = None
44
+
45
+ def fwd(*args):
46
+ nonlocal model
47
+ nonlocal compiled_graph
48
+ if compiled_graph is None:
49
+ compiled_graph = bridge.extract_compiled_graph(model, args)
50
+ del model
51
+ return compiled_graph(*args)
52
+
53
+ return make_boxed_func(fwd) if boxed else fwd
54
+
55
+
56
+ aot_torchxla_trivial = aot_autograd(
57
+ fw_compiler=torchxla_trivial,
58
+ )
59
+ register_experimental_backend(
60
+ name="aot_torchxla_trivial", compiler_fn=aot_torchxla_trivial
61
+ )
62
+
63
+ aot_torchxla_trace_once = aot_autograd(
64
+ fw_compiler=torchxla_trace_once,
65
+ )
66
+ register_experimental_backend(
67
+ name="aot_torchxla_trace_once", compiler_fn=aot_torchxla_trace_once
68
+ )
69
+
70
+ openxla = aot_autograd(
71
+ fw_compiler=openxla_eval_boxed,
72
+ )
73
+ register_backend(name="openxla", compiler_fn=openxla)
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/backends/tvm.py ADDED
@@ -0,0 +1,170 @@
1
+ import functools
2
+ import importlib
3
+ import logging
4
+ import os
5
+ import tempfile
6
+
7
+ import torch
8
+ from .common import device_from_inputs, fake_tensor_unsupported
9
+
10
+ from .registry import register_backend
11
+
12
+ log = logging.getLogger(__name__)
13
+
14
+
15
+ @register_backend
16
+ @fake_tensor_unsupported
17
+ def tvm(gm, example_inputs, *, scheduler=None, trials=20000):
18
+ import tvm # type: ignore[import]
19
+ from tvm import relay # type: ignore[import]
20
+ from tvm.contrib import graph_executor # type: ignore[import]
21
+
22
+ jit_mod = torch.jit.trace(gm, example_inputs)
23
+ device = device_from_inputs(example_inputs)
24
+ shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
25
+ example_outputs = gm(*example_inputs)
26
+ if len(example_outputs) == 0:
27
+ log.warning("Explicitly fall back to eager due to zero output")
28
+ return gm.forward
29
+ mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
30
+ if device.type == "cuda":
31
+ dev = tvm.cuda(device.index)
32
+ target = tvm.target.cuda()
33
+ else:
34
+ dev = tvm.cpu(0)
35
+ target = tvm.target.Target(llvm_target())
36
+
37
+ if scheduler is None:
38
+ scheduler = os.environ.get("TVM_SCHEDULER", None)
39
+
40
+ if scheduler == "auto_scheduler":
41
+ from tvm import auto_scheduler
42
+
43
+ log_file = tempfile.NamedTemporaryFile()
44
+
45
+ if not os.path.exists(log_file):
46
+ tasks, task_weights = auto_scheduler.extract_tasks(
47
+ mod["main"], params, target
48
+ )
49
+ for task in tasks:
50
+ print(task.compute_dag)
51
+ else:
52
+ print("No tasks")
53
+ if len(tasks) != 0:
54
+ tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
55
+ if not os.path.exists(log_file):
56
+ assert trials > 0
57
+ tune_option = auto_scheduler.TuningOptions(
58
+ num_measure_trials=trials,
59
+ measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
60
+ early_stopping=2000,
61
+ )
62
+ try:
63
+ tuner.tune(tune_option)
64
+ except Exception:
65
+ if os.path.exists(log_file):
66
+ os.unlink(log_file)
67
+ raise
68
+
69
+ with auto_scheduler.ApplyHistoryBest(log_file):
70
+ with tvm.transform.PassContext(
71
+ opt_level=3, config={"relay.backend.use_auto_scheduler": True}
72
+ ):
73
+ lib = relay.build(mod, target=target, params=params)
74
+ elif scheduler == "meta_schedule":
75
+ from tvm import meta_schedule as ms
76
+
77
+ with tempfile.TemporaryDirectory() as work_dir:
78
+ if device.type != "cuda":
79
+ # meta_schedule needs num-cores to be specified
80
+ # here we use the maximum core count
81
+ target = tvm.target.Target(
82
+ f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
83
+ )
84
+ # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch
85
+ # once USE_PT_TVMDSOOP is updated and turned on by default in TVM.
86
+ database = ms.relay_integration.tune_relay(
87
+ mod=mod,
88
+ target=target,
89
+ work_dir=work_dir,
90
+ max_trials_global=20000,
91
+ num_trials_per_iter=64,
92
+ params=params,
93
+ strategy="evolutionary",
94
+ )
95
+ lib = ms.relay_integration.compile_relay(
96
+ database=database,
97
+ mod=mod,
98
+ target=target,
99
+ params=params,
100
+ )
101
+ elif scheduler == "default" or not scheduler:
102
+ # no autotuning
103
+ with tvm.transform.PassContext(opt_level=10):
104
+ lib = relay.build(mod, target=target, params=params)
105
+ else:
106
+ raise NotImplementedError(
107
+ "This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. "
108
+ "There are three available options: default, auto_scheduler and meta_schedule."
109
+ )
110
+ m = graph_executor.GraphModule(lib["default"](dev))
111
+
112
+ def to_torch_tensor(nd_tensor):
113
+ """A helper function to transfer a NDArray to torch.tensor."""
114
+ if nd_tensor.dtype == "bool":
115
+ # DLPack does not support boolean so it can't be handled by
116
+ # torch.utils.dlpack.from_pack. Workaround by going through
117
+ # numpy, although this brings additional data copy overhead.
118
+ return torch.from_numpy(nd_tensor.numpy())
119
+ return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())
120
+
121
+ def to_tvm_tensor(torch_tensor):
122
+ """A helper function to transfer a torch.tensor to NDArray."""
123
+ if torch_tensor.dtype == torch.bool:
124
+ # same reason as above, fallback to numpy conversion which
125
+ # could introduce data copy overhead
126
+ return tvm.nd.array(torch_tensor.cpu().numpy())
127
+ return tvm.nd.from_dlpack(torch_tensor)
128
+
129
+ def exec_tvm(*i_args):
130
+ args = [a.contiguous() for a in i_args]
131
+ shape_info, _ = m.get_input_info()
132
+ active_inputs = {name for name, _ in shape_info.items()}
133
+ for idx, arg in enumerate(args, 0):
134
+ if arg.dim() != 0:
135
+ if arg.requires_grad:
136
+ arg = arg.detach()
137
+ inp_name = f"inp_{idx}"
138
+ if inp_name not in active_inputs:
139
+ log.warning(
140
+ "input %s skipped as not found in tvm's runtime library",
141
+ inp_name,
142
+ )
143
+ continue
144
+ m.set_input(
145
+ inp_name,
146
+ to_tvm_tensor(arg),
147
+ )
148
+ m.run()
149
+ return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]
150
+
151
+ return exec_tvm
152
+
153
+
154
+ tvm_meta_schedule = functools.partial(tvm, scheduler="meta_schedule")
155
+ tvm_auto_scheduler = functools.partial(tvm, scheduler="auto_scheduler")
156
+
157
+
158
+ def has_tvm():
159
+ try:
160
+ importlib.import_module("tvm")
161
+ return True
162
+ except ImportError:
163
+ return False
164
+
165
+
166
+ @functools.lru_cache(None)
167
+ def llvm_target():
168
+ if "avx512" in open("/proc/cpuinfo").read():
169
+ return "llvm -mcpu=skylake-avx512"
170
+ return "llvm -mcpu=core-avx2"
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/decorators.py ADDED
@@ -0,0 +1,284 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ import torch
4
+ from . import allowed_functions
5
+ from .eval_frame import DisableContext, innermost_fn, RunOnlyContext
6
+ from .exc import IncorrectUsage
7
+
8
+ if TYPE_CHECKING:
9
+ from torch._C._dynamo.eval_frame import ( # noqa: F401
10
+ reset_code,
11
+ set_eval_frame,
12
+ set_guard_error_hook,
13
+ skip_code,
14
+ unsupported,
15
+ )
16
+ else:
17
+ for name in dir(torch._C._dynamo.eval_frame):
18
+ if name.startswith("__"):
19
+ continue
20
+ globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
21
+
22
+
23
+ def run(fn=None):
24
+ """Don't do any dynamic compiles, just use prior optimizations"""
25
+ if fn is not None:
26
+ fn = innermost_fn(fn)
27
+ assert callable(fn)
28
+ return RunOnlyContext()(fn)
29
+ return RunOnlyContext()
30
+
31
+
32
+ def disable(fn=None, recursive=True):
33
+ """
34
+ Decorator and context manager to disable TorchDynamo
35
+
36
+ If recursive=True, Dynamo is completely skipped on the decorated function
37
+ frame as well as the recursively invoked functions.
38
+
39
+ If recursive=False, Dynamo skips frames associated with the function code,
40
+ but still processes recursively invoked frames.
41
+ """
42
+ if recursive:
43
+ if fn is not None:
44
+ fn = innermost_fn(fn)
45
+ assert callable(fn)
46
+ return DisableContext()(fn)
47
+ return DisableContext()
48
+ else:
49
+ return skip(fn)
50
+
51
+
52
+ def skip(fn=None):
53
+ """
54
+ Skip frames associated with the function code, but still process recursively
55
+ invoked frames
56
+ """
57
+ if fn is None:
58
+ return skip
59
+ fn = innermost_fn(fn)
60
+ assert callable(fn)
61
+ skip_code(fn.__code__)
62
+ fn._torchdynamo_disable = True
63
+ return fn
64
+
65
+
66
+ def assume_constant_result(fn):
67
+ fn._dynamo_marked_constant = True
68
+ return fn
69
+
70
+
71
+ def allow_in_graph(fn):
72
+ """
73
+ Customize which functions TorchDynamo will include in the generated
74
+ graph. Similar to `torch.fx.wrap()`.
75
+ ::
76
+
77
+ torch._dynamo.allow_in_graph(my_custom_function)
78
+
79
+ @torch._dynamo.optimize(...)
80
+ def fn(a):
81
+ x = torch.add(a, 1)
82
+ x = my_custom_function(x)
83
+ x = torch.add(x, 1)
84
+ return x
85
+
86
+ fn(...)
87
+
88
+ Will capture a single graph containing `my_custom_function()`.
89
+ """
90
+ if isinstance(fn, (list, tuple)):
91
+ return [allow_in_graph(x) for x in fn]
92
+ assert callable(fn), "allow_in_graph expects a callable"
93
+ allowed_functions._allowed_function_ids.add(id(fn))
94
+ allowed_functions._disallowed_function_ids.remove(id(fn))
95
+ allowed_functions._allowed_user_defined_function_ids.add(id(fn))
96
+ return fn
97
+
98
+
99
+ def _disallow_in_graph_helper(throw_if_not_allowed):
100
+ def inner(fn):
101
+ if isinstance(fn, (list, tuple)):
102
+ return [disallow_in_graph(x) for x in fn]
103
+ assert callable(fn), "disallow_in_graph expects a callable"
104
+ if throw_if_not_allowed and not allowed_functions.is_allowed(fn):
105
+ raise IncorrectUsage(
106
+ "disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). "
107
+ "Allowed callables means callables that TorchDynamo puts as-is in the extracted graph."
108
+ )
109
+ allowed_functions._allowed_function_ids.remove(id(fn))
110
+ allowed_functions._disallowed_function_ids.add(id(fn))
111
+ allowed_functions._allowed_user_defined_function_ids.remove(id(fn))
112
+ return fn
113
+
114
+ return inner
115
+
116
+
117
+ def disallow_in_graph(fn):
118
+ """
119
+ Customize which functions TorchDynamo will exclude in the generated
120
+ graph and force a graph break on.
121
+ ::
122
+
123
+ torch._dynamo.disallow_in_graph(torch.sub)
124
+
125
+ @torch._dynamo.optimize(...)
126
+ def fn(a):
127
+ x = torch.add(a, 1)
128
+ x = torch.sub(x, 1)
129
+ x = torch.add(x, 1)
130
+ return x
131
+
132
+ fn(...)
133
+
134
+ Will break the graph on `torch.sub`, and give two graphs each with a
135
+ single `torch.add()` op.
136
+ """
137
+ return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)
138
+
139
+
140
+ @_disallow_in_graph_helper(throw_if_not_allowed=False)
141
+ def graph_break():
142
+ """Force a graph break"""
143
+ pass
144
+
145
+
146
+ def forbid_in_graph(fn):
147
+ """
148
+ Customize which functions TorchDynamo will assert are not present while tracing.
149
+
150
+ If you want a graph break on this function instead, use disallow_in_graph.
151
+ TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust
152
+ documentation would not be amiss.
153
+ """
154
+ if isinstance(fn, (list, tuple)):
155
+ return [forbid_in_graph(x) for x in fn]
156
+ assert callable(fn), "forbid_in_graph applies only to callables"
157
+ fn._dynamo_forbidden = True
158
+ return fn
159
+
160
+
161
+ @forbid_in_graph
162
+ def mark_dynamic(t, index):
163
+ """
164
+ Mark a tensor as having a dynamic dim.
165
+
166
+ [Note - on the state of mark_dynamic]
167
+
168
+ The behavior of having a dynamic dimension on a tensor is governed by a few factors:
169
+
170
+ 1) torch._dynamo.config dynamic_shapes True or False.
171
+ a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
172
+ b) dynamic_shapes=False - This config will raise an exception when used in conjunction with
173
+ mark_dynamic. We will eventually support this.
174
+
175
+ 2) If the dimension is fully constrained - as in, it does not allow more than a single value
176
+ in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
177
+ we will raise an error
178
+
179
+ 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
180
+ range of shapes, in eager we will pass it through, but export will raise an error.
181
+
182
+ 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
183
+ before torch.compile.
184
+
185
+ """
186
+ if isinstance(index, int):
187
+ if not hasattr(t, "_dynamo_dynamic_indices"):
188
+ t._dynamo_dynamic_indices = set()
189
+ # TODO(voz): Should we bounds check?
190
+ t._dynamo_dynamic_indices.add(index)
191
+ return
192
+
193
+ assert isinstance(index, (list, tuple))
194
+ for i in index:
195
+ mark_dynamic(t, i)
196
+
197
+
198
+ @forbid_in_graph
199
+ def maybe_mark_dynamic(t, index):
200
+ """
201
+ Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
202
+ dimension ends up getting specialized, don't error).
203
+ """
204
+ if isinstance(index, int):
205
+ if not hasattr(t, "_dynamo_weak_dynamic_indices"):
206
+ t._dynamo_weak_dynamic_indices = set()
207
+ # TODO(voz): Should we bounds check?
208
+ t._dynamo_weak_dynamic_indices.add(index)
209
+ return
210
+
211
+ assert isinstance(index, (list, tuple))
212
+ for i in index:
213
+ maybe_mark_dynamic(t, i)
214
+
215
+
216
+ @forbid_in_graph
217
+ def mark_static(t, index=None):
218
+ """
219
+ Mark a tensor as having a static dim.
220
+
221
+ This will prevent us from attempting to compile it dynamically
222
+ when dynamic=True; this can improve trace-time performance.
223
+
224
+ This has lower precedence than mark_dynamic.
225
+ """
226
+ if isinstance(index, int):
227
+ if not hasattr(t, "_dynamo_static_indices"):
228
+ t._dynamo_static_indices = set()
229
+ # TODO(voz): Should we bounds check?
230
+ t._dynamo_static_indices.add(index)
231
+ elif index is None:
232
+ for i in range(t.dim()):
233
+ mark_static(t, i)
234
+ else:
235
+ assert isinstance(index, (list, tuple))
236
+ for i in index:
237
+ mark_static(t, i)
238
+
239
+
240
+ @forbid_in_graph
241
+ def mark_static_address(t, guard=True):
242
+ """
243
+ Marks an input tensor whose data_ptr will not change across multiple calls
244
+ to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation
245
+ is not needed for this input. The data_ptr will be guarded if guard=True. Note:
246
+ Tensors marked in this way will be kept alive until `torch._dynamo.reset()` is called.
247
+ """
248
+ if not isinstance(t, torch.Tensor):
249
+ raise TypeError(f"mark_static_address expects a tensor but recieved {type(t)}")
250
+
251
+ if guard:
252
+ t._dynamo_static_input_type = "guarded" # type: ignore[attr-defined]
253
+ else:
254
+ t._dynamo_static_input_type = "unguarded" # type: ignore[attr-defined]
255
+
256
+
257
+ # Note: this carefully avoids eagerly import einops.
258
+ # TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2
259
+ def _allow_in_graph_einops():
260
+ import einops
261
+
262
+ try:
263
+ # requires einops > 0.6.1, torch >= 2.0
264
+ from einops._torch_specific import ( # noqa: F401
265
+ _ops_were_registered_in_torchdynamo,
266
+ )
267
+
268
+ # einops > 0.6.1 will call the op registration logic as it is imported.
269
+ pass
270
+ except ImportError:
271
+ # einops <= 0.6.1
272
+ allow_in_graph(einops.rearrange)
273
+ allow_in_graph(einops.reduce)
274
+ if hasattr(einops, "repeat"):
275
+ allow_in_graph(einops.repeat) # available since einops 0.2.0
276
+ if hasattr(einops, "einsum"):
277
+ allow_in_graph(einops.einsum) # available since einops 0.5.0
278
+ if hasattr(einops, "pack"):
279
+ allow_in_graph(einops.pack) # available since einops 0.6.0
280
+ if hasattr(einops, "unpack"):
281
+ allow_in_graph(einops.unpack) # available since einops 0.6.0
282
+
283
+
284
+ allowed_functions.add_module_init_func("einops", _allow_in_graph_einops)
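An illustrative sketch combining the decorators defined above (disable, allow_in_graph, mark_dynamic); the helper functions and tensor shapes are placeholders.

    import torch
    import torch._dynamo as dynamo

    @dynamo.disable            # skip this frame and everything it calls
    def unsupported_helper(x):
        return x.tolist()

    def custom_op(x):
        return x * 3

    dynamo.allow_in_graph(custom_op)   # trace custom_op as a single node in the graph

    @torch.compile
    def fn(x):
        return custom_op(x) + 1

    inp = torch.randn(4, 8)
    dynamo.mark_dynamic(inp, 0)        # treat dim 0 as dynamic; must happen before compiling
    fn(inp)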
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/external_utils.py ADDED
@@ -0,0 +1,29 @@
1
+ # This module contains functions that *will be allowed* by dynamo
2
+
3
+ import functools
4
+
5
+
6
+ def is_compiling() -> bool:
7
+ return False
8
+
9
+
10
+ def wrap_inline(fn):
11
+ """
12
+ Create an extra frame around fn that is not in skipfiles
13
+ """
14
+
15
+ @functools.wraps(fn)
16
+ def inner(*args, **kwargs):
17
+ return fn(*args, **kwargs)
18
+
19
+ return inner
20
+
21
+
22
+ def call_hook(hook, *args):
23
+ """
24
+ Used by compiled autograd to handle hook returning None
25
+ """
26
+ result = hook(*args)
27
+ if result is None:
28
+ return args[0]
29
+ return result
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/profiler.py ADDED
@@ -0,0 +1,155 @@
1
+ import dataclasses
2
+ import os
3
+ from typing import Any, List
4
+
5
+ import torch
6
+
7
+ from .utils import print_once
8
+
9
+
10
+ @dataclasses.dataclass
11
+ class ProfileMetrics:
12
+ microseconds: float = 0.0
13
+ operators: int = 0
14
+ fusions: int = 0
15
+ graphs: int = 0
16
+
17
+ def __iadd__(self, other: "ProfileMetrics"):
18
+ self.microseconds += other.microseconds
19
+ self.operators += other.operators
20
+ self.fusions += other.fusions
21
+ return self
22
+
23
+ def __add__(self, other: "ProfileMetrics"):
24
+ assert isinstance(other, ProfileMetrics)
25
+ return ProfileMetrics(
26
+ self.microseconds + other.microseconds,
27
+ self.operators + other.operators,
28
+ self.fusions + other.fusions,
29
+ )
30
+
31
+ def __truediv__(self, other):
32
+ if isinstance(other, int):
33
+ other = ProfileMetrics(other, other, other)
34
+ return ProfileMetrics(
35
+ self.microseconds / max(1, other.microseconds),
36
+ self.operators / max(1, other.operators),
37
+ self.fusions / max(1, other.fusions),
38
+ )
39
+
40
+ def __str__(self):
41
+ return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time"
42
+
43
+ def tocsv(self):
44
+ return [self.operators, self.microseconds]
45
+
46
+
47
+ class ProfileResult:
48
+ def __init__(self, captured, total, unique_graphs):
49
+ self.captured: ProfileMetrics = captured or ProfileMetrics()
50
+ self.total: ProfileMetrics = total or ProfileMetrics()
51
+ self.unique_graphs: int = unique_graphs
52
+
53
+ def __iadd__(self, other: "ProfileResult"):
54
+ self.captured += other.captured
55
+ self.total += other.total
56
+ self.unique_graphs += other.unique_graphs
57
+ return self
58
+
59
+ def percent(self):
60
+ return self.captured / self.total
61
+
62
+ def __str__(self):
63
+ return (
64
+ f"{self.unique_graphs:2} graphs {self.captured.graphs:2} graph calls "
65
+ f"{self.captured.operators:4}/{self.total.operators:4} = "
66
+ + str(self.percent())
67
+ )
68
+
69
+ def tocsv(self):
70
+ return [
71
+ self.unique_graphs,
72
+ self.captured.graphs,
73
+ self.captured.operators,
74
+ self.total.operators,
75
+ ] + self.percent().tocsv()
76
+
77
+
78
+ def should_print_missing():
79
+ return os.environ.get("TORCHDYNAMO_PRINT_MISSING") == "1"
80
+
81
+
82
+ def print_missing(stack):
83
+ if any("/torch/autograd/profiler.py" in x for x in stack):
84
+ return
85
+ stack = [
86
+ x for x in stack if ("<built-in" not in x and "site-packages/torch/" not in x)
87
+ ]
88
+ print_once("MISSING", " >> ".join(stack[-3:]))
89
+
90
+
91
+ class Profiler:
92
+ unique_graphs = 0
93
+
94
+ def __init__(self):
95
+ self.prof = torch.profiler.profile(
96
+ activities=[torch.profiler.ProfilerActivity.CPU],
97
+ with_stack=should_print_missing(),
98
+ )
99
+
100
+ def results(self):
101
+ captured_regions = 0
102
+ captured_ops = 0
103
+ captured_microseconds = 0
104
+ total_ops = 0
105
+ total_microseconds = 0
106
+
107
+ last_op_end_time = -1
108
+ captured_region_end_time = -1
109
+ events = sorted(self.prof.events(), key=lambda x: x.time_range.start)
110
+ for e in events:
111
+ if e.name == "TORCHDYNAMO":
112
+ captured_region_end_time = e.time_range.end
113
+ captured_regions += 1
114
+ # ignore `handle = torch.zeros(1)` in record_function.__init__()
115
+ total_ops -= 1
116
+ elif e.time_range.start >= last_op_end_time:
117
+ last_op_end_time = e.time_range.end
118
+ if e.time_range.end <= captured_region_end_time:
119
+ captured_ops += 1
120
+ captured_microseconds += e.time_range.elapsed_us()
121
+ elif should_print_missing():
122
+ print_missing(e.stack)
123
+ total_ops += 1
124
+ total_microseconds += e.time_range.elapsed_us()
125
+ else:
126
+ pass # ops recursively called from other ops (ignored)
127
+
128
+ unique_graphs = Profiler.unique_graphs
129
+ Profiler.unique_graphs = 0
130
+ # we counted one extra op that is part of the profiler setup code
131
+ total_ops -= 1
132
+
133
+ return ProfileResult(
134
+ captured=ProfileMetrics(
135
+ microseconds=captured_microseconds,
136
+ operators=captured_ops,
137
+ fusions=captured_ops - captured_regions,
138
+ graphs=captured_regions,
139
+ ),
140
+ total=ProfileMetrics(
141
+ microseconds=total_microseconds,
142
+ operators=total_ops,
143
+ fusions=total_ops - 1,
144
+ ),
145
+ unique_graphs=unique_graphs,
146
+ )
147
+
148
+
149
+ def fx_insert_profiling(gm: torch.fx.GraphModule, example_inputs: List[Any]):
150
+ def _wrapped(*args):
151
+ with torch.profiler.record_function("TORCHDYNAMO"):
152
+ return gm.forward(*args)
153
+
154
+ Profiler.unique_graphs += 1
155
+ return _wrapped
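A sketch of how the profiler above can be driven: fx_insert_profiling is passed as the Dynamo backend, the underlying torch.profiler context is entered around the run, and results() is read afterwards. The toy function is a placeholder.

    import torch
    from torch._dynamo.profiler import Profiler, fx_insert_profiling

    def fn(x):
        return torch.sin(x) + torch.cos(x)

    opt_fn = torch.compile(fn, backend=fx_insert_profiling)
    prof = Profiler()
    with prof.prof:            # prof.prof is the torch.profiler.profile instance
        opt_fn(torch.randn(16))
    print(prof.results())      # captured vs. total op counts and times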
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/replay_record.py ADDED
@@ -0,0 +1,119 @@
1
+ import dataclasses
2
+ from dataclasses import field
3
+ from types import CodeType, ModuleType
4
+ from typing import Any, Dict
5
+
6
+ try:
7
+ import dill
8
+ except ImportError:
9
+ dill = None
10
+
11
+
12
+ @dataclasses.dataclass
13
+ class ModuleRecord:
14
+ module: ModuleType
15
+ accessed_attrs: Dict[str, Any] = field(default_factory=dict)
16
+
17
+
18
+ @dataclasses.dataclass
19
+ class DummyModule:
20
+ name: str
21
+
22
+
23
+ @dataclasses.dataclass
24
+ class ExecutionRecord:
25
+ code: CodeType
26
+ globals: Dict[str, Any] = field(default_factory=dict)
27
+ locals: Dict[str, Any] = field(default_factory=dict)
28
+ builtins: Dict[str, Any] = field(default_factory=dict)
29
+ code_options: Dict[str, Any] = field(default_factory=dict)
30
+
31
+ def dump(self, f):
32
+ assert dill is not None, "replay_record requires `pip install dill`"
33
+ dill.dump(self, f)
34
+
35
+ @classmethod
36
+ def load(cls, f):
37
+ assert dill is not None, "replay_record requires `pip install dill`"
38
+ return dill.load(f)
39
+
40
+
41
+ @dataclasses.dataclass
42
+ class ExecutionRecorder:
43
+ MOD_EXCLUDES = ["torch", "torch.fx", "torch.fx.passes"]
44
+ LOCAL_MOD_PREFIX = "___local_mod_"
45
+
46
+ code: CodeType
47
+ globals: Dict[str, Any] = field(default_factory=dict)
48
+ locals: Dict[str, Any] = field(default_factory=dict)
49
+ builtins: Dict[str, Any] = field(default_factory=dict)
50
+ code_options: Dict[str, Any] = field(default_factory=dict)
51
+ name_to_modrec: Dict[str, Any] = field(default_factory=dict)
52
+
53
+ def add_local_var(self, name, var):
54
+ if isinstance(var, ModuleType):
55
+ if self._is_excl(var):
56
+ return
57
+ self.locals[name] = self._add_mod(var)
58
+ else:
59
+ self.locals[name] = var
60
+
61
+ def add_global_var(self, name, var):
62
+ if isinstance(var, ModuleType):
63
+ if self._is_excl(var):
64
+ return
65
+ self.globals[name] = self._add_mod(var)
66
+ else:
67
+ self.globals[name] = var
68
+
69
+ def add_local_mod(self, name, mod):
70
+ assert isinstance(mod, ModuleType)
71
+ if self._is_excl(mod):
72
+ return
73
+
74
+ self.add_global_var(name, mod)
75
+
76
+ def record_module_access(self, mod, name, val):
77
+ if self._is_excl(mod):
78
+ return
79
+ if isinstance(val, ModuleType):
80
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = self._add_mod(val)
81
+ return
82
+
83
+ if mod.__name__ in self.name_to_modrec:
84
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = val
85
+
86
+ def get_record(self):
87
+ return ExecutionRecord(
88
+ self.code,
89
+ ExecutionRecorder._resolve_modules(self.globals),
90
+ ExecutionRecorder._resolve_modules(self.locals),
91
+ self.builtins.copy(),
92
+ self.code_options.copy(),
93
+ )
94
+
95
+ def _add_mod(self, mod):
96
+ if mod.__name__ not in self.name_to_modrec:
97
+ self.name_to_modrec[mod.__name__] = ModuleRecord(mod)
98
+
99
+ return self.name_to_modrec[mod.__name__]
100
+
101
+ @classmethod
102
+ def _is_excl(cls, mod):
103
+ return any(mod.__name__ == excl for excl in cls.MOD_EXCLUDES)
104
+
105
+ # Convert ModuleRecords -> DummyModule tree
106
+ @classmethod
107
+ def _resolve_modules(cls, vars):
108
+ def resolve_module(var):
109
+ if not isinstance(var, ModuleRecord):
110
+ return var
111
+
112
+ dummy_mod = DummyModule(var.module.__name__)
113
+ for attr_name, attr_value in var.accessed_attrs.items():
114
+ attr_value = resolve_module(attr_value)
115
+ dummy_mod.__setattr__(attr_name, attr_value)
116
+
117
+ return dummy_mod
118
+
119
+ return {k: resolve_module(v) for k, v in vars.items()}
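A sketch of the record round-trip above; it assumes dill is installed, as the asserts in dump/load require, and uses toy values for the code object and globals.

    from torch._dynamo.replay_record import ExecutionRecord

    def sample():
        return 1 + 1

    record = ExecutionRecord(code=sample.__code__, globals={"x": 3})
    with open("/tmp/record.bin", "wb") as f:
        record.dump(f)          # serialized with dill
    with open("/tmp/record.bin", "rb") as f:
        restored = ExecutionRecord.load(f)
    assert restored.globals["x"] == 3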
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_aot.cpython-310.pyc ADDED
Binary file (24.1 kB).
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/__pycache__/after_dynamo.cpython-310.pyc ADDED
Binary file (13.2 kB).
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/repro/after_aot.py ADDED
@@ -0,0 +1,931 @@
1
+ import argparse
2
+ import copy
3
+ import functools
4
+ import io
5
+ import logging
6
+ import os
7
+ import shutil
8
+ import subprocess
9
+ import sys
10
+ import textwrap
11
+ import uuid
12
+ from importlib import import_module
13
+ from tempfile import TemporaryFile
14
+ from typing import Any, Callable, Dict, Union
15
+
16
+ import torch
17
+ import torch.fx as fx
18
+ import torch.nn as nn
19
+ from torch._dynamo.debug_utils import (
20
+ _cuda_system_info_comment,
21
+ AccuracyError,
22
+ backend_accuracy_fails,
23
+ BuckTargetWriter,
24
+ cast_to_fp64,
25
+ extra_imports,
26
+ generate_config_string,
27
+ helper_for_dump_minify,
28
+ InputReader,
29
+ InputWriter,
30
+ MAX_CONSTANT_NUMEL_INLINE,
31
+ minifier_dir,
32
+ NNModuleToString,
33
+ NopInputReader,
34
+ same_two_models,
35
+ )
36
+ from torch._dynamo.utils import clone_inputs, counters, same
37
+ from torch.fx.experimental.proxy_tensor import make_fx
38
+ from torch.fx.experimental.symbolic_shapes import (
39
+ fx_placeholder_targets,
40
+ has_free_symbols,
41
+ )
42
+ from torch.hub import tqdm
43
+
44
+ from .. import config
45
+
46
+ log = logging.getLogger(__name__)
47
+
48
+
49
+ inductor_config = import_module("torch._inductor.config")
50
+ use_buck = inductor_config.is_fbcode()
51
+
52
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
53
+ # MAIN ENTRY POINT
54
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
55
+
56
+
57
+ def wrap_compiler_debug(unconfigured_compiler_fn, compiler_name: str):
58
+ """
59
+ Minifier for Fx Graph modules after Aot Autograd has finished. We wrap both
60
+ forward and backward call separately with the backend compiler_fn - like
61
+ inductor or nvfuser. Intercepting after Aot Autograd presents a neat
62
+ abstraction, where all the params are lifted as graph inputs, making it easy
63
+ to save the graph as a string.
64
+ """
65
+
66
+ @functools.wraps(unconfigured_compiler_fn)
67
+ def debug_wrapper(gm, example_inputs, **kwargs):
68
+ from torch._subclasses import FakeTensorMode
69
+
70
+ compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)
71
+
72
+ from torch._functorch.aot_autograd import get_aot_graph_name
73
+
74
+ graph_name = get_aot_graph_name()
75
+
76
+ # TODO: why do we need to deepcopy the original graph?
77
+ orig_graph = copy.deepcopy(gm.graph)
78
+ assert config.repro_after in ("dynamo", "aot", None)
79
+
80
+ try:
81
+ # Call the compiler_fn - which is either aot_autograd or inductor
82
+ # with fake inputs
83
+ inner_compiled_fn = compiler_fn(gm, example_inputs)
84
+ except Exception as e:
85
+ # TODO: Failures here are troublesome because no real inputs,
86
+ # need a different serialization strategy
87
+ if config.repro_after == "aot":
88
+ if config.repro_level == 1:
89
+ dump_compiler_graph_state(
90
+ fx.GraphModule(gm, orig_graph),
91
+ example_inputs,
92
+ compiler_name,
93
+ )
94
+ elif config.repro_level == 2:
95
+ dump_to_minify(
96
+ fx.GraphModule(gm, orig_graph),
97
+ example_inputs,
98
+ compiler_name,
99
+ )
100
+ log.error("CompilerError")
101
+ raise
102
+
103
+ # We may run regular PyTorch compute that may trigger Dynamo, do NOT
104
+ # recursively attempt to accuracy minify in that case!
105
+ def deferred_for_real_inputs(real_inputs):
106
+ # This is a bit obscure: if we recursively try to accuracy minify
107
+ # the SAME function, this would trigger. But most of the time
108
+ # we should never hit this branch
109
+ if config.repro_after != "aot":
110
+ return inner_compiled_fn(real_inputs)
111
+ with config.patch(repro_after=None):
112
+ return inner_debug_fn(real_inputs)
113
+
114
+ def inner_debug_fn(real_inputs):
115
+ """
116
+ Aot Autograd fw_compiler and bw_compiler can have fake tensors. So,
117
+ example_inputs can be fake tensors. We can call compiler_fn (which is
118
+ inductor or nvfuser) with fake tensors but the actually compiled_fn
119
+ should be called with real tensors. Therefore, the actual invocation
120
+ is deferred.
121
+ """
122
+ # Copy the tensor attrs like shape, stride etc by converting to Fake Tensor
123
+ # because inductor clears the tensor list in its codegen. And example_inputs
124
+ # are available only for the first invocation.
125
+ fake_mode = FakeTensorMode()
126
+ copy_tensor_attrs = [
127
+ fake_mode.from_tensor(x) if isinstance(x, torch.Tensor) else x
128
+ for x in real_inputs
129
+ ]
130
+ if config.repro_level == 3:
131
+ # Always dump the original module in case we have segfaults
132
+ dump_to_minify(
133
+ fx.GraphModule(gm, orig_graph), real_inputs, compiler_name
134
+ )
135
+
136
+ if config.repro_level == 4:
137
+ if compiler_name != "inductor":
138
+ raise NotImplementedError(
139
+ "Accuracy minification is supported for inductor only"
140
+ )
141
+ if backend_aot_accuracy_fails(gm, real_inputs, compiler_fn):
142
+ log.warning(
143
+ "Accuracy failed for the AOT Autograd graph %s", graph_name
144
+ )
145
+ dump_compiler_graph_state(
146
+ fx.GraphModule(gm, orig_graph),
147
+ real_inputs,
148
+ f"{compiler_name}_accuracy",
149
+ )
150
+ dump_to_minify(
151
+ fx.GraphModule(gm, orig_graph),
152
+ real_inputs,
153
+ f"{compiler_name}_accuracy",
154
+ )
155
+ raise AccuracyError("Bad accuracy detected")
156
+ else:
157
+ # Call the compiled function with real inputs
158
+ return inner_compiled_fn(real_inputs)
159
+ else:
160
+ try:
161
+ # Call the compiled function with real inputs
162
+ out = inner_compiled_fn(real_inputs)
163
+ # sync cuda kernels to ensure IMA detection
164
+ for arg in example_inputs:
165
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
166
+ torch.cuda.synchronize()
167
+ break
168
+ return out
169
+ except Exception as e:
170
+ if config.repro_level == 1:
171
+ dump_compiler_graph_state(
172
+ fx.GraphModule(gm, orig_graph),
173
+ copy_tensor_attrs,
174
+ compiler_name,
175
+ )
176
+ elif config.repro_level == 2:
177
+ dump_to_minify(
178
+ fx.GraphModule(gm, orig_graph),
179
+ copy_tensor_attrs,
180
+ compiler_name,
181
+ )
182
+ raise
183
+
184
+ if config.repro_after == "aot":
185
+ compiled_fn = deferred_for_real_inputs
186
+ compiled_fn._boxed_call = True # type: ignore[attr-defined]
187
+ return compiled_fn
188
+ else:
189
+ return inner_compiled_fn
190
+
191
+ return debug_wrapper
192
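+
+ # A minimal sketch of how this wrapper is typically activated (values shown are
+ # illustrative): the behaviour above is driven by two torch._dynamo.config knobs.
+ #
+ #   >>> import torch._dynamo.config as dynamo_config
+ #   >>> dynamo_config.repro_after = "aot"   # wrap the AOT Autograd fw/bw compilers
+ #   >>> dynamo_config.repro_level = 2       # 1: dump graph state on error,
+ #   # 2: dump a minifier script, 3: always dump before running, 4: accuracy minify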
+
193
+
194
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
195
+ # DUMP REPROS
196
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
197
+
198
+
199
+ def generate_compiler_repro_string(gm, args, *, stable_output=False, save_dir=None):
200
+ model_str = textwrap.dedent(
201
+ f"""
202
+ import torch
203
+ from torch import tensor, device
204
+ import torch.fx as fx
205
+ from torch._dynamo.testing import rand_strided
206
+ from math import inf
207
+ import torch._inductor.inductor_prims
208
+
209
+ {generate_config_string(stable_output=stable_output)}
210
+
211
+ isolate_fails_code_str = None
212
+
213
+ {extra_imports}
214
+
215
+ """
216
+ )
217
+ if not stable_output:
218
+ model_str += f"# torch version: {torch.version.__version__}\n"
219
+ if hasattr(torch.version, "cuda"):
220
+ model_str += f"# torch cuda version: {torch.version.cuda}\n"
221
+ if hasattr(torch.version, "git_version"):
222
+ model_str += f"# torch git version: {torch.version.git_version}\n\n\n"
223
+ model_str += _cuda_system_info_comment()
224
+
225
+ model_str += NNModuleToString.convert(gm)
226
+
227
+ # get the hint shape/stride when dynamic shapes are enabled
228
+ def hint_if_symint(x):
229
+ return tuple(i.node.hint if isinstance(i, torch.SymInt) else i for i in x)
230
+
231
+ writer = InputWriter(save_dir)
232
+ for placeholder, arg in zip(fx_placeholder_targets(gm), args):
233
+ if isinstance(arg, (int, torch.SymInt)):
234
+ writer.symint(placeholder, arg)
235
+ elif isinstance(arg, torch.Tensor):
236
+ # TODO: improve these names with FQN
237
+ writer.tensor(placeholder, arg)
238
+ else:
239
+ raise TypeError(f"arg is neither SymInt/int nor torch.Tensor, {arg}")
240
+
241
+ model_str += "\n".join(writer.lines()) + "\n"
242
+
243
+ model_str += "mod = Repro()\n"
244
+ return model_str
245
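+
+ # Rough shape of the string assembled above (a sketch; exact contents depend on
+ # the graph, config, and inputs): imports plus a config snapshot, version
+ # comments, a `class Repro(torch.nn.Module)` emitted by NNModuleToString.convert,
+ # the `load_args` input-replay routine emitted by InputWriter, and finally
+ # `mod = Repro()` so the caller can append a run_repro(...) stanza.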
+
246
+
247
+ def save_graph_repro(
248
+ fd,
249
+ gm,
250
+ args,
251
+ compiler_name,
252
+ *,
253
+ stable_output=False,
254
+ save_dir=None,
255
+ command="run",
256
+ accuracy=None,
257
+ tracing_mode=None,
258
+ check_str=None,
259
+ ):
260
+ fd.write(
261
+ generate_compiler_repro_string(
262
+ gm,
263
+ args,
264
+ stable_output=stable_output,
265
+ save_dir=save_dir,
266
+ )
267
+ )
268
+ if accuracy is None:
269
+ accuracy = "_accuracy" in compiler_name
270
+ if tracing_mode is None:
271
+ tracing_mode = "real"
272
+ if any(has_free_symbols(a) for a in args):
273
+ tracing_mode = "symbolic"
274
+ fd.write("if __name__ == '__main__':\n")
275
+ fd.write(" from torch._dynamo.repro.after_aot import run_repro\n")
276
+ fd.write(
277
+ f" with torch.no_grad():"
278
+ f" run_repro(mod, load_args, accuracy={accuracy!r}, command={command!r}, "
279
+ f"save_dir={save_dir!r}, tracing_mode={tracing_mode!r}, check_str={check_str!r}"
280
+ ")\n"
281
+ )
282
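+
+ # Illustrative call (gm/args assumed to come from the wrapped compiler above):
+ # writes a self-contained script that rebuilds the module, reloads the inputs,
+ # and hands control to run_repro with the requested subcommand.
+ #
+ #   >>> with open("repro.py", "w") as f:
+ #   ...     save_graph_repro(f, gm, args, "inductor", command="minify")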
+
283
+
284
+ def dump_compiler_graph_state(gm, args, compiler_name, *, accuracy=None):
285
+ subdir = os.path.join(minifier_dir(), "checkpoints")
286
+ if not os.path.exists(subdir):
287
+ os.makedirs(subdir, exist_ok=True)
288
+ file_name = os.path.join(subdir, f"{len(gm.graph.nodes)}.py")
289
+ log.warning(
290
+ "Writing checkpoint with %s nodes to %s", len(gm.graph.nodes), file_name
291
+ )
292
+ with open(file_name, "w") as fd:
293
+ save_graph_repro(
294
+ fd, gm, args, compiler_name, save_dir=subdir, accuracy=accuracy
295
+ )
296
+ curdir = os.getcwd()
297
+ repro_path = os.path.join(curdir, "repro.py")
298
+ try:
299
+ shutil.copyfile(file_name, repro_path)
300
+ log.warning("Copying repro file for convenience to %s", repro_path)
301
+ if use_buck:
302
+ BuckTargetWriter(file_name).write()
303
+ except OSError:
304
+ log.warning("No write permissions for %s", repro_path)
305
+ pass
306
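+
+ # Checkpoints are written to <minifier_dir()>/checkpoints/<num_graph_nodes>.py,
+ # and a convenience copy lands in ./repro.py when the working directory is
+ # writable (otherwise only the warning above is emitted).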
+
307
+
308
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
309
+ # DUMP MINIFIER
310
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
311
+
312
+
313
+ def dump_to_minify(gm, args, compiler_name: str):
314
+ out = io.StringIO()
315
+ # TODO: factor this out
316
+ subdir = os.path.join(minifier_dir(), "checkpoints")
317
+ if not os.path.exists(subdir):
318
+ os.makedirs(subdir, exist_ok=True)
319
+ save_graph_repro(out, gm, args, compiler_name, save_dir=subdir, command="minify")
320
+ return helper_for_dump_minify(out.getvalue())
321
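+
+ # Unlike dump_compiler_graph_state, the script emitted here hard-codes
+ # command="minify", so simply executing it starts minification rather than a
+ # plain re-run of the graph.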
+
322
+
323
+ def isolate_fails(
324
+ fx_g,
325
+ args,
326
+ compiler_name: str,
327
+ env=None,
328
+ save_dir=None,
329
+ accuracy=None,
330
+ tracing_mode=None,
331
+ check_str=None,
332
+ ):
333
+ if env is None:
334
+ env = {}
335
+ subdir = os.path.join(os.getcwd(), "isolate")
336
+ if not os.path.exists(subdir):
337
+ os.makedirs(subdir, exist_ok=True)
338
+ file_name = os.path.join(subdir, f"{str(uuid.uuid4())[:5]}.py")
339
+ with open(file_name, "w") as fd:
340
+ save_graph_repro(
341
+ fd,
342
+ fx_g,
343
+ args,
344
+ compiler_name,
345
+ save_dir=save_dir,
346
+ command="minifier-query",
347
+ accuracy=accuracy,
348
+ tracing_mode=tracing_mode,
349
+ check_str=check_str,
350
+ )
351
+ # with open(file_name, "r") as fd:
352
+ # print(fd.read())
353
+ new_env = os.environ.copy()
354
+ new_env = {**new_env, **env}
355
+ stdout, stderr = TemporaryFile(), TemporaryFile()
356
+
357
+ if use_buck:
358
+ cmd = BuckTargetWriter(file_name).write(print_msg=False)
359
+ else:
360
+ cmd = ["python", file_name]
361
+
362
+ p = subprocess.Popen(
363
+ cmd,
364
+ cwd=subdir,
365
+ stdout=stdout,
366
+ stderr=stderr,
367
+ env=new_env,
368
+ )
369
+ p.wait()
370
+
371
+ stdout.seek(0)
372
+ stderr.seek(0)
373
+ print(
374
+ textwrap.indent(stdout.read().decode("utf-8"), prefix=">> "), file=sys.stdout
375
+ )
376
+ print(
377
+ textwrap.indent(stderr.read().decode("utf-8"), prefix=">> "), file=sys.stderr
378
+ )
379
+ # print(f"Isolated test failed - {file_name}")
380
+ return p.returncode != 0
381
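+
+ # Illustrative use (arguments assumed from a minification session): the
+ # candidate graph is replayed in a fresh subprocess via the "minifier-query"
+ # subcommand, so hard crashes (segfaults, CUDA asserts) also register as
+ # failures rather than taking down the minifier itself.
+ #
+ #   >>> isolate_fails(fx_g, args, "inductor", save_dir="/tmp/minifier")
+ #   True  # non-zero exit code: the failure reproduces in isolation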
+
382
+
383
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
384
+ # MINIFIER TOOLS
385
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
386
+
387
+
388
+ def inductor_fails(fx_g, args, check_str=None):
389
+ has_cuda = False
390
+ for arg in args:
391
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
392
+ has_cuda = True
393
+ break
394
+
395
+ def sync():
396
+ if has_cuda:
397
+ # Ensures that segfaults are surfaced
398
+ torch.cuda.synchronize()
399
+
400
+ from torch._inductor.compile_fx import compile_fx_inner
401
+
402
+ try:
403
+ result = fx_g(*args)
404
+ assert isinstance(result, (tuple, list))
405
+ assert not any(isinstance(x, (tuple, list)) for x in result)
406
+ except Exception:
407
+ return False
408
+
409
+ sync()
410
+
411
+ try:
412
+ compile_mod = compile_fx_inner(fx_g, args)
413
+ compile_mod(args)
414
+ sync()
415
+ except Exception as e:
416
+ if check_str is not None and check_str not in repr(e):
417
+ return False
418
+ print(repr(e))
419
+ return True
420
+ return False
421
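+
+ # Semantics sketch: inductor_fails answers "does inductor fail where eager
+ # succeeds?". It returns False when eager execution itself raises, and True
+ # only when compile_fx_inner (or running the compiled module) raises an
+ # exception that also matches check_str, if one was supplied.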
+
422
+
423
+ def inductor_accuracy_fails(
424
+ fx_g, args, check_str=None, *, require_fp64=False, ignore_non_fp=False
425
+ ):
426
+ from torch._inductor.compile_fx import compile_fx_inner
427
+
428
+ return backend_aot_accuracy_fails(
429
+ fx_g,
430
+ args,
431
+ compile_fx_inner,
432
+ require_fp64=require_fp64,
433
+ ignore_non_fp=ignore_non_fp,
434
+ )
435
+
436
+
437
+ backend_aot_accuracy_fails = functools.partial(backend_accuracy_fails, only_fwd=True)
438
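+
+ # backend_accuracy_fails (imported earlier in this file) compares the compiled
+ # backend against eager, optionally against an fp64 reference; only_fwd=True
+ # limits the comparison to the forward pass, matching how these per-pass
+ # AOT Autograd graphs are exercised.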
+
439
+
440
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
441
+ # REPRO MAIN
442
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
443
+
444
+
445
+ def repro_common(options, mod, load_args):
446
+ # Invariant for graphs we generate with the repro script
447
+ assert not any(mod.named_parameters())
448
+ for n, b in mod.named_buffers():
449
+ if b.numel() > MAX_CONSTANT_NUMEL_INLINE:
450
+ log.warning(
451
+ "Constant %s was not serialized, generated random data instead. "
452
+ "If you think this is affecting you, please comment on "
453
+ "https://github.com/pytorch/pytorch/issues/100468",
454
+ n,
455
+ )
456
+
457
+ if not hasattr(load_args, "_version"):
458
+ log.warning(
459
+ "load_args does not have a _version attribute, please file a bug to PyTorch "
460
+ "and describe how you generate this repro script"
461
+ )
462
+ else:
463
+ if load_args._version > 0:
464
+ log.warning(
465
+ "load_args is version %s, but this version of PyTorch only supports "
466
+ "version 0. We will try to run it anyway but there may be an incompatibility; "
467
+ "if so, try upgrading your version of PyTorch.",
468
+ load_args._version,
469
+ )
470
+
471
+ nop_reader = NopInputReader()
472
+ load_args(nop_reader)
473
+
474
+ with tqdm(desc="Loading inputs", total=nop_reader.total) as pbar:
475
+ input_reader = InputReader(save_dir=options.save_dir, pbar=pbar)
476
+ load_args(input_reader)
477
+ args = input_reader.args
478
+
479
+ # Turn mod into a GraphModule the slow way
480
+ # TODO: speed this up
481
+ mod = make_fx(mod, tracing_mode=options.tracing_mode)(*args)
482
+
483
+ torch._inductor.config.generate_intermediate_hooks = True
484
+
485
+ return mod, args
486
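+
+ # The input loading above is two-pass (a sketch of the protocol): load_args is
+ # first replayed against a NopInputReader purely to count the work for the
+ # progress bar, then against a real InputReader that materializes the inputs
+ # (from save_dir when present, randomly generated data otherwise).
+ #
+ #   >>> mod, args = repro_common(options, mod, load_args)  # mod is now a traced GraphModule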
+
487
+
488
+ ACCURACY_FAILS: Dict[str, Callable[[nn.Module, Any], bool]] = {
489
+ "": inductor_fails,
490
+ # This might look inverted but it's not. strict_accuracy means "we will
491
+ # minify any time we see anything that diverges", whereas accuracy is more
492
+ # conservative, and will only minify if there is a meaningful fp64
493
+ # divergence
494
+ "accuracy": functools.partial(
495
+ inductor_accuracy_fails, require_fp64=True, ignore_non_fp=True
496
+ ),
497
+ "strict_accuracy": inductor_accuracy_fails,
498
+ }
499
+
500
+
501
+ def repro_minifier_query(options, mod, load_args):
502
+ mod, args = repro_common(options, mod, load_args)
503
+ fail_fn = functools.partial(
504
+ ACCURACY_FAILS[options.accuracy], check_str=options.check_str
505
+ )
506
+ if fail_fn(mod, args):
507
+ sys.exit(1)
508
+ else:
509
+ sys.exit(0)
510
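+
+ # Exit-code convention (deliberately inverted relative to a test suite): exit
+ # status 1 means "the failure still reproduces", which is exactly what
+ # isolate_fails treats as an interesting result.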
+
511
+
512
+ def repro_minify(options, mod, load_args):
513
+ from functorch.compile import minifier
514
+
515
+ mod, args = repro_common(options, mod, load_args)
516
+ compiler_name = "inductor_accuracy" if options.accuracy != "" else "inductor"
517
+
518
+ favored_device = 1 if torch.cuda.device_count() >= 2 else 0
519
+ env_variables = {"CUDA_VISIBLE_DEVICES": str(favored_device)}
520
+
521
+ module_fails: Any
522
+ if options.isolate:
523
+ module_fails = functools.partial(
524
+ isolate_fails,
525
+ env=env_variables,
526
+ compiler_name=compiler_name,
527
+ save_dir=options.save_dir,
528
+ accuracy=options.accuracy,
529
+ tracing_mode=options.tracing_mode,
530
+ )
531
+ else:
532
+ module_fails = ACCURACY_FAILS[options.accuracy]
533
+
534
+ minifier(
535
+ mod,
536
+ args,
537
+ module_fails=functools.partial(module_fails, check_str=options.check_str),
538
+ dump_state=functools.partial(
539
+ dump_compiler_graph_state, compiler_name=compiler_name
540
+ ),
541
+ save_dir=options.save_dir,
542
+ offload_to_disk=options.offload_to_disk,
543
+ skip_offload=options.skip_saving_eager_intermediates,
544
+ skip_sanity=options.skip_sanity,
545
+ max_granularity=options.max_granularity,
546
+ )
547
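+
+ # Typical invocation of a generated repro script (illustrative; the flags are
+ # defined by run_repro below):
+ #
+ #   $ python repro.py minify --no-isolate --check-str "AssertionError"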
+
548
+
549
+ def repro_analyze(options, mod, load_args):
550
+ from torch._inductor.compile_fx import compile_fx_inner
551
+ from torch._inductor.hooks import intermediate_hook
552
+
553
+ mod, args = repro_common(options, mod, load_args)
554
+
555
+ # TODO: The logic for cloning inputs/models here is intentionally
556
+ # modeled off of run_fwd_maybe_bwd, but arguably it is better not to
557
+ # clone inputs (as you are doubling your effective GPU memory usage).
558
+ # It is certainly faster though! It probably makes sense to let the
559
+ # user specify the offload strategy.
560
+
561
+ with tqdm(desc="Compiling"):
562
+ compiled = compile_fx_inner(mod, args)
563
+ total = counters["inductor"]["intermediate_hooks"]
564
+
565
+ known_names = set()
566
+
567
+ def save_hook(name, val):
568
+ known_names.add(name)
569
+ if not options.skip_saving_inductor_intermediates:
570
+ writer.write_tensor(os.path.join("inductor", name), val)
571
+ pbar.update(1) # type: ignore[has-type]
572
+
573
+ writer = torch.utils._content_store.ContentStoreWriter(
574
+ options.save_dir, stable_hash=options.stable_hash
575
+ )
576
+ reader = torch.utils._content_store.ContentStoreReader(options.save_dir)
577
+
578
+ new_args = clone_inputs(args)
579
+ with intermediate_hook(save_hook), tqdm(
580
+ desc="Saving inductor intermediates", total=total
581
+ ) as pbar:
582
+ compiled(new_args)
583
+ assert not new_args
584
+
585
+ def compare_tuples(tuple1, tuple2):
586
+ diff_indices = [i for i in range(len(tuple1)) if tuple1[i] != tuple2[i]]
587
+ diff_values = [(tuple1[i], tuple2[i]) for i in diff_indices]
588
+
589
+ if not diff_values:
590
+ return None
591
+ else:
592
+ return " and ".join(f"{a} != {b}" for a, b in diff_values)
593
+
594
+ def check_hook(name, val):
595
+ meta = writer.compute_tensor_metadata(val)
596
+ meta2 = reader.read_tensor_metadata(os.path.join("inductor", name))
597
+ reason = compare_tuples(meta, meta2)
598
+ if reason is not None:
599
+ pbar.write(f"NONDETERMINISTIC INDUCTOR at {name} ({reason})")
600
+ pbar.update(1)
601
+
602
+ if not options.skip_check_deterministic:
603
+ new_args = clone_inputs(args)
604
+ with intermediate_hook(check_hook), tqdm(
605
+ desc="Checking inductor determinism", total=total
606
+ ) as pbar:
607
+ compiled(new_args)
608
+ assert not new_args
609
+
610
+ class WriterInterp(fx.Interpreter):
611
+ def __init__(self, mod, subdir):
612
+ super().__init__(mod)
613
+ self.subdir = subdir
614
+
615
+ def run_node(self, n):
616
+ r = super().run_node(n)
617
+ name = n.name
618
+ if name in known_names:
619
+ pbar.update(1)
620
+ writer.write_tensor(os.path.join(self.subdir, name), r)
621
+ return r
622
+
623
+ # NB: the module cast doesn't actually do anything, since there are no
624
+ # parameters/buffers on the module
625
+ if not options.skip_saving_float64_intermediates:
626
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
627
+ with tqdm(desc="Saving float64 intermediates", total=total) as pbar:
628
+ WriterInterp(new_mod, "float64").boxed_run(new_args)
629
+ assert not new_args
630
+
631
+ class ExactReaderInterp(fx.Interpreter):
632
+ def run_node(self, n):
633
+ r = super().run_node(n)
634
+ name = n.name
635
+ if name in known_names:
636
+ meta = writer.compute_tensor_metadata(r)
637
+ meta2 = reader.read_tensor_metadata(os.path.join("float64", name))
638
+ reason = compare_tuples(meta, meta2)
639
+ if reason is not None:
640
+ pbar.write(f"NONDETERMINISTIC FLOAT64 at {name} ({reason})")
641
+ pbar.update(1)
642
+ return r
643
+
644
+ # TODO: check eager determinism
645
+
646
+ if not options.skip_check_deterministic:
647
+ new_mod, new_args = cast_to_fp64(copy.deepcopy(mod), clone_inputs(args))
648
+ with tqdm(desc="Checking float64 determinism", total=total) as pbar:
649
+ ExactReaderInterp(new_mod).boxed_run(new_args)
650
+ assert not new_args
651
+
652
+ # Now that we've saved everything, interp through the eager graph
653
+ # and do comparisons
654
+ class ReaderInterp(fx.Interpreter):
655
+ def run_node(self, n):
656
+ r = super().run_node(n)
657
+ name = n.name
658
+ if name in known_names:
659
+ inductor = reader.read_tensor(os.path.join("inductor", name))
660
+ float64 = reader.read_tensor(os.path.join("float64", name))
661
+ logged = False
662
+
663
+ def log_error(msg, *args):
664
+ nonlocal logged
665
+ logged = True
666
+ pbar.write(f"DIVERGED at {name}: {msg % args}")
667
+
668
+ if not same(
669
+ r,
670
+ inductor,
671
+ float64,
672
+ tol=torch._dynamo.config.repro_tolerance,
673
+ equal_nan=True,
674
+ log_error=log_error,
675
+ ):
676
+ assert logged
677
+ pbar.update(1)
678
+ return r
679
+
680
+ with tqdm(desc="Checking divergence", total=total) as pbar:
681
+ ReaderInterp(mod).boxed_run(args)
682
+ assert not args
683
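+
+ # Illustrative invocation: saves inductor and float64 intermediates under the
+ # save directory, optionally checks both for determinism, then replays the
+ # graph in eager mode and prints "DIVERGED at <node>" wherever the comparison fails.
+ #
+ #   $ python repro.py analyze --save-dir ./repro_inputs --skip-check-deterministic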
+
684
+
685
+ def repro_run(options, mod, load_args):
686
+ from torch._inductor.compile_fx import compile_fx_inner
687
+
688
+ mod, args = repro_common(options, mod, load_args)
689
+
690
+ from torch.cuda import synchronize
691
+
692
+ compiled = compile_fx_inner(mod, args)
693
+
694
+ if options.accuracy != "":
695
+ # We don't really respect --accuracy vs --strict-accuracy here; it
696
+ # seems counterintuitive
697
+ if not same_two_models(mod, compiled, args, only_fwd=True):
698
+ raise AccuracyError("Bad accuracy detected")
699
+ else:
700
+ need_sync = False
701
+ for arg in args:
702
+ if isinstance(arg, torch.Tensor) and arg.is_cuda:
703
+ need_sync = True
704
+ break
705
+ ref = compiled(args)
706
+ if need_sync:
707
+ synchronize() # ensure segfaults are surfaced
708
+
709
+
710
+ # TODO: lazily load the inputs or something, rather than cloning them
711
+ def run_repro(
712
+ mod,
713
+ load_args,
714
+ *,
715
+ command="run",
716
+ accuracy: Union[bool, str] = "",
717
+ save_dir=None,
718
+ tracing_mode=None,
719
+ patch_code=None,
720
+ check_str=None,
721
+ **kwargs,
722
+ ):
723
+ for k in kwargs:
724
+ log.warning(
725
+ "Unrecognized kwarg %s; perhaps this repro was made on a newer version of PyTorch",
726
+ k,
727
+ )
728
+
729
+ if accuracy is True:
730
+ accuracy = "accuracy"
731
+ elif accuracy is False:
732
+ accuracy = ""
733
+
734
+ if patch_code is not None:
735
+ log.warning(
736
+ "patch_code no longer works on this version of PyTorch, silently ignoring"
737
+ )
738
+
739
+ parser = argparse.ArgumentParser(
740
+ description=f"""\
741
+ An after_aot repro script, typically triggering a bug in PyTorch Inductor.
742
+ When run with no arguments, this script defaults to running '{command}'.
743
+ Extra flags may be available; to find out more, try '{command} --help'.
744
+ There are also alternate subcommands available, see below.
745
+
746
+ default settings on this script:
747
+ {accuracy=}
748
+ {tracing_mode=}
749
+ {save_dir=}
750
+ {check_str=}
751
+ """,
752
+ formatter_class=argparse.RawTextHelpFormatter,
753
+ )
754
+
755
+ def common_flags(parser):
756
+ accuracy_group = parser.add_mutually_exclusive_group()
757
+ accuracy_group.add_argument(
758
+ "--no-accuracy",
759
+ dest="accuracy",
760
+ action="store_const",
761
+ const="",
762
+ default=accuracy,
763
+ help="do not test accuracy, just run the module and see if it errors",
764
+ )
765
+ accuracy_group.add_argument(
766
+ "--accuracy",
767
+ action="store_const",
768
+ const="accuracy",
769
+ default=accuracy,
770
+ help="""\
771
+ test if the RMSE between the compiled module and the fp64 reference is greater
772
+ than the RMSE between eager and the fp64 reference. This is usually more reliable than the
773
+ standard allclose test, as we expect numeric differences from compiling, often
774
+ improving accuracy over eager. The RMSE test allows the compiled module to
775
+ diverge greatly from eager, as long as this divergence moves it closer to the
776
+ 'true' mathematical value of the network. Caveats: (1) double precision can
777
+ still suffer from rounding error, so it is not a perfect reference (see for
778
+ example 'Herbie: Automatically Improving Floating Point Accuracy') for
779
+ approaches that detect the necessary working precision and compute it in
780
+ arbitrary precision floating point; unfortunately, this is not practical for
781
+ tensor computation); (2) if there are not enough samples in the output being
782
+ compared, we may get unlucky and observe a greater RMSE than eager; this
783
+ could be overcome by applying a more rigorous statistical test at some
784
+ p-value, which we leave for future work.
785
+ """,
786
+ )
787
+ accuracy_group.add_argument(
788
+ "--strict-accuracy",
789
+ dest="accuracy",
790
+ action="store_const",
791
+ const="strict_accuracy",
792
+ default=accuracy,
793
+ help="""\
794
+ by default, when doing accuracy minification we will reject reductions which
795
+ change the divergence from a floating point divergence to an integral/boolean
796
+ divergence. This is because some operations like ReLU involve temporarily
797
+ sharp boundaries that smooth out again afterwards; without requiring
798
+ divergence on floating point, the minifier will often fixate on a divergent
799
+ boolean tensor even though this is not the true source of the divergence.
800
+ However, rejecting these reductions makes it more difficult for the minifier
801
+ to make progress. Using this option will let the minifier progress for ALL
802
+ divergences--you just might not end up with a useful repro in the end.""",
803
+ )
804
+
805
+ parser.add_argument(
806
+ "--save-dir",
807
+ type=str,
808
+ default=save_dir,
809
+ metavar="DIR",
810
+ help="directory where saved inputs live",
811
+ )
812
+ parser.add_argument(
813
+ "--no-save-dir",
814
+ dest="save_dir",
815
+ action="store_const",
816
+ const=None,
817
+ help="don't use any directory for saved inputs",
818
+ )
819
+ parser.add_argument(
820
+ "--tracing-mode",
821
+ type=str,
822
+ metavar="{real,fake,symbolic}",
823
+ default=tracing_mode,
824
+ help="how to trace the repro module into a GraphModule with metadata",
825
+ )
826
+
827
+ subparsers = parser.add_subparsers(
828
+ dest="command", metavar="{run,minify,analyze}", required=True
829
+ )
830
+
831
+ parser_run = subparsers.add_parser(
832
+ "run",
833
+ help="just run the repro",
834
+ )
835
+ common_flags(parser_run)
836
+
837
+ parser_minify = subparsers.add_parser(
838
+ "minify", help="run the minifier on the repro"
839
+ )
840
+ common_flags(parser_minify)
841
+ parser_minify_isolate = parser_minify.add_mutually_exclusive_group()
842
+ parser_minify_isolate.add_argument(
843
+ "--isolate",
844
+ action="store_true",
845
+ default=True,
846
+ help="run in separate processes to avoid interference (default)",
847
+ )
848
+ parser_minify_isolate.add_argument(
849
+ "--no-isolate",
850
+ dest="isolate",
851
+ action="store_false",
852
+ help="speed up by running all compilation in same process",
853
+ )
854
+ parser_minify.add_argument(
855
+ "--skip-saving-eager-intermediates",
856
+ action="store_true",
857
+ help="skip saving eager intermediates on --minify",
858
+ )
859
+ # TODO: make this an option for --analyze too
860
+ parser_minify.add_argument(
861
+ "--offload-to-disk",
862
+ action="store_true",
863
+ help="during minification, offload delta debugging intermediates to disk. Use if you're OOMing",
864
+ )
865
+ parser_minify.add_argument(
866
+ "--skip-sanity",
867
+ action="store_true",
868
+ help="skip sanity check at beginning of minification on original graph",
869
+ )
870
+ parser_minify.add_argument(
871
+ "--max-granularity",
872
+ type=int,
873
+ default=None,
874
+ help="start at this granularity and work down; must be power of 2",
875
+ )
876
+ parser_minify.add_argument(
877
+ "--check-str",
878
+ type=str,
879
+ default=check_str,
880
+ help="require minified program to fail with error containing this string",
881
+ )
882
+
883
+ parser_analyze = subparsers.add_parser(
884
+ "analyze", help="run the accuracy analyzer on the repro"
885
+ )
886
+ common_flags(parser_analyze)
887
+ parser_analyze.add_argument(
888
+ "--skip-saving-inductor-intermediates",
889
+ action="store_true",
890
+ help="skip saving inductor intermediates on --analyze",
891
+ )
892
+ parser_analyze.add_argument(
893
+ "--skip-saving-float64-intermediates",
894
+ action="store_true",
895
+ help="skip saving float64 intermediates",
896
+ )
897
+ parser_analyze.add_argument(
898
+ "--skip-check-deterministic",
899
+ action="store_true",
900
+ help="skip checking that the network is deterministic",
901
+ )
902
+ parser_analyze.add_argument(
903
+ "--stable-hash",
904
+ action="store_true",
905
+ help="use SHA-1 checksum instead of fast (but possibly unsound) hash",
906
+ )
907
+
908
+ # Run the repro in the context of minification, inverting exit code meaning
909
+ parser_minifier_query = subparsers.add_parser(
910
+ "minifier-query",
911
+ )
912
+ common_flags(parser_minifier_query)
913
+ parser_minifier_query.add_argument(
914
+ "--check-str",
915
+ type=str,
916
+ default=check_str,
917
+ help="require minified program to fail with error containing this string",
918
+ )
919
+
920
+ args = None
921
+ if len(sys.argv) <= 1:
922
+ args = [command, *sys.argv[1:]]
923
+
924
+ options = parser.parse_args(args)
925
+ COMMAND_FNS = {
926
+ "minify": repro_minify,
927
+ "analyze": repro_analyze,
928
+ "minifier-query": repro_minifier_query,
929
+ "run": repro_run,
930
+ }
931
+ COMMAND_FNS[options.command](options, mod, load_args)
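+
+ # Putting it together (illustrative shell usage): a script generated by
+ # save_graph_repro ends in a run_repro(...) call, so the argparse setup above
+ # gives it four subcommands:
+ #
+ #   $ python repro.py run                  # default: just replay the graph
+ #   $ python repro.py run --accuracy       # RMSE-vs-fp64 accuracy check
+ #   $ python repro.py minify --no-isolate  # delta-debug the graph in-process
+ #   $ python repro.py analyze              # dump and compare intermediates
+ #   $ python repro.py minifier-query       # exit 1 iff the failure reproduces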