applied-ai-018 committed on
Commit 7ea6729 · verified · 1 Parent(s): e6221a1

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_export/__init__.py +1155 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_export/db/__init__.py +5 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_export/db/case.py +188 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py +52 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py +24 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py +26 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py +24 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py +46 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py +41 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py +59 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py +35 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py +25 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py +22 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py +26 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py +17 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py +18 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py +15 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py +21 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py +19 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py +15 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py +17 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py +28 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py +17 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py +18 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py +25 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py +19 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py +16 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py +19 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py +29 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py +23 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py +16 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py +17 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py +31 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py +18 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/_export/db/gen_example.py +28 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/_export/db/logging.py +2 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/_export/exported_program.py +430 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__init__.py +1 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constant_tensor_pass.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/torch/_export/__init__.py ADDED
@@ -0,0 +1,1155 @@
1
+ import copy
2
+ import dataclasses
3
+ import functools
4
+ import io
5
+ import json
6
+ import pathlib
7
+ import re
8
+ import sys
9
+
10
+ import types
11
+ import warnings
12
+ import weakref
13
+ import zipfile
14
+ from collections import OrderedDict
15
+ from contextlib import contextmanager
16
+
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+ from unittest.mock import patch
19
+
20
+ import sympy
21
+
22
+ import torch
23
+ import torch._dynamo
24
+ import torch.fx
25
+ import torch.fx._pytree as fx_pytree
26
+
27
+ import torch.utils._pytree as pytree
28
+ from torch._decomp import core_aten_decompositions, get_decompositions
29
+ from torch._dispatch.python import enable_python_dispatcher
30
+ from torch._dynamo.exc import UserError, UserErrorType
31
+ from torch._dynamo.source import ConstantSource
32
+ from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass
33
+ from torch._functorch.aot_autograd import aot_export_module, GraphSignature
34
+ from torch._functorch.eager_transforms import functionalize
35
+ from torch._guards import detect_fake_mode
36
+ from torch._ops import OpOverload
37
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
38
+ from torch.export import _create_constraint, _Dim, Constraint
39
+ from torch.export.exported_program import (
40
+ ExportedProgram,
41
+ ModuleCallEntry,
42
+ ModuleCallSignature,
43
+ _disable_prexisiting_fake_mode,
44
+ )
45
+ from torch.export.graph_signature import (
46
+ _sig_to_specs,
47
+ ArgumentSpec,
48
+ ConstantArgument,
49
+ ExportGraphSignature,
50
+ InputKind,
51
+ InputSpec,
52
+ OutputKind,
53
+ OutputSpec,
54
+ SymIntArgument,
55
+ TensorArgument,
56
+ )
57
+ from torch.fx import traceback as fx_traceback
58
+ from torch.fx._compatibility import compatibility
59
+ from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
60
+ from torch.fx.experimental.symbolic_shapes import (
61
+ ConstraintViolationError,
62
+ GuardOnDataDependentSymNode,
63
+ ShapeEnv,
64
+ StrictMinMaxConstraint,
65
+ )
66
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
67
+ from torch.utils._sympy.value_ranges import ValueRangeError, ValueRanges
68
+
69
+ from .exported_program import (
70
+ _create_stateful_graph_module,
71
+ _process_constraints,
72
+ CallSpec,
73
+ )
74
+ from .passes.add_runtime_assertions_for_constraints_pass import (
75
+ _AddRuntimeAssertionsForInlineConstraintsPass,
76
+ )
77
+ from .passes.lift_constant_tensor_pass import lift_constant_tensor_pass
78
+ from .passes.remove_runtime_assertions import _RemoveRuntimeAssertionsPass
79
+ from .passes.replace_sym_size_ops_pass import _replace_sym_size_ops_pass
80
+ from .passes.replace_view_ops_with_view_copy_ops_pass import (
81
+ ReplaceViewOpsWithViewCopyOpsPass,
82
+ )
83
+ from .wrappers import _wrap_submodules
84
+
85
+
86
+ def _process_dynamic_shapes(
87
+ f: Callable,
88
+ args: Tuple[Any, ...],
89
+ kwargs: Optional[Dict[str, Any]] = None,
90
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None,
91
+ ) -> Optional[List[Constraint]]:
92
+ if dynamic_shapes is None or len(dynamic_shapes) == 0:
93
+ return None
94
+
95
+ kwargs = kwargs if kwargs is not None else {}
96
+
97
+ from collections.abc import Mapping, Sequence
98
+
99
+ def tree_zip(combined_args, dynamic_shapes):
100
+ if isinstance(combined_args, (tuple, list)):
101
+ if not isinstance(dynamic_shapes, Sequence):
102
+ raise UserError(
103
+ UserErrorType.INVALID_INPUT,
104
+ f"Expected dynamic_shapes of a {type(combined_args)} to be a Sequence, "
105
+ f"got {dynamic_shapes} instead",
106
+ )
107
+ if len(combined_args) != len(dynamic_shapes):
108
+ raise UserError(
109
+ UserErrorType.INVALID_INPUT,
110
+ f"Expected {dynamic_shapes} to have {len(combined_args)} items",
111
+ )
112
+ for i, shape in enumerate(dynamic_shapes):
113
+ yield from tree_zip(combined_args[i], shape)
114
+ elif isinstance(combined_args, dict):
115
+ if not isinstance(dynamic_shapes, Mapping):
116
+ raise UserError(
117
+ UserErrorType.INVALID_INPUT,
118
+ f"Expected dynamic_shapes of a {type(combined_args)} to be a Mapping, "
119
+ f"got {dynamic_shapes} instead",
120
+ )
121
+ if len(combined_args) != len(dynamic_shapes):
122
+ raise UserError(
123
+ UserErrorType.INVALID_INPUT,
124
+ f"Expected {dynamic_shapes} to have {len(combined_args)} items",
125
+ )
126
+ for k, shape in dynamic_shapes.items():
127
+ yield from tree_zip(combined_args[k], shape)
128
+ elif dataclasses.is_dataclass(combined_args):
129
+ if not type(dynamic_shapes) == type(combined_args):
130
+ raise UserError(
131
+ UserErrorType.INVALID_INPUT,
132
+ f"Expected dynamic_shapes of a {type(combined_args)} to be a {type(combined_args)}, "
133
+ f"got {dynamic_shapes} instead",
134
+ )
135
+ for f in dataclasses.fields(combined_args):
136
+ yield from tree_zip(getattr(combined_args, f.name), getattr(dynamic_shapes, f.name))
137
+ elif isinstance(combined_args, torch.Tensor):
138
+ yield (combined_args, dynamic_shapes)
139
+ else:
140
+ if dynamic_shapes is not None:
141
+ raise UserError(
142
+ UserErrorType.INVALID_INPUT,
143
+ f"Expected dynamic_shapes of a {type(combined_args)} to be None, "
144
+ f"got {dynamic_shapes} instead",
145
+ )
146
+
147
+ def to_constraint(dim, tensor, i):
148
+ constraint = dynamic_dim(tensor, i, debug_name=dim.__name__)
149
+ if dim.min != 2:
150
+ constraint = constraint >= dim.min
151
+ if dim.max != sys.maxsize - 1:
152
+ constraint = constraint <= dim.max
153
+ return constraint
154
+
155
+ from collections import defaultdict
156
+ symbols = defaultdict(list)
157
+ bounds: Dict[str, Tuple[int, int]] = {}
158
+
159
+ def check_same_bounds(dim):
160
+ if dim.__name__ in symbols:
161
+ min_, max_ = bounds[dim.__name__]
162
+ if dim.min != min_ or dim.max != max_:
163
+ this_ = _Dim.readable(dim.__name__, min_, max_)
164
+ that_ = _Dim.readable(dim.__name__, dim.min, dim.max)
165
+ raise UserError(
166
+ UserErrorType.INVALID_INPUT,
167
+ f"Found different definitions {this_} and {that_} "
168
+ f"for the same symbolic dimension {dim}!"
169
+ )
170
+
171
+ else:
172
+ bounds[dim.__name__] = (dim.min, dim.max)
173
+
174
+ def update_symbols(tensor, shape):
175
+ if isinstance(shape, dict):
176
+ for i, dim in shape.items():
177
+ if isinstance(dim, _Dim):
178
+ check_same_bounds(dim)
179
+ symbols[dim.__name__].append(to_constraint(dim, tensor, i))
180
+ else:
181
+ if dim is not None:
182
+ raise UserError(
183
+ UserErrorType.INVALID_INPUT,
184
+ f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, "
185
+ "try None instead",
186
+ )
187
+ elif isinstance(shape, (tuple, list)):
188
+ for i, dim in enumerate(shape):
189
+ if isinstance(dim, _Dim):
190
+ check_same_bounds(dim)
191
+ symbols[dim.__name__].append(to_constraint(dim, tensor, i))
192
+ else:
193
+ if dim is not None:
194
+ raise UserError(
195
+ UserErrorType.INVALID_INPUT,
196
+ f"Unexpected item #{i} ({dim}) in dynamic_shape {shape} of Tensor, "
197
+ "try None instead",
198
+ )
199
+ else:
200
+ if shape is not None:
201
+ raise UserError(
202
+ UserErrorType.INVALID_INPUT,
203
+ f"Unexpected dynamic_shape {shape} of Tensor, "
204
+ "try None instead",
205
+ )
206
+
207
+ import inspect
208
+ if isinstance(f, ExportedProgram):
209
+ f = f.module()
210
+ signature = inspect.signature(f.forward) if isinstance(f, torch.nn.Module) else inspect.signature(f)
211
+ combined_args = signature.bind(*args, **kwargs).arguments
212
+
213
+ # This means user didn't specify dynamic shapes with argument names.
214
+ combined_args = combined_args if isinstance(dynamic_shapes, Mapping) else list(combined_args.values()) # type: ignore[assignment]
215
+ for tensor, shape in tree_zip(combined_args, dynamic_shapes):
216
+ update_symbols(tensor, shape)
217
+
218
+ constraints = []
219
+ for dynamic_dims in symbols.values():
220
+ primary, *others = dynamic_dims
221
+ if others:
222
+ for other in others:
223
+ constraints.append(primary == other)
224
+ else:
225
+ constraints.append(primary)
226
+
227
+ return constraints
228
+
229
+
230
+ def export__RC__(
231
+ f: Callable,
232
+ args: Tuple[Any, ...],
233
+ kwargs: Optional[Dict[str, Any]] = None,
234
+ *,
235
+ dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None,
236
+ strict: bool = True,
237
+ preserve_module_call_signature: Tuple[str, ...] = (),
238
+ ) -> ExportedProgram:
239
+ """
240
+ API for exporting with dynamic shape specifications instead of constraints.
241
+ It should be considered "release candidate" (RC), meant to replace `export`.
242
+
243
+ Here, `dynamic_shapes` is expected to be a dict from
244
+ argument names of `f` to dynamic shape specifications OR a tuple where each element
245
+ corresponds to the original order of the arguments defined in the function signature,
246
+ as follows:
247
+ - The dynamic shape of a tensor argument can be specified as:
248
+ - Either a dict from dynamic dimension indices to Dim types. It is not
249
+ required to include static dimension indices in this dict, but when
250
+ they are, they should be mapped to None.
251
+ - Or a tuple of Dim types or None. The Dim types correspond to dynamic
252
+ dimensions, whereas static dimensions are denoted by None.
253
+ - Arguments that are dicts or tuples of tensors are recursively specified
254
+ by using mappings or sequences of contained specifications.
255
+
256
+ See `export` for documentation of `f`, `args`, `kwargs` and return.
257
+ """
258
+ constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes)
259
+ return _export(
260
+ f,
261
+ args,
262
+ kwargs,
263
+ constraints=constraints,
264
+ strict=strict,
265
+ preserve_module_call_signature=preserve_module_call_signature
266
+ )
267
+
268
+
269
+ def dynamic_dim(t: torch.Tensor, index: int, debug_name: Optional[str] = None):
270
+ if not isinstance(t, torch.Tensor):
271
+ raise UserError(
272
+ UserErrorType.DYNAMIC_DIM,
273
+ f"Expected tensor as input to dynamic_dim but got {type(t)}"
274
+ )
275
+
276
+ if t.dim() < 1:
277
+ raise UserError(
278
+ UserErrorType.DYNAMIC_DIM,
279
+ "Cannot mark 0-dimension tensors to be dynamic"
280
+ )
281
+
282
+ if index >= t.dim():
283
+ raise UserError(
284
+ UserErrorType.DYNAMIC_DIM,
285
+ f"Expected the dimension passed to dynamic_dim to be in the range [0:{t.dim()-1}]"
286
+ f" but got {index}, which is out of bounds for the given tensor."
287
+ )
288
+
289
+ return _create_constraint(
290
+ weakref.ref(t),
291
+ id(t),
292
+ index,
293
+ StrictMinMaxConstraint(
294
+ vr=ValueRanges(lower=2, upper=sympy.oo), warn_only=False
295
+ ),
296
+ debug_name=debug_name,
297
+ )
298
+
299
+
300
+ @dataclasses.dataclass
301
+ class ExportDynamoConfig:
302
+ """
303
+ Manage Export-specific configurations of Dynamo.
304
+ """
305
+ allow_rnn: bool = True
306
+
307
+ DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig()
308
+
309
+
310
+ DECOMP_TABLE = core_aten_decompositions()
311
+
312
+
313
+ # TODO(zhxchen17) This is not needed if we output pre_dispatch graph upfront from export().
314
+ @contextmanager
315
+ def _disable_decomp_table():
316
+ global DECOMP_TABLE
317
+ prev, DECOMP_TABLE = DECOMP_TABLE, {}
318
+ try:
319
+ yield
320
+ finally:
321
+ DECOMP_TABLE = prev
322
+
323
+
324
+ @compatibility(is_backward_compatible=False)
325
+ def capture_pre_autograd_graph(
326
+ f: Callable,
327
+ args: Tuple[Any],
328
+ kwargs: Optional[Dict[str, Any]] = None,
329
+ constraints: Optional[List[Constraint]] = None,
330
+ ) -> torch.nn.Module:
331
+ """
332
+ A helper function that is intended to trace a module before any pre-autograd
333
+ decomposition is run. The produced module will be "non-functional" and
334
+ composed of aten operators. Later this API will be deleted in favor of a more general
335
+ torch.export API.
336
+
337
+ Args:
338
+ f: A callable to be traced
339
+
340
+ args: example positional inputs.
341
+
342
+ kwargs: optional example keyword inputs.
343
+
344
+ constraints: An optional list of constraints on the dynamic arguments specifying
345
+ the possible ranges of their shapes
346
+
347
+ Returns:
348
+ An nn.Module containing the traced method.
349
+
350
+ """
351
+
352
+ decomp_table = {
353
+ torch.ops.aten.dropout.default: torch.ops.aten.dropout.default.decompose,
354
+ torch.ops.aten.batch_norm.default: torch.ops.aten.batch_norm.default.decompose,
355
+ torch.ops.aten._batch_norm_impl_index.default: torch.ops.aten._batch_norm_impl_index.default.decompose,
356
+ torch.ops.aten.native_batch_norm.default: torch.ops.aten.native_batch_norm.default.decompose,
357
+ }
358
+
359
+ if kwargs is None:
360
+ kwargs = {}
361
+
362
+ with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
363
+ m = torch._dynamo.export(
364
+ f,
365
+ constraints=constraints,
366
+ assume_static_by_default=True,
367
+ tracing_mode="symbolic",
368
+ decomposition_table=decomp_table,
369
+ pre_dispatch=True,
370
+ aten_graph=True,
371
+ )(
372
+ *args,
373
+ **kwargs,
374
+ )[0]
375
+
376
+ def _train(self, mode: bool = True):
377
+ raise NotImplementedError("Calling train() is not supported yet.")
378
+
379
+ def _eval(self, mode: bool = True):
380
+ raise NotImplementedError("Calling eval() is not supported yet.")
381
+
382
+ _, _, _, fake_mode = _convert_input_to_fake(m, args, kwargs)
383
+
384
+ m.meta["inline_constraints"] = {
385
+ k: v
386
+ for k, v in fake_mode.shape_env.runtime_var_to_range.items()
387
+ if re.match(r"^[if]\d+$", str(k))
388
+ }
389
+
390
+ flat_args, _ = pytree.tree_flatten((args, kwargs or {}))
391
+ range_constraints, equality_constraints = _process_constraints(m, 0, flat_args)
392
+ unlifted_m = _create_stateful_graph_module(
393
+ m,
394
+ range_constraints=range_constraints,
395
+ equality_constraints=equality_constraints,
396
+ )
397
+ unlifted_m.train = types.MethodType(_train, m) # type: ignore[method-assign]
398
+ unlifted_m.eval = types.MethodType(_eval, m) # type: ignore[method-assign]
399
+ return unlifted_m
400
+
401
+
402
+ def _convert_input_to_fake(gm, args, kwargs):
403
+ if len(args) == 0 and len(kwargs) == 0 and len(dict(gm.named_parameters())) == 0 and len(dict(gm.named_buffers())) == 0:
404
+ return [], {}, {}, None
405
+
406
+ fake_inps: List[torch.Tensor] = []
407
+ fake_mode = None
408
+ for node in gm.graph.nodes:
409
+ if node.op == "placeholder" and "val" in node.meta:
410
+ fake_val = node.meta["val"]
411
+ if fake_val is not None and isinstance(fake_val, torch.Tensor):
412
+ fake_inps.append(fake_val)
413
+
414
+ if detected_fake_mode := detect_fake_mode(fake_inps):
415
+ fake_mode = detected_fake_mode
416
+
417
+ assert fake_mode is not None, "Cannot find fake_mode attached to the graph's placeholders."
418
+
419
+ count = 0
420
+
421
+ def convert_to_fake(x):
422
+ nonlocal count
423
+ val = fake_inps[count]
424
+ count += 1
425
+ return val
426
+
427
+ fake_args = pytree.tree_map_only(torch.Tensor, convert_to_fake, args)
428
+ # TODO properly use the cached fake tensor
429
+ fake_kwargs = pytree.tree_map_only(torch.Tensor, fake_mode.from_tensor, kwargs)
430
+ fake_params_buffers = pytree.tree_map_only(torch.Tensor,
431
+ functools.partial(fake_mode.from_tensor, static_shapes=True),
432
+ {**dict(gm.named_parameters(remove_duplicate=False)),
433
+ **dict(gm.named_buffers(remove_duplicate=False))})
434
+ return fake_args, fake_kwargs, fake_params_buffers, fake_mode
435
+
436
+
437
+ def _replace_param_buffer_names(param_buffer_table, sig):
438
+ for spec in sig.input_specs:
439
+ spec.target = param_buffer_table.get(spec.target, spec.target)
440
+ for spec in sig.output_specs:
441
+ spec.target = param_buffer_table.get(spec.target, spec.target)
442
+
443
+
444
+ def _normalize_nn_module_stack(gm_torch_level, root_cls):
445
+ # Append a root module to every nn_module_stack.
446
+ root = "L['self']"
447
+ root_key = re.sub(r'[^a-zA-Z0-9]', '_', root)
448
+ for gm in gm_torch_level.modules():
449
+ if not isinstance(gm, torch.fx.GraphModule):
450
+ continue
451
+ for node in gm.graph.nodes:
452
+ if node.op in ["placeholder", "output"]:
453
+ continue
454
+ add_root = True
455
+ if nn_module_stack := node.meta.get("nn_module_stack", {}):
456
+ path, ty = next(iter(nn_module_stack.values()))
457
+ assert issubclass(ty, torch.nn.Module)
458
+ # TODO Figure out why sometimes we have root sometimes we don't.
459
+ if path == root and ty is root_cls:
460
+ add_root = False
461
+ if add_root:
462
+ def normalize_path(path):
463
+ try:
464
+ parts = []
465
+
466
+ class Path:
467
+ def __getattr__(self, name):
468
+ parts.append(name)
469
+ return self
470
+
471
+ def __getitem__(self, idx):
472
+ parts.append(str(idx))
473
+ return self
474
+
475
+ eval(path, {"L": {"self": Path()}})
476
+ return ".".join(parts)
477
+ except Exception: # TODO(zhxchen17) Remove this.
478
+ return path
479
+
480
+ nn_module_stack = {root_key: (root, root_cls), **nn_module_stack}
481
+ node.meta["nn_module_stack"] = {
482
+ key: (normalize_path(path), ty)
483
+ for key, (path, ty) in nn_module_stack.items()
484
+ }
485
+
486
+ def _export_to_torch_ir(
487
+ f: Callable,
488
+ args: Tuple[Any, ...],
489
+ kwargs: Optional[Dict[str, Any]] = None,
490
+ constraints: Optional[List[Constraint]] = None,
491
+ *,
492
+ preserve_module_call_signature: Tuple[str, ...] = (),
493
+ disable_constraint_solver: bool = False,
494
+ ) -> torch.fx.GraphModule:
495
+ """
496
+ Traces either an nn.Module's forward function or just a callable with PyTorch
497
+ operations inside and produces a torch.fx.GraphModule in torch IR.
498
+ """
499
+
500
+ constraints = constraints or []
501
+ kwargs = kwargs or {}
502
+
503
+ if not isinstance(args, tuple):
504
+ raise UserError(UserErrorType.INVALID_INPUT,
505
+ f"Expecting `args` to be a tuple of example positional inputs, got {type(args)}")
506
+
507
+ # We convert to nn.Module because __call__ of ExportedProgram
508
+ # is untraceable right now.
509
+ if isinstance(f, ExportedProgram):
510
+ f = f.module()
511
+
512
+ with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
513
+ try:
514
+ module_call_specs: Dict[str, Dict[str, pytree.TreeSpec]] = {}
515
+ with _wrap_submodules(f, preserve_module_call_signature, module_call_specs):
516
+ gm_torch_level, _ = torch._dynamo.export(
517
+ f,
518
+ constraints=constraints,
519
+ assume_static_by_default=True,
520
+ tracing_mode="symbolic",
521
+ disable_constraint_solver=disable_constraint_solver,
522
+ )(
523
+ *args,
524
+ **kwargs,
525
+ )
526
+ except (ConstraintViolationError, ValueRangeError) as e:
527
+ raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200
528
+ except GuardOnDataDependentSymNode as e:
529
+ raise UserError( # noqa: TRY200
530
+ UserErrorType.ANTI_PATTERN,
531
+ f"Consider annotating your code using torch._constrain_as_*(). {str(e)}",
532
+ case_name="constrain_as_size_example",
533
+ )
534
+
535
+ gm_torch_level.meta["module_call_specs"] = module_call_specs
536
+ return gm_torch_level
537
+
538
+
539
+ def export(
540
+ f: Callable,
541
+ args: Tuple[Any, ...],
542
+ kwargs: Optional[Dict[str, Any]] = None,
543
+ constraints: Optional[List[Constraint]] = None,
544
+ *,
545
+ strict: bool = True,
546
+ preserve_module_call_signature: Tuple[str, ...] = (),
547
+ ) -> ExportedProgram:
548
+
549
+ if constraints is not None:
550
+ warnings.warn(
551
+ "Using `constraints` to specify dynamic shapes for export is DEPRECATED "
552
+ "and will not be supported in the future. "
553
+ "Please use `dynamic_shapes` instead (see docs on `torch.export.export`).",
554
+ DeprecationWarning,
555
+ stacklevel=2,
556
+ )
557
+ return _export(
558
+ f,
559
+ args,
560
+ kwargs,
561
+ constraints,
562
+ strict=strict,
563
+ preserve_module_call_signature=preserve_module_call_signature,
564
+ )
565
+
566
+
567
+ def _unlift_user_inputs_to_buffers(
568
+ gm_torch_level: torch.fx.GraphModule,
569
+ aot_export_args
570
+ ) -> List[str]:
571
+ flat_args = pytree.tree_leaves(aot_export_args)
572
+ user_input_names = []
573
+ with gm_torch_level.graph.inserting_before():
574
+ for i, (arg, node) in enumerate(zip(flat_args, gm_torch_level.graph.nodes)):
575
+ assert node.op == "placeholder"
576
+ user_input_names.append(node.name)
577
+ if isinstance(arg, torch.Tensor):
578
+ assert not hasattr(gm_torch_level, node.name)
579
+ gm_torch_level.register_buffer(node.name, arg)
580
+ get_attr = gm_torch_level.graph.get_attr(node.name)
581
+ node.replace_all_uses_with(get_attr)
582
+ get_attr.meta = copy.copy(node.meta)
583
+
584
+ for node in list(gm_torch_level.graph.nodes):
585
+ if node.op == "placeholder":
586
+ assert len(node.users) == 0
587
+ gm_torch_level.graph.erase_node(node)
588
+ gm_torch_level.recompile()
589
+ return user_input_names
590
+
591
+
592
+ def _lift_buffers_to_user_inputs(
593
+ gm: torch.fx.GraphModule,
594
+ graph_signature: GraphSignature,
595
+ user_input_names: List[str]
596
+ ) -> Dict[str, str]:
597
+ assert len(graph_signature.user_inputs) == 0
598
+ assert graph_signature.backward_signature is None
599
+ names = set(user_input_names)
600
+
601
+ placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"]
602
+ # user inputs are always added in the end
603
+ start = len(graph_signature.parameters)
604
+ end = start + len(graph_signature.buffers)
605
+ buffer_nodes = placeholders[start:end]
606
+ last_placeholder_node = placeholders[-1] if len(placeholders) > 0 else None
607
+ old_nodes: Dict[str, torch.fx.Node] = {}
608
+ for node in buffer_nodes:
609
+ buffer_name = graph_signature.inputs_to_buffers[node.name]
610
+ if buffer_name not in names:
611
+ continue
612
+ old_nodes[buffer_name] = node
613
+ replaces = {}
614
+ new_node_names: Dict[str, str] = {}
615
+ with gm.graph.inserting_after(last_placeholder_node):
616
+ for name in reversed(user_input_names):
617
+ new_node = gm.graph.placeholder(name)
618
+ new_node.target = new_node.name
619
+ new_node_names[name] = new_node.name
620
+ if name in old_nodes:
621
+ old_node = old_nodes[name]
622
+ new_node.meta = copy.copy(old_node.meta)
623
+ old_node.replace_all_uses_with(new_node)
624
+ replaces[old_node.name] = new_node.name
625
+ new_node_names = dict(reversed(new_node_names.items()))
626
+ for old_node in old_nodes.values():
627
+ gm.graph.erase_node(old_node)
628
+
629
+ gm.recompile()
630
+
631
+ graph_signature.buffers = [b for b in graph_signature.buffers if b not in names]
632
+ graph_signature.inputs_to_buffers = {
633
+ i: b for i, b in graph_signature.inputs_to_buffers.items() if b not in names
634
+ }
635
+ user_inputs_to_mutate = {
636
+ o: b for o, b in graph_signature.buffers_to_mutate.items() if b in names
637
+ }
638
+ graph_signature.buffers_to_mutate = {
639
+ o: b for o, b in graph_signature.buffers_to_mutate.items() if b not in names
640
+ }
641
+ graph_signature.user_inputs.extend(new_node_names.values()) # type: ignore[arg-type]
642
+ graph_signature.user_outputs = [
643
+ replaces[o] if o in replaces else o for o in graph_signature.user_outputs
644
+ ]
645
+ return user_inputs_to_mutate # type: ignore[return-value]
646
+
647
+
648
+ def _export_non_strict(
649
+ mod,
650
+ fake_args,
651
+ fake_kwargs,
652
+ fake_params_buffers,
653
+ *,
654
+ transform=lambda x: x # TODO(zhxchen17) Revisit if this is needed later.
655
+ ):
656
+ # This _reparametrize_module makes sure inputs and module.params/buffers have the same fake_mode,
657
+ # otherwise aot_export_module will error out because it sees a mix of fake_modes.
658
+ # And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about.
659
+ with torch.nn.utils.stateless._reparametrize_module(mod, fake_params_buffers):
660
+ gm, graph_signature = transform(aot_export_module)(
661
+ mod,
662
+ (*fake_args, *fake_kwargs.values()),
663
+ trace_joint=False
664
+ )
665
+
666
+ # NOTE: aot_export adds symint metadata for placeholders with int values;
667
+ # since these become specialized, we replace such metadata with the original values
668
+ flat_args = pytree.tree_leaves((fake_args, fake_kwargs))
669
+ index = 0
670
+ total_param_buffers = len(graph_signature.parameters) + len(graph_signature.buffers)
671
+ for node in gm.graph.nodes:
672
+ if node.op == "placeholder":
673
+ if index >= total_param_buffers:
674
+ user_arg = flat_args[index - total_param_buffers]
675
+ if not isinstance(user_arg, torch.Tensor):
676
+ node.meta["val"] = user_arg
677
+ index += 1
678
+
679
+ is_joint = graph_signature.backward_signature is not None
680
+
681
+ def make_argument_spec(node) -> ArgumentSpec:
682
+ assert "val" in node.meta, f"{node} has no 'val' metadata field"
683
+ val = node.meta["val"]
684
+ if isinstance(val, FakeTensor):
685
+ return TensorArgument(name=node.name)
686
+ elif isinstance(val, torch.SymInt):
687
+ return SymIntArgument(name=node.name)
688
+ else:
689
+ return ConstantArgument(value=val)
690
+
691
+ input_specs, output_specs = _sig_to_specs(
692
+ user_inputs=set(graph_signature.user_inputs),
693
+ inputs_to_parameters=graph_signature.inputs_to_parameters, # type: ignore[arg-type]
694
+ inputs_to_buffers=graph_signature.inputs_to_buffers, # type: ignore[arg-type]
695
+ user_outputs=set(graph_signature.user_outputs), # type: ignore[arg-type]
696
+ buffer_mutations=graph_signature.buffers_to_mutate, # type: ignore[arg-type]
697
+ user_input_mutations=gm.meta.get("user_inputs_to_mutate", {}), # type: ignore[arg-type]
698
+ grad_params=graph_signature.backward_signature.gradients_to_parameters if is_joint else {}, # type: ignore[arg-type, union-attr]
699
+ grad_user_inputs=graph_signature.backward_signature.gradients_to_user_inputs if is_joint else {}, # type: ignore[arg-type, union-attr]
700
+ loss_output=graph_signature.backward_signature.loss_output if is_joint else None, # type: ignore[arg-type, union-attr]
701
+ inputs=[make_argument_spec(node) for node in gm.graph.nodes if node.op == "placeholder"],
702
+ outputs=[make_argument_spec(node) for node in pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args)],
703
+ )
704
+ export_graph_signature = ExportGraphSignature(input_specs=input_specs, output_specs=output_specs)
705
+
706
+ tensor_constants = lift_constant_tensor_pass(gm, export_graph_signature)
707
+
708
+ @dataclasses.dataclass
709
+ class _ExportedProgramNonStrict:
710
+ gm: torch.fx.GraphModule
711
+ sig: ExportGraphSignature
712
+ tensor_constants: Dict[str, torch.Tensor]
713
+
714
+ return _ExportedProgramNonStrict(
715
+ gm,
716
+ export_graph_signature,
717
+ tensor_constants,
718
+ )
719
+
720
+
721
+ def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]:
722
+ params_buffers: Dict[str, torch.Tensor] = {}
723
+ for name, param in mod.named_parameters(remove_duplicate=False):
724
+ params_buffers[name] = param
725
+
726
+ for name, buffer in mod.named_buffers(remove_duplicate=False):
727
+ params_buffers[name] = buffer
728
+ return params_buffers
729
+
730
+
731
+ @_disable_prexisiting_fake_mode
732
+ def _export(
733
+ f: Callable,
734
+ args: Tuple[Any, ...],
735
+ kwargs: Optional[Dict[str, Any]] = None,
736
+ constraints: Optional[List[Constraint]] = None,
737
+ *,
738
+ strict: bool = True,
739
+ preserve_module_call_signature: Tuple[str, ...] = (),
740
+ ) -> ExportedProgram:
741
+ """
742
+ Traces either an nn.Module's forward function or just a callable with PyTorch
743
+ operations inside and produces an ExportedProgram.
744
+
745
+ Args:
746
+ m: the `nn.Module` or callable to trace.
747
+
748
+ args: example positional inputs.
749
+
750
+ kwargs: optional example keyword inputs.
751
+
752
+ constraints: An optional list of constraints on the dynamic arguments specifying
753
+ the possible ranges of their shapes
754
+
755
+ preserve_module_call_signature: A list of submodule paths for which the original
756
+ calling conventions are preserved as metadata.
757
+
758
+ Returns:
759
+ An ExportedProgram containing the traced method.
760
+ """
761
+ constraints = constraints or []
762
+ kwargs = kwargs or {}
763
+
764
+ if not strict:
765
+ assert isinstance(f, torch.nn.Module)
766
+ assert len(preserve_module_call_signature) == 0
767
+ assert len(constraints) == 0, "dynamic shape NYI"
768
+ assert len(kwargs) == 0, "keyword arguments NYI"
769
+ out_spec = None
770
+
771
+ def _tuplify_outputs(aot_export):
772
+ def _aot_export_non_strict(mod, args, **kwargs):
773
+ class Wrapper(torch.nn.Module):
774
+ def __init__(self, mod):
775
+ super().__init__()
776
+ self._export_root = mod
777
+
778
+ def forward(self, *args, **kwargs):
779
+ nonlocal out_spec
780
+ flat_outs, out_spec = pytree.tree_flatten(self._export_root(*args, **kwargs))
781
+ return tuple(flat_outs)
782
+
783
+ gm, sig = aot_export(Wrapper(mod), args, **kwargs)
784
+
785
+ def strip_root(x):
786
+ return x[len('_export_root.'):] if x.startswith('_export_root.') else x
787
+
788
+ sig.parameters = pytree.tree_map(strip_root, sig.parameters)
789
+ sig.buffers = pytree.tree_map(strip_root, sig.buffers)
790
+ sig.inputs_to_buffers = pytree.tree_map(strip_root, sig.inputs_to_buffers)
791
+ sig.inputs_to_parameters = pytree.tree_map(strip_root, sig.inputs_to_parameters)
792
+ sig.buffers_to_mutate = pytree.tree_map(strip_root, sig.buffers_to_mutate)
793
+ return gm, sig
794
+ return _aot_export_non_strict
795
+ ep_non_strict = _export_non_strict(f, args, {}, f.state_dict(), transform=_tuplify_outputs)
796
+ assert out_spec is not None
797
+ return ExportedProgram(
798
+ ep_non_strict.gm,
799
+ ep_non_strict.gm.graph,
800
+ ep_non_strict.sig,
801
+ _get_params_buffers(f),
802
+ {},
803
+ [],
804
+ [ModuleCallEntry("", ModuleCallSignature([], [], pytree.tree_flatten((args, {}))[1], out_spec))],
805
+ (args, kwargs),
806
+ tensor_constants=ep_non_strict.tensor_constants,
807
+ )
808
+
809
+
810
+ gm_torch_level = _export_to_torch_ir(
811
+ f,
812
+ args,
813
+ kwargs,
814
+ constraints,
815
+ preserve_module_call_signature=preserve_module_call_signature,
816
+ )
817
+
818
+ params_buffers = _get_params_buffers(gm_torch_level)
819
+
820
+ # We detect the fake_mode by looking at gm_torch_level's placeholders, this is the fake_mode created in dynamo.
821
+ fake_args, fake_kwargs, fake_params_buffers, dynamo_fake_mode = _convert_input_to_fake(gm_torch_level, args, kwargs)
822
+
823
+ # First, we want to pass through the graph to try populating
824
+ # val field for getattr if there is anything missing.
825
+ # This can happen when quantization adds extra params and forgets
826
+ # to update "val"
827
+ for node in gm_torch_level.graph.nodes:
828
+ if node.op == "get_attr" and "val" not in node.meta:
829
+ attr = getattr(gm_torch_level, node.target)
830
+ # Checks if it is not a HigherOrderOp branch or a module
831
+ if not isinstance(attr, torch.nn.Module):
832
+ assert dynamo_fake_mode is not None, (
833
+ "Cannot find dynamo_fake_mode. This could be due to the exported graph module have no placeholders."
834
+ )
835
+ node.meta["val"] = dynamo_fake_mode.from_tensor(attr, static_shapes=True)
836
+
837
+ # When aot_export lifts the params, we lose the nn_module_stack
838
+ # and source_fn from the param nodes as they are treated as fresh inputs
839
+ # Therefore, we manually extract them before calling into aot_export
840
+ params_buffers_to_node_meta = {}
841
+ for node in gm_torch_level.graph.nodes:
842
+ target = node.target
843
+ meta = node.meta
844
+ if node.op == "call_module":
845
+ submodule = getattr(gm_torch_level, target)
846
+ if isinstance(submodule, torch.nn.Module):
847
+ for name, _ in submodule.named_parameters(recurse=True, remove_duplicate=False):
848
+ params_buffers_to_node_meta[target + "." + name] = meta
849
+
850
+ for name, _ in submodule.named_buffers(recurse=True, remove_duplicate=False):
851
+ params_buffers_to_node_meta[target + "." + name] = meta
852
+
853
+ if node.op == "get_attr":
854
+ submodule = getattr(gm_torch_level, target)
855
+ if not isinstance(submodule, torch.fx.GraphModule):
856
+ params_buffers_to_node_meta[target] = meta
857
+
858
+ # If the call_function uses param as input, we also need to update params' meta
859
+ # with this call_function node's meta.
860
+ # This is basically the same flow as torch.fx.traceback.preserve_meta()
861
+ if node.op == "call_function" and not isinstance(node.target, torch._ops.HigherOrderOperator):
862
+ for arg in node._input_nodes:
863
+ if arg.op == "get_attr":
864
+ for entry in torch.fx.proxy._COPY_META_FIELDS:
865
+ if entry in meta:
866
+ params_buffers_to_node_meta[arg.target][entry] = meta[entry]
867
+
868
+ # Fix the graph output signature to be tuple if scalar
869
+ out_spec = orig_out_spec = gm_torch_level._out_spec
870
+ assert out_spec is not None
871
+ # aot_export expects the return type to always be a tuple.
872
+ if out_spec.type not in (list, tuple):
873
+ out_spec = pytree.TreeSpec(tuple, None, [out_spec])
874
+
875
+ orig_args = gm_torch_level.graph._codegen.pytree_info.orig_args # type: ignore[attr-defined]
876
+
877
+ gm_torch_level.graph._codegen = _PyTreeCodeGen(
878
+ _PyTreeInfo(
879
+ orig_args,
880
+ gm_torch_level._in_spec,
881
+ out_spec,
882
+ )
883
+ )
884
+ gm_torch_level.recompile()
885
+
886
+ param_buffer_table: Dict[str, str] = {}
887
+ if isinstance(f, torch.nn.Module):
888
+ param_lookup: Dict[int, List[str]] = {}
889
+ buffer_lookup: Dict[int, List[str]] = {}
890
+ for name, param in f.named_parameters(remove_duplicate=False):
891
+ param_lookup.setdefault(id(param), []).append(name)
892
+ for name, buffer in f.named_buffers(remove_duplicate=False):
893
+ buffer_lookup.setdefault(id(buffer), []).append(name)
894
+ for dynamo_name, dynamo_param in gm_torch_level.named_parameters(remove_duplicate=False):
895
+ assert dynamo_name not in param_buffer_table
896
+ if id(dynamo_param) in param_lookup:
897
+ param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)].pop()
898
+
899
+ for dynamo_name, dynamo_buffer in gm_torch_level.named_buffers(remove_duplicate=False):
900
+ assert dynamo_name not in param_buffer_table
901
+ if id(dynamo_buffer) in buffer_lookup:
902
+ param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)].pop()
903
+
904
+ if isinstance(f, torch.nn.Module):
905
+ _normalize_nn_module_stack(gm_torch_level, type(f))
906
+
907
+ def _process_user_inputs(aot_export):
908
+ def _aot_export_strict(gm_torch_level: torch.fx.GraphModule, args, **kwargs):
909
+ user_input_names = _unlift_user_inputs_to_buffers(gm_torch_level, args)
910
+ gm, graph_signature = aot_export(gm_torch_level, (), **kwargs)
911
+ user_inputs_to_mutate = _lift_buffers_to_user_inputs(gm, graph_signature, user_input_names)
912
+ # TODO unfortunately preserving graph-level metadata is not
913
+ # working well with aot_export. So we manually copy it.
914
+ # (The node-level meta is addressed above.)
915
+ gm.meta.update(gm_torch_level.meta)
916
+ assert "user_inputs_to_mutate" not in gm.meta
917
+ gm.meta["user_inputs_to_mutate"] = user_inputs_to_mutate
918
+ return gm, graph_signature
919
+
920
+ return _aot_export_strict
921
+
922
+ # Note: aot_export_module doesn't accept kwargs, we'd like to reorder the kwargs as an OrderedDict
923
+ # to follow the order in orig_args and correctly call module
924
+ ep_non_strict = _export_non_strict(
925
+ gm_torch_level,
926
+ fake_args,
927
+ _reorder_kwargs_by_names(orig_args, fake_args, fake_kwargs),
928
+ fake_params_buffers,
929
+ transform=_process_user_inputs
930
+ )
931
+
932
+ gm = ep_non_strict.gm
933
+ export_graph_signature = ep_non_strict.sig
934
+ tensor_constants = ep_non_strict.tensor_constants
935
+
936
+ # After aot_export, set the param/buffer metadata back into placeholders
937
+ # Technically, users can still construct this data from param names
938
+ # without relying on this metadata
939
+ for node in gm.graph.nodes:
940
+ if node.op == "placeholder":
941
+ if node.target in export_graph_signature.inputs_to_parameters:
942
+ param_name = export_graph_signature.inputs_to_parameters[node.target]
943
+ if param_name in params_buffers_to_node_meta:
944
+ for k, v in params_buffers_to_node_meta[param_name].items():
945
+ node.meta[k] = v
946
+ if node.target in export_graph_signature.inputs_to_buffers:
947
+ buffer_name = export_graph_signature.inputs_to_buffers[node.target]
948
+ if buffer_name in params_buffers_to_node_meta:
949
+ for k, v in params_buffers_to_node_meta[buffer_name].items():
950
+ node.meta[k] = v
951
+
952
+ # The unbacked symint symbols are updated in aot_export
953
+ # so we serialize them here instead of inside dynamo
954
+
955
+ # dynamo_fake_mode can be None if there's no placeholder in gm_torch_level
956
+ if dynamo_fake_mode:
957
+ gm.meta["inline_constraints"] = {
958
+ k: v
959
+ for k, v in dynamo_fake_mode.shape_env.runtime_var_to_range.items()
960
+ if re.match(r"^[if]\d+$", str(k))
961
+ }
962
+
963
+ num_lifted = next(
964
+ (i for i, s in enumerate(export_graph_signature.input_specs) if s.kind == InputKind.USER_INPUT), 0
965
+ )
966
+ flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
967
+ range_constraints, equality_constraints = _process_constraints(
968
+ gm,
969
+ num_lifted,
970
+ flat_args,
971
+ )
972
+
973
+ if isinstance(f, torch.nn.Module):
974
+ _replace_param_buffer_names(param_buffer_table, export_graph_signature)
975
+ params_buffers = {param_buffer_table.get(name, name): tensor for name, tensor in params_buffers.items()}
976
+
977
+ module_call_signatures = {
978
+ fqn: ModuleCallSignature(inputs=[], outputs=[], **specs)
979
+ for fqn, specs in gm_torch_level.meta["module_call_specs"].items()
980
+ }
981
+
982
+ if len(preserve_module_call_signature) > 0:
983
+ res = CollectTracepointsPass(module_call_signatures, export_graph_signature)(gm)
984
+ assert res is not None
985
+ gm = res.graph_module
986
+
987
+ assert orig_out_spec is not None
988
+ exported_program = ExportedProgram(
989
+ gm,
990
+ gm.graph,
991
+ export_graph_signature,
992
+ # TODO(zhxchen17) Return empty state_dict for functions.
993
+ params_buffers,
994
+ range_constraints,
995
+ equality_constraints,
996
+ [ModuleCallEntry("", ModuleCallSignature(inputs=[], outputs=[], in_spec=orig_in_spec, out_spec=orig_out_spec))] +
997
+ [ModuleCallEntry(fqn, sig) for fqn, sig in module_call_signatures.items()],
998
+ (args, kwargs),
999
+ tensor_constants=tensor_constants,
1000
+ )
1001
+
1002
+ if len(range_constraints) > 0 or len(equality_constraints) > 0:
1003
+ exported_program = exported_program._transform(
1004
+ _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints, equality_constraints)
1005
+ )
1006
+
1007
+ return exported_program
1008
+
1009
+
1010
+ def _reorder_kwargs_by_names(arg_names: List[str], args: Tuple[Any], kwargs: Dict[str, Any]):
1011
+ assert len(arg_names) == len(args) + len(kwargs), (
1012
+ f"Total number of arg names is expected to be {len(arg_names)} "
1013
+ f"but got {len(args)} positional args, {len(kwargs)} kwargs."
1014
+ )
1015
+ return {kw_name: kwargs[kw_name] for kw_name in arg_names[len(args):]}
1016
+
1017
+
1018
+ def save(
1019
+ ep: ExportedProgram,
1020
+ f: Union[str, pathlib.Path, io.BytesIO],
1021
+ *,
1022
+ extra_files: Optional[Dict[str, Any]] = None,
1023
+ opset_version: Optional[Dict[str, int]] = None,
1024
+ ) -> None:
1025
+ from .serde.serialize import serialize, SerializedArtifact
1026
+ from .serde.schema import SCHEMA_VERSION
1027
+ artifact: SerializedArtifact = serialize(ep, opset_version)
1028
+
1029
+ if isinstance(f, (str, pathlib.Path)):
1030
+ f = str(f)
1031
+
1032
+ with zipfile.ZipFile(f, 'w') as zipf:
1033
+ # Save every field of the SerializedArtifact to a file
1034
+ for field in dataclasses.fields(artifact):
1035
+ field_name = field.name
1036
+ serialized_field = getattr(artifact, field_name)
1037
+ zipf.writestr(f"serialized_{field_name}.json", serialized_field)
1038
+
1039
+ zipf.writestr('version', str(SCHEMA_VERSION))
1040
+
1041
+ # Add extra files if provided
1042
+ if extra_files:
1043
+ for extra_file_name, content in extra_files.items():
1044
+ encoded_content = content.encode('utf-8')
1045
+ zipf.writestr(f"extra_files/{extra_file_name}", encoded_content)
1046
+
1047
+
1048
+ def load(
1049
+ f: Union[str, pathlib.Path, io.BytesIO],
1050
+ *,
1051
+ extra_files: Optional[Dict[str, Any]] = None,
1052
+ expected_opset_version: Optional[Dict[str, int]] = None,
1053
+ ) -> ExportedProgram:
1054
+ if isinstance(f, (str, pathlib.Path)):
1055
+ f = str(f)
1056
+
1057
+ with zipfile.ZipFile(f, 'r') as zipf:
1058
+ # Check the version
1059
+ version = int(zipf.read('version'))
1060
+ from .serde.schema import SCHEMA_VERSION
1061
+
1062
+ if version != SCHEMA_VERSION:
1063
+ raise RuntimeError(
1064
+ f"Serialized version {version} does not match our current "
1065
+ f"schema version {SCHEMA_VERSION}."
1066
+ )
1067
+
1068
+ from .serde.serialize import deserialize, SerializedArtifact
1069
+
1070
+ # Load serialized_ep and serialized_state_dict from the zip file
1071
+ artifact: SerializedArtifact = SerializedArtifact(
1072
+ **{
1073
+ field.name: zipf.read(f"serialized_{field.name}.json")
1074
+ for field in dataclasses.fields(SerializedArtifact)
1075
+ }
1076
+ )
1077
+
1078
+ # Deserialize ExportedProgram
1079
+ ep = deserialize(artifact)
1080
+
1081
+ # Populate extra_files map
1082
+ if extra_files is not None:
1083
+ for filename in extra_files.keys():
1084
+ extra_files[filename] = zipf.read(f"extra_files/{filename}").decode('utf-8')
1085
+
1086
+ return ep
1087
+
1088
+
1089
+ def aot_compile(
1090
+ f: Callable,
1091
+ args: Tuple[Any],
1092
+ kwargs: Optional[Dict[str, Any]] = None,
1093
+ *,
1094
+ constraints: Optional[List[Constraint]] = None,
1095
+ dynamic_shapes: Optional[Dict[str, Any]] = None,
1096
+ options: Optional[Dict[str, Any]] = None,
1097
+ remove_runtime_assertions: bool = False,
1098
+ disable_constraint_solver: bool = False,
1099
+ ) -> str:
1100
+ """
1101
+ Note: this function is not stable yet
1102
+
1103
+ Traces either an nn.Module's forward function or just a callable with PyTorch
1104
+ operations inside, generates executable cpp code from the program, and returns
1105
+ the path to the generated shared library
1106
+
1107
+ Args:
1108
+ f: the `nn.Module` or callable to trace.
1109
+
1110
+ args: example positional inputs.
1111
+
1112
+ kwargs: optional example keyword inputs.
1113
+
1114
+ constraints: An optional list of constraints on the dynamic arguments specifying
1115
+ the possible ranges of their shapes
1116
+
1117
+ dynamic_shapes: An experimental new feature designed to subsume ``constraints``.
1118
+ A dict mapping argument names of ``f`` to their dynamic shape
1119
+ specifications, as follows. Dynamic shape specifications can be a
1120
+ dict from dynamic dimensions to ``Dim`` types, or a tuple/list of
1121
+ ``Optional[Dim]`` corresponding to each input dimension.
1122
+
1123
+ options: A dictionary of options to control inductor
1124
+
1125
+ disable_constraint_solver: Whether the dim constraint solver must be disabled.
1126
+
1127
+ Returns:
1128
+ Path to the generated shared library
1129
+ """
1130
+ if constraints is not None:
1131
+ warnings.warn(
1132
+ "The constraints field is deprecated. "
1133
+ "Please use dynamic_shapes instead."
1134
+ )
1135
+
1136
+ from torch._inductor.decomposition import select_decomp_table
1137
+
1138
+ if constraints is None:
1139
+ constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes)
1140
+
1141
+ # We want to export to Torch IR here to utilize the pre_grad passes in
1142
+ # inductor, which run on Torch IR.
1143
+ gm = _export_to_torch_ir(
1144
+ f,
1145
+ args,
1146
+ kwargs,
1147
+ constraints,
1148
+ disable_constraint_solver=disable_constraint_solver
1149
+ )
1150
+ flat_example_inputs = pytree.arg_tree_leaves(*args, **(kwargs or {}))
1151
+
1152
+ with torch.no_grad():
1153
+ so_path = torch._inductor.aot_compile(gm, flat_example_inputs, options) # type: ignore[arg-type]
1154
+
1155
+ return so_path
env-llmeval/lib/python3.10/site-packages/torch/_export/db/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc ADDED
Binary file (5.39 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc ADDED
Binary file (830 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc ADDED
Binary file (318 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/_export/db/case.py ADDED
@@ -0,0 +1,188 @@
1
+ import inspect
2
+ import re
3
+ import string
4
+ from dataclasses import dataclass, field
5
+ from enum import Enum
6
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
7
+
8
+ import torch
9
+
10
+ _TAGS: Dict[str, Dict[str, Any]] = {
11
+ "torch": {
12
+ "cond": {},
13
+ "dynamic-shape": {},
14
+ "escape-hatch": {},
15
+ "map": {},
16
+ "dynamic-value": {},
17
+ "operator": {},
18
+ "mutation": {},
19
+ },
20
+ "python": {
21
+ "assert": {},
22
+ "builtin": {},
23
+ "closure": {},
24
+ "context-manager": {},
25
+ "control-flow": {},
26
+ "data-structure": {},
27
+ "standard-library": {},
28
+ "object-model": {},
29
+ },
30
+ }
31
+
32
+
33
+ class SupportLevel(Enum):
34
+ """
35
+ Indicates at what stage the feature
36
+ used in the example is handled in export.
37
+ """
38
+
39
+ SUPPORTED = 1
40
+ NOT_SUPPORTED_YET = 0
41
+
42
+
43
+ class ExportArgs:
44
+ __slots__ = ("args", "kwargs")
45
+
46
+ def __init__(self, *args, **kwargs):
47
+ self.args = args
48
+ self.kwargs = kwargs
49
+
50
+
51
+ InputsType = Union[Tuple[Any, ...], ExportArgs]
52
+
53
+
54
+ def check_inputs_type(x):
55
+ if not isinstance(x, (ExportArgs, tuple)):
56
+ raise ValueError(
57
+ f"Expecting inputs type to be either a tuple, or ExportArgs, got: {type(x)}"
58
+ )
59
+
60
+
61
+ def _validate_tag(tag: str):
62
+ parts = tag.split(".")
63
+ t = _TAGS
64
+ for part in parts:
65
+ assert set(part) <= set(
66
+ string.ascii_lowercase + "-"
67
+ ), f"Tag contains invalid characters: {part}"
68
+ if part in t:
69
+ t = t[part]
70
+ else:
71
+ raise ValueError(f"Tag {tag} is not found in registered tags.")
72
+
73
+
74
+ @dataclass(frozen=True)
75
+ class ExportCase:
76
+ example_inputs: InputsType
77
+ description: str # A description of the use case.
78
+ model: torch.nn.Module
79
+ name: str
80
+ extra_inputs: Optional[InputsType] = None # For testing graph generalization.
81
+ # Tags associated with the use case. (e.g dynamic-shape, escape-hatch)
82
+ tags: Set[str] = field(default_factory=set)
83
+ support_level: SupportLevel = SupportLevel.SUPPORTED
84
+ dynamic_shapes: Optional[Dict[str, Any]] = None
85
+
86
+ def __post_init__(self):
87
+ check_inputs_type(self.example_inputs)
88
+ if self.extra_inputs is not None:
89
+ check_inputs_type(self.extra_inputs)
90
+
91
+ for tag in self.tags:
92
+ _validate_tag(tag)
93
+
94
+ if not isinstance(self.description, str) or len(self.description) == 0:
95
+ raise ValueError(f'Invalid description: "{self.description}"')
96
+
97
+
98
+ _EXAMPLE_CASES: Dict[str, ExportCase] = {}
99
+ _MODULES = set()
100
+ _EXAMPLE_CONFLICT_CASES = {}
101
+ _EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {}
102
+
103
+
104
+ def register_db_case(case: ExportCase) -> None:
105
+ """
106
+ Registers a user provided ExportCase into example bank.
107
+ """
108
+ if case.name in _EXAMPLE_CASES:
109
+ if case.name not in _EXAMPLE_CONFLICT_CASES:
110
+ _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]]
111
+ _EXAMPLE_CONFLICT_CASES[case.name].append(case)
112
+ return
113
+
114
+ _EXAMPLE_CASES[case.name] = case
115
+
116
+
117
+ def to_snake_case(name):
118
+ name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
119
+ return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
120
+
121
+
122
+ def _make_export_case(m, name, configs):
123
+ if inspect.isclass(m):
124
+ if not issubclass(m, torch.nn.Module):
125
+ raise TypeError("Export case class should be a torch.nn.Module.")
126
+ m = m()
127
+
128
+ if "description" not in configs:
129
+ # Fallback to docstring if description is missing.
130
+ assert (
131
+ m.__doc__ is not None
132
+ ), f"Could not find description or docstring for export case: {m}"
133
+ configs = {**configs, "description": m.__doc__}
134
+ return ExportCase(**{**configs, "model": m, "name": name})
135
+
136
+
137
+ def export_case(**kwargs):
138
+ """
139
+ Decorator for registering a user-provided case into the example bank.
140
+ """
141
+
142
+ def wrapper(m):
143
+ configs = kwargs
144
+ module = inspect.getmodule(m)
145
+ if module in _MODULES:
146
+ raise RuntimeError("export_case should only be used once per example file.")
147
+
148
+ _MODULES.add(module)
149
+ normalized_name = to_snake_case(m.__name__)
150
+ assert module is not None
151
+ module_name = module.__name__.split(".")[-1]
152
+ if module_name != normalized_name:
153
+ raise RuntimeError(
154
+ f'Module name "{module.__name__}" is inconsistent with exported program '
155
+ + f'name "{m.__name__}". Please rename the module to "{normalized_name}".'
156
+ )
157
+
158
+ case = _make_export_case(m, module_name, configs)
159
+ register_db_case(case)
160
+ return case
161
+
162
+ return wrapper
163
+
164
+
165
+ def export_rewrite_case(**kwargs):
166
+ def wrapper(m):
167
+ configs = kwargs
168
+
169
+ parent = configs.pop("parent")
170
+ assert isinstance(parent, ExportCase)
171
+ key = parent.name
172
+ if key not in _EXAMPLE_REWRITE_CASES:
173
+ _EXAMPLE_REWRITE_CASES[key] = []
174
+
175
+ configs["example_inputs"] = parent.example_inputs
176
+ case = _make_export_case(m, to_snake_case(m.__name__), configs)
177
+ _EXAMPLE_REWRITE_CASES[key].append(case)
178
+ return case
179
+
180
+ return wrapper
181
+
182
+
183
+ def normalize_inputs(x: InputsType) -> ExportArgs:
184
+ if isinstance(x, tuple):
185
+ return ExportArgs(*x)
186
+
187
+ assert isinstance(x, ExportArgs)
188
+ return x
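
As a quick orientation to the helpers above, here is a minimal illustrative sketch of building and registering a case by hand with `ExportCase` and `register_db_case`; the module, name, and shapes are hypothetical:

    import torch
    from torch._export.db.case import ExportCase, SupportLevel, register_db_case

    class MyAddOne(torch.nn.Module):
        def forward(self, x):
            return x + 1

    # Tags must come from the registered _TAGS tree, e.g. "python.builtin".
    case = ExportCase(
        example_inputs=(torch.ones(3, 2),),
        description="Adds one to the input tensor.",
        model=MyAddOne(),
        name="my_add_one",
        tags={"python.builtin"},
        support_level=SupportLevel.SUPPORTED,
    )
    register_db_case(case)
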
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/__init__.py ADDED
@@ -0,0 +1,52 @@
1
+ import glob
2
+ import importlib
3
+ from os.path import basename, dirname, isfile, join
4
+
5
+ import torch
6
+ from torch._export.db.case import (
7
+ _EXAMPLE_CASES,
8
+ _EXAMPLE_CONFLICT_CASES,
9
+ _EXAMPLE_REWRITE_CASES,
10
+ SupportLevel,
11
+ )
12
+
13
+
14
+ modules = glob.glob(join(dirname(__file__), "*.py"))
15
+ __all__ = [
16
+ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py")
17
+ ]
18
+
19
+ # Import all modules in the current directory.
20
+ from . import * # noqa: F403
21
+
22
+
23
+ def all_examples():
24
+ return _EXAMPLE_CASES
25
+
26
+
27
+ if len(_EXAMPLE_CONFLICT_CASES) > 0:
28
+
29
+ def get_name(case):
30
+ model = case.model
31
+ if isinstance(model, torch.nn.Module):
32
+ model = type(model)
33
+ return model.__name__
34
+
35
+ msg = "Error on conflict export case name.\n"
36
+ for case_name, cases in _EXAMPLE_CONFLICT_CASES.items():
37
+ msg += f"Case name {case_name} is associated with multiple cases:\n "
38
+ msg += f"[{','.join(map(get_name, cases))}]\n"
39
+
40
+ raise RuntimeError(msg)
41
+
42
+
43
+ def filter_examples_by_support_level(support_level: SupportLevel):
44
+ return {
45
+ key: val
46
+ for key, val in all_examples().items()
47
+ if val.support_level == support_level
48
+ }
49
+
50
+
51
+ def get_rewrite_cases(case):
52
+ return _EXAMPLE_REWRITE_CASES.get(case.name, [])
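
A short usage sketch for the helpers defined above (illustrative only):

    from torch._export.db.case import SupportLevel
    from torch._export.db.examples import all_examples, filter_examples_by_support_level

    # Every registered example, keyed by its snake_case name.
    print(sorted(all_examples().keys()))

    # Only the cases export is not expected to handle yet.
    for name, case in filter_examples_by_support_level(SupportLevel.NOT_SUPPORTED_YET).items():
        print(name, "-", case.description.strip())
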
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/assume_constant_result.py ADDED
@@ -0,0 +1,24 @@
1
+ import torch
2
+ import torch._dynamo as torchdynamo
3
+
4
+ from torch._export.db.case import export_case
5
+
6
+
7
+ @export_case(
8
+ example_inputs=(torch.ones(3, 2), torch.tensor(4)),
9
+ tags={"torch.escape-hatch"},
10
+ )
11
+ class AssumeConstantResult(torch.nn.Module):
12
+ """
13
+ Applying `assume_constant_result` decorator to burn make non-tracable code as constant.
14
+ """
15
+
16
+ def __init__(self):
17
+ super().__init__()
18
+
19
+ @torchdynamo.assume_constant_result
20
+ def get_item(self, y):
21
+ return y.int().item()
22
+
23
+ def forward(self, x, y):
24
+ return x[: self.get_item(y)]
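
Because `export_case` returns the `ExportCase` itself, the decorated name binds to the case object rather than the module class. A rough sketch of exporting it, assuming a torch version where `torch.export.export` takes a module plus a tuple of args:

    import torch
    from torch._export.db.examples.assume_constant_result import AssumeConstantResult

    case = AssumeConstantResult  # an ExportCase instance
    ep = torch.export.export(case.model, case.example_inputs)
    print(ep.graph)  # the data-dependent slice bound should appear as a burned-in constant
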
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py ADDED
@@ -0,0 +1,26 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ class MyAutogradFunction(torch.autograd.Function):
7
+ @staticmethod
8
+ def forward(ctx, x):
9
+ return x.clone()
10
+
11
+ @staticmethod
12
+ def backward(ctx, grad_output):
13
+ return grad_output + 1
14
+
15
+
16
+ @export_case(
17
+ example_inputs=(torch.randn(3, 2),),
18
+ )
19
+ class AutogradFunction(torch.nn.Module):
20
+ """
21
+ TorchDynamo does not keep track of backward() on autograd functions. We recommend
23
+ using `allow_in_graph` to mitigate this problem.
23
+ """
24
+
25
+ def forward(self, x):
26
+ return MyAutogradFunction.apply(x)
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/class_method.py ADDED
@@ -0,0 +1,24 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 4),),
8
+ )
9
+ class ClassMethod(torch.nn.Module):
10
+ """
11
+ Class methods are inlined during tracing.
12
+ """
13
+
14
+ @classmethod
15
+ def method(cls, x):
16
+ return x + 1
17
+
18
+ def __init__(self):
19
+ super().__init__()
20
+ self.linear = torch.nn.Linear(4, 2)
21
+
22
+ def forward(self, x):
23
+ x = self.linear(x)
24
+ return self.method(x) * self.__class__.method(x) * type(self).method(x)
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_class_method.py ADDED
@@ -0,0 +1,46 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from functorch.experimental.control_flow import cond
5
+
6
+
7
+ class MySubModule(torch.nn.Module):
8
+ def foo(self, x):
9
+ return x.cos()
10
+
11
+ def forward(self, x):
12
+ return self.foo(x)
13
+
14
+
15
+ @export_case(
16
+ example_inputs=(torch.ones(3),),
17
+ tags={
18
+ "torch.cond",
19
+ "torch.dynamic-shape",
20
+ },
21
+ )
22
+ class CondBranchClassMethod(torch.nn.Module):
23
+ """
24
+ The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
26
+ - both branches must take the same args, which must also match the branch args passed to cond.
27
+ - both branches must return a single tensor
28
+ - the returned tensor must have the same tensor metadata, e.g. shape and dtype
29
+ - a branch function can be a free function, a nested function, a lambda, or a class method
30
+ - a branch function cannot have closure variables
31
+ - no in-place mutations on inputs or global variables
31
+
32
+
33
+ This example demonstrates using class method in cond().
34
+
35
+ NOTE: If `pred` is tested on a dim with batch size < 2, it will be specialized.
36
+ """
37
+
38
+ def __init__(self):
39
+ super().__init__()
40
+ self.subm = MySubModule()
41
+
42
+ def bar(self, x):
43
+ return x.sin()
44
+
45
+ def forward(self, x):
46
+ return cond(x.shape[0] <= 2, self.subm.forward, self.bar, [x])
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nested_function.py ADDED
@@ -0,0 +1,41 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from functorch.experimental.control_flow import cond
5
+
6
+
7
+ @export_case(
8
+ example_inputs=(torch.ones(3),),
9
+ tags={
10
+ "torch.cond",
11
+ "torch.dynamic-shape",
12
+ },
13
+ )
14
+ def cond_branch_nested_function(x):
15
+ """
16
+ The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
17
+ - both branches must take the same args, which must also match the branch args passed to cond.
18
+ - both branches must return a single tensor
19
+ - the returned tensor must have the same tensor metadata, e.g. shape and dtype
20
+ - a branch function can be a free function, a nested function, a lambda, or a class method
21
+ - a branch function cannot have closure variables
22
+ - no in-place mutations on inputs or global variables
23
+
24
+ This example demonstrates using nested function in cond().
25
+
26
+ NOTE: If `pred` is tested on a dim with batch size < 2, it will be specialized.
27
+ """
28
+
29
+ def true_fn(x):
30
+ def inner_true_fn(y):
31
+ return x + y
32
+
33
+ return inner_true_fn(x)
34
+
35
+ def false_fn(x):
36
+ def inner_false_fn(y):
37
+ return x - y
38
+
39
+ return inner_false_fn(x)
40
+
41
+ return cond(x.shape[0] < 10, true_fn, false_fn, [x])
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_branch_nonlocal_variables.py ADDED
@@ -0,0 +1,59 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from functorch.experimental.control_flow import cond
5
+
6
+
7
+ @export_case(
8
+ example_inputs=(torch.ones(6),),
9
+ tags={
10
+ "torch.cond",
11
+ "torch.dynamic-shape",
12
+ },
13
+ )
14
+ def cond_branch_nonlocal_variables(x):
15
+ """
16
+ The branch functions (`true_fn` and `false_fn`) passed to cond() must follow these rules:
17
+ - both branches must take the same args, which must also match the branch args passed to cond.
18
+ - both branches must return a single tensor
19
+ - the returned tensor must have the same tensor metadata, e.g. shape and dtype
20
+ - a branch function can be a free function, a nested function, a lambda, or a class method
21
+ - a branch function cannot have closure variables
22
+ - no in-place mutations on inputs or global variables
23
+
24
+ This example demonstrates how to rewrite code to avoid capturing closure variables in branch functions.
25
+
26
+ The code below will not work because capturing closure variables is not supported.
27
+ ```
28
+ my_tensor_var = x + 100
29
+ my_primitive_var = 3.14
30
+
31
+ def true_fn(y):
32
+ nonlocal my_tensor_var, my_primitive_var
33
+ return y + my_tensor_var + my_primitive_var
34
+
35
+ def false_fn(y):
36
+ nonlocal my_tensor_var, my_primitive_var
37
+ return y - my_tensor_var - my_primitive_var
38
+
39
+ return cond(x.shape[0] > 5, true_fn, false_fn, [x])
40
+ ```
41
+
42
+ NOTE: If `pred` is tested on a dim with batch size < 2, it will be specialized.
43
+ """
44
+
45
+ my_tensor_var = x + 100
46
+ my_primitive_var = 3.14
47
+
48
+ def true_fn(x, y, z):
49
+ return x + y + z
50
+
51
+ def false_fn(x, y, z):
52
+ return x - y - z
53
+
54
+ return cond(
55
+ x.shape[0] > 5,
56
+ true_fn,
57
+ false_fn,
58
+ [x, my_tensor_var, torch.tensor(my_primitive_var)],
59
+ )
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_operands.py ADDED
@@ -0,0 +1,35 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from torch.export import Dim
5
+ from functorch.experimental.control_flow import cond
6
+
7
+ x = torch.randn(3, 2)
8
+ y = torch.ones(2)
9
+ dim0_x = Dim("dim0_x")
10
+
11
+ @export_case(
12
+ example_inputs=(x, y),
13
+ tags={
14
+ "torch.cond",
15
+ "torch.dynamic-shape",
16
+ },
17
+ extra_inputs=(torch.randn(2, 2), torch.ones(2)),
18
+ dynamic_shapes={"x": {0: dim0_x}, "y": None},
19
+ )
20
+ def cond_operands(x, y):
21
+ """
22
+ The operands passed to cond() must be:
23
+ - a list of tensors
24
+ - match arguments of `true_fn` and `false_fn`
25
+
26
+ NOTE: If `pred` is tested on a dim with batch size < 2, it will be specialized.
27
+ """
28
+
29
+ def true_fn(x, y):
30
+ return x + y
31
+
32
+ def false_fn(x, y):
33
+ return x - y
34
+
35
+ return cond(x.shape[0] > 2, true_fn, false_fn, [x, y])
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/cond_predicate.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from functorch.experimental.control_flow import cond
5
+
6
+
7
+ @export_case(
8
+ example_inputs=(torch.ones(6, 4, 3),),
9
+ tags={
10
+ "torch.cond",
11
+ "torch.dynamic-shape",
12
+ },
13
+ )
14
+ def cond_predicate(x):
15
+ """
16
+ The conditional statement (aka predicate) passed to cond() must be one of the following:
17
+ - torch.Tensor with a single element
18
+ - boolean expression
19
+
20
+ NOTE: If `pred` is tested on a dim with batch size < 2, it will be specialized.
21
+ """
22
+
23
+ pred = x.dim() > 2 and x.shape[2] > 10
24
+
25
+ return cond(pred, lambda x: x.cos(), lambda y: y.sin(), [x])
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_size_example.py ADDED
@@ -0,0 +1,22 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.tensor(4),),
8
+ tags={
9
+ "torch.dynamic-value",
10
+ "torch.escape-hatch",
11
+ },
12
+ )
13
+ def constrain_as_size_example(x):
14
+ """
15
+ If the value is not known at tracing time, you can provide hint so that we
16
+ can trace further. Please look at constrain_as_value and constrain_as_size APIs
17
+ constrain_as_size is used for values that NEED to be used for constructing
18
+ tensor.
19
+ """
20
+ a = x.item()
21
+ torch._constrain_as_size(a, min=0, max=5)
22
+ return torch.ones((a, 5))
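
For intuition, the same data-dependent pattern can be run eagerly; the constraint only becomes meaningful once `a` turns into a symbolic value during export. A hypothetical illustration using the same `torch._constrain_as_size` call as above:

    import torch

    def make_ones(x):
        a = x.item()
        # Bounds the data-dependent value so export can reason about the output shape.
        torch._constrain_as_size(a, min=0, max=5)
        return torch.ones((a, 5))

    print(make_ones(torch.tensor(4)).shape)  # torch.Size([4, 5])
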
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/decorator.py ADDED
@@ -0,0 +1,26 @@
1
+ import functools
2
+
3
+ import torch
4
+
5
+ from torch._export.db.case import export_case
6
+
7
+
8
+ def test_decorator(func):
9
+ @functools.wraps(func)
10
+ def wrapper(*args, **kwargs):
11
+ return func(*args, **kwargs) + 1
12
+
13
+ return wrapper
14
+
15
+
16
+ @export_case(
17
+ example_inputs=(torch.ones(3, 2), torch.ones(3, 2)),
18
+ )
19
+ class Decorator(torch.nn.Module):
20
+ """
21
+ Decorator calls are inlined into the exported function during tracing.
22
+ """
23
+
24
+ @test_decorator
25
+ def forward(self, x, y):
26
+ return x + y
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dictionary.py ADDED
@@ -0,0 +1,17 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2), torch.tensor(4)),
8
+ tags={"python.data-structure"},
9
+ )
10
+ def dictionary(x, y):
11
+ """
12
+ Dictionary structures are inlined and flattened during tracing.
13
+ """
14
+ elements = {}
15
+ elements["x2"] = x * x
16
+ y = y * elements["x2"]
17
+ return {"y": y}
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_assert.py ADDED
@@ -0,0 +1,18 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"python.assert"},
9
+ )
10
+ def dynamic_shape_assert(x):
11
+ """
12
+ A basic usage of python assertion.
13
+ """
14
+ # assertion with error message
15
+ assert x.shape[0] > 2, f"{x.shape[0]} is greater than 2"
16
+ # assertion without error message
17
+ assert x.shape[0] > 1
18
+ return x
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_constructor.py ADDED
@@ -0,0 +1,15 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"torch.dynamic-shape"},
9
+ )
10
+ def dynamic_shape_constructor(x):
11
+ """
12
+ Tensor constructors should be captured with dynamic shape inputs rather
13
+ than being baked in with static shape.
14
+ """
15
+ return torch.ones(x.shape[0] * 2)
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_if_guard.py ADDED
@@ -0,0 +1,21 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2, 2),),
8
+ tags={"torch.dynamic-shape", "python.control-flow"},
9
+ )
10
+ class DynamicShapeIfGuard(torch.nn.Module):
11
+ """
12
+ `if` statement with backed dynamic shape predicate will be specialized into
13
+ one particular branch and generate a guard. However, export will fail if the
14
+ the dimension is marked as dynamic shape from higher level API.
15
+ """
16
+
17
+ def forward(self, x):
18
+ if x.shape[0] == 3:
19
+ return x.cos()
20
+
21
+ return x.sin()
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py ADDED
@@ -0,0 +1,19 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+ from torch.export import Dim
5
+
6
+ x = torch.ones(3, 2)
7
+ dim0_x = Dim("dim0_x")
8
+
9
+ @export_case(
10
+ example_inputs=(x,),
11
+ tags={"torch.dynamic-shape", "python.builtin"},
12
+ support_level=SupportLevel.NOT_SUPPORTED_YET,
13
+ dynamic_shapes={"x": {0: dim0_x}},
14
+ )
15
+ def dynamic_shape_round(x):
16
+ """
17
+ Calling round on dynamic shapes is not supported.
18
+ """
19
+ return x[: round(x.shape[0] / 2)]
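
A common rewrite that avoids the unsupported `round()` builtin is integer floor division, which stays traceable (note that it rounds down rather than to the nearest integer). A hypothetical sketch:

    import torch
    from torch.export import Dim, export

    class HalfSlice(torch.nn.Module):
        def forward(self, x):
            # Floor division keeps the slice bound as a symbolic integer expression.
            return x[: x.shape[0] // 2]

    ep = export(HalfSlice(), (torch.ones(6, 2),), dynamic_shapes={"x": {0: Dim("dim0_x", min=2)}})
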
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_slicing.py ADDED
@@ -0,0 +1,15 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"torch.dynamic-shape"},
9
+ )
10
+ def dynamic_shape_slicing(x):
11
+ """
12
+ Slices with dynamic shape arguments should be captured into the graph
13
+ rather than being baked in.
14
+ """
15
+ return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2]
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_view.py ADDED
@@ -0,0 +1,17 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(10, 10),),
8
+ tags={"torch.dynamic-shape"},
9
+ )
10
+ def dynamic_shape_view(x):
11
+ """
12
+ Dynamic shapes should be propagated to view arguments instead of being
13
+ baked into the exported graph.
14
+ """
15
+ new_x_shape = x.size()[:-1] + (2, 5)
16
+ x = x.view(*new_x_shape)
17
+ return x.permute(0, 2, 1)
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/fn_with_kwargs.py ADDED
@@ -0,0 +1,28 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, ExportArgs, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=ExportArgs(
8
+ torch.randn(4),
9
+ (torch.randn(4), torch.randn(4)),
10
+ *[torch.randn(4), torch.randn(4)],
11
+ mykw0=torch.randn(4),
12
+ input0=torch.randn(4), input1=torch.randn(4)
13
+ ),
14
+ tags={"python.data-structure"},
15
+ support_level=SupportLevel.SUPPORTED,
16
+ )
17
+ def fn_with_kwargs(pos0, tuple0, *myargs, mykw0, **mykwargs):
18
+ """
19
+ Keyword arguments are not supported at the moment.
20
+ """
21
+ out = pos0
22
+ for arg in tuple0:
23
+ out = out * arg
24
+ for arg in myargs:
25
+ out = out * arg
26
+ out = out * mykw0
27
+ out = out * mykwargs["input0"] * mykwargs["input1"]
28
+ return out
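
To see how mixed positional and keyword inputs like the ones above are packaged, here is a small sketch using `ExportArgs` and `normalize_inputs` from case.py (the values are illustrative):

    import torch
    from torch._export.db.case import ExportArgs, normalize_inputs

    inputs = ExportArgs(torch.randn(4), (torch.randn(4), torch.randn(4)), mykw0=torch.randn(4))
    norm = normalize_inputs(inputs)  # already an ExportArgs, so it is returned as-is
    print(len(norm.args), sorted(norm.kwargs))  # 2 ['mykw0']
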
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py ADDED
@@ -0,0 +1,17 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"torch.dynamic-shape", "python.data-structure", "python.assert"},
9
+ )
10
+ def list_contains(x):
11
+ """
12
+ List containment relations can be checked against a dynamic shape or constants.
13
+ """
14
+ assert x.size(-1) in [6, 2]
15
+ assert x.size(0) not in [4, 5, 6]
16
+ assert "monkey" not in ["cow", "pig"]
17
+ return x + x
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/list_unpack.py ADDED
@@ -0,0 +1,18 @@
1
+ from typing import List
2
+
3
+ import torch
4
+
5
+ from torch._export.db.case import export_case
6
+
7
+
8
+ @export_case(
9
+ example_inputs=([torch.ones(3, 2), torch.tensor(4), torch.tensor(5)],),
10
+ tags={"python.control-flow", "python.data-structure"},
11
+ )
12
+ def list_unpack(args: List[torch.Tensor]):
13
+ """
14
+ Lists are treated as a static construct, so unpacking should be
15
+ erased after tracing.
16
+ """
17
+ x, *y = args
18
+ return x + y[0]
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/model_attr_mutation.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"python.object-model"},
9
+ support_level=SupportLevel.NOT_SUPPORTED_YET,
10
+ )
11
+ class ModelAttrMutation(torch.nn.Module):
12
+ """
13
+ Attribute mutation is not supported.
14
+ """
15
+
16
+ def __init__(self):
17
+ super().__init__()
18
+ self.attr_list = [torch.ones(3, 2), torch.ones(3, 2)]
19
+
20
+ def recreate_list(self):
21
+ return [torch.zeros(3, 2), torch.zeros(3, 2)]
22
+
23
+ def forward(self, x):
24
+ self.attr_list = self.recreate_list()
25
+ return x.sum() + self.attr_list[0].sum()
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py ADDED
@@ -0,0 +1,19 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.randn(2, 3),),
8
+ tags={"python.object-model"},
9
+ support_level=SupportLevel.NOT_SUPPORTED_YET,
10
+ )
11
+ class OptionalInput(torch.nn.Module):
12
+ """
13
+ Tracing through an optional input is not supported yet.
14
+ """
15
+
16
+ def forward(self, x, y=torch.ones(2, 3)):
17
+ if y is not None:
18
+ return x + y
19
+ return x
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/pytree_flatten.py ADDED
@@ -0,0 +1,16 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+ from torch.utils import _pytree as pytree
5
+
6
+
7
+ @export_case(
8
+ example_inputs=({1: torch.randn(3, 2), 2: torch.randn(3, 2)},),
9
+ support_level=SupportLevel.SUPPORTED,
10
+ )
11
+ def pytree_flatten(x):
12
+ """
13
+ Pytree from PyTorch cannot be captured by TorchDynamo.
14
+ """
15
+ y, spec = pytree.tree_flatten(x)
16
+ return y[0] + 1
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/scalar_output.py ADDED
@@ -0,0 +1,19 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+ from torch.export import Dim
5
+
6
+ x = torch.ones(3, 2)
7
+ dim1_x = Dim("dim1_x")
8
+
9
+ @export_case(
10
+ example_inputs=(x,),
11
+ tags={"torch.dynamic-shape"},
12
+ dynamic_shapes={"x": {1: dim1_x}},
13
+ )
14
+ def scalar_output(x):
15
+ """
16
+ Returning scalar values from the graph is supported, in addition to Tensor
17
+ outputs. Symbolic shapes are captured and rank is specialized.
18
+ """
19
+ return x.shape[1] + 1
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py ADDED
@@ -0,0 +1,29 @@
1
+ from enum import Enum
2
+
3
+ import torch
4
+
5
+ from torch._export.db.case import export_case
6
+
7
+
8
+ class Animal(Enum):
9
+ COW = "moo"
10
+
11
+
12
+ @export_case(
13
+ example_inputs=(torch.ones(3, 2),),
14
+ )
15
+ class SpecializedAttribute(torch.nn.Module):
16
+ """
17
+ Model attributes are specialized.
18
+ """
19
+
20
+ def __init__(self):
21
+ super().__init__()
22
+ self.a = "moo"
23
+ self.b = 4
24
+
25
+ def forward(self, x):
26
+ if self.a == Animal.COW.value:
27
+ return x * x + self.b
28
+ else:
29
+ raise ValueError("bad")
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/static_if.py ADDED
@@ -0,0 +1,23 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2, 2),),
8
+ tags={"python.control-flow"},
9
+ )
10
+ class StaticIf(torch.nn.Module):
11
+ """
12
+ `if` statement with static predicate value should be traced through with the
13
+ taken branch.
14
+ """
15
+
16
+ def __init__(self):
17
+ super().__init__()
18
+
19
+ def forward(self, x):
20
+ if len(x.shape) == 3:
21
+ return x + torch.ones(1, 1, 1)
22
+
23
+ return x
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py ADDED
@@ -0,0 +1,16 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.randn(3, 2), "attr"),
8
+ tags={"python.builtin"},
9
+ support_level=SupportLevel.SUPPORTED,
10
+ )
11
+ def tensor_setattr(x, attr):
12
+ """
13
+ setattr() call onto tensors is not supported.
14
+ """
15
+ setattr(x, attr, torch.randn(3, 2))
16
+ return x + 4
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/torch_sym_min.py ADDED
@@ -0,0 +1,17 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"torch.operator"},
9
+ support_level=SupportLevel.NOT_SUPPORTED_YET,
10
+ )
11
+ class TorchSymMin(torch.nn.Module):
12
+ """
13
+ torch.sym_min operator is not supported in export.
14
+ """
15
+
16
+ def forward(self, x):
17
+ return x.sum() + torch.sym_min(x.size(0), 100)
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py ADDED
@@ -0,0 +1,31 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel, export_rewrite_case
4
+
5
+
6
+ class A:
7
+ @classmethod
8
+ def func(cls, x):
9
+ return 1 + x
10
+
11
+
12
+ @export_case(
13
+ example_inputs=(torch.ones(3, 4),),
14
+ tags={"python.builtin"},
15
+ support_level=SupportLevel.SUPPORTED,
16
+ )
17
+ def type_reflection_method(x):
18
+ """
19
+ type() calls on custom objects followed by method calls are not allowed
20
+ due to their overly dynamic nature.
21
+ """
22
+ a = A()
23
+ return type(a).func(x)
24
+
25
+
26
+ @export_rewrite_case(parent=type_reflection_method)
27
+ def type_reflection_method_rewrite(x):
28
+ """
29
+ Custom object class methods will be inlined.
30
+ """
31
+ return A.func(x)
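
Since the rewrite above is registered under its parent case, it can be retrieved through the example bank; a small illustrative sketch:

    from torch._export.db.examples import get_rewrite_cases
    from torch._export.db.examples.type_reflection_method import type_reflection_method

    # `type_reflection_method` is the parent ExportCase; its registered rewrites follow it.
    for rewrite in get_rewrite_cases(type_reflection_method):
        print(rewrite.name, "-", rewrite.description.strip())
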
env-llmeval/lib/python3.10/site-packages/torch/_export/db/examples/user_input_mutation.py ADDED
@@ -0,0 +1,18 @@
1
+ import torch
2
+
3
+ from torch._export.db.case import export_case, SupportLevel
4
+
5
+
6
+ @export_case(
7
+ example_inputs=(torch.ones(3, 2),),
8
+ tags={"torch.mutation"},
9
+ support_level=SupportLevel.SUPPORTED,
10
+ )
11
+ class UserInputMutation(torch.nn.Module):
12
+ """
13
+ Directly mutate user input in forward
14
+ """
15
+
16
+ def forward(self, x):
17
+ x.mul_(2)
18
+ return x.cos()
env-llmeval/lib/python3.10/site-packages/torch/_export/db/gen_example.py ADDED
@@ -0,0 +1,28 @@
1
+ import os
2
+ import sys
3
+
4
+ import torch._export.db.examples as examples
5
+
6
+ TEMPLATE = '''import torch
7
+
8
+ from torch._export.db.case import export_case
9
+
10
+
11
+ @export_case(
12
+ example_inputs=(torch.randn(3, 2),),
13
+ tags={{}},
14
+ )
15
+ def {case_name}(x):
16
+ """
17
+ """
18
+
19
+ return
20
+ '''
21
+
22
+ if __name__ == "__main__":
23
+ assert len(sys.argv) == 2
24
+ root_dir = examples.__name__.replace(".", "/")
25
+ assert os.path.exists(root_dir)
26
+ with open(os.path.join(root_dir, sys.argv[1] + ".py"), "w") as f:
27
+ print("Writing to", f.name, "...")
28
+ f.write(TEMPLATE.format(case_name=sys.argv[1]))
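
The script asserts that `torch/_export/db/examples` exists as a relative path, so it is meant to be run from the root of a source checkout; a hypothetical invocation that scaffolds a new example file from TEMPLATE:

    python -m torch._export.db.gen_example my_new_case
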
env-llmeval/lib/python3.10/site-packages/torch/_export/db/logging.py ADDED
@@ -0,0 +1,2 @@
1
+ def exportdb_error_message(case_name: str):
2
+ return ""
env-llmeval/lib/python3.10/site-packages/torch/_export/exported_program.py ADDED
@@ -0,0 +1,430 @@
1
+ import copy
2
+ from collections import defaultdict
3
+ import dataclasses
4
+ from typing import Dict, List, Optional, Tuple
5
+ import warnings
6
+
7
+ import sympy
8
+
9
+ import torch
10
+ import torch.fx
11
+
12
+ import torch.utils._pytree as pytree
13
+ from torch._subclasses.fake_tensor import FakeTensor
14
+ from torch.fx.experimental.symbolic_shapes import SymInt
15
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
16
+ from torch.utils._sympy.value_ranges import ValueRanges
17
+
18
+ from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
19
+ InputDim,
20
+ )
21
+
22
+
23
+ # TODO(ycao): This is added to avoid breaking existing code temporarily.
24
+ # Remove when migration is done.
25
+ from torch.export.graph_signature import (
26
+ ExportBackwardSignature,
27
+ ExportGraphSignature,
28
+ )
29
+
30
+ from torch.export.exported_program import (
31
+ ExportedProgram,
32
+ ModuleCallEntry,
33
+ ModuleCallSignature,
34
+ )
35
+
36
+ from .utils import _check_input_constraints_pre_hook
37
+
38
+
39
+ __all__ = [
40
+ "ExportBackwardSignature",
41
+ "ExportGraphSignature",
42
+ "ExportedProgram",
43
+ "ModuleCallEntry",
44
+ "ModuleCallSignature",
45
+ ]
46
+
47
+
48
+ # Information to maintain user calling/returning specs
49
+ @dataclasses.dataclass
50
+ class CallSpec:
51
+ in_spec: Optional[pytree.TreeSpec]
52
+ out_spec: Optional[pytree.TreeSpec]
53
+
54
+
55
+ def _unlift(gm, inp_pos_to_param_buffer_name, in_spec, out_spec, state_dict, tensor_constants, buffers_to_mutate):
56
+ count = 0
57
+ buffer_name_to_node = {}
58
+ # Step 1: make lifted params as get_attr
59
+ for node in gm.graph.nodes:
60
+ if node.op == "placeholder":
61
+ if count in inp_pos_to_param_buffer_name:
62
+ with gm.graph.inserting_after(node):
63
+ getattr_node = gm.graph.get_attr(
64
+ inp_pos_to_param_buffer_name[count]
65
+ )
66
+ node.replace_all_uses_with(getattr_node)
67
+ metadata = node.meta
68
+ gm.graph.erase_node(node)
69
+ getattr_node.meta = metadata
70
+ buffer_name_to_node[inp_pos_to_param_buffer_name[count]] = getattr_node
71
+
72
+ count += 1
73
+ # Step 2: Find the all the buffers that were mutated and update them
74
+ if node.op == "output":
75
+ user_output_nodes = []
76
+ # In the case that the same node is returned multiple times,
77
+ # node.all_input_nodes will only iterate that node once
78
+ for return_node in pytree.tree_flatten(node.args)[0]:
79
+ return_node_name = return_node.name
80
+ # we found a param/buffer mutation
81
+ if return_node_name in buffers_to_mutate:
82
+ # TODO Fix situation here to replace dot with underscore...
83
+ buffer_node_name = buffers_to_mutate[return_node_name].replace('.', '_')
84
+ assert buffer_node_name in buffer_name_to_node
85
+ buffer_node = buffer_name_to_node[buffer_node_name]
86
+ with gm.graph.inserting_before(node):
87
+ gm.graph.call_function(
88
+ torch.ops.aten.copy_.default, (buffer_node, return_node)
89
+ )
90
+ else:
91
+ user_output_nodes.append(return_node)
92
+ with gm.graph.inserting_before(node):
93
+ # Only return user outputs
94
+ new_output = gm.graph.output(tuple(user_output_nodes))
95
+ node.replace_all_uses_with(new_output)
96
+ gm.graph.erase_node(node)
97
+
98
+ # Step 3: Fix the input/output of the graph now that we deleted
99
+ # some args.
100
+ gm.graph.lint()
101
+
102
+ if (
103
+ in_spec.type == tuple and
104
+ len(in_spec.children_specs) == 2 and
105
+ in_spec.children_specs[0].type == tuple and
106
+ in_spec.children_specs[1].type == dict
107
+ ):
108
+ # if in_spec contains the args (tuple) and kwargs (dict)
109
+
110
+ num_args = (
111
+ len(in_spec.children_specs[0].children_specs) +
112
+ len(in_spec.children_specs[1].children_specs)
113
+ )
114
+ else:
115
+ num_args = len(in_spec.children_specs)
116
+
117
+ names = [f"arg_{i}" for i in range(num_args)]
118
+
119
+ gm.graph._codegen = _PyTreeCodeGen(
120
+ _PyTreeInfo(
121
+ names,
122
+ in_spec,
123
+ out_spec,
124
+ )
125
+ )
126
+ gm.recompile()
127
+
128
+ # Step 4: Find state references in HigherOrderOps and recursively
129
+ # fix them.
130
+ for node in gm.graph.nodes:
131
+ if node.op == "call_function" and node.target == torch.ops.cond:
132
+ pred, true_graph, false_graph, operands = node.args
133
+ true_gm = getattr(gm, true_graph.name)
134
+ false_gm = getattr(gm, false_graph.name)
135
+ inp_pos_to_param_buffer_name_for_submod = {}
136
+ real_operands = []
137
+ for ix, operand in enumerate(operands):
138
+ if operand.target in inp_pos_to_param_buffer_name.values():
139
+ inp_pos_to_param_buffer_name_for_submod[ix] = operand.target
140
+ if operand.target in state_dict:
141
+ value = state_dict[operand.target]
142
+ elif operand.target in tensor_constants:
143
+ value = tensor_constants[operand.target]
144
+ else:
145
+ raise RuntimeError("Unable to find value for ", operand.target)
146
+ true_gm.register_buffer(operand.target, value)
147
+ false_gm.register_buffer(operand.target, value)
148
+ else:
149
+ real_operands.append(operand)
150
+ node.args = (pred, true_graph, false_graph, real_operands)
151
+
152
+ _, in_spec = pytree.tree_flatten(real_operands)
153
+
154
+ _unlift(
155
+ true_gm,
156
+ inp_pos_to_param_buffer_name_for_submod,
157
+ in_spec,
158
+ None,
159
+ state_dict,
160
+ tensor_constants,
161
+ buffers_to_mutate,
162
+ )
163
+ _unlift(
164
+ false_gm,
165
+ inp_pos_to_param_buffer_name_for_submod,
166
+ in_spec,
167
+ None,
168
+ state_dict,
169
+ tensor_constants,
170
+ buffers_to_mutate,
171
+ )
172
+ if node.op == "call_function" and node.target.__name__ == "map_impl":
173
+ body_graph, num_mapped, *operands = node.args
174
+ body_gm = getattr(gm, body_graph.name)
175
+ inp_pos_to_buffer_name_for_submod = {}
176
+ real_operands = []
177
+ # TODO Fix situation here to replace dot with underscore...
178
+ state_dict_for_lookup = {
179
+ key.replace(".", "_"): value
180
+ for key, value in state_dict.items()
181
+ }
182
+ for ix, operand in enumerate(operands):
183
+ if operand.target in inp_pos_to_param_buffer_name.values():
184
+ inp_pos_to_buffer_name_for_submod[ix] = operand.target
185
+ if operand.target in state_dict_for_lookup:
186
+ value = state_dict_for_lookup[operand.target]
187
+ elif operand.target in tensor_constants:
188
+ value = tensor_constants[operand.target]
189
+ else:
190
+ raise RuntimeError(f"Unable to find value for {operand.target}")
191
+ body_gm.register_buffer(operand.target, value)
192
+ else:
193
+ real_operands.append(operand)
194
+ node.args = (body_graph, num_mapped, *real_operands)
195
+
196
+ _, in_spec = pytree.tree_flatten(real_operands)
197
+
198
+ _unlift(
199
+ body_gm,
200
+ inp_pos_to_buffer_name_for_submod,
201
+ in_spec,
202
+ None,
203
+ state_dict,
204
+ tensor_constants,
205
+ buffers_to_mutate,
206
+ )
207
+ gm.graph.lint()
208
+ gm.graph.eliminate_dead_code()
209
+ gm.recompile()
210
+ return gm
211
+
212
+ def _construct_inp_pos_to_param_buffer_name(new_gm, graph_signature, state_dict, tensor_constants=None):
213
+ # TODO Fix the period in params/buffers names later
214
+ # maybe a pass to replace graph signature with fixed names
215
+ param_buffer_name_to_corrected_name = {}
216
+
217
+ for name, value in state_dict.items():
218
+ if name in graph_signature.buffers:
219
+ if "." in name:
220
+ new_gm.register_buffer(name.replace(".", "_"), value)
221
+ param_buffer_name_to_corrected_name[name] = name.replace(".", "_")
222
+ else:
223
+ new_gm.register_buffer(name, value)
224
+ if name in graph_signature.parameters:
225
+ if "." in name:
226
+ new_gm.register_parameter(name.replace(".", "_"), value)
227
+ param_buffer_name_to_corrected_name[name] = name.replace(".", "_")
228
+ else:
229
+ new_gm.register_parameter(name, value)
230
+
231
+ if tensor_constants is not None and len(tensor_constants) > 0:
232
+ assert hasattr(graph_signature, "lifted_tensor_constants")
233
+ for name, value in tensor_constants.items():
234
+ if name in graph_signature.lifted_tensor_constants:
235
+ new_gm.register_buffer(name, value)
236
+ param_buffer_name_to_corrected_name[name] = name
237
+
238
+ count = 0
239
+ inp_pos_to_param_buffer_name = {}
240
+ for node in new_gm.graph.nodes:
241
+ if node.op == "placeholder":
242
+ if node.name in graph_signature.inputs_to_buffers:
243
+ buffer_name = graph_signature.inputs_to_buffers[node.name]
244
+ if buffer_name in param_buffer_name_to_corrected_name:
245
+ inp_pos_to_param_buffer_name[
246
+ count
247
+ ] = param_buffer_name_to_corrected_name[buffer_name]
248
+ else:
249
+ inp_pos_to_param_buffer_name[count] = buffer_name
250
+ if node.name in graph_signature.inputs_to_parameters:
251
+ param_name = graph_signature.inputs_to_parameters[node.name]
252
+ if param_name in param_buffer_name_to_corrected_name:
253
+ inp_pos_to_param_buffer_name[
254
+ count
255
+ ] = param_buffer_name_to_corrected_name[param_name]
256
+ else:
257
+ inp_pos_to_param_buffer_name[count] = param_name
258
+ if hasattr(graph_signature, "inputs_to_lifted_tensor_constants"):
259
+ if node.name in graph_signature.inputs_to_lifted_tensor_constants:
260
+ inp_pos_to_param_buffer_name[
261
+ count
262
+ ] = graph_signature.inputs_to_lifted_tensor_constants[node.name]
263
+ count += 1
264
+
265
+ return inp_pos_to_param_buffer_name
266
+
267
+
268
+ class _StatefulGraphModuleFactory(type):
269
+ """
270
+ Metaclass that ensures a private constructor for _StatefulGraphModule
271
+ """
272
+
273
+ def __call__(cls, *args, **kwargs):
274
+ raise TypeError(
275
+ f"{cls.__module__}.{cls.__qualname__} has no public constructor. "
276
+ )
277
+
278
+ def _create(cls, root, graph, range_constraints=None, equality_constraints=None):
279
+ return super().__call__(
280
+ root,
281
+ graph,
282
+ range_constraints=range_constraints,
283
+ equality_constraints=equality_constraints
284
+ )
285
+
286
+
287
+ class _StatefulGraphModule(torch.fx.GraphModule, metaclass=_StatefulGraphModuleFactory):
288
+ def __init__(self, root, graph, range_constraints=None, equality_constraints=None):
289
+ super().__init__(root, graph)
290
+ self.range_constraints = range_constraints or []
291
+ self.equality_constraints = equality_constraints or []
292
+
293
+
294
+ def _create_stateful_graph_module(plain_graph_module: torch.fx.GraphModule, range_constraints, equality_constraints):
295
+ stateful_gm = _StatefulGraphModule._create(
296
+ plain_graph_module,
297
+ plain_graph_module.graph,
298
+ range_constraints=range_constraints,
299
+ equality_constraints=equality_constraints
300
+ )
301
+ stateful_gm.register_forward_pre_hook(_check_input_constraints_pre_hook, with_kwargs=True)
302
+ return stateful_gm
303
+
304
+
305
+ def unlift_exported_program_lifted_states(ep: torch.export.ExportedProgram) -> torch.nn.Module:
306
+ new_gm = copy.deepcopy(ep.graph_module)
307
+ inp_pos_to_param_buffer_name = _construct_inp_pos_to_param_buffer_name(
308
+ new_gm, ep.graph_signature, ep.state_dict, ep.tensor_constants
309
+ )
310
+ new_gm = _unlift(
311
+ new_gm,
312
+ inp_pos_to_param_buffer_name,
313
+ ep.call_spec.in_spec,
314
+ ep.call_spec.out_spec,
315
+ ep.state_dict,
316
+ ep.tensor_constants,
317
+ ep.graph_signature.buffers_to_mutate,
318
+ )
319
+ unlift_gm = _create_stateful_graph_module(new_gm, ep.range_constraints, ep.equality_constraints)
320
+ unlift_gm.meta.update(ep.graph_module.meta)
321
+ return unlift_gm
322
+
323
+
324
+ def _create_graph_module_for_export(root, graph):
325
+ try:
326
+ gm = torch.fx.GraphModule(root, graph)
327
+ except SyntaxError:
328
+ # If custom objects stored in memory are being used in the graph,
329
+ # the generated python code will result in a syntax error on the custom
330
+ # object, since it is unable to parse the in-memory object. However
331
+ # we can still run the graph eagerly through torch.fx.Interpreter,
332
+ # so we will bypass this error.
333
+ warnings.warn(
334
+ "Unable to execute the generated python source code from "
335
+ "the graph. The graph module will no longer be directly callable, "
336
+ "but you can still run the ExportedProgram, and if needed, you can "
337
+ "run the graph module eagerly using torch.fx.Interpreter."
338
+ )
339
+ gm = torch.fx.GraphModule(root, torch.fx.Graph())
340
+ gm._graph = graph
341
+
342
+ return gm
343
+
344
+
345
+ def _process_constraints(
346
+ graph_module: torch.fx.GraphModule,
347
+ num_lifted_params_buffers: int,
348
+ example_inputs: List[torch.Tensor],
349
+ ) -> Tuple[Dict[sympy.Symbol, ValueRanges], List[Tuple[InputDim, InputDim]]]:
350
+ """
351
+ Process the constraints stored in the graph module to return something more readable.
352
+
353
+ Args:
354
+ graph_module (torch.fx.GraphModule): GraphModule returned from
355
+ dynamo.export, which contains the "input_shape_constraints" and
356
+ "inline_constraints" metadata
357
+
358
+ example_inputs: Flattened list of example inputs used to export the graph module
359
+
360
+ Returns:
361
+ range_constraints (Dict[sympy.Symbol, ValueRanges]): Mapping of
362
+ symbols (from SymInts) appearing in the fake tensors in
363
+ node.meta["val"] to their range constraints, which are a tuple
364
+ containing (lower, upper) constraints.
365
+
366
+ equality_constraints (List[Tuple[InputDim, InputDim]]): List of tuples
367
+ of (node, dim) to mark that these dimensions are equal.
368
+ """
369
+ input_shape_constraints = graph_module.meta.get("input_shape_constraints", [])
370
+ inline_constraints = graph_module.meta.get("inline_constraints", [])
371
+
372
+ # Create dict mapping tensor_id to node names
373
+ tensor_id_to_nodes: Dict[int, List[str]] = defaultdict(list)
374
+ # Create dict mapping placeholder node names to their nodes
375
+ placeholder_nodes: Dict[str, torch.fx.Node] = {}
376
+ for i, node in enumerate(graph_module.graph.nodes):
377
+ if node.op != "placeholder":
378
+ # All placeholder nodes should be together in the beginning of the
379
+ # graph
380
+ break
381
+ if i >= num_lifted_params_buffers:
382
+ example_input = example_inputs[i - num_lifted_params_buffers]
383
+ tensor_id_to_nodes[id(example_input)].append(node.name)
384
+ placeholder_nodes[node.name] = node
385
+
386
+ # Create list of (node name, dim) tuples to mark that they are equal
387
+ equality_constraints: List[Tuple[InputDim, InputDim]] = []
388
+ # Create dict mapping (node name, dim) a list of range (lower, upper)
389
+ # constraints
390
+ multi_range_constraints: Dict[InputDim, List[ValueRanges]] = defaultdict(list)
391
+ for constraint in input_shape_constraints:
392
+ for node in tensor_id_to_nodes[constraint["t_id"]]:
393
+ node_dim = InputDim(node, constraint["dim"])
394
+
395
+ # Accumulate range constraints
396
+ multi_range_constraints[node_dim].append(
397
+ ValueRanges(constraint["min"], constraint["max"])
398
+ )
399
+
400
+ # Accumulate equality constraints
401
+ if shared := constraint.get("shared", None):
402
+ for other_node in tensor_id_to_nodes[shared["t_id"]]:
403
+ other_node_dim = InputDim(other_node, shared["dim"])
404
+ equality_constraints.append((node_dim, other_node_dim))
405
+
406
+ # Create dict mapping symbol to a singular range (lower, upper)
407
+ range_constraints: Dict[sympy.Symbol, ValueRanges] = {}
408
+
409
+ # Add inline constraints to range_constraints
410
+ range_constraints = {symbol: inline_constraints[symbol] for symbol in inline_constraints}
411
+
412
+ # Add input range constraints to range_constraints
413
+ for input_dim, multi_range_constraint in multi_range_constraints.items(): # type: ignore[assignment]
414
+ # Simplify the range constraints into a single range constraint
415
+ # Ex. ranges [2, 10] and [3, 11] would get merged to [3, 10]
416
+ min_vals = [rc.lower for rc in multi_range_constraint]
417
+ max_vals = [rc.upper for rc in multi_range_constraint]
418
+ min_val = max(min_vals) # type: ignore[type-var]
419
+ max_val = min(max_vals) # type: ignore[type-var]
420
+ assert min_val <= max_val # type: ignore[operator]
421
+
422
+ # Add input node range constraints
423
+ val = placeholder_nodes[input_dim.input_name].meta["val"]
424
+ assert isinstance(val, FakeTensor)
425
+ symint = val.shape[input_dim.dim]
426
+ assert isinstance(symint, SymInt), f"Expected SymInt but got {symint}: {type(symint)}"
427
+ symbol = symint.node._expr
428
+ range_constraints[symbol] = ValueRanges(min_val, max_val)
429
+
430
+ return range_constraints, equality_constraints
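
A heavily hedged sketch of the intended entry point above, `unlift_exported_program_lifted_states`, which re-attaches lifted parameters and buffers as attributes on a stateful graph module. Whether this exact flow round-trips depends on the surrounding torch.export version, so treat it as illustrative only:

    import torch
    from torch._export.exported_program import unlift_exported_program_lifted_states

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.linear(x)

    ep = torch.export.export(M(), (torch.randn(3, 4),))
    gm = unlift_exported_program_lifted_states(ep)  # parameters become get_attr nodes again
    print(gm(torch.randn(3, 4)).shape)
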
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (283 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc ADDED
Binary file (9.83 kB).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc ADDED
Binary file (2.32 kB).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc ADDED
Binary file (3.39 kB).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constant_tensor_pass.cpython-310.pyc ADDED
Binary file (2.29 kB).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc ADDED
Binary file (1.07 kB).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc ADDED
Binary file (786 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc ADDED
Binary file (2.44 kB).