applied-ai-018 committed
Commit 7e985af · verified · Parent(s): 3afcb91

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/4.attention.dense.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/fp32.pt +3 -0
  8. venv/lib/python3.10/site-packages/torch/_export/__init__.py +406 -0
  9. venv/lib/python3.10/site-packages/torch/_export/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_export/__pycache__/exported_program.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_export/__pycache__/non_strict_utils.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_export/__pycache__/utils.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_export/__pycache__/wrappers.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_export/db/__init__.py +5 -0
  18. venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_export/db/case.py +188 -0
  23. venv/lib/python3.10/site-packages/torch/_export/db/gen_example.py +28 -0
  24. venv/lib/python3.10/site-packages/torch/_export/db/logging.py +2 -0
  25. venv/lib/python3.10/site-packages/torch/_export/error.py +56 -0
  26. venv/lib/python3.10/site-packages/torch/_export/exported_program.py +50 -0
  27. venv/lib/python3.10/site-packages/torch/_export/non_strict_utils.py +258 -0
  28. venv/lib/python3.10/site-packages/torch/_export/pass_base.py +435 -0
  29. venv/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py +0 -0
  30. venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py +32 -0
  34. venv/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py +41 -0
  35. venv/lib/python3.10/site-packages/torch/_export/passes/__init__.py +1 -0
  36. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py +231 -0
  46. venv/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py +66 -0
  47. venv/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py +94 -0
  48. venv/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py +248 -0
  49. venv/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py +26 -0
  50. venv/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py +141 -0
ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:956ca64ef4a7075c8b0ef9f8de8307de62d20b7f2b8d08de58f7fba9c0992e33
+ size 50332828
ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e544829b3c44de36ef1d5a4ca8d2a84caa18e54c23fe7a907a09d2089d90300f
+ size 50332843
ckpts/universal/global_step120/zero/24.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:452cd4dc39ef7c537e714dadd022b0d9fcaf927534119dca7e5805630e2e771e
+ size 50332749
ckpts/universal/global_step120/zero/4.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35e18b8ce54fe43eb1d7911b9a6d4ef4c4657e3289bde46259e470d849a1953f
+ size 16778411
ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70553f24ab2bc4cf7401a6764ff87b9cb83fe747c6d56b3433700b0fc8435170
+ size 50332828
ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd393b8e3bb757baeee963a4c3cc73afff5e1c92237b64638151eab8d9179f13
+ size 50332843
ckpts/universal/global_step120/zero/9.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71d21345e9a10694321bb7df7472028fa55661344314a260e140cec783cec684
+ size 50332749
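Note: the .pt entries above are Git LFS pointer files, not the tensors themselves; each records only the spec version, a sha256 object id, and the payload size in bytes (exp_avg and exp_avg_sq are Adam's first and second moments, fp32 the master weights). A minimal sketch of parsing such a pointer — the parse_lfs_pointer helper is hypothetical, not part of this commit:

    def parse_lfs_pointer(text: str) -> dict:
        # Split the "key value" lines of a Git LFS pointer file.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "sha256": fields["oid"].removeprefix("sha256:"),
            "size_bytes": int(fields["size"]),
        }

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:956ca64ef4a7075c8b0ef9f8de8307de62d20b7f2b8d08de58f7fba9c0992e33\n"
        "size 50332828"
    )
    print(parse_lfs_pointer(pointer))  # {'version': ..., 'sha256': '956ca6...', 'size_bytes': 50332828}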
venv/lib/python3.10/site-packages/torch/_export/__init__.py ADDED
@@ -0,0 +1,406 @@
+ import copy
+ import dataclasses
+ import functools
+ import io
+ import json
+ import os
+ import re
+ import sys
+ import types
+ import warnings
+ import weakref
+ import zipfile
+ from collections import OrderedDict
+ from contextlib import contextmanager
+
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+ from unittest.mock import patch
+
+ import sympy
+
+ import torch
+ import torch._dynamo
+ import torch.fx
+ import torch.utils._pytree as pytree
+
+ from torch._decomp import core_aten_decompositions, get_decompositions
+ from torch._dispatch.python import enable_python_dispatcher
+ from torch._dynamo.exc import UserError, UserErrorType
+ from torch._dynamo.source import ConstantSource
+ from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass
+ from torch._functorch.aot_autograd import aot_export_module, GraphSignature
+ from torch._functorch.eager_transforms import functionalize
+ from torch._guards import detect_fake_mode
+ from torch._inductor import config
+ from torch._ops import OpOverload
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
+ from torch._subclasses.functional_tensor import FunctionalTensor
+ from torch._utils_internal import log_export_usage
+ from torch.export._tree_utils import reorder_kwargs
+ from torch.export._unlift import _create_stateful_graph_module
+ from torch.export.dynamic_shapes import (
+     _process_constraints,
+     _process_dynamic_shapes,
+     Constraint,
+     dims,
+     dynamic_dim,
+ )
+ from torch.export.exported_program import (
+     _disable_prexisiting_fake_mode,
+     ExportedProgram,
+     ModuleCallEntry,
+     ModuleCallSignature,
+ )
+ from torch.export.graph_signature import (
+     _sig_to_specs,
+     ArgumentSpec,
+     ConstantArgument,
+     ExportGraphSignature,
+     InputKind,
+     InputSpec,
+     OutputKind,
+     OutputSpec,
+     SymIntArgument,
+     TensorArgument,
+ )
+ from torch.fx import traceback as fx_traceback
+ from torch.fx._compatibility import compatibility
+ from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
+ from torch.fx.experimental.symbolic_shapes import (
+     ConstraintViolationError,
+     GuardOnDataDependentSymNode,
+     ShapeEnv,
+     StrictMinMaxConstraint,
+ )
+ from torch.fx.graph import _PyTreeCodeGen, _PyTreeInfo
+ from torch.utils._sympy.value_ranges import ValueRangeError, ValueRanges
+
+ from .passes.add_runtime_assertions_for_constraints_pass import (
+     _AddRuntimeAssertionsForInlineConstraintsPass,
+ )
+ from .wrappers import _wrap_submodules
+
+
+ @dataclasses.dataclass
+ class ExportDynamoConfig:
+     """
+     Manage Export-specific configurations of Dynamo.
+     """
+     allow_rnn: bool = True
+
+
+ @compatibility(is_backward_compatible=False)
+ def capture_pre_autograd_graph(
+     f: torch.nn.Module,
+     args: Tuple[Any],
+     kwargs: Optional[Dict[str, Any]] = None,
+     dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any]]] = None,
+ ) -> torch.nn.Module:
+     """
+     A helper function that is intended to trace a module before any pre-autograd
+     decomposition is run. The produced module will be "non-functional" and
+     composed of aten operators. Later this API will be deleted in favor of the more
+     general torch.export API.
+
+     Args:
+         f: nn.Module to be traced
+
+         args: example positional inputs.
+
+         kwargs: optional example keyword inputs.
+
+         dynamic_shapes: Should either be:
+             1) a dict from argument names of ``f`` to their dynamic shape specifications,
+             2) a tuple that specifies dynamic shape specifications for each input in original order.
+             If you are specifying dynamism on keyword args, you will need to pass them in the order that
+             is defined in the original function signature.
+
+             The dynamic shape of a tensor argument can be specified as either
+             (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
+             not required to include static dimension indices in this dict, but when they are,
+             they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
+             where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
+             are denoted by None. Arguments that are dicts or tuples / lists of tensors are
+             recursively specified by using mappings or sequences of contained specifications.
+
+     Returns:
+         An nn.Module containing the traced method.
+
+     """
+     from torch.export._trace import _convert_input_to_fake, DEFAULT_EXPORT_DYNAMO_CONFIG
+     from torch.export.dynamic_shapes import _process_dynamic_shapes
+
+     log_export_usage(event="export.private_api", flags={"capture_pre_autograd_graph"})
+
+     assert isinstance(f, torch.nn.Module), "Expected an nn.Module instance."
+
+     if kwargs is None:
+         kwargs = {}
+
+     constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes)
+
+     # Do not decompose dropout for exported models, because in eval mode the dropout
+     # op disappears from the graph, which makes it difficult to switch to train mode.
+     # See https://github.com/pytorch/pytorch/pull/115258#issuecomment-1900755832.
+     decomp_table = {
+         op: op.decompose
+         for op in FunctionalTensor.maybe_aliasing_or_mutating_ops
+         if op != torch.ops.aten.dropout.default
+     }
+     with torch._dynamo.config.patch(dataclasses.asdict(DEFAULT_EXPORT_DYNAMO_CONFIG)):
+         m = torch._dynamo.export(
+             f,
+             constraints=constraints,
+             assume_static_by_default=True,
+             tracing_mode="symbolic",
+             decomposition_table=decomp_table,
+             pre_dispatch=True,
+             aten_graph=True,
+             _log_export_usage=False,
+         )(
+             *args,
+             **kwargs,
+         )[0]
+
+         _, _, _, fake_mode = _convert_input_to_fake(m, args, kwargs)
+
+         m.meta["inline_constraints"] = {
+             k: v
+             for k, v in fake_mode.shape_env.var_to_range.items()
+             if re.match(r"^[if]\d+$", str(k))
+         }
+
+         if isinstance(f, torch.nn.Module):
+             from torch.export._trace import _restore_state_dict
+             _restore_state_dict(f, m)
+
+         flat_args, _ = pytree.tree_flatten((args, kwargs or {}))
+         range_constraints = _process_constraints(fake_mode, m, 0, flat_args)
+
+         module = _create_stateful_graph_module(
+             m,
+             range_constraints=range_constraints,
+         )
+
+     error_message = \
+         """
+         Calling train() or eval() is not supported for exported models.
+         Alternatively, you may override these methods to do custom user behavior as follows:
+
+             def _my_train(self, mode: bool = True):
+                 ...
+
+             def _my_eval(self):
+                 ...
+
+             model.train = types.MethodType(_my_train, model)
+             model.eval = types.MethodType(_my_eval, model)
+         """
+
+     def _train(self, mode: bool = True):
+         raise NotImplementedError(error_message)
+
+     def _eval(self, mode: bool = True):
+         raise NotImplementedError(error_message)
+
+     module.train = types.MethodType(_train, module)  # type: ignore[method-assign]
+     module.eval = types.MethodType(_eval, module)  # type: ignore[method-assign]
+     return module
+
+
+ def save(
+     ep: ExportedProgram,
+     f: Union[str, os.PathLike, io.BytesIO],
+     *,
+     extra_files: Optional[Dict[str, Any]] = None,
+     opset_version: Optional[Dict[str, int]] = None,
+ ) -> None:
+     if not isinstance(ep, ExportedProgram):
+         raise TypeError(f"save() expects an ExportedProgram but got {type(ep)}")
+
+     from .serde.serialize import serialize, SerializedArtifact
+     from .serde.schema import SCHEMA_VERSION
+     artifact: SerializedArtifact = serialize(ep, opset_version)
+
+     if isinstance(f, (str, os.PathLike)):
+         f = os.fspath(f)
+
+     with zipfile.ZipFile(f, 'w') as zipf:
+         # Save every field of the SerializedArtifact to a file
+         assert isinstance(artifact.exported_program, bytes)
+         zipf.writestr("serialized_exported_program.json", artifact.exported_program)
+         zipf.writestr("serialized_state_dict.pt", artifact.state_dict)
+         zipf.writestr("serialized_constants.pt", artifact.constants)
+
+         zipf.writestr('version', ".".join(map(str, SCHEMA_VERSION)))
+
+         # Add extra files if provided
+         if extra_files:
+             for extra_file_name, content in extra_files.items():
+                 encoded_content = content.encode('utf-8')
+                 zipf.writestr(f"extra_files/{extra_file_name}", encoded_content)
+
+
+ def load(
+     f: Union[str, os.PathLike, io.BytesIO],
+     *,
+     extra_files: Optional[Dict[str, Any]] = None,
+     expected_opset_version: Optional[Dict[str, int]] = None,
+ ) -> ExportedProgram:
+     if isinstance(f, (str, os.PathLike)):
+         f = os.fspath(f)
+
+     extra_files = extra_files or {}
+
+     with zipfile.ZipFile(f, 'r') as zipf:
+         # Check the version
+         version = zipf.read('version').decode().split('.')
+         from .serde.schema import SCHEMA_VERSION
+
+         assert len(version) == len(SCHEMA_VERSION)
+         if version[0] != str(SCHEMA_VERSION[0]):
+             raise RuntimeError(
+                 f"Serialized version {version} does not match our current "
+                 f"schema version {SCHEMA_VERSION}."
+             )
+
+         from .serde.serialize import deserialize, SerializedArtifact
+
+         # Load serialized_ep and serialized_state_dict from the zip file
+
+         serialized_exported_program: Optional[bytes] = None
+         serialized_state_dict: Optional[bytes] = None
+         serialized_constants: Optional[bytes] = None
+
+         for file_info in zipf.infolist():
+             file_content = zipf.read(file_info.filename)
+
+             if file_info.filename == "serialized_exported_program.json":
+                 serialized_exported_program = file_content
+             elif file_info.filename == "serialized_state_dict.json":
+                 warnings.warn("This version of file is deprecated")
+                 serialized_state_dict = file_content
+             elif file_info.filename == "serialized_constants.json":
+                 warnings.warn("This version of file is deprecated")
+                 serialized_constants = file_content
+             elif file_info.filename == "serialized_state_dict.pt":
+                 serialized_state_dict = file_content
+             elif file_info.filename == "serialized_constants.pt":
+                 serialized_constants = file_content
+             elif file_info.filename.startswith("extra_files"):
+                 filename = file_info.filename.split("/", 1)[1]
+                 extra_files[filename] = file_content.decode('utf-8')
+
+         assert serialized_exported_program is not None
+         assert serialized_state_dict is not None
+         assert serialized_constants is not None
+         artifact: SerializedArtifact = SerializedArtifact(
+             serialized_exported_program,
+             serialized_state_dict,
+             serialized_constants,
+         )
+
+         # Deserialize ExportedProgram
+         ep = deserialize(artifact, expected_opset_version)
+
+         return ep
+
+
+ def aot_compile(
+     f: Callable,
+     args: Tuple[Any],
+     kwargs: Optional[Dict[str, Any]] = None,
+     *,
+     dynamic_shapes: Optional[Dict[str, Any]] = None,
+     options: Optional[Dict[str, Any]] = None,
+     remove_runtime_assertions: bool = False,
+     disable_constraint_solver: bool = False,
+ ) -> str:
+     """
+     Note: this function is not stable yet
+
+     Traces either an nn.Module's forward function or just a callable with PyTorch
+     operations inside, generates executable cpp code from the program, and returns
+     the path to the generated shared library
+
+     Args:
+         f: the `nn.Module` or callable to trace.
+
+         args: example positional inputs.
+
+         kwargs: optional example keyword inputs.
+
+         dynamic_shapes: Should either be:
+             1) a dict from argument names of ``f`` to their dynamic shape specifications,
+             2) a tuple that specifies dynamic shape specifications for each input in original order.
+             If you are specifying dynamism on keyword args, you will need to pass them in the order that
+             is defined in the original function signature.
+
+             The dynamic shape of a tensor argument can be specified as either
+             (1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
+             not required to include static dimension indices in this dict, but when they are,
+             they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
+             where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
+             are denoted by None. Arguments that are dicts or tuples / lists of tensors are
+             recursively specified by using mappings or sequences of contained specifications.
+
+         options: A dictionary of options to control inductor
+
+         disable_constraint_solver: Whether the dim constraint solver must be disabled.
+
+     Returns:
+         Path to the generated shared library
+     """
+     from torch.export._trace import _export_to_torch_ir
+     from torch._inductor.decomposition import select_decomp_table
+
+     constraints = _process_dynamic_shapes(f, args, kwargs, dynamic_shapes)
+
+     if config.is_predispatch:
+         gm = torch.export._trace._export(f, args, kwargs, constraints, pre_dispatch=True).module()
+     else:
+         # We want to export to Torch IR here to utilize the pre_grad passes in
+         # inductor, which run on Torch IR.
+         gm = _export_to_torch_ir(
+             f,
+             args,
+             kwargs,
+             constraints,
+             disable_constraint_solver=disable_constraint_solver,
+             # Disabling this flag, because instead we can rely on the mapping
+             # dynamo_flat_name_to_original_fqn which is coming from Dynamo.
+             restore_fqn=False,
+         )
+     flat_example_inputs = pytree.arg_tree_leaves(*args, **(kwargs or {}))
+
+     with torch.no_grad():
+         so_path = torch._inductor.aot_compile(gm, flat_example_inputs, options)  # type: ignore[arg-type]
+
+     return so_path
+
+
+ def aot_load(so_path: str, device: str) -> Callable:
+     """
+     Loads a shared library generated by aot_compile and returns a callable
+
+     Args:
+         so_path: Path to the shared library
+
+     Returns:
+         A callable
+     """
+     if device == "cpu":
+         runner = torch._C._aoti.AOTIModelContainerRunnerCpu(so_path, 1)  # type: ignore[call-arg]
+     elif device == "cuda" or device.startswith("cuda:"):
+         runner = torch._C._aoti.AOTIModelContainerRunnerCuda(so_path, 1, device)  # type: ignore[assignment, call-arg]
+     else:
+         raise RuntimeError("Unsupported device " + device)
+
+     def optimized(*args, **kwargs):
+         call_spec = runner.get_call_spec()  # type: ignore[attr-defined]
+         in_spec = pytree.treespec_loads(call_spec[0])
+         out_spec = pytree.treespec_loads(call_spec[1])
+         flat_inputs = pytree.tree_flatten((args, reorder_kwargs(kwargs, in_spec)))[0]
+         flat_outputs = runner.run(flat_inputs)  # type: ignore[attr-defined]
+         return pytree.tree_unflatten(flat_outputs, out_spec)
+
+     return optimized
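For context on the save/load pair added above: save serializes an ExportedProgram, its state dict, and its constants into a zip archive, and load reverses it. A minimal round-trip sketch, assuming a PyTorch build matching this commit (the Mod module and inputs are illustrative):

    import io

    import torch
    from torch._export import load, save

    class Mod(torch.nn.Module):
        def forward(self, x):
            return x.sin() + x.cos()

    ep = torch.export.export(Mod(), (torch.randn(4, 4),))  # produce an ExportedProgram
    buf = io.BytesIO()
    save(ep, buf, extra_files={"note.txt": "illustrative metadata"})

    buf.seek(0)
    extra: dict = {}  # load() fills this from the archive's extra_files/ entries
    ep2 = load(buf, extra_files=extra)

aot_compile/aot_load follow a different path: aot_compile returns the filesystem path of an inductor-generated shared library, which aot_load wraps back into a Python callable.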
venv/lib/python3.10/site-packages/torch/_export/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (13.4 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/error.cpython-310.pyc ADDED
Binary file (2.08 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/exported_program.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/non_strict_utils.cpython-310.pyc ADDED
Binary file (6.71 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/pass_base.cpython-310.pyc ADDED
Binary file (14.8 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/utils.cpython-310.pyc ADDED
Binary file (11.4 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/verifier.cpython-310.pyc ADDED
Binary file (12.5 kB).
 
venv/lib/python3.10/site-packages/torch/_export/__pycache__/wrappers.cpython-310.pyc ADDED
Binary file (4.27 kB).
 
venv/lib/python3.10/site-packages/torch/_export/db/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/case.cpython-310.pyc ADDED
Binary file (5.45 kB).
 
venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/gen_example.cpython-310.pyc ADDED
Binary file (833 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/db/__pycache__/logging.cpython-310.pyc ADDED
Binary file (321 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/db/case.py ADDED
@@ -0,0 +1,188 @@
+ import inspect
+ import re
+ import string
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
+ from types import ModuleType
+
+ import torch
+
+ _TAGS: Dict[str, Dict[str, Any]] = {
+     "torch": {
+         "cond": {},
+         "dynamic-shape": {},
+         "escape-hatch": {},
+         "map": {},
+         "dynamic-value": {},
+         "operator": {},
+         "mutation": {},
+     },
+     "python": {
+         "assert": {},
+         "builtin": {},
+         "closure": {},
+         "context-manager": {},
+         "control-flow": {},
+         "data-structure": {},
+         "standard-library": {},
+         "object-model": {},
+     },
+ }
+
+
+ class SupportLevel(Enum):
+     """
+     Indicates at what stage the feature
+     used in the example is handled in export.
+     """
+
+     SUPPORTED = 1
+     NOT_SUPPORTED_YET = 0
+
+
+ class ExportArgs:
+     __slots__ = ("args", "kwargs")
+
+     def __init__(self, *args, **kwargs):
+         self.args = args
+         self.kwargs = kwargs
+
+
+ InputsType = Union[Tuple[Any, ...], ExportArgs]
+
+
+ def check_inputs_type(x):
+     if not isinstance(x, (ExportArgs, tuple)):
+         raise ValueError(
+             f"Expecting inputs type to be either a tuple, or ExportArgs, got: {type(x)}"
+         )
+
+
+ def _validate_tag(tag: str):
+     parts = tag.split(".")
+     t = _TAGS
+     for part in parts:
+         assert set(part) <= set(
+             string.ascii_lowercase + "-"
+         ), f"Tag contains invalid characters: {part}"
+         if part in t:
+             t = t[part]
+         else:
+             raise ValueError(f"Tag {tag} is not found in registered tags.")
+
+
+ @dataclass(frozen=True)
+ class ExportCase:
+     example_inputs: InputsType
+     description: str  # A description of the use case.
+     model: torch.nn.Module
+     name: str
+     extra_inputs: Optional[InputsType] = None  # For testing graph generalization.
+     # Tags associated with the use case. (e.g. dynamic-shape, escape-hatch)
+     tags: Set[str] = field(default_factory=set)
+     support_level: SupportLevel = SupportLevel.SUPPORTED
+     dynamic_shapes: Optional[Dict[str, Any]] = None
+
+     def __post_init__(self):
+         check_inputs_type(self.example_inputs)
+         if self.extra_inputs is not None:
+             check_inputs_type(self.extra_inputs)
+
+         for tag in self.tags:
+             _validate_tag(tag)
+
+         if not isinstance(self.description, str) or len(self.description) == 0:
+             raise ValueError(f'Invalid description: "{self.description}"')
+
+
+ _EXAMPLE_CASES: Dict[str, ExportCase] = {}
+ _MODULES: Set[ModuleType] = set()
+ _EXAMPLE_CONFLICT_CASES: Dict[str, List[ExportCase]] = {}
+ _EXAMPLE_REWRITE_CASES: Dict[str, List[ExportCase]] = {}
+
+
+ def register_db_case(case: ExportCase) -> None:
+     """
+     Registers a user-provided ExportCase into the example bank.
+     """
+     if case.name in _EXAMPLE_CASES:
+         if case.name not in _EXAMPLE_CONFLICT_CASES:
+             _EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]]
+         _EXAMPLE_CONFLICT_CASES[case.name].append(case)
+         return
+
+     _EXAMPLE_CASES[case.name] = case
+
+
+ def to_snake_case(name):
+     name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
+     return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
+
+
+ def _make_export_case(m, name, configs):
+     if not issubclass(m, torch.nn.Module):
+         raise TypeError("Export case class should be a torch.nn.Module.")
+     m = m()
+
+     if "description" not in configs:
+         # Fall back to the docstring if the description is missing.
+         assert (
+             m.__doc__ is not None
+         ), f"Could not find description or docstring for export case: {m}"
+         configs = {**configs, "description": m.__doc__}
+     return ExportCase(**{**configs, "model": m, "name": name})
+
+
+ def export_case(**kwargs):
+     """
+     Decorator for registering a user-provided case into the example bank.
+     """
+
+     def wrapper(m):
+         configs = kwargs
+         module = inspect.getmodule(m)
+         if module in _MODULES:
+             raise RuntimeError("export_case should only be used once per example file.")
+
+         assert module is not None
+         _MODULES.add(module)
+         normalized_name = to_snake_case(m.__name__)
+         module_name = module.__name__.split(".")[-1]
+         if module_name != normalized_name:
+             raise RuntimeError(
+                 f'Module name "{module.__name__}" is inconsistent with exported program '
+                 + f'name "{m.__name__}". Please rename the module to "{normalized_name}".'
+             )
+
+         case = _make_export_case(m, module_name, configs)
+         register_db_case(case)
+         return case
+
+     return wrapper
+
+
+ def export_rewrite_case(**kwargs):
+     def wrapper(m):
+         configs = kwargs
+
+         parent = configs.pop("parent")
+         assert isinstance(parent, ExportCase)
+         key = parent.name
+         if key not in _EXAMPLE_REWRITE_CASES:
+             _EXAMPLE_REWRITE_CASES[key] = []
+
+         configs["example_inputs"] = parent.example_inputs
+         case = _make_export_case(m, to_snake_case(m.__name__), configs)
+         _EXAMPLE_REWRITE_CASES[key].append(case)
+         return case
+
+     return wrapper
+
+
+ def normalize_inputs(x: InputsType) -> ExportArgs:
+     if isinstance(x, tuple):
+         return ExportArgs(*x)
+
+     assert isinstance(x, ExportArgs)
+     return x
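To illustrate how case.py is used: each example lives in its own file under torch._export.db.examples, and the snake_case file name must match the class name (export_case enforces this). A sketch modeled on those example files — the ScalarOutput case here is illustrative:

    # Hypothetical contents of torch/_export/db/examples/scalar_output.py
    import torch

    from torch._export.db.case import export_case, SupportLevel

    @export_case(
        example_inputs=(torch.randn(3, 2),),
        tags={"torch.dynamic-shape"},
        support_level=SupportLevel.SUPPORTED,
    )
    class ScalarOutput(torch.nn.Module):
        """
        Returning scalar values from the graph is supported. The docstring
        doubles as the case description when none is given explicitly.
        """

        def forward(self, x):
            return x.shape[0] + 1

The "torch.dynamic-shape" tag resolves against the nested _TAGS registry ("torch", then "dynamic-shape"); an unregistered tag raises ValueError in _validate_tag.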
venv/lib/python3.10/site-packages/torch/_export/db/gen_example.py ADDED
@@ -0,0 +1,28 @@
+ import os
+ import sys
+
+ import torch._export.db.examples as examples
+
+ TEMPLATE = '''import torch
+
+ from torch._export.db.case import export_case
+
+
+ @export_case(
+     example_inputs=(torch.randn(3, 2),),
+     tags={{}},
+ )
+ def {case_name}(x):
+     """
+     """
+
+     return
+ '''
+
+ if __name__ == "__main__":
+     assert len(sys.argv) == 2
+     root_dir = examples.__name__.replace(".", "/")
+     assert os.path.exists(root_dir)
+     with open(os.path.join(root_dir, sys.argv[1] + ".py"), "w") as f:
+         print("Writing to", f.name, "...")
+         f.write(TEMPLATE.format(case_name=sys.argv[1]))
venv/lib/python3.10/site-packages/torch/_export/db/logging.py ADDED
@@ -0,0 +1,2 @@
+ def exportdb_error_message(case_name: str):
+     return ""
venv/lib/python3.10/site-packages/torch/_export/error.py ADDED
@@ -0,0 +1,56 @@
+ from enum import Enum
+
+
+ class ExportErrorType(Enum):
+     # User providing invalid inputs to either the tracer or other public-facing APIs
+     INVALID_INPUT_TYPE = 1
+
+     # User returning values from their models that we don't support.
+     INVALID_OUTPUT_TYPE = 2
+
+     # Generated IR does not conform to Export IR Specification.
+     VIOLATION_OF_SPEC = 3
+
+     # User's code contains types and functionalities we don't support.
+     NOT_SUPPORTED = 4
+
+     # User's code didn't provide necessary details for us to successfully trace and export.
+     # For example, we use a lot of decorators and ask users to annotate their model.
+     MISSING_PROPERTY = 5
+
+     # User is using an API without the proper initialization step.
+     UNINITIALIZED = 6
+
+
+ def internal_assert(pred: bool, assert_msg: str) -> None:
+     """
+     This is exir's custom assert method. It internally just throws InternalError.
+     Note that the sole purpose is to throw our own error while maintaining syntax
+     similar to a Python assert.
+     """
+
+     if not pred:
+         raise InternalError(assert_msg)
+
+
+ class InternalError(Exception):
+     """
+     Raised when an internal invariant is violated in the EXIR stack.
+     Should hint users to report a bug to dev and expose the original
+     error message.
+     """
+
+     def __init__(self, message: str) -> None:
+         super().__init__(message)
+
+
+ class ExportError(Exception):
+     """
+     This type of exception is raised for errors that are directly caused by the user
+     code. In general, user errors happen during model authoring, tracing, using our public
+     facing APIs, and writing graph passes.
+     """
+
+     def __init__(self, error_code: ExportErrorType, message: str) -> None:
+         prefix = f"[{error_code}]: "
+         super().__init__(prefix + message)
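A short sketch of how these error types compose — the message text is illustrative:

    from torch._export.error import ExportError, ExportErrorType, internal_assert

    internal_assert(isinstance((), tuple), "tuple check failed")  # passes silently

    try:
        raise ExportError(
            ExportErrorType.NOT_SUPPORTED,
            "this feature is not supported by export",
        )
    except ExportError as e:
        # Prints "[ExportErrorType.NOT_SUPPORTED]: this feature is not supported by export"
        print(e)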
venv/lib/python3.10/site-packages/torch/_export/exported_program.py ADDED
@@ -0,0 +1,50 @@
+ import warnings
+
+
+ import torch
+ import torch.fx
+
+
+ # TODO(ycao): This is added to avoid breaking existing code temporarily.
+ # Remove when migration is done.
+ from torch.export.graph_signature import (
+     ExportBackwardSignature,
+     ExportGraphSignature,
+ )
+
+ from torch.export.exported_program import (
+     ExportedProgram,
+     ModuleCallEntry,
+     ModuleCallSignature,
+ )
+
+
+
+ __all__ = [
+     "ExportBackwardSignature",
+     "ExportGraphSignature",
+     "ExportedProgram",
+     "ModuleCallEntry",
+     "ModuleCallSignature",
+ ]
+
+
+ def _create_graph_module_for_export(root, graph):
+     try:
+         gm = torch.fx.GraphModule(root, graph)
+     except SyntaxError:
+         # If custom objects stored in memory are being used in the graph,
+         # the generated python code will result in a syntax error on the custom
+         # object, since it is unable to parse the in-memory object. However
+         # we can still run the graph eagerly through torch.fx.Interpreter,
+         # so we will bypass this error.
+         warnings.warn(
+             "Unable to execute the generated python source code from "
+             "the graph. The graph module will no longer be directly callable, "
+             "but you can still run the ExportedProgram, and if needed, you can "
+             "run the graph module eagerly using torch.fx.Interpreter."
+         )
+         gm = torch.fx.GraphModule(root, torch.fx.Graph())
+         gm._graph = graph
+
+     return gm
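As the fallback branch above notes, a graph whose generated Python source cannot be executed can still be run eagerly with torch.fx.Interpreter. A minimal illustration of that execution path (the traced function is illustrative):

    import torch
    import torch.fx

    def f(x):
        return x * 2 + 1

    gm = torch.fx.symbolic_trace(f)
    out = torch.fx.Interpreter(gm).run(torch.ones(3))  # runs the graph node by node
    print(out)  # tensor([3., 3., 3.])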
venv/lib/python3.10/site-packages/torch/_export/non_strict_utils.py ADDED
@@ -0,0 +1,258 @@
+ import inspect
+ from collections import defaultdict
+ from typing import Any, Callable, Dict, List, Tuple, Union
+
+ import torch
+ from torch._dynamo.source import (
+     AttrSource,
+     GetItemSource,
+     LocalSource,
+     TensorProperty,
+     TensorPropertySource,
+ )
+ from torch._dynamo.variables.builder import TrackedFake
+ from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
+ from torch._guards import Source
+ from torch._subclasses.fake_tensor import FakeTensorMode
+ from torch.export import Constraint
+ from torch.export.graph_signature import CustomObjArgument
+ from torch.fx.experimental.symbolic_shapes import (
+     ConstraintViolationError,
+     DimDynamic,
+     EqualityConstraint,
+     ShapeEnv,
+     StatelessSymbolicContext,
+ )
+ from torch.utils._pytree import (
+     GetAttrKey,
+     KeyPath,
+     MappingKey,
+     SequenceKey,
+     tree_map_with_path,
+ )
+
+
+ def key_path_to_source(kp: KeyPath) -> Source:
+     """
+     Given a key path, return the source for the key path.
+     """
+     source: Source = LocalSource("args")
+     for k in kp:
+         if isinstance(k, SequenceKey):
+             source = GetItemSource(source, k.idx)
+         elif isinstance(k, MappingKey):
+             source = GetItemSource(source, k.key)
+         elif isinstance(k, GetAttrKey):
+             source = AttrSource(source, k.name)
+         else:
+             raise ValueError(f"Unknown KeyEntry {k}")
+
+     return source
+
+
+ def _is_constant_argument(t):
+     return t is None or isinstance(t, (int, float, bool, str))
+
+
+ def fakify(
+     mode: FakeTensorMode,
+     kp: KeyPath,
+     t: Any,
+     t_constraints: Dict[int, Dict[int, Constraint]],
+     sources: Dict[Tuple[int, int], List[Source]],
+ ):
+     source = key_path_to_source(kp)
+     if _is_constant_argument(t) or isinstance(t, torch.ScriptObject):
+         return t
+     if not isinstance(t, torch.Tensor):
+         raise ValueError(f"Unsupported input type {type(t)}")
+     n_dims = len(t.shape)
+     symbolic_context = StatelessSymbolicContext(
+         dynamic_sizes=[DimDynamic.STATIC] * n_dims,
+         constraint_sizes=[None] * n_dims,
+     )
+     t_id = id(t)
+     if t_id in t_constraints:
+         for i, constraint in t_constraints[t_id].items():
+             symbolic_context.constraint_sizes[i] = constraint.constraint_range
+             symbolic_context.dynamic_sizes[i] = DimDynamic.DYNAMIC
+             src = TensorPropertySource(base=source, prop=TensorProperty.SIZE, idx=i)
+             sources[(t_id, i)].append(src)
+             mode.shape_env.source_name_to_debug_name[src.name()] = constraint.debug_name
+     fake = mode.from_tensor(t, source=source, symbolic_context=symbolic_context)
+     mode.shape_env.tracked_fakes.append(TrackedFake(fake, source, symbolic_context))
+     return fake
+
+
+ def make_fake_params_buffers(
+     fake_mode: FakeTensorMode,
+     params_buffers: Dict[str, torch.Tensor],
+ ) -> Dict[str, Union[torch.Tensor, torch.nn.Parameter]]:
+     faked_params_buffers = {}
+     for key, value in params_buffers.items():
+         faked_params_buffers[key] = fake_mode.from_tensor(value, static_shapes=True)
+     return faked_params_buffers
+
+
+ def make_fake_inputs(nn_module, args, kwargs, constraints):
+     """
+     Given an nn module, example inputs, and constraints, return a new fake mode,
+     fake inputs created in that mode whose dynamic shape dimensions are constrained
+     by the given ranges, and sources for pairs of dynamic shape dimensions that are
+     constrained to be equal.
+     """
+     # TODO(avik): refactor Dynamo to avoid duplication of the following code
+     # between non-strict and strict.
+     # Specifically, here (non-strict) we do the following pre-tracing steps:
+     #   - Fakify inputs.
+     #   - Process input shape equalities.
+     # In strict, these steps are spread across multiple files:
+     #   - output_graph.py fakifies inputs.
+     #   - [post-tracing] guards.py processes input shape equalities.
+
+     t_constraints: Dict[int, Dict[int, Constraint]] = defaultdict(dict)
+     for constraint in constraints:
+         t_constraints[constraint.t_id][constraint.dim] = constraint
+         if constraint.shared is not None:
+             t_constraints[constraint.shared.t_id][constraint.shared.dim] = constraint
+
+     code = nn_module.forward.__code__
+     co_fields = {
+         "co_name": code.co_name,
+         "co_filename": code.co_filename,
+         "co_firstlineno": code.co_firstlineno,
+     }
+
+     fake_mode = FakeTensorMode(
+         shape_env=ShapeEnv(tracked_fakes=[], co_fields=co_fields),
+         allow_non_fake_inputs=True,
+     )
+     if fake_mode.shape_env is None or fake_mode.shape_env.tracked_fakes is None:
+         raise ValueError(
+             "Detected fake_mode does not have a shape_env with tracked fakes. "
+             "If you constructed the module under a FakeTensorMode, "
+             "please initialize it like: FakeTensorMode(shape_env=ShapeEnv(tracked_fakes=[]))"
+         )
+
+     with fake_mode:
+         original_signature = inspect.signature(nn_module.forward)
+         sources: Dict[Tuple[int, int], List[Source]] = defaultdict(list)
+         fake_args, fake_kwargs = tree_map_with_path(
+             lambda kp, val: fakify(fake_mode, kp, val, t_constraints, sources),
+             (args, kwargs),
+         )
+
+     from sympy import Symbol
+
+     source_pairs: List[Tuple[Source, Source]] = []
+     derived_equalities: List[Tuple[Source, Union[Source, Symbol], Callable]] = []
+     phantom_symbols: Dict[str, Symbol] = {}
+     for constraint in constraints:
+         torch.export.dynamic_shapes._process_equalities(
+             constraint,
+             lambda t_id, dim: sources[(t_id, dim)],
+             fake_mode.shape_env,
+             source_pairs,
+             derived_equalities,
+             phantom_symbols,
+         )
+
+     equalities_inputs = EqualityConstraint(
+         source_pairs=source_pairs,
+         derived_equalities=derived_equalities,
+         phantom_symbols=list(phantom_symbols.values()),
+         warn_only=False,
+     )
+     return fake_mode, fake_args, fake_kwargs, equalities_inputs, original_signature
+
+
+ def make_constraints(
+     fake_mode,
+     equalities_inputs,
+     original_signature,
+     gm,
+ ):
+     """
+     Given a fake mode, source pairs corresponding to equal dynamic shape dimensions,
+     and a graph module, produce guards on the fake mode's shape env (raising constraint
+     violations if any), solve (to suggest simplifications or fixes), and return the
+     resulting range constraints and equality constraints.
+     """
+     # TODO(avik): refactor Dynamo to avoid duplication of the following code
+     # between non-strict and strict.
+     # Specifically, here (non-strict) we do the following post-tracing steps:
+     #   - Produce guards.
+     #   - Solve constraints.
+     #   - Install shape metadata in IR.
+     # In strict, these steps are spread across multiple files:
+     #   - guards.py produces guards.
+     #   - eval_frame.py solves constraints.
+     #   - _trace.py installs shape metadata in IR.
+
+     shape_env = fake_mode.shape_env
+     placeholders = [tf.fake for tf in shape_env.tracked_fakes]
+     sources = [tf.source for tf in shape_env.tracked_fakes]
+     input_contexts = [tf.symbolic_context for tf in shape_env.tracked_fakes]
+     constraint_violation_error = None
+     try:
+         shape_env.produce_guards(
+             placeholders,
+             sources,
+             input_contexts=input_contexts,
+             equalities_inputs=equalities_inputs,
+             ignore_static=False,
+         )
+     except ConstraintViolationError as e:
+         constraint_violation_error = e
+
+     shape_env.frozen = True
+     dim_constraints = shape_env.dim_constraints
+     if dim_constraints is None:
+         # Expected when shape_env.produce_guards throws an early constraint violation error.
+         # There is nothing to solve for in this case.
+         # TODO(avik): Maybe record the constraint violation error instead and replay later?
+         assert constraint_violation_error
+         raise constraint_violation_error
+     dim_constraints.solve()
+     dim_constraints.remove_redundant_dynamic_results()
+     forced_specializations = dim_constraints.forced_specializations()
+     msg = dim_constraints.prettify_results(
+         original_signature, constraint_violation_error, forced_specializations
+     )
+     if constraint_violation_error:
+         constraint_violation_error.args = (constraint_violation_error.args[0] + msg,)
+     elif forced_specializations:
+         constraint_violation_error = ConstraintViolationError(msg)
+     if constraint_violation_error:
+         raise constraint_violation_error
+
+     range_constraints = {}
+     input_dims = defaultdict(list)
+     free_symbols = set()
+     for node in gm.graph.nodes:
+         if node.op != "placeholder":
+             continue
+         if _is_constant_argument(node.meta["val"]) or isinstance(
+             node.meta["val"], CustomObjArgument
+         ):
+             continue
+         for i, d in enumerate(node.meta["val"].shape):
+             if isinstance(d, torch.SymInt):
+                 # Look up the range constraint for the symbol corresponding to this shape dimension
+                 # and store it indexed by the symbolic expression corresponding to it.
+                 # NOTE(avik): Use node._expr instead of node.expr for the lookup here because
+                 # we want the symbol, not its replacement, which could be an expression. Maybe
+                 # there's a better way to do this, e.g., by (re)computing value ranges for expressions?
+                 range_constraints[d.node.expr] = shape_env.var_to_range[d.node._expr]
+                 input_dims[d.node.expr].append(InputDim(input_name=node.name, dim=i))
+                 free_symbols.update(d.node.expr.free_symbols)
+
+     for symbol in free_symbols:
+         if symbol not in range_constraints:
+             # Placeholders can have symbolic shapes that are derived expressions.
+             # The above code will record direct range constraints for them
+             # so that we can do runtime assertions. In addition, for serde checks
+             # we want to record range constraints for their root symbols.
+             range_constraints[symbol] = shape_env.var_to_range[symbol]
+
+     return range_constraints
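For intuition about the key paths that fakify and key_path_to_source consume: torch.utils._pytree.tree_map_with_path hands each leaf its location in the input structure, which the code above translates into Dynamo Source objects. A small sketch, assuming a PyTorch build matching this commit:

    import torch
    from torch.utils._pytree import tree_map_with_path

    inputs = ({"x": torch.ones(2)}, [torch.zeros(3)])
    # Replace each tensor leaf with a printable form of its key path.
    paths = tree_map_with_path(lambda kp, leaf: "".join(str(k) for k in kp), inputs)
    print(paths)  # e.g. ({'x': "[0]['x']"}, ["[1][0]"])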
venv/lib/python3.10/site-packages/torch/_export/pass_base.py ADDED
@@ -0,0 +1,435 @@
+ import operator
+ import traceback
+ import typing
+ from contextlib import nullcontext
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+ import torch
+ from functorch.experimental.control_flow import _unstack_pytree
+ from torch import fx
+ from torch._dispatch.python import enable_python_dispatcher
+ from torch._export.pass_infra.node_metadata import NodeMetadata
+ from torch._export.pass_infra.proxy_value import ProxyValue
+ from torch._subclasses import FakeTensor, UnsupportedFakeTensorException
+ from torch._subclasses.fake_tensor import FakeTensorMode
+ from torch.fx import traceback as fx_traceback
+ from torch.fx.experimental.proxy_tensor import PythonKeyTracer
+ from torch.fx.graph import CodeGen
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
+ from torch.utils import _pytree as pytree
+
+
+ __all__ = ["_ExportPassBaseDeprecatedDoNotUse"]
+
+
+ Argument = Any
+ Value = Any
+ Fn = Callable[..., Any]
+ PassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]
+
+
+ _TORCH_SYM_OPS: Set[Callable] = {
+     torch.sym_int,
+     torch.sym_ite,
+     torch.sym_max,
+     torch.sym_min,
+     torch.sym_not,
+     torch.sym_sqrt,
+ }
+
+
+ class ExportPassBaseError(RuntimeError):
+     pass
+
+
+ class _ExportPassBaseDeprecatedDoNotUse(PassBase):
+     """
+     Interpreter-based pass class to help users maintain the IR spec while writing
+     transformations.
+     """
+
+     @staticmethod
+     def _create_dummy_node_metadata():
+         return NodeMetadata({"stack_trace": "".join(traceback.format_stack(limit=1))})
+
+
+     class ExportTracer(PythonKeyTracer):
+         def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", codegen: CodeGen) -> None:
+             super().__init__()
+             self.callback = callback
+             self.root = torch.nn.Module()
+             self.graph = torch.fx.Graph()
+             self.graph.set_codegen(codegen)
+             self.tensor_attrs: Dict[str, torch.Tensor] = {}  # type: ignore[assignment]
+             self.fake_tensor_mode: Optional[FakeTensorMode] = None
+             self.submodules: Dict[torch.nn.Module, str] = {}
+
+         def trace(self) -> None:
+             raise ExportPassBaseError("ExportTracer doesn't support trace().")
+
+         def create_arg(self, a: Argument) -> torch.fx.Node:
+             if isinstance(a, torch.nn.Module):
+                 if a not in self.submodules:
+                     name_submodule = f"submodule_{len(self.submodules)}"
+                     self.root.add_module(name_submodule, a)
+                     self.submodules[a] = name_submodule
+             elif isinstance(a, FakeTensor):
+                 if not hasattr(a, "constant") or a.constant is None:
+                     raise ExportPassBaseError(f"Cannot add {a} to graph.")
+                 a = a.constant
+             node = super().create_arg(a)
+             if (
+                 isinstance(a, torch.Tensor)
+                 and isinstance(node, torch.fx.Node)
+                 and node.op == "get_attr"
+             ):
+                 self.set_metadata(node, a)
+                 self.callback.on_attr(ProxyValue(a, node))
+             return node
+
+         def set_metadata(
+             self, node: torch.fx.Node, value: Argument,
+         ) -> None:
+             # propagate the fake tensor or sym nodes
+             def make_val(
+                 x: Argument,
+             ) -> Union[FakeTensor, torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, None]:
+                 if isinstance(x, FakeTensor):
+                     return x
+                 elif isinstance(x, torch.Tensor):
+                     if x.is_quantized:
+                         # TODO (tmanlaibaatar) properly support Quantized FakeTensor
+                         x = torch.dequantize(x)
+
+                     try:
+                         assert self.fake_tensor_mode is not None
+                         # TODO we should allocate static shapes
+                         # for param/buffer values
+                         if isinstance(x, torch.nn.Parameter):
+                             fake_tensor = self.fake_tensor_mode.from_tensor(
+                                 x, static_shapes=True
+                             )
+                         else:
+                             fake_tensor = self.fake_tensor_mode.from_tensor(x)
+                     except UnsupportedFakeTensorException:
+                         # TODO: This is just a workaround to get over the
+                         # x.as_subclass error
+                         print(
+                             "Fakeifying a Tensor subclass is not supported \
+                             right now. Instead a TensorMetadata is used."
+                         )
+                         fake_tensor = None
+                     return fake_tensor
+                 elif isinstance(x, (torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str)):
+                     return x
+                 else:
+                     return None
+
+             node.meta["val"] = pytree.tree_map(make_val, value)
+
+             # Set the tensor_metadata for values that do not have a corresponding FakeTensor
+             def make_tensor_meta(x: Argument) -> Optional[TensorMetadata]:
+                 if not isinstance(x, FakeTensor) and isinstance(x, torch.Tensor):
+                     if x.is_quantized:
+                         # TODO (tmanlaibaatar) properly support Quantized FakeTensor
+                         x = torch.dequantize(x)
+
+                     try:
+                         assert self.fake_tensor_mode is not None
+                         _ = self.fake_tensor_mode.from_tensor(x)
+                         tensor_meta = None
+                     except UnsupportedFakeTensorException:
+                         # TODO: This is just a workaround to get over the
+                         # x.as_subclass error
+                         tensor_meta = _extract_tensor_metadata(x)
+                     return tensor_meta
+                 else:
+                     return None
+
+             node.meta["tensor_meta"] = pytree.tree_map(make_tensor_meta, value)
+
+     class ExportInterpreter(fx.Interpreter):
+         def __init__(self, callback: "_ExportPassBaseDeprecatedDoNotUse", gm: fx.GraphModule) -> None:
+             super().__init__(gm)
+             self.callback = callback
+             self.node: torch.fx.Node = next(iter(gm.graph.nodes))
+
+         def placeholder(
+             self,
+             target: str,
+             args: Tuple[Argument, ...],
+             kwargs: Dict[str, Argument],
+         ) -> ProxyValue:
+             arg = super().placeholder(target, args, kwargs)
+             return self.callback.placeholder(target, arg, NodeMetadata(self.node.meta))
+
+         def output(
+             self,
+             target: torch.fx.node.Target,
+             args: Tuple[Argument, ...],
+             kwargs: Dict[str, Argument],
+         ) -> ProxyValue:
+             return self.callback.output(args[0], NodeMetadata(self.node.meta)).data
+
+         def call_function(
+             self,
+             target: torch.fx.node.Target,
+             args: Tuple[Argument, ...],
+             kwargs: Dict[str, Argument],
+         ) -> ProxyValue:
+             meta = NodeMetadata(self.node.meta)
+
+             if target == operator.getitem:
+                 value, key = args
+                 return self.callback.call_getitem(value, key, meta)
+             elif getattr(target, "__module__", None) in {"_operator", "math"}:
+                 assert callable(target)
+                 return self.callback.call_sym(target, args, meta)
+             elif target in _TORCH_SYM_OPS:
+                 assert callable(target)
+                 return self.callback.call_sym(target, args, meta)
+             elif isinstance(target, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
+                 return self.callback.call_operator(
+                     target,
+                     args,
+                     kwargs,
+                     meta,
+                 )
+             elif target == torch.ops.higher_order.cond:
+                 pred, true_fn, false_fn, inputs = args
+                 return self.callback.call_cond(pred, true_fn, false_fn, inputs, meta)
+             elif target == torch.ops.higher_order.map_impl:
+                 f, mapped_args, operands = args  # type: ignore[assignment]
+                 return self.callback.call_map(f, mapped_args, operands, meta)
+             # For other unregistered HigherOrderOps, just interpret them blindly
+             elif isinstance(target, torch._ops.HigherOrderOperator):
+                 return self.callback._fx(
+                     "call_function",
+                     target,
+                     args,
+                     kwargs,
+                     meta,
+                 )
+             else:
+                 raise ExportPassBaseError(f"Unsupported target type: {target}")
+
+         def get_attr(
+             self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]
+         ) -> Argument:
+             return super().get_attr(target, args, kwargs)
+
+         def call_module(
+             self,
+             target: torch.fx.node.Target,
+             args: Tuple[Argument, ...],
+             kwargs: Dict[str, Argument],
+         ) -> None:
+             raise ExportPassBaseError("call_module is not supported.")
+
+         def call_method(
+             self, target: str, args: Tuple[Argument, ...], kwargs: Dict[str, Argument]
+         ) -> None:
+             raise ExportPassBaseError("call_method is not supported.")
+
+         def run_node(self, n: torch.fx.Node) -> Argument:
+             self.node = n
+             self.callback.node_debug_str = n.format_node()
+             return super().run_node(n)
+
+     def __init__(self) -> None:
+         self.interpreter = torch.fx.Interpreter(
+             torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
+         )
+         self.tracer = self.ExportTracer(self, CodeGen())
+         self.fake_tensor_mode: Optional[FakeTensorMode] = None
+         self._initialized = True
+         self.node_debug_str: typing.Optional[str] = None
+
+     def _fx(
+         self,
+         kind: str,
+         target: torch.fx.node.Target,
+         args: Tuple[Argument, ...],
+         kwargs: Dict[str, Argument],
+         meta: NodeMetadata,
+     ) -> ProxyValue:
+         args_data, kwargs_data = pytree.tree_map_only(
+             ProxyValue, lambda x: x.data, (args, kwargs)
+         )
+         res_data = getattr(self.interpreter, kind)(target, args_data, kwargs_data)
+         args_proxy, kwargs_proxy = pytree.tree_map_only(
+             ProxyValue, lambda x: x.proxy, (args, kwargs)
+         )
+
+         name = None
+         if isinstance(target, torch._ops.OpOverload):
+             name = self.tracer.graph._target_to_str(target.overloadpacket.__name__)
+
+         res_proxy = self.tracer.create_proxy(kind, target, args_proxy, kwargs_proxy, name=name)
+         res_proxy.node.meta.update(meta.data)
+         self.tracer.set_metadata(res_proxy.node, res_data)
+         return ProxyValue(res_data, res_proxy)
+
+     def inputs(self, graph_module: torch.fx.GraphModule) -> List[Argument]:
+         # TODO(angelayi): Update this with what we decide to do for metadata in
+         # the exported graph module
+         if (args := graph_module.meta.get("args", None)) is not None:
+             return list(args)
+
+         def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]:
+             if "val" in node.meta:
+                 fake = node.meta["val"]
+                 if hasattr(fake, "constant") and fake.constant is not None:
+                     return fake.constant
+                 return fake
+             elif tensor_meta := node.meta.get("tensor_meta"):
+                 assert self.fake_tensor_mode is not None
+                 return FakeTensor(
+                     self.fake_tensor_mode,
+                     torch.empty(
+                         tensor_meta.shape,
+                         dtype=tensor_meta.dtype,
+                         device="meta",
+                         requires_grad=tensor_meta.requires_grad,
+                         memory_format=tensor_meta.memory_format,
+                     ),
+                     torch.device("cpu"),
+                 )
+             elif len(node.users) == 0:
+                 return None
+             raise ExportPassBaseError(
+                 f"Cannot construct an input for graph module: {graph_module}.",
+             )
+
+         return [
+             extract_input(node)
+             for node in graph_module.graph.nodes
+             if node.op == "placeholder"
+         ]
+
+     def on_attr(self, attr: ProxyValue) -> None:
+         pass
+
+     def placeholder(self, name: str, arg: Argument, meta: NodeMetadata) -> ProxyValue:
+         arg_proxy = self.tracer.create_proxy("placeholder", name, (), {})
+         arg_proxy.node.meta = meta.data
+         self.tracer.set_metadata(arg_proxy.node, arg)
+         return ProxyValue(arg, arg_proxy)
+
+     def call_operator(
+         self,
+         op,
+         args: Tuple[Argument, ...],
+         kwargs: Dict[str, Argument],
+         meta: NodeMetadata,
+     ) -> ProxyValue:
+         return self._fx("call_function", op, args, kwargs, meta)
+
+     def call_sym(
+         self,
+         target: Fn,
+         args: Tuple[Argument, ...],
+         meta: NodeMetadata,
+     ) -> ProxyValue:
+         return self._fx("call_function", target, args, {}, meta)
+
+     def call_cond(
+         self,
+         pred: ProxyValue,
+         true_fn: torch.fx.GraphModule,
+         false_fn: torch.fx.GraphModule,
+         inputs: List[Argument],
+         meta: NodeMetadata,
+     ) -> ProxyValue:
+         true_branch = self.call_submodule(true_fn, tuple(inputs))
+         false_branch = self.call_submodule(false_fn, tuple(inputs))
+         assert true_branch is not None
+         assert false_branch is not None
+         return self._fx(
+             "call_function",
+             torch.ops.higher_order.cond,
+             (pred, true_branch.graph_module, false_branch.graph_module, list(inputs)),
+             {},
+             meta,
+         )
+
+     def call_map(
+         self,
+         f: torch.fx.GraphModule,
+         mapped_args: List[ProxyValue],
+         operands: List[ProxyValue],
+         meta: NodeMetadata,
+     ) -> ProxyValue:
+         xs = _unstack_pytree([arg.data for arg in mapped_args])[0]
+         f_branch = self.call_submodule(f, tuple(xs + [arg.data for arg in operands]))
+         assert f_branch is not None
+         return self._fx(
+             "call_function",
+             torch.ops.higher_order.map_impl,
+             (f_branch.graph_module, mapped_args, operands),
+             {},
+             meta,
+         )
+
+     def call_getitem(
+         self, value: ProxyValue, key: int, meta: NodeMetadata
+     ) -> ProxyValue:
+         return self._fx("call_function", operator.getitem, (value, key), {}, meta)
+
+     def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
+         return self._fx("output", "output", (results,), {}, meta)
+
+     def call_submodule(
+ self, graph_module: fx.GraphModule, inputs: Tuple[Argument, ...]
385
+ ) -> PassResult:
386
+ prev_tracer, self.tracer = self.tracer, self.ExportTracer(
387
+ self, graph_module.graph._codegen
388
+ )
389
+ self.tracer.fake_tensor_mode = prev_tracer.fake_tensor_mode
390
+ interpreter = self.ExportInterpreter(self, graph_module)
391
+ prev_interpreter, self.interpreter = self.interpreter, torch.fx.Interpreter(
392
+ torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
393
+ )
394
+ inputs_data = pytree.tree_map_only(ProxyValue, lambda x: x.data, inputs)
395
+ with fx_traceback.preserve_node_meta():
396
+ interpreter.run(*inputs_data)
397
+
398
+ new_graph_module = torch.fx.GraphModule(self.tracer.root, self.tracer.graph)
399
+
400
+ self.tracer = prev_tracer
401
+ self.interpreter = prev_interpreter
402
+ return PassResult(
403
+ new_graph_module,
404
+ True,
405
+ )
406
+
407
+ def call(self, graph_module: fx.GraphModule) -> PassResult:
408
+ if not getattr(self, "_initialized", False):
409
+ raise ExportPassBaseError(
410
+ "ExportPass is not initialized with __init__().",
411
+ )
412
+
413
+ inputs = self.inputs(graph_module)
414
+
415
+ fake_tensor_mode = None
416
+ for i in inputs:
417
+ if isinstance(i, FakeTensor):
418
+ assert (
419
+ fake_tensor_mode is None or fake_tensor_mode is i.fake_mode
420
+ ), "Multiple fake tensor mode detected."
421
+ fake_tensor_mode = i.fake_mode
422
+ if fake_tensor_mode is None:
423
+ self.tracer.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True)
424
+ fake_tensor_mode = nullcontext() # type: ignore[assignment]
425
+ dispatcher_mode = nullcontext() # type: ignore[assignment]
426
+ else:
427
+ fake_tensor_mode.allow_non_fake_inputs = True
428
+ self.tracer.fake_tensor_mode = fake_tensor_mode
429
+ dispatcher_mode = enable_python_dispatcher() # type: ignore[assignment]
430
+ self.fake_tensor_mode = self.tracer.fake_tensor_mode
431
+
432
+ with fake_tensor_mode, dispatcher_mode: # type: ignore[assignment, union-attr]
433
+ result = self.call_submodule(graph_module, tuple(inputs))
434
+
435
+ return result
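For orientation, this base class is meant to be extended by overriding one of the `call_*` hooks; a minimal sketch (the subclass name and counting behavior are illustrative, not part of this file):

    # Minimal sketch: a pass that counts every operator call it retraces.
    class _CountOpsPass(_ExportPassBaseDeprecatedDoNotUse):
        def __init__(self) -> None:
            super().__init__()
            self.num_ops = 0

        def call_operator(self, op, args, kwargs, meta):
            self.num_ops += 1  # inspect or rewrite (op, args, kwargs) here
            return super().call_operator(op, args, kwargs, meta)

    # result = _CountOpsPass()(graph_module)  # PassResult with the retraced module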
venv/lib/python3.10/site-packages/torch/_export/pass_infra/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/node_metadata.cpython-310.pyc ADDED
Binary file (1.49 kB).
 
venv/lib/python3.10/site-packages/torch/_export/pass_infra/__pycache__/proxy_value.cpython-310.pyc ADDED
Binary file (1.74 kB).
 
venv/lib/python3.10/site-packages/torch/_export/pass_infra/node_metadata.py ADDED
@@ -0,0 +1,32 @@
+from typing import Any, Dict, Set
+
+
+NodeMetadataValue = Any
+
+
+PROTECTED_KEYS: Set[str] = {
+    "val",
+    "stack_trace",
+    "nn_module_stack",
+    "debug_handle",
+    "tensor_meta",
+}
+
+
+class NodeMetadata:
+    def __init__(self, data: Dict[str, Any]) -> None:
+        self.data: Dict[str, Any] = data.copy()
+
+    def __getitem__(self, key: str) -> NodeMetadataValue:
+        return self.data[key]
+
+    def __setitem__(self, key: str, value: NodeMetadataValue) -> None:
+        if key in PROTECTED_KEYS:
+            raise RuntimeError(f"Could not override node key: {key}")
+        self.data[key] = value
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.data
+
+    def copy(self) -> "NodeMetadata":
+        return NodeMetadata(self.data.copy())
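A short usage sketch of the guard on PROTECTED_KEYS (the keys and values below are illustrative):

    meta = NodeMetadata({"val": 0, "note": "a"})  # protected keys may be set at construction
    meta["note"] = "b"                            # ordinary keys can be overwritten
    try:
        meta["val"] = 1                           # protected keys cannot
    except RuntimeError:
        pass
    assert "note" in meta and meta.copy().data == meta.data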
venv/lib/python3.10/site-packages/torch/_export/pass_infra/proxy_value.py ADDED
@@ -0,0 +1,41 @@
+# pyre-strict
+from typing import Union
+
+import torch
+
+
+class ProxyValue:
+    # pyre-ignore
+    def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]):
+        # pyre-ignore
+        self.data = data
+        self.proxy_or_node = proxy
+
+    @property
+    def node(self) -> torch.fx.Node:
+        if isinstance(self.proxy_or_node, torch.fx.Node):
+            return self.proxy_or_node
+        assert isinstance(self.proxy_or_node, torch.fx.Proxy)
+        return self.proxy_or_node.node
+
+    @property
+    def proxy(self) -> torch.fx.Proxy:
+        if not isinstance(self.proxy_or_node, torch.fx.Proxy):
+            raise RuntimeError(
+                f"ProxyValue doesn't have an attached Proxy object. Node: {self.proxy_or_node.format_node()}"
+            )
+        return self.proxy_or_node
+
+    def to_tensor(self) -> torch.Tensor:
+        assert isinstance(self.data, torch.Tensor)
+        return self.data
+
+    def is_tensor(self) -> bool:
+        return isinstance(self.data, torch.Tensor)
+
+    # pyre-ignore
+    def __iter__(self):
+        yield from self.data
+
+    def __bool__(self) -> bool:
+        return bool(self.data)
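A sketch of the Node/Proxy duality, constructed by hand for illustration:

    import torch

    g = torch.fx.Graph()
    n = g.placeholder("x")
    pv = ProxyValue(torch.ones(2), n)
    assert pv.is_tensor() and pv.node is n
    # pv.proxy would raise RuntimeError here: only a Node, not a Proxy, is attached.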
venv/lib/python3.10/site-packages/torch/_export/passes/__init__.py ADDED
@@ -0,0 +1 @@
+from .replace_view_ops_with_view_copy_ops_pass import ReplaceViewOpsWithViewCopyOpsPass
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (286 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/add_runtime_assertions_for_constraints_pass.cpython-310.pyc ADDED
Binary file (6.12 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/collect_tracepoints_pass.cpython-310.pyc ADDED
Binary file (2.32 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/functionalize_side_effectful_ops_pass.cpython-310.pyc ADDED
Binary file (3.41 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/lift_constants_pass.cpython-310.pyc ADDED
Binary file (6.93 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/remove_runtime_assertions.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_set_grad_with_hop_pass.cpython-310.pyc ADDED
Binary file (3.99 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_sym_size_ops_pass.cpython-310.pyc ADDED
Binary file (789 Bytes).
 
venv/lib/python3.10/site-packages/torch/_export/passes/__pycache__/replace_view_ops_with_view_copy_ops_pass.cpython-310.pyc ADDED
Binary file (2.46 kB).
 
venv/lib/python3.10/site-packages/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py ADDED
@@ -0,0 +1,231 @@
+import math
+import operator
+import traceback
+from functools import partial
+from typing import Callable, Dict, List, NamedTuple, Set
+
+import sympy
+
+import torch
+import torch.fx
+from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, ProxyValue, PassResult
+from torch.utils._sympy.value_ranges import ValueRanges
+from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
+
+
+__all__ = ["InputDim"]
+
+
+class InputDim(NamedTuple):
+    input_name: str
+    dim: int
+
+
+def _convert_to_int(val):
+    # Convert simple sympy Integers into concrete int
+    if val == sympy.oo:
+        return math.inf
+    if val == -sympy.oo:
+        return -math.inf
+    if isinstance(val, sympy.Integer):
+        return int(val)
+    raise RuntimeError(
+        "Export constraints cannot be non-integer expressions"
+    )
+
+
+def _convert_range_to_int(range: ValueRanges):
+    assert isinstance(range, ValueRanges)
+    min_val = _convert_to_int(range.lower)
+    max_val = _convert_to_int(range.upper)
+    return min_val, max_val
+
+
+class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoNotUse):
+    def __init__(
+        self,
+        range_constraints: Dict[sympy.Symbol, ValueRanges],
+    ):
+        super().__init__()
+        self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints
+        self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set()
+        self.counter = 0
+
+    def _assert_range_constraint(self, proxy, lower, upper, assert_msg):
+        if lower > -math.inf:
+            self._insert_assert_async(operator.ge, proxy, lower, assert_msg)
+
+        if upper < math.inf:
+            self._insert_assert_async(operator.le, proxy, upper, assert_msg)
+
+    def _insert_assert_async(self, op, proxy, value, assert_msg):
+        """
+        Inserts assert_async call_function nodes in the graph. This function is
+        called **during** the interpreter-based pass.
+        """
+        self.counter += 1
+        cmp = super().call_operator(op, (proxy, value), {}, self._create_dummy_node_metadata())
+        cmp_tensor = super().call_operator(torch.ops.aten.scalar_tensor.default, (cmp,), {}, self._create_dummy_node_metadata())
+        super().call_operator(
+            torch.ops.aten._assert_async.msg,
+            (cmp_tensor, assert_msg),
+            {},
+            self._create_dummy_node_metadata(),
+        )
+
+    def call_operator(self, op, args, kwargs, meta) -> ProxyValue:
+        ret = super().call_operator(op, args, kwargs, meta)
+        if "val" not in meta:
+            return ret
+
+        val = meta["val"]
+
+        # In general, we may have to deal with cases such as: ret[1].shape[0].
+        # We first need to find out which symbols require assertions, then follow
+        # the path from ret to each symbol, constructing the proxies along the
+        # way and building the error messages piece-wise at the same time.
+        #
+        # We use a post-order traversal to collect all the proxy callbacks and
+        # error-message callbacks that are needed; at the top level of the
+        # traversal we execute all the callbacks. We need the callbacks because,
+        # in order to call the function that creates a proxy for shape[0], we
+        # need the proxy for shape, which in turn requires the proxy for ret[1],
+        # and so on.
+        def add_assertions(val):
+            call_backs: List[Callable] = []
+            messages: List[str] = []
+            if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
+                symbol = val.node.expr
+                if symbol in self.existing_inline_assertions:
+                    return call_backs, messages
+                if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
+                    if symbol in self._asserts_generated_unbacked_symbols:
+                        return call_backs, messages
+                    # We only care about unbacked symints for these inline
+                    # constraints, which are prefixed with 'u'
+                    constraint = self.range_constraints[symbol]
+                    min_val, max_val = _convert_range_to_int(constraint)
+                    assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
+                    call_backs.append(
+                        partial(self._assert_range_constraint, lower=min_val, upper=max_val)
+                    )
+                    messages.append(assert_msg)
+                    self._asserts_generated_unbacked_symbols.add(symbol)
+
+            elif isinstance(val, torch.Tensor):
+                for i, sym in enumerate(val.shape):
+                    cbs, msgs = add_assertions(sym)
+                    for cb, msg in zip(cbs, msgs):
+                        def sym_size_cb(proxy, assert_msg, dim):
+                            dim_proxy = super(
+                                _AddRuntimeAssertionsForInlineConstraintsPass,
+                                self
+                            ).call_operator(
+                                torch.ops.aten.sym_size.int,
+                                (proxy, dim),
+                                {},
+                                self._create_dummy_node_metadata(),
+                            )
+                            cb(proxy=dim_proxy, assert_msg=assert_msg)
+                        call_backs.append(partial(sym_size_cb, dim=i))
+                        messages.append(f".shape[{i}]" + msg)
+            return call_backs, messages
+
+        callbacks, messages = add_assertions(val)
+        for cb, msg in zip(callbacks, messages):
+            cb(proxy=ret, assert_msg=f"{ret.node}" + msg)
+        return ret
+
+    def call(self, graph_module):
+        self.existing_inline_assertions = _get_existing_inline_assertions(
+            graph_module, self.range_constraints
+        )
+
+        # Add runtime asserts for inline constraints
+        val = super().call(graph_module)
+
+        # Sometimes this pass returns a wrong graph with mismatched node names
+        # in the signature; until that is fixed, just skip it.
+        if self.counter == 0 and type(self) is _AddRuntimeAssertionsForInlineConstraintsPass:
+            return PassResult(graph_module, False)
+
+        # Populate the stack trace with dummy vals to respect IR
+        for node in val.graph_module.graph.nodes:
+            if not node.meta.get("stack_trace", None):
+                node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1))
+
+        return PassResult(val.graph_module, val.modified)
+
+
+def _get_existing_inline_assertions(
+    graph_module: torch.fx.GraphModule,
+    range_constraints: Dict[sympy.Symbol, ValueRanges],
+) -> Dict[sympy.Symbol, ValueRanges]:
+    existing_inline_assertions: Dict[sympy.Symbol, ValueRanges] = {}
+
+    for module in graph_module.modules():
+        if not isinstance(module, torch.fx.GraphModule):
+            continue
+
+        # Find all the existing inline assertions. They will look something like:
+        # %_local_scalar_dense = call_function[target=torch.ops.aten._local_scalar_dense.default](args = (%arg1_1,), kwargs = {})
+        # %ge = call_function[target=operator.ge](args = (%_local_scalar_dense, 0), kwargs = {})
+        # %scalar_tensor = call_function[target=torch.ops.aten.scalar_tensor.default](args = (%ge,), kwargs = {})
+        # %_assert_async = call_function[target=torch.ops.aten._assert_async.msg](args = (%scalar_tensor, "..."), kwargs = {})
+        for node in module.graph.nodes:
+            if node.target != torch.ops.aten._assert_async.msg:
+                continue
+
+            scalar_tensor_arg = node.args[0]
+            if not (
+                scalar_tensor_arg.op == "call_function" and
+                scalar_tensor_arg.target == torch.ops.aten.scalar_tensor.default
+            ):
+                continue
+
+            compare_arg = scalar_tensor_arg.args[0]
+            if not (
+                compare_arg.op == "call_function" and
+                compare_arg.target in (operator.le, operator.ge) and
+                len(compare_arg.args) == 2
+            ):
+                continue
+
+            compare_op = compare_arg.target
+            maybe_symint_arg, compare_int = compare_arg.args
+
+            # x >= 0 will sometimes be canonicalized to -x <= 0, so in some
+            # cases the operation before the comparison is a multiplication
+            # by -1. We can undo the canonicalization here.
+            if (
+                maybe_symint_arg.op == "call_function" and
+                maybe_symint_arg.target == operator.mul and
+                maybe_symint_arg.args[0] == -1
+            ):
+                maybe_symint_arg = maybe_symint_arg.args[1]
+                compare_op = operator.ge
+                compare_int = -1 * compare_int
+
+            if not (
+                "val" in maybe_symint_arg.meta and
+                isinstance(maybe_symint_arg.meta["val"], torch.SymInt)
+            ):
+                continue
+
+            symint = maybe_symint_arg.meta["val"].node.expr
+            if not isinstance(symint, sympy.Symbol):
+                continue
+
+            if symint not in range_constraints:
+                raise RuntimeError(f"Unable to find symint {symint} in {range_constraints}")
+
+            found_range = existing_inline_assertions.get(symint, ValueRanges(-math.inf, math.inf))
+
+            # Use compare_op (not compare_arg.target) so the un-canonicalized
+            # comparison above is taken into account.
+            if compare_op == operator.le:
+                existing_inline_assertions[symint] = ValueRanges(
+                    lower=found_range.lower, upper=compare_int
+                )
+            elif compare_op == operator.ge:
+                existing_inline_assertions[symint] = ValueRanges(
+                    lower=compare_int, upper=found_range.upper
+                )
+
+    return existing_inline_assertions
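For example, `_convert_range_to_int` maps a symbolic `ValueRanges` onto the concrete bounds used in the assert messages; a minimal check (the specific range is illustrative):

    import math
    import sympy
    from torch.utils._sympy.value_ranges import ValueRanges

    assert _convert_range_to_int(ValueRanges(sympy.Integer(2), sympy.oo)) == (2, math.inf)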
venv/lib/python3.10/site-packages/torch/_export/passes/collect_tracepoints_pass.py ADDED
@@ -0,0 +1,66 @@
+import operator
+
+import torch
+
+from torch.export.exported_program import ConstantArgument, TensorArgument
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+
+__all__ = ["CollectTracepointsPass"]
+
+
+class CollectTracepointsPass(PassBase):
+    """
+    Collects module-call tracepoints (_export_tracepoint nodes), records the
+    input/output specs of each traced module call into `specs`, and erases the
+    tracepoint nodes from the graph.
+    """
+
+    def __init__(self, specs, sig) -> None:
+        super().__init__()
+        self.specs = specs
+        self.sig = sig
+
+    def call(self, gm):
+        def get_arg_spec(arg):
+            if isinstance(arg, torch.fx.Node):
+                if isinstance(arg.meta.get("val"), torch.Tensor):
+                    return TensorArgument(name=arg.name)
+                else:
+                    raise AssertionError(
+                        "Symint input is not implemented yet for submodule call signature."
+                    )
+            else:
+                return ConstantArgument(value=arg)
+
+        for module in gm.modules():
+            if not isinstance(module, torch.fx.GraphModule):
+                continue
+            for node in module.graph.nodes:
+                if node.op != "call_function":
+                    continue
+                if node.target == torch.ops.higher_order._export_tracepoint:
+                    for i, arg in enumerate(node.args):
+                        kind = node.kwargs["kind"]
+                        if kind == "module_call_inputs":
+                            self.specs[node.kwargs["path"]].inputs.append(
+                                get_arg_spec(arg)
+                            )
+                        elif kind == "module_call_outputs":
+                            self.specs[node.kwargs["path"]].outputs.append(
+                                get_arg_spec(arg)
+                            )
+                        else:
+                            raise AssertionError(f"Unknown tracepoint kind: {kind}")
+                        if isinstance(arg, torch.fx.Node):
+                            for user in node.users:
+                                assert user.op == "call_function"
+                                assert user.target == operator.getitem
+                                assert isinstance(user.args[1], int)
+                                if user.args[1] == i:
+                                    user.replace_all_uses_with(arg)
+                                    self.sig.replace_all_uses(user.name, arg.name)
+                                    break
+                    users = list(node.users)
+                    for user in users:
+                        assert len(user.users) == 0
+                        gm.graph.erase_node(user)
+                    gm.graph.erase_node(node)
+        return PassResult(gm, True)
venv/lib/python3.10/site-packages/torch/_export/passes/functionalize_side_effectful_ops_pass.py ADDED
@@ -0,0 +1,94 @@
+import copy
+from typing import Dict, Optional, Tuple, List
+
+import torch
+from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, PassResult, Argument
+from torch._export.pass_infra.node_metadata import NodeMetadata
+from torch._export.pass_infra.proxy_value import ProxyValue
+from torch._ops import OpOverload
+
+aten = torch.ops.aten
+
+_NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS: Dict[OpOverload, OpOverload] = {
+    aten.sym_constrain_range.default: aten._functional_sym_constrain_range,
+    aten._assert_async.msg: aten._functional_assert_async.msg,
+}
+
+
+class _FunctionalizeSideEffectfulOpsPass(_ExportPassBaseDeprecatedDoNotUse):
+    """
+    Functionalizes ops with side effects in the graph module by replacing each
+    op with its functional version. A new dependency token (`dep_token`) will
+    be created and propagated through the functional ops to the output.
+    For example:
+    ```
+    def f(x):
+        sym_constrain_range(x.shape[0], min=1, max=3)
+        return x.add(3)
+    ```
+    Will be transformed to:
+    ```
+    def f(x):
+        dep_token0 = _make_dep_token()
+        dep_token1 = _functional_sym_constrain_range(
+            x.shape[0], min=1, max=3, dep_token=dep_token0
+        )
+
+        return x.add(3), dep_token1
+    ```
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._dep_token: Optional[ProxyValue] = None
+        self._next_dep_token_index: Optional[int] = None
+
+    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
+        # Early return if no non-functional assertions.
+        if not any(
+            n.target in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS
+            for n in graph_module.graph.nodes
+        ):
+            return PassResult(graph_module=graph_module, modified=False)
+
+        gm = copy.deepcopy(graph_module)
+        self._dep_token = None
+        self._next_dep_token_index = None
+        return super().call(gm)
+
+    def call_operator(
+        self,
+        op: OpOverload,
+        args: Tuple[Argument, ...],
+        kwargs: Dict[str, Argument],
+        meta: NodeMetadata,
+    ) -> ProxyValue:
+        if op not in _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS:
+            return super().call_operator(op, args, kwargs, meta)
+
+        if self._dep_token is None:
+            self._dep_token = super().call_operator(
+                aten._make_dep_token,
+                args=(),
+                kwargs={},
+                meta=self._create_dummy_node_metadata(),
+            )
+            self._dep_token.node.name = "dep_token0"
+            self._next_dep_token_index = 1
+
+        self._dep_token = super().call_operator(
+            _NON_FUNCTIONAL_TO_FUNCTIONAL_SIDE_EFFECTFUL_FUNCS[op],
+            args=args,
+            kwargs={**kwargs, "dep_token": self._dep_token},
+            meta=meta,
+        )
+        assert self._next_dep_token_index is not None
+        self._dep_token.node.name = f"dep_token{self._next_dep_token_index}"
+        self._next_dep_token_index += 1
+
+        return self._dep_token
+
+    def output(self, results: List[Argument], meta: NodeMetadata) -> ProxyValue:
+        assert self._dep_token is not None
+
+        return super().output(results=(*results, self._dep_token), meta=meta)  # type: ignore[arg-type]
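The pass is applied like any other `_ExportPassBaseDeprecatedDoNotUse` subclass; a hedged sketch of invoking it (variable names illustrative):

    result = _FunctionalizeSideEffectfulOpsPass()(gm)  # gm: torch.fx.GraphModule
    if result.modified:
        gm = result.graph_module
    # With two side-effectful ops, the rewritten graph threads
    # dep_token0 -> dep_token1 -> dep_token2 and appends the last token
    # to the graph outputs, as in the docstring example above.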
venv/lib/python3.10/site-packages/torch/_export/passes/lift_constants_pass.py ADDED
@@ -0,0 +1,248 @@
+import collections
+from typing import Any, Dict, Union
+
+import torch
+from torch._export.verifier import SpecViolationError
+from torch._guards import detect_fake_mode
+from torch.export.exported_program import (
+    ArgumentSpec,
+    CustomObjArgument,
+    ExportGraphSignature,
+    InputKind,
+    InputSpec,
+    TensorArgument,
+)
+
+
+class ConstantAttrMap(collections.abc.MutableMapping):
+    """A mapping class that understands how to use module constants (tensors and
+    ScriptObjects) as keys. We store tensors normally, but ScriptObjects are
+    stored by hash, because different torch.ScriptObjects can point to the same
+    underlying value (but we guarantee that they will `hash()` to the same value
+    if that's the case).
+    """
+
+    def __init__(self):
+        # Underlying dict that we use to implement this mapping.
+        self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {}
+        # Map from the hash(ScriptObject) to the ScriptObject itself. Used for
+        # APIs like `__iter__` that should look like they're returning the
+        # original ScriptObjects.
+        self._script_object_map: Dict[int, torch.ScriptObject] = {}
+
+    def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any:
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+        assert isinstance(real_key, (int, torch.Tensor))
+        return self._constant_attrs[real_key]
+
+    def __setitem__(
+        self, key: Union[torch.Tensor, torch.ScriptObject], value: Any
+    ) -> None:
+        if isinstance(key, torch.ScriptObject):
+            self._constant_attrs[hash(key)] = value
+            self._script_object_map[hash(key)] = key
+        elif isinstance(key, torch.Tensor):
+            self._constant_attrs[key] = value
+        else:
+            raise TypeError(
+                f"Expected key to be a tensor or ScriptObject, got {type(key)}"
+            )
+
+    def __delitem__(self, key):
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+
+        del self._constant_attrs[real_key]
+
+    def __iter__(self):
+        for key in self._constant_attrs:
+            if isinstance(key, int):
+                yield self._script_object_map[key]
+            else:
+                yield key
+
+    def __len__(self):
+        return len(self._constant_attrs)
+
+    def __contains__(self, key: object) -> bool:
+        real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
+        return real_key in self._constant_attrs
+
+
+def get_constant_fqn(node: torch.fx.Node, constant_name: str) -> str:
+    # The FQN of the constant tensor in the state dict should
+    # correspond to the module where the constant tensor was
+    # originally used.
+    parent_fqn = list(node.meta["nn_module_stack"].values())[-1][0]
+    if len(parent_fqn) > 0:
+        return f"{parent_fqn}.{constant_name}"
+    else:
+        return constant_name
+
+
+def lift_constants_pass(
+    gm: torch.fx.GraphModule,
+    graph_signature: ExportGraphSignature,
+    constant_attrs: ConstantAttrMap,
+) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]:
+    """
+    Takes a graph module and graph signature, and modifies them in place to
+    lift any constants (tensors or custom classes) as inputs to the graph.
+    Returns a dictionary of names to constants.
+
+    Arguments:
+        gm (torch.fx.GraphModule): The graph module containing the graph and constants to lift.
+        graph_signature (ExportGraphSignature): This graph signature will be
+            mutated to add additional CONSTANT_TENSOR and CUSTOM_OBJ inputs.
+        constant_attrs (ConstantAttrMap): A mapping from a constant value to its
+            fully-qualified path in `gm`. This is used to maintain consistent
+            location of constants between the original module and the exported
+            version.
+
+    Returns:
+        A dictionary of fqn => constant value.
+    """
+    all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+
+    inputs = graph_signature.input_specs
+    num_custom_obj = sum(
+        input_specs.kind == InputKind.CUSTOM_OBJ for input_specs in inputs
+    )
+    num_tensor_constants = sum(
+        input_specs.kind == InputKind.CONSTANT_TENSOR for input_specs in inputs
+    )
+
+    fake_mode = detect_fake_mode(
+        tuple(node.meta["val"] for node in gm.graph.nodes if node.op == "placeholder")
+    )
+
+    first_user_input_loc, first_user_input = 0, None
+    for node in gm.graph.nodes:
+        if node.op == "placeholder" and node.name in graph_signature.user_inputs:
+            first_user_input = node
+            break
+        first_user_input_loc += 1
+
+    lifted_objs = ConstantAttrMap()
+    for node in gm.graph.nodes:
+        if node.op == "get_attr":
+            constant_val = getattr(gm, node.target)
+            if constant_val in lifted_objs:
+                # We already lifted this constant elsewhere. Just rewrite uses
+                # of this get_attr to point to the already-existing placeholder
+                # node.
+                const_placeholder_node = lifted_objs[constant_val]
+                node.replace_all_uses_with(const_placeholder_node)
+                gm.graph.erase_node(node)
+                continue
+
+            # For ScriptObject and Tensor constants:
+            # First check if the constant was an attribute on some module by
+            # consulting the `constant_attrs` map. If it is, use the fqn that keeps
+            # its location consistent with the eager module.
+            #
+            # If it's not in the `constant_attrs` map, that means it's an inline
+            # constant (e.g. x + torch.tensor(0)), and thus did not have a
+            # specific location in the eager module. In that case, just generate
+            # some name and attach it to the module in which it was used.
+            if isinstance(constant_val, torch.ScriptObject):
+                constant_kind = InputKind.CUSTOM_OBJ
+                constant_fqn = constant_attrs.get(constant_val)
+                if constant_fqn is not None:
+                    _, _, constant_name = constant_fqn.rpartition(".")
+                else:
+                    constant_name = f"_lifted_custom_obj{num_custom_obj}"
+                    constant_fqn = get_constant_fqn(node, constant_name)
+                    num_custom_obj += 1
+            elif isinstance(constant_val, torch.Tensor):
+                constant_kind = InputKind.CONSTANT_TENSOR
+                constant_fqn = constant_attrs.get(constant_val)
+                if constant_fqn is not None:
+                    _, _, constant_name = constant_fqn.rpartition(".")
+                else:
+                    constant_name = f"_lifted_tensor_constant{num_tensor_constants}"
+                    constant_fqn = get_constant_fqn(node, constant_name)
+                    num_tensor_constants += 1
+            elif isinstance(constant_val, torch.fx.GraphModule):
+                continue
+            elif "LoweredBackendModule" in type(constant_val).__name__:
+                continue
+            else:
+                raise SpecViolationError(
+                    f"getattr node {node} referencing unsupported type {type(constant_val)}"
+                )
+
+            with gm.graph.inserting_before(first_user_input):
+                # Insert the constant node before the first user input
+                const_placeholder_node = gm.graph.placeholder(constant_name)
+                # Match target name with its node name in case there is a name
+                # collision and a suffix is added to the node name in fx
+                const_placeholder_node.target = const_placeholder_node.name
+
+                for k, v in node.meta.items():
+                    const_placeholder_node.meta[k] = v
+
+                input_spec_arg: ArgumentSpec
+                if isinstance(constant_val, torch.Tensor):
+                    if fake_mode is not None:
+                        const_placeholder_node.meta["val"] = fake_mode.from_tensor(
+                            constant_val, static_shapes=True
+                        )
+                        const_placeholder_node.meta["val"].constant = constant_val
+                    else:
+                        const_placeholder_node.meta["val"] = constant_val
+                    input_spec_arg = TensorArgument(name=const_placeholder_node.name)
+                elif isinstance(constant_val, torch._C.ScriptObject):
+                    class_fqn = constant_val._type().qualified_name()  # type: ignore[attr-defined]
+                    const_placeholder_node.meta["val"] = CustomObjArgument(
+                        constant_fqn, class_fqn
+                    )
+                    input_spec_arg = CustomObjArgument(
+                        name=const_placeholder_node.name, class_fqn=class_fqn
+                    )
+                else:
+                    raise SpecViolationError(
+                        f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
+                    )
+
+                lifted_objs[constant_val] = const_placeholder_node
+                node.replace_all_uses_with(const_placeholder_node)
+                gm.graph.erase_node(node)
+
+                # Add the constant as a buffer to the graph signature
+                graph_signature.input_specs.insert(
+                    first_user_input_loc,
+                    InputSpec(
+                        kind=constant_kind,
+                        arg=input_spec_arg,
+                        target=constant_fqn,
+                    ),
+                )
+                all_constants[constant_fqn] = constant_val
+                first_user_input_loc += 1
+
+    return all_constants
+
+
+def rewrite_script_object_meta(
+    gm: torch.fx.GraphModule,
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]:
+    """When tracing, we produce a graph with an actual ScriptObject in the
+    meta["val"]. Eventually we want to change this behavior, when FakeMode infra
+    for ScriptObjects lands.
+
+    For now, we rewrite meta["val"] to be a placeholder CustomObjArgument.
+    """
+    constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+    for node in gm.graph.nodes:
+        if "val" not in node.meta or not isinstance(
+            node.meta["val"], torch.ScriptObject
+        ):
+            continue
+
+        old_meta = node.meta["val"]
+        class_fqn = old_meta._type().qualified_name()  # type: ignore[attr-defined]
+        new_meta = CustomObjArgument(node.name, class_fqn)
+        constants[node.name] = old_meta
+        node.meta["val"] = new_meta
+
+    return constants
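A small sketch of `ConstantAttrMap` semantics: tensors key by object identity, ScriptObjects by `hash()` (the fqn string here is hypothetical):

    import torch

    cam = ConstantAttrMap()
    t = torch.ones(3)
    cam[t] = "submod.frozen_bias"  # hypothetical fully-qualified name
    assert t in cam and cam[t] == "submod.frozen_bias" and len(cam) == 1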
venv/lib/python3.10/site-packages/torch/_export/passes/remove_runtime_assertions.py ADDED
@@ -0,0 +1,26 @@
+import torch
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+
+
+class _RemoveRuntimeAssertionsPass(PassBase):
+    """
+    Remove runtime assertions inserted by the
+    _AddRuntimeAssertionsForInlineConstraintsPass.
+    """
+
+    def call(self, graph_module) -> PassResult:
+        modified = False
+        for module in graph_module.modules():
+            if not isinstance(module, torch.fx.GraphModule):
+                continue
+            for node in module.graph.nodes:
+                if node.target == torch.ops.aten._assert_async.msg:
+                    assert_async_node = node
+                    if len(assert_async_node.users) > 0:
+                        continue
+                    module.graph.erase_node(assert_async_node)
+                    # The upstream scalar_tensor <- {le, ge} <- sym_size
+                    # linear chain of nodes is removed by downstream
+                    # dead code elimination.
+                    modified = True
+        return PassResult(graph_module, modified)
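A usage sketch, pairing the pass with the dead-code elimination its comment relies on (variable names illustrative):

    result = _RemoveRuntimeAssertionsPass()(gm)  # gm: torch.fx.GraphModule
    if result.modified:
        gm = result.graph_module
        gm.graph.eliminate_dead_code()  # drops the orphaned cmp/scalar_tensor chain
        gm.recompile()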
venv/lib/python3.10/site-packages/torch/_export/passes/replace_set_grad_with_hop_pass.py ADDED
@@ -0,0 +1,141 @@
+import torch
+from torch._higher_order_ops.wrap import wrap_with_set_grad_enabled
+
+from ..utils import (
+    node_inline_,
+    node_replace_,
+    nodes_filter,
+    nodes_first,
+    nodes_map,
+    sequential_split,
+)
+
+
+def _is_set_grad_enabled_node(node: torch.fx.Node):
+    return (
+        node
+        and node.op == "call_function"
+        and node.target == torch._C._set_grad_enabled
+    )
+
+
+def _is_set_grad_enabled_sub_mod(node: torch.fx.Node, omit_if_same_with_ambient=False):
+    if node.op == "call_module":
+        assert isinstance(node.target, str)
+        subgm = getattr(node.graph.owning_module, node.target)
+        first_non_ph = nodes_first(
+            subgm.graph.nodes, lambda node: node.op != "placeholder"
+        )
+        if (
+            first_non_ph
+            and first_non_ph.op == "call_function"
+            and first_non_ph.target == torch._C._set_grad_enabled
+        ):
+            return (
+                first_non_ph.args[0] != torch.is_grad_enabled()
+                if omit_if_same_with_ambient
+                else True
+            )
+    return False
+
+
+def _replace_with_hop(node: torch.fx.Node):
+    assert node.op == "call_module"
+    graph: torch.fx.Graph = node.graph
+    gm: torch.fx.GraphModule = graph.owning_module
+    assert isinstance(node.target, str)
+    sub_gm = getattr(gm, node.target)
+    sub_graph = sub_gm.graph
+    set_grad_nodes = nodes_filter(sub_graph.nodes, _is_set_grad_enabled_node)
+    if len(set_grad_nodes) > 0:
+        assert len(set_grad_nodes) == 1
+        set_grad_node = set_grad_nodes[0]
+        enable_grad_val = set_grad_node.args[0]
+        with graph.inserting_before(node):
+            get_attr_node = graph.get_attr(node.target)
+            output_node = next(iter(reversed(sub_gm.graph.nodes)), None)
+            if output_node is not None:
+                assert len(output_node.args) == 1
+                output_args = output_node.args[0]
+                if isinstance(output_args, (tuple, list)):
+                    call_func_node = graph.call_function(
+                        wrap_with_set_grad_enabled,
+                        (enable_grad_val, get_attr_node, *node.args),
+                        {},
+                    )
+                    # Create the metadata
+                    call_func_node.meta["val"] = tuple(
+                        arg.meta["val"] for arg in output_args
+                    )
+                    node_replace_(node, call_func_node, delete_old=True)
+
+                    # Rename getitem nodes to the actual names of their contents,
+                    # to pass the verifier and for better readability; also
+                    # propagate metadata.
+                    for get_item_node in call_func_node.users.keys():
+                        idx: int = get_item_node.args[1]
+                        output_node = output_args[idx]
+                        get_item_node._rename(output_node.name)
+                        get_item_node.meta = output_node.meta
+
+                elif isinstance(output_args, torch.fx.Node):
+                    call_func_node = graph.create_node(
+                        "call_function",
+                        wrap_with_set_grad_enabled,
+                        (enable_grad_val, get_attr_node, *node.args),
+                        {},
+                        output_args.name,
+                    )
+                    call_func_node.meta = output_args.meta
+                    node_replace_(node, call_func_node, delete_old=True)
+                else:
+                    raise NotImplementedError(
                        f"replace_set_grad_with_hop_pass doesn't support output type {type(output_args)}"
+                    )
+            else:
+                raise NotImplementedError(
+                    "Cannot replace a call_module with a hop if it has no output. This module will get DCE'd."
+                )
+        sub_graph.erase_node(set_grad_node)
+
+
+def _remove_set_grad_and_inline(node: torch.fx.Node):
+    assert node.op == "call_module"
+    graph: torch.fx.Graph = node.graph
+    gm: torch.fx.GraphModule = graph.owning_module
+    assert isinstance(node.target, str)
+    sub_gm = getattr(gm, node.target)
+    sub_graph = sub_gm.graph
+    nodes_map(
+        sub_graph.nodes,
+        lambda n: sub_graph.erase_node(n) if _is_set_grad_enabled_node(n) else n,
+    )
+    node_inline_(node)
+
+
+def replace_set_grad_with_hop_pass(gm: torch.fx.GraphModule):
+    # If there is no set_grad_enabled node, return the original graph module
+    need_replacing = False
+    for node in gm.graph.nodes:
+        if _is_set_grad_enabled_node(node):
+            need_replacing = True
+
+    if not need_replacing:
+        return gm
+
+    new_gm = sequential_split(gm, _is_set_grad_enabled_node)
+
+    def _maybe_inline_or_replace_with_hop(node: torch.fx.Node):
+        if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True):
+            _replace_with_hop(node)
+        else:
+            _remove_set_grad_and_inline(node)
+
+    nodes_map(
+        list(new_gm.graph.nodes),
+        lambda node: _maybe_inline_or_replace_with_hop(node)
+        if node.op == "call_module"
+        else node,
+    )
+    new_gm.graph.lint()
+    return new_gm
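Roughly, the pass turns a traced set_grad region into a single higher-order call; a before/after sketch under that reading (node and submodule names hypothetical):

    # before (after sequential_split): submod_1 begins with
    #     _set_grad_enabled(False); ...; return y
    # after _replace_with_hop, the parent graph instead contains
    #     y = wrap_with_set_grad_enabled(False, submod_1, x)
    new_gm = replace_set_grad_with_hop_pass(gm)  # gm: torch.fx.GraphModule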