applied-ai-018 committed
Commit facc6c1 · verified · 1 parent: b8bf9b4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/_library/__init__.py +3 -0
  6. venv/lib/python3.10/site-packages/torch/_library/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_library/abstract_impl.py +206 -0
  11. venv/lib/python3.10/site-packages/torch/_library/simple_registry.py +43 -0
  12. venv/lib/python3.10/site-packages/torch/_library/utils.py +158 -0
  13. venv/lib/python3.10/site-packages/torch/fx/__init__.py +89 -0
  14. venv/lib/python3.10/site-packages/torch/fx/__init__.pyi +11 -0
  15. venv/lib/python3.10/site-packages/torch/fx/_compatibility.py +34 -0
  16. venv/lib/python3.10/site-packages/torch/fx/_lazy_graph_module.py +182 -0
  17. venv/lib/python3.10/site-packages/torch/fx/_pytree.py +102 -0
  18. venv/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py +1202 -0
  19. venv/lib/python3.10/site-packages/torch/fx/annotate.py +21 -0
  20. venv/lib/python3.10/site-packages/torch/fx/config.py +6 -0
  21. venv/lib/python3.10/site-packages/torch/fx/experimental/__init__.py +0 -0
  22. venv/lib/python3.10/site-packages/torch/fx/experimental/_backward_state.py +27 -0
  23. venv/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py +58 -0
  24. venv/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py +1078 -0
  25. venv/lib/python3.10/site-packages/torch/fx/experimental/const_fold.py +289 -0
  26. venv/lib/python3.10/site-packages/torch/fx/experimental/debug.py +31 -0
  27. venv/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py +914 -0
  28. venv/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py +171 -0
  29. venv/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py +268 -0
  30. venv/lib/python3.10/site-packages/torch/fx/experimental/normalize.py +162 -0
  31. venv/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py +317 -0
  32. venv/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py +1122 -0
  33. venv/lib/python3.10/site-packages/torch/fx/experimental/recording.py +458 -0
  34. venv/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py +16 -0
  35. venv/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py +121 -0
  36. venv/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py +111 -0
  37. venv/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py +4 -0
  38. venv/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py +6 -0
  39. venv/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py +395 -0
  40. venv/lib/python3.10/site-packages/torch/fx/experimental/unify_refinements.py +120 -0
  41. venv/lib/python3.10/site-packages/torch/fx/graph.py +1653 -0
  42. venv/lib/python3.10/site-packages/torch/fx/graph_module.py +884 -0
  43. venv/lib/python3.10/site-packages/torch/fx/immutable_collections.py +112 -0
  44. venv/lib/python3.10/site-packages/torch/fx/interpreter.py +512 -0
  45. venv/lib/python3.10/site-packages/torch/fx/node.py +726 -0
  46. venv/lib/python3.10/site-packages/torch/fx/operator_schemas.py +441 -0
  47. venv/lib/python3.10/site-packages/torch/fx/proxy.py +565 -0
  48. venv/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py +349 -0
  49. venv/lib/python3.10/site-packages/torch/fx/tensor_type.py +104 -0
  50. venv/lib/python3.10/site-packages/torch/fx/traceback.py +99 -0
ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3ad2aa0ddfd90fef3cbb085d3f64f8420e521ed01ea430484f62c3224cd38c1
+size 33555627
ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58541c930422082229239358e2ba25ceaa995ad2b2771fc4795e546f3b5804ae
+size 50332749
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584b26bc75236e91ee1f9acd4969dd1db3f273d0b756acae2ee6603e416b6096
+size 33555612
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5e4ff1555a42cd6c3fba4b2647e6da6cb8ff44f6fed0c554ea1bcc8d2aec802
+size 33555627
venv/lib/python3.10/site-packages/torch/_library/__init__.py ADDED
@@ -0,0 +1,3 @@
+import torch._library.abstract_impl
+import torch._library.simple_registry
+import torch._library.utils
venv/lib/python3.10/site-packages/torch/_library/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (303 Bytes)
venv/lib/python3.10/site-packages/torch/_library/__pycache__/abstract_impl.cpython-310.pyc ADDED
Binary file (7.6 kB)
venv/lib/python3.10/site-packages/torch/_library/__pycache__/simple_registry.cpython-310.pyc ADDED
Binary file (1.93 kB)
venv/lib/python3.10/site-packages/torch/_library/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.93 kB)
venv/lib/python3.10/site-packages/torch/_library/abstract_impl.py ADDED
@@ -0,0 +1,206 @@
+import contextlib
+import functools
+import warnings
+from typing import Callable, Optional
+
+import torch
+from torch._library.utils import Kernel, RegistrationHandle
+
+
+class AbstractImplHolder:
+    """A holder where one can register an abstract impl to."""
+
+    def __init__(self, qualname: str):
+        self.qualname: str = qualname
+        self.kernel: Optional[Kernel] = None
+        self.lib: Optional[torch.library.Library] = None
+
+    def register(self, func: Callable, source: str) -> RegistrationHandle:
+        """Register an abstract impl.
+
+        Returns a RegistrationHandle that one can use to de-register this
+        abstract impl.
+        """
+        if self.kernel is not None:
+            raise RuntimeError(
+                f"impl_abstract(...): the operator {self.qualname} "
+                f"already has an abstract impl registered at "
+                f"{self.kernel.source}."
+            )
+        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
+            raise RuntimeError(
+                f"impl_abstract(...): the operator {self.qualname} "
+                f"already has a DispatchKey::Meta implementation via a "
+                f"pre-existing torch.library or TORCH_LIBRARY registration. "
+                f"Please either remove that registration or don't call "
+                f"impl_abstract."
+            )
+
+        if torch._C._dispatch_has_kernel_for_dispatch_key(
+            self.qualname, "CompositeImplicitAutograd"
+        ):
+            raise RuntimeError(
+                f"impl_abstract(...): the operator {self.qualname} "
+                f"already has an implementation for this device type via a "
+                f"pre-existing registration to "
+                f"DispatchKey::CompositeImplicitAutograd. "
+                f"CompositeImplicitAutograd operators do not need an abstract "
+                f"impl; "
+                f"instead, the operator will decompose into its constituents "
+                f"and those "
+                f"can have abstract impls defined on them."
+            )
+
+        # Store the kernel in this holder
+        self.kernel = Kernel(func, source)
+
+        # Also register the abstract impl to Meta key
+        if self.lib is None:
+            ns = self.qualname.split("::")[0]
+            self.lib = torch.library.Library(ns, "FRAGMENT")
+        meta_kernel = construct_meta_kernel(self.qualname, self)
+        self.lib.impl(self.qualname, meta_kernel, "Meta")
+
+        def deregister_abstract_impl():
+            if self.lib:
+                self.lib._destroy()
+                self.lib = None
+            self.kernel = None
+
+        return RegistrationHandle(deregister_abstract_impl)
+
+
+def construct_meta_kernel(
+    qualname: str, abstract_impl_holder: AbstractImplHolder
+) -> Callable:
+    assert abstract_impl_holder.kernel is not None
+
+    @functools.wraps(abstract_impl_holder.kernel.func)
+    def meta_kernel(*args, **kwargs):
+        assert abstract_impl_holder.kernel is not None
+        source = abstract_impl_holder.kernel.source
+
+        def error_on_ctx():
+            raise RuntimeError(
+                f"Attempted to call get_ctx() for the meta implementation "
+                f"for {qualname} (implemented at {source}). "
+                f"You have presumably called get_ctx() because the operator "
+                f"has a data-dependent output shape; if so, there is no "
+                f"such meta implementation and this error is the correct "
+                f"behavior."
+            )
+
+        with set_ctx_getter(error_on_ctx):
+            return abstract_impl_holder.kernel(*args, **kwargs)
+
+    return meta_kernel
+
+
+def get_none():
+    return None
+
+
+global_ctx_getter: Callable = get_none
+
+
+@contextlib.contextmanager
+def set_ctx_getter(ctx_getter):
+    global global_ctx_getter
+    prev = global_ctx_getter
+    try:
+        global_ctx_getter = ctx_getter
+        yield
+    finally:
+        global_ctx_getter = prev
+
+
+class AbstractImplCtx:
+    """
+    Context object for writing abstract implementations for custom operators.
+    """
+
+    def __init__(self, _shape_env, _op):
+        self._shape_env = _shape_env
+        self._op = _op
+
+    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
+        warnings.warn(
+            "create_unbacked_symint is deprecated, please use new_dynamic_size instead"
+        )
+        return self.new_dynamic_size(min=min, max=max)
+
+    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
+        """Constructs a new symint (symbolic int) representing a data-dependent value.
+
+        This is useful for writing the abstract implementation (which is necessary
+        for torch.compile) for a CustomOp where an output Tensor has a size
+        that depends on the data of the input Tensors.
+
+        Args:
+            min (int): A statically known inclusive lower bound for this symint. Default: 0
+            max (Optional[int]): A statically known inclusive upper bound for this
+                symint. Default: None
+
+        .. warning:
+
+            It is important that the ``min`` and ``max`` (if not None) values are set
+            correctly, otherwise, there will be undefined behavior under
+            torch.compile. The default value of ``min`` is 2 due to torch.compile
+            specializing on 0/1 sizes.
+
+            You must also verify that your implementation on concrete Tensors
+            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
+            to the symint also respects these constraints.
+            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
+            implementation that the size follows these bounds.
+
+        Example::
+
+            >>> # An operator with data-dependent output shape
+            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
+            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
+            >>>
+            >>> @torch.library.impl_abstract("mymodule::custom_nonzero")
+            >>> def custom_nonzero_abstract(x):
+            >>>     # Number of nonzero-elements is data-dependent.
+            >>>     # Since we cannot peek at the data in an abstract impl,
+            >>>     # we use the ctx object to construct a new symint that
+            >>>     # represents the data-dependent size.
+            >>>     ctx = torch.library.get_ctx()
+            >>>     nnz = ctx.new_dynamic_size()
+            >>>     shape = [nnz, x.dim()]
+            >>>     result = x.new_empty(shape, dtype=torch.int64)
+            >>>     return result
+            >>>
+            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
+            >>> def custom_nonzero_cpu(x):
+            >>>     x_np = x.numpy()
+            >>>     res = np.stack(np.nonzero(x_np), axis=1)
+            >>>     return torch.tensor(res, device=x.device)
+
+        """
+        if (
+            self._shape_env is None
+            or not self._shape_env.allow_dynamic_output_shape_ops
+        ):
+            raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)
+
+        if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
+            raise ValueError(
+                f"ctx.new_dynamic_size(min={min}, max={max}): expected "
+                f"min and max to be statically known ints but got SymInt. "
+                f"This is not supported."
+            )
+
+        if min < 0:
+            raise ValueError(
+                f"ctx.new_dynamic_size(min={min}, ...): expected min to be "
+                f"greater than or equal to 0: this API can only create "
+                f"non-negative sizes."
+            )
+
+        result = self._shape_env.create_unbacked_symint()
+        torch.fx.experimental.symbolic_shapes._constrain_range_for_size(
+            result, min=min, max=max
+        )
+        return result
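For context, a minimal sketch (not part of this commit) of how the holder above is exercised directly. The operator name "mylib::my_op" and the kernel are made up, and the sketch assumes that operator has already been defined via a torch.library.Library("mylib", ...).define(...) call; normally this plumbing is driven by torch.library.impl_abstract rather than called by hand.

from torch._library.abstract_impl import AbstractImplHolder
from torch._library.utils import get_source

holder = AbstractImplHolder("mylib::my_op")  # hypothetical operator

def my_op_abstract(x):
    # Shape-only computation; an abstract impl never reads real data.
    return x.new_empty(x.shape)

# register() stores the kernel and installs a Meta-key wrapper via construct_meta_kernel.
handle = holder.register(my_op_abstract, get_source(stacklevel=1))
# ...later, undo the registration (this also destroys the FRAGMENT library created above).
handle.destroy()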
venv/lib/python3.10/site-packages/torch/_library/simple_registry.py ADDED
@@ -0,0 +1,43 @@
+from .abstract_impl import AbstractImplHolder
+
+__all__ = ["SimpleLibraryRegistry", "SimpleOperatorEntry", "singleton"]
+
+
+class SimpleLibraryRegistry:
+    """Registry for the "simple" torch.library APIs
+
+    The "simple" torch.library APIs are a higher-level API on top of the
+    raw PyTorch DispatchKey registration APIs that includes:
+    - abstract impl
+
+    Registrations for these APIs do not go into the PyTorch dispatcher's
+    table because they may not directly involve a DispatchKey. For example,
+    the abstract impl is a Python function that gets invoked by FakeTensor.
+    Instead, we manage them here.
+
+    SimpleLibraryRegistry is a mapping from a fully qualified operator name
+    (including the overload) to SimpleOperatorEntry.
+    """
+
+    def __init__(self):
+        self._data = {}
+
+    def find(self, qualname: str) -> "SimpleOperatorEntry":
+        if qualname not in self._data:
+            self._data[qualname] = SimpleOperatorEntry(qualname)
+        return self._data[qualname]
+
+
+singleton: SimpleLibraryRegistry = SimpleLibraryRegistry()
+
+
+class SimpleOperatorEntry:
+    """This is 1:1 to an operator overload.
+
+    The fields of SimpleOperatorEntry are Holders where kernels can be
+    registered to.
+    """
+
+    def __init__(self, qualname: str):
+        self.qualname: str = qualname
+        self.abstract_impl: AbstractImplHolder = AbstractImplHolder(qualname)
+
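A short illustration (not part of the commit) of the registry semantics above: entries are created lazily by find(), repeated lookups return the same entry, and the per-operator AbstractImplHolder hangs off it. The operator name is a made-up placeholder.

from torch._library.simple_registry import singleton

entry = singleton.find("mylib::my_op")          # creates the SimpleOperatorEntry on first use
assert entry.qualname == "mylib::my_op"
assert singleton.find("mylib::my_op") is entry  # subsequent lookups return the same entry
print(type(entry.abstract_impl).__name__)       # -> AbstractImplHolder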
venv/lib/python3.10/site-packages/torch/_library/utils.py ADDED
@@ -0,0 +1,158 @@
+import dataclasses
+import inspect
+import sys
+from typing import Any, Callable, Tuple
+
+import torch
+
+
+@dataclasses.dataclass
+class Kernel:
+    """Models a (function, source location)"""
+
+    func: Callable
+    source: str
+
+    def __call__(self, *args, **kwargs):
+        return self.func(*args, **kwargs)
+
+
+class RegistrationHandle:
+    """Does something when someone calls .destroy() on it"""
+
+    def __init__(self, on_destroy: Callable):
+        self._on_destroy = on_destroy
+
+    def destroy(self) -> None:
+        self._on_destroy()
+
+
+def get_source(stacklevel: int) -> str:
+    """Get a string that represents the caller.
+
+    Example: "/path/to/foo.py:42"
+
+    Use stacklevel=1 to get the caller's source
+    Use stacklevel=2 to get the caller's caller's source
+    etc.
+    """
+    frame = inspect.getframeinfo(sys._getframe(stacklevel))
+    source = f"{frame.filename}:{frame.lineno}"
+    return source
+
+
+def parse_namespace(qualname: str) -> Tuple[str, str]:
+    splits = qualname.split("::")
+    if len(splits) != 2:
+        raise ValueError(
+            f"Expected `qualname` to be of the form "
+            f'"namespace::name", but got {qualname}. '
+            f"The qualname passed to the torch.library APIs must consist "
+            f"of a namespace and a name, e.g. aten::sin"
+        )
+    return splits[0], splits[1]
+
+
+def lookup_op(qualname: str) -> torch._ops.OpOverloadPacket:
+    namespace, name = parse_namespace(qualname)
+    if "." in name:
+        name, overload = name.split(".")
+    else:
+        overload = "default"
+    ns = getattr(torch.ops, namespace)
+    packet = getattr(ns, name)
+    return getattr(packet, overload)
+
+
+def is_builtin(op: torch._ops.OpOverload) -> bool:
+    assert isinstance(op, torch._ops.OpOverload)
+    return op.namespace in {"aten", "prim", "prims"}
+
+
+def is_functional_schema(schema: Any) -> bool:
+    """Check if the schema is functional.
+
+    An operator is functional if:
+    - it does not mutate any of its inputs
+    - it does not return a view on any of its inputs
+    - it has at least one return
+    """
+
+    # Lazy import because not all PyTorch builds have torchgen
+    from torchgen.model import FunctionSchema, SchemaKind
+
+    assert isinstance(schema, (str, FunctionSchema))
+    if isinstance(schema, str):
+        schema = FunctionSchema.parse(schema)
+
+    if schema.kind() != SchemaKind.functional:
+        return False
+    rets = schema.returns
+    is_non_mutating_view = len(rets) > 0 and any(
+        r.annotation is not None and not r.annotation.is_write for r in rets
+    )
+    if is_non_mutating_view:
+        return False
+    if not schema.returns:
+        return False
+    return True
+
+
+def mutates_and_returns_first_arg(op: torch._ops.OpOverload):
+    """Check if an op is an inplace aten op, i.e. it mutates and returns the first arg.
+
+    TODO: torchgen/model.py's FunctionSchema.parse is the source of truth for this,
+    but not all PyTorch builds have torchgen (due to the yaml dependency being weird).
+    Figure this out.
+
+    Example: add_(Tensor(a!) x, Tensor y) -> Tensor(a)
+    """
+    if op.namespace != "aten":
+        return False
+    schema = op._schema
+    if not len(schema.returns) == 1:
+        return False
+    if schema.returns[0].alias_info is None:
+        return False
+    alias_set = schema.returns[0].alias_info.after_set
+    if len(alias_set) != 1:
+        return False
+    loc = next(iter(alias_set))
+    if len(schema.arguments) < 1:
+        return False
+    first_arg = schema.arguments[0]
+    if first_arg.alias_info is None:
+        return False
+    if not first_arg.alias_info.is_write:
+        return False
+    alias_set = first_arg.alias_info.after_set
+    if len(alias_set) != 1:
+        return False
+    if loc != next(iter(alias_set)):
+        return False
+    for arg in schema.arguments[1:]:
+        if arg.alias_info is not None:
+            return False
+    return True
+
+
+def zip_schema(schema, args, kwargs):
+    """zips schema.arguments and (args, kwargs) together.
+
+    Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload:
+    that is, kwargs must be keyword-only arguments and default values may be omitted.
+    """
+    assert len(schema.arguments) >= len(args) + len(kwargs)
+    for i in range(len(schema.arguments)):
+        info = schema.arguments[i]
+        if info.kwarg_only:
+            if info.name in kwargs:
+                yield info, kwargs[info.name]
+            continue
+        if i >= len(args):
+            # args that are equal to their default values are not populated
+            # if they are followed by args that are equal to their defaults.
+            # Skip these.
+            continue
+        yield info, args[i]
+    return
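A quick sketch (not part of the commit) of how the helpers above are used, taking the built-in aten::sin overload as the lookup target:

from torch._library.utils import parse_namespace, lookup_op, is_builtin, get_source

ns, name = parse_namespace("aten::sin")   # -> ("aten", "sin")
op = lookup_op("aten::sin")               # resolves to torch.ops.aten.sin.default
print(is_builtin(op))                     # True: namespace is one of aten/prim/prims
print(get_source(stacklevel=1))           # e.g. "/path/to/this_script.py:6"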
venv/lib/python3.10/site-packages/torch/fx/__init__.py ADDED
@@ -0,0 +1,89 @@
+r'''
+FX is a toolkit for developers to use to transform ``nn.Module``
+instances. FX consists of three main components: a **symbolic tracer,**
+an **intermediate representation**, and **Python code generation**. A
+demonstration of these components in action:
+
+::
+
+    import torch
+    # Simple module for demonstration
+    class MyModule(torch.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.param = torch.nn.Parameter(torch.rand(3, 4))
+            self.linear = torch.nn.Linear(4, 5)
+
+        def forward(self, x):
+            return self.linear(x + self.param).clamp(min=0.0, max=1.0)
+
+    module = MyModule()
+
+    from torch.fx import symbolic_trace
+    # Symbolic tracing frontend - captures the semantics of the module
+    symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
+
+    # High-level intermediate representation (IR) - Graph representation
+    print(symbolic_traced.graph)
+    """
+    graph():
+        %x : [num_users=1] = placeholder[target=x]
+        %param : [num_users=1] = get_attr[target=param]
+        %add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
+        %linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
+        %clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
+        return clamp
+    """
+
+    # Code generation - valid Python code
+    print(symbolic_traced.code)
+    """
+    def forward(self, x):
+        param = self.param
+        add = x + param; x = param = None
+        linear = self.linear(add); add = None
+        clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
+        return clamp
+    """
+
+The **symbolic tracer** performs "symbolic execution" of the Python
+code. It feeds fake values, called Proxies, through the code. Operations
+on these Proxies are recorded. More information about symbolic tracing
+can be found in the :func:`symbolic_trace` and :class:`Tracer`
+documentation.
+
+The **intermediate representation** is the container for the operations
+that were recorded during symbolic tracing. It consists of a list of
+Nodes that represent function inputs, callsites (to functions, methods,
+or :class:`torch.nn.Module` instances), and return values. More information
+about the IR can be found in the documentation for :class:`Graph`. The
+IR is the format on which transformations are applied.
+
+**Python code generation** is what makes FX a Python-to-Python (or
+Module-to-Module) transformation toolkit. For each Graph IR, we can
+create valid Python code matching the Graph's semantics. This
+functionality is wrapped up in :class:`GraphModule`, which is a
+:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
+``forward`` method generated from the Graph.
+
+Taken together, this pipeline of components (symbolic tracing ->
+intermediate representation -> transforms -> Python code generation)
+constitutes the Python-to-Python transformation pipeline of FX. In
+addition, these components can be used separately. For example,
+symbolic tracing can be used in isolation to capture a form of
+the code for analysis (and not transformation) purposes. Code
+generation can be used for programmatically generating models, for
+example from a config file. There are many uses for FX!
+
+Several example transformations can be found at the
+`examples <https://github.com/pytorch/examples/tree/master/fx>`__
+repository.
+'''
+
+from .graph_module import GraphModule
+from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
+from .graph import Graph, CodeGen
+from .node import Node, map_arg, has_side_effect
+from .proxy import Proxy
+from .interpreter import Interpreter as Interpreter, Transformer as Transformer
+from .subgraph_rewriter import replace_pattern
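To make the "trace -> transform -> regenerate" pipeline described in the docstring concrete, here is a small self-contained sketch (not part of this file) that rewrites every call to operator.add into operator.mul:

import operator
import torch
from torch.fx import symbolic_trace

class AddNet(torch.nn.Module):
    def forward(self, x, y):
        return x + y

gm = symbolic_trace(AddNet())
for node in gm.graph.nodes:
    if node.op == "call_function" and node.target == operator.add:
        node.target = operator.mul   # swap the recorded operation
gm.graph.lint()
gm.recompile()   # regenerate gm.code / forward from the mutated Graph
print(gm.code)   # forward now computes x * y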
venv/lib/python3.10/site-packages/torch/fx/__init__.pyi ADDED
@@ -0,0 +1,11 @@
+from ._symbolic_trace import (
+    symbolic_trace as symbolic_trace,
+    Tracer as Tracer,
+    wrap as wrap,
+)
+from .graph import Graph as Graph
+from .graph_module import GraphModule as GraphModule
+from .interpreter import Interpreter as Interpreter, Transformer as Transformer
+from .node import has_side_effect as has_side_effect, map_arg as map_arg, Node as Node
+from .proxy import Proxy as Proxy
+from .subgraph_rewriter import replace_pattern as replace_pattern
venv/lib/python3.10/site-packages/torch/fx/_compatibility.py ADDED
@@ -0,0 +1,34 @@
+from typing import Any, Dict
+import textwrap
+
+_BACK_COMPAT_OBJECTS : Dict[Any, None] = {}
+_MARKED_WITH_COMPATIBILITY : Dict[Any, None] = {}
+
+def compatibility(is_backward_compatible : bool):
+    if is_backward_compatible:
+
+        def mark_back_compat(fn):
+            docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
+            docstring += """
+.. note::
+    Backwards-compatibility for this API is guaranteed.
+"""
+            fn.__doc__ = docstring
+            _BACK_COMPAT_OBJECTS.setdefault(fn)
+            _MARKED_WITH_COMPATIBILITY.setdefault(fn)
+            return fn
+
+        return mark_back_compat
+    else:
+
+        def mark_not_back_compat(fn):
+            docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
+            docstring += """
+.. warning::
+    This API is experimental and is *NOT* backward-compatible.
+"""
+            fn.__doc__ = docstring
+            _MARKED_WITH_COMPATIBILITY.setdefault(fn)
+            return fn
+
+        return mark_not_back_compat
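A hypothetical usage of the decorator above (the pass itself is made up): it appends a backward-compatibility note to the docstring and records the function in the module-level tables.

from torch.fx._compatibility import compatibility, _BACK_COMPAT_OBJECTS

@compatibility(is_backward_compatible=True)
def my_pass(graph):
    """Toy FX pass used only for illustration."""
    return graph

assert my_pass in _BACK_COMPAT_OBJECTS             # registered as BC-guaranteed
assert "Backwards-compatibility" in my_pass.__doc__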
venv/lib/python3.10/site-packages/torch/fx/_lazy_graph_module.py ADDED
@@ -0,0 +1,182 @@
+from contextlib import contextmanager
+
+from torch.fx import GraphModule
+from torch.fx.graph_module import (
+    _format_import_block,
+    reduce_graph_module,
+    reduce_package_graph_module,
+)
+from torch.package import PackageExporter, sys_importer
+from ._compatibility import compatibility
+
+_use_lazy_graph_module_flag = False
+_force_skip_lazy_graph_module_flag = False
+
+
+@compatibility(is_backward_compatible=False)
+@contextmanager
+def _force_skip_lazy_graph_module():
+    """
+    Skip using lazy graph module disregarding the setting of _use_lazy_graph_module.
+    Used to skip _LazyGraphModule when testing the inductor torchscript-related backend.
+
+    torch.jit.script'ing a _LazyGraphModule results in the following error:
+    https://gist.github.com/shunting314/5143654c8084aed84ecd19b818258a69
+    """
+    try:
+        global _force_skip_lazy_graph_module_flag
+        prior = _force_skip_lazy_graph_module_flag
+        _force_skip_lazy_graph_module_flag = True
+        yield
+    finally:
+        _force_skip_lazy_graph_module_flag = prior
+
+
+@compatibility(is_backward_compatible=False)
+@contextmanager
+def _use_lazy_graph_module(should_use: bool):
+    try:
+        global _use_lazy_graph_module_flag
+        prior = _use_lazy_graph_module_flag
+        _use_lazy_graph_module_flag = (
+            should_use and not _force_skip_lazy_graph_module_flag
+        )
+        yield
+    finally:
+        _use_lazy_graph_module_flag = prior
+
+
+@compatibility(is_backward_compatible=False)
+def _get_graph_module_cls():
+    return _LazyGraphModule if _use_lazy_graph_module_flag else GraphModule
+
+
+def _make_graph_module(*args, graph_module_cls=None, **kwargs):
+    if graph_module_cls is None:
+        graph_module_cls = _get_graph_module_cls()
+
+    return graph_module_cls(*args, **kwargs)
+
+
+@compatibility(is_backward_compatible=False)
+class _LazyGraphModule(GraphModule):
+    """
+    The main difference between _LazyGraphModule and GraphModule is how recompile happens.
+    GraphModule will do a 'recompile' call to generate python code and the forward method when it's
+    constructed. Later on, if the graph gets updated, the recompile method can be called again to refresh
+    the saved python code and forward method.
+
+    However, in some cases, especially in inductor, the recompilation can be a waste since we never
+    check the python code for the graph module or call its forward method. A few more concrete
+    examples regarding pattern-matching fx passes in inductor:
+    1. some passes will update the graph to be compiled and then call recompile on the GraphModule.
+    2. some passes will trace a small pattern function to search for it in the graph being compiled and
+       replace the match with the traced graph of a replacement function. The pattern graph and
+       replacement graph are quite small, but there is a large number of them. Doing GraphModule.recompile
+       for them in GraphModule.__init__ is also a waste of time.
+
+    However, simply skipping the GraphModule.recompile call in these scenarios is also dangerous.
+    People may want to check the python code or call the GraphModule's forward method for debugging purposes.
+
+    The way _LazyGraphModule solves this is: we override the recompile method to just mark the
+    need for recompilation without doing the actual recompilation. Later on, if people really
+    access the compiled python code or call the GraphModule's forward method, we do the real
+    recompilation.
+    """
+
+    @classmethod
+    def from_graphmodule(cls, gm: GraphModule):
+        if isinstance(gm, _LazyGraphModule):
+            return gm
+        else:
+            return _LazyGraphModule(gm, gm.graph)
+
+    @staticmethod
+    def force_recompile(gm):
+        """
+        Sometimes we need to force a recompile as a workaround
+        - we want to do the real recompilation before symbolic_trace to avoid error:
+          https://gist.github.com/shunting314/75549c2e82ae07ac1139c94a3583d259
+        """
+        if isinstance(gm, _LazyGraphModule):
+            gm.real_recompile()
+
+    def real_recompile(self):
+        if self._needs_recompile():
+            self._real_recompile()
+
+    @classmethod
+    def _needs_recompile(cls):
+        return cls.forward is cls._lazy_forward
+
+    def _lazy_forward(self, *args, **kwargs):
+        # Call self.real_recompile() rather than self._real_recompile() here.
+        # The _lazy_forward method may be saved and called repeatedly.
+        # Calling self.real_recompile makes sure we skip recompilation if
+        # we have already done so.
+        self.real_recompile()
+        assert not self._needs_recompile()
+
+        # Call `__call__` rather than 'forward' since recompilation may
+        # install a wrapper for `__call__` to provide a customized error
+        # message.
+        return self(*args, **kwargs)
+
+    forward = _lazy_forward
+
+    # TODO: we should handle __reduce_deploy__ the same way as __reduce_package__,
+    # or __reduce__, by calling _real_recompile. But I haven't found a good way
+    # to test __reduce_deploy__. Also it's very unlikely that LazyGraphModule
+    # will be used in torch::deploy. So it's skipped for now.
+
+    def __reduce_package__(self, exporter: PackageExporter):
+        """
+        Follow GraphModule.__reduce__ but call 'self._real_recompile' rather
+        than 'self.recompile' since for a _LazyGraphModule, self.recompile just
+        marks the need for recompilation and does not return the PythonCode object.
+        """
+        python_code = self._real_recompile()
+        dict_without_graph = self.__dict__.copy()
+        dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
+        del dict_without_graph["_graph"]
+
+        generated_module_name = f"fx-generated._{exporter.get_unique_id()}"
+        import_block = _format_import_block(python_code.globals, exporter.importer)
+        module_code = import_block + self.code
+        exporter.save_source_string(generated_module_name, module_code)
+        return (
+            reduce_package_graph_module,
+            (dict_without_graph, generated_module_name),
+        )
+
+    def __reduce__(self):
+        """
+        Follow GraphModule.__reduce__ but call 'self._real_recompile' rather
+        than 'self.recompile' since for a _LazyGraphModule, self.recompile just
+        marks the need for recompilation and does not return the PythonCode object.
+        """
+        python_code = self._real_recompile()
+        dict_without_graph = self.__dict__.copy()
+        import_block = _format_import_block(python_code.globals, sys_importer)
+        del dict_without_graph["_graph"]
+        return (reduce_graph_module, (dict_without_graph, import_block))
+
+    def _real_recompile(self):
+        return super().recompile()
+
+    @classmethod
+    def recompile(cls):
+        cls.forward = cls._lazy_forward
+
+    @property
+    def code(self) -> str:
+        self.real_recompile()
+        return super().code
+
+    def __str__(self) -> str:
+        """
+        str(GraphModule) will access the _code attribute. Make sure recompile
+        happens so the _code attribute is available.
+        """
+        self.real_recompile()
+        return super().__str__()
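A rough sketch (not part of the commit) of the lazy-recompile behavior the class docstring describes; the module M is a made-up example.

import torch
from torch.fx import symbolic_trace
from torch.fx._lazy_graph_module import _LazyGraphModule, _use_lazy_graph_module

class M(torch.nn.Module):
    def forward(self, x):
        return x.relu()

lazy = _LazyGraphModule.from_graphmodule(symbolic_trace(M()))
out = lazy(torch.randn(2))   # first call runs the real recompilation, then forwards
src = lazy.code              # the `code` property likewise forces the real recompile

with _use_lazy_graph_module(True):
    pass                     # inside this context, _make_graph_module builds _LazyGraphModule instances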
venv/lib/python3.10/site-packages/torch/fx/_pytree.py ADDED
@@ -0,0 +1,102 @@
+from collections import namedtuple
+from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Type
+
+import torch.return_types
+
+from torch.utils._pytree import PyTree, TreeSpec
+
+FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]
+FlattenFuncExactMatchSpec = Callable[[PyTree, TreeSpec], bool]
+
+SUPPORTED_NODES: Dict[Type[Any], FlattenFuncSpec] = {}
+SUPPORTED_NODES_EXACT_MATCH: Dict[Type[Any], Optional[FlattenFuncExactMatchSpec]] = {}
+
+
+def register_pytree_flatten_spec(
+    cls: Type[Any],
+    flatten_fn_spec: FlattenFuncSpec,
+    flatten_fn_exact_match_spec: Optional[FlattenFuncExactMatchSpec] = None,
+) -> None:
+    SUPPORTED_NODES[cls] = flatten_fn_spec
+    SUPPORTED_NODES_EXACT_MATCH[cls] = flatten_fn_exact_match_spec
+
+
+def tree_flatten_spec(
+    pytree: PyTree,
+    spec: TreeSpec,
+    exact_structural_match=False,
+) -> List[Any]:
+    if spec.is_leaf():
+        return [pytree]
+    if spec.type not in SUPPORTED_NODES:
+        raise RuntimeError(
+            f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with "
+            "torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make "
+            "sure that any custom pytrees have been registered before loading it.",
+        )
+    flatten_fn_spec = SUPPORTED_NODES[spec.type]
+    child_pytrees = flatten_fn_spec(pytree, spec)
+    if exact_structural_match:
+        flatten_fn_exact_match_spec = SUPPORTED_NODES_EXACT_MATCH[spec.type]
+        if flatten_fn_exact_match_spec and not flatten_fn_exact_match_spec(
+            pytree,
+            spec,
+        ):
+            raise RuntimeError(f"Cannot flatten pytree {pytree}, given spec: {spec}")
+    result = []
+    for child, child_spec in zip(child_pytrees, spec.children_specs):
+        flat = tree_flatten_spec(child, child_spec, exact_structural_match)
+        result += flat
+    return result
+
+
+def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
+    return [d[k] for k in spec.context]
+
+
+def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
+    return [d[i] for i in range(spec.num_children)]
+
+
+def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
+    return [d[i] for i in range(spec.num_children)]
+
+
+def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]:
+    return [d[i] for i in range(spec.num_children)]
+
+
+def _dict_flatten_spec_exact_match(d: Dict[Any, Any], spec: TreeSpec) -> bool:
+    return len(d) == spec.num_children
+
+
+def _list_flatten_spec_exact_match(d: List[Any], spec: TreeSpec) -> bool:
+    return len(d) == spec.num_children
+
+
+def _tuple_flatten_spec_exact_match(d: Tuple[Any], spec: TreeSpec) -> bool:
+    return len(d) == spec.num_children
+
+
+def _namedtuple_flatten_spec_exact_match(d: NamedTuple, spec: TreeSpec) -> bool:
+    return len(d) == spec.num_children
+
+
+register_pytree_flatten_spec(dict, _dict_flatten_spec, _dict_flatten_spec_exact_match)
+register_pytree_flatten_spec(list, _list_flatten_spec, _list_flatten_spec_exact_match)
+register_pytree_flatten_spec(
+    tuple,
+    _tuple_flatten_spec,
+    _tuple_flatten_spec_exact_match,
+)
+for return_type in torch.return_types.all_return_types:
+    register_pytree_flatten_spec(
+        return_type,
+        _tuple_flatten_spec,
+        _tuple_flatten_spec_exact_match,
+    )
+register_pytree_flatten_spec(
+    namedtuple,  # type: ignore[arg-type]
+    _namedtuple_flatten_spec,
+    _namedtuple_flatten_spec_exact_match,
+)
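For context, a small sketch (not part of the commit) of how tree_flatten_spec is typically used: a TreeSpec captured earlier with torch.utils._pytree drives the flattening of a later, structurally identical pytree.

import torch.utils._pytree as pytree
from torch.fx._pytree import tree_flatten_spec

_, spec = pytree.tree_flatten({"a": 1, "b": (2, 3)})     # capture the structure once
flat = tree_flatten_spec({"a": 10, "b": (20, 30)}, spec)  # flatten new values against it
print(flat)   # [10, 20, 30] -- leaves ordered according to the stored spec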
venv/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py ADDED
@@ -0,0 +1,1202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import builtins
2
+ import copy
3
+ import functools
4
+ import inspect
5
+ import math
6
+ import os
7
+ import warnings
8
+ import collections
9
+ from itertools import chain
10
+ from types import CodeType, FunctionType, ModuleType
11
+ from typing import (
12
+ Any,
13
+ Callable,
14
+ Dict,
15
+ List,
16
+ NamedTuple,
17
+ Optional,
18
+ Set,
19
+ Tuple,
20
+ Type,
21
+ Union,
22
+ )
23
+
24
+ import torch
25
+ import torch.utils._pytree as pytree
26
+ from torch._C import ScriptObject # type: ignore[attr-defined]
27
+
28
+ from ._compatibility import compatibility
29
+ from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
30
+ from .graph_module import GraphModule
31
+ from ._lazy_graph_module import _make_graph_module
32
+ from .node import Argument, base_types, map_aggregate
33
+ from .proxy import ParameterProxy, Proxy, TracerBase, Scope, ScopeContextManager
34
+
35
+ HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
36
+
37
+ # These need to run in global scope to handle nested calls correctly
38
+ _orig_module_call: Callable = torch.nn.Module.__call__
39
+ _orig_module_getattr: Callable = torch.nn.Module.__getattr__
40
+
41
+ _proxyable_classes: Dict[Type, None] = {}
42
+
43
+ _is_fx_tracing_flag = False
44
+
45
+
46
+ def is_fx_tracing():
47
+ return _is_fx_tracing_flag
48
+
49
+ @compatibility(is_backward_compatible=True)
50
+ class ProxyableClassMeta(type):
51
+ """
52
+ ProxyableClassMeta allows you to make construction of a given Python class
53
+ symbolically traceable. For example::
54
+
55
+ import torch
56
+ import torch.fx
57
+
58
+ class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
59
+ def __init__(self, left, right):
60
+ self.left, self.right = left, right
61
+
62
+ def add(self, other):
63
+ l = self.left + other.left
64
+ r = self.right + other.right
65
+ return TensorPair(l, r)
66
+
67
+ def mul(self, other):
68
+ l = self.left * other.left
69
+ r = self.right * other.right
70
+ return TensorPair(l, r)
71
+
72
+ def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
73
+ s = x.add(TensorPair(y, y))
74
+ return s.mul(x)
75
+
76
+ x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
77
+ y = torch.randn(5, 3)
78
+ ref_out = use_tensor_pair_ctor(x, y)
79
+
80
+ traced = torch.fx.symbolic_trace(use_tensor_pair_ctor)
81
+ print(traced.code)
82
+ '''
83
+ def forward(self, x : __main___TensorPair, y : torch.Tensor):
84
+ tensor_pair = __main___TensorPair(y, y); y = None
85
+ add = x.add(tensor_pair); tensor_pair = None
86
+ mul = add.mul(x); add = x = None
87
+ return mul
88
+ '''
89
+
90
+ From this example, we can see that construction of a class (``TensorPair``)
91
+ defined with ``ProxyableClassMeta`` as metaclass can be recorded in symbolic
92
+ tracing.
93
+ """
94
+
95
+ def __init__(cls, name, bases, attrs):
96
+ _proxyable_classes.setdefault(cls)
97
+ super().__init__(name, bases, attrs)
98
+
99
+ def __call__(cls, *args, **kwargs):
100
+ instance = cls.__new__(cls) # type: ignore[call-overload]
101
+
102
+ if not is_fx_tracing():
103
+ cls.__init__(instance, *args, **kwargs) # type: ignore[misc]
104
+ return instance
105
+
106
+ found_proxies = []
107
+
108
+ def check_proxy(a):
109
+ if isinstance(a, Proxy):
110
+ found_proxies.append(a)
111
+
112
+ map_aggregate(args, check_proxy)
113
+ map_aggregate(kwargs, check_proxy)
114
+
115
+ if len(found_proxies) != 0:
116
+ tracer = found_proxies[0].tracer
117
+ return tracer.create_proxy("call_function", cls, args, kwargs)
118
+ else:
119
+ cls.__init__(instance, *args, **kwargs) # type: ignore[misc]
120
+ return instance
121
+
122
+
123
+ def _patch_function(fn: FunctionType, nargs: int) -> FunctionType:
124
+ co = fn.__code__
125
+ co_flags = co.co_flags & ~HAS_VARSTUFF
126
+ co_args: tuple
127
+ if hasattr(co, "co_qualname"):
128
+ # Python-3.11+ code signature
129
+ co_args = (
130
+ nargs,
131
+ 0,
132
+ 0,
133
+ co.co_nlocals,
134
+ co.co_stacksize,
135
+ co_flags,
136
+ co.co_code,
137
+ co.co_consts,
138
+ co.co_names,
139
+ co.co_varnames,
140
+ co.co_filename,
141
+ co.co_name,
142
+ co.co_qualname, # type: ignore[attr-defined]
143
+ co.co_firstlineno,
144
+ co.co_lnotab,
145
+ co.co_exceptiontable, # type: ignore[attr-defined]
146
+ co.co_freevars,
147
+ co.co_cellvars,
148
+ )
149
+ elif hasattr(co, "co_posonlyargcount"):
150
+ co_args = (
151
+ nargs,
152
+ 0,
153
+ 0,
154
+ co.co_nlocals,
155
+ co.co_stacksize,
156
+ co_flags,
157
+ co.co_code,
158
+ co.co_consts,
159
+ co.co_names,
160
+ co.co_varnames,
161
+ co.co_filename,
162
+ co.co_name,
163
+ co.co_firstlineno,
164
+ co.co_lnotab,
165
+ co.co_freevars,
166
+ co.co_cellvars,
167
+ )
168
+ else:
169
+ co_args = (
170
+ nargs,
171
+ 0,
172
+ co.co_nlocals,
173
+ co.co_stacksize,
174
+ co_flags,
175
+ co.co_code,
176
+ co.co_consts,
177
+ co.co_names,
178
+ co.co_varnames,
179
+ co.co_filename,
180
+ co.co_name,
181
+ co.co_firstlineno,
182
+ co.co_lnotab,
183
+ co.co_freevars,
184
+ co.co_cellvars,
185
+ )
186
+ new_code = CodeType(*co_args) # type: ignore[arg-type]
187
+ return FunctionType(
188
+ new_code, fn.__globals__, fn.__name__, fn.__defaults__, fn.__closure__
189
+ )
190
+
191
+ # we need to insert placeholder nodes for *args and **kwargs
192
+ # we can't call this function normally, otherwise it would try to unpack them
193
+ # instead, let's make python think that args and kwargs are normal variables
194
+
195
+
196
+ @compatibility(is_backward_compatible=False)
197
+ class PHBase:
198
+ """
199
+ Object representing an input placeholder to `concrete_args`
200
+ """
201
+
202
+ def __repr__(self):
203
+ return "PH"
204
+
205
+
206
+ PH = PHBase()
207
+
208
+
209
+ @compatibility(is_backward_compatible=False)
210
+ class PHWithMeta(PHBase):
211
+ """
212
+ Object representing an input placeholder to `concrete_args`
213
+ """
214
+ def __init__(self, ph_key: Optional[str] = None):
215
+ super().__init__()
216
+
217
+ # Provide a hey for user to identify placeholder node during analysis
218
+ self.ph_key = ph_key
219
+
220
+
221
+ def _transfer_attrs(fr, to):
222
+ for attr_name in dir(fr):
223
+ attr_val = getattr(fr, attr_name)
224
+ if (
225
+ not callable(attr_val)
226
+ and not attr_name.startswith("__")
227
+ and not hasattr(to, attr_name)
228
+ ):
229
+ setattr(to, attr_name, attr_val)
230
+
231
+
232
+ @compatibility(is_backward_compatible=True)
233
+ class Tracer(TracerBase):
234
+ # Reference: https://github.com/pytorch/pytorch/issues/54354
235
+ # The first line of this docstring overrides the one Sphinx generates for the
236
+ # documentation. We need it so that Sphinx doesn't leak `math`s path from the
237
+ # build environment (e.g. `<module 'math' from '/leaked/path').
238
+
239
+ """Tracer(autowrap_modules=(math,), autowrap_functions=())
240
+
241
+ ``Tracer`` is the class that implements the symbolic tracing functionality
242
+ of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
243
+ to ``Tracer().trace(m)``.
244
+
245
+ Tracer can be subclassed to override various behaviors of the tracing
246
+ process. The different behaviors that can be overridden are described
247
+ in the docstrings of the methods on this class.
248
+ """
249
+
250
+ # Not checking BC on this API because the default value for `autowrap_modules`
251
+ # includes the local filepath to the `math` module, which would jitter
252
+ # across machines.
253
+ @compatibility(is_backward_compatible=True)
254
+ def __init__(
255
+ self,
256
+ autowrap_modules: Tuple[ModuleType] = (math,),
257
+ autowrap_functions: Tuple[Callable, ...] = (),
258
+ param_shapes_constant: bool = False,
259
+ ) -> None:
260
+ # This method's signature is overridden by the first line of this class'
261
+ # docstring. If this method's signature is modified, the signature that
262
+ # overrides it also should be modified accordingly.
263
+
264
+ """
265
+ Construct a Tracer object.
266
+
267
+ Args:
268
+
269
+ autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`,
270
+ Python modules whose functions should be wrapped automatically
271
+ without needing to use fx.wrap(). Backward-compatibility for
272
+ this parameter is guaranteed.
273
+
274
+ autowrap_functions (Tuple[Callable, ...]): defaults to `()`,
275
+ Python functions that should be wrapped automatically without
276
+ needing to use fx.wrap(). Backward compatibility for this
277
+ parameter is guaranteed.
278
+
279
+ param_shapes_constant (bool): When this flag is set, calls to shape,
280
+ size and a few other shape like attributes of a module's parameter
281
+ will be evaluated directly, rather than returning a new Proxy value
282
+ for an attribute access. Backward compatibility for this parameter
283
+ is guaranteed.
284
+ """
285
+
286
+ super().__init__()
287
+
288
+ # Functions we will eagerly wrap when we see them while tracing
289
+ # this captures both `math.sqrt()` and `from math import sqrt` automatically
290
+ self._autowrap_function_ids: Set[int] = {
291
+ id(value)
292
+ for name, value in chain(*[m.__dict__.items() for m in autowrap_modules])
293
+ if not name.startswith("_") and callable(value)
294
+ }
295
+ self._autowrap_function_ids.update({id(f) for f in autowrap_functions})
296
+
297
+ # Python modules to apply autowrap to at the start, in addition to
298
+ # modules we see while tracing
299
+ self._autowrap_search: List[ModuleType] = list(autowrap_modules)
300
+ self.param_shapes_constant = param_shapes_constant
301
+
302
+ self.submodule_paths: Optional[Dict[torch.nn.Module, str]] = None
303
+ self.root_module_name: str = ""
304
+ # Maps the containing module's name to the operator name
305
+ self.scope = Scope("", None)
306
+ # Records the module call stack
307
+ self.module_stack = collections.OrderedDict()
308
+ # Mapping of node name to module scope
309
+ self.node_name_to_scope: Dict[str, Tuple[str, type]] = {}
310
+
311
+ @compatibility(is_backward_compatible=True)
312
+ def create_arg(self, a: Any) -> "Argument":
313
+ """
314
+ A method to specify the behavior of tracing when preparing values to
315
+ be used as arguments to nodes in the ``Graph``.
316
+
317
+ By default, the behavior includes:
318
+
319
+ #. Iterate through collection types (e.g. tuple, list, dict) and recursively
320
+ call ``create_args`` on the elements.
321
+ #. Given a Proxy object, return a reference to the underlying IR ``Node``
322
+ #. Given a non-Proxy Tensor object, emit IR for various cases:
323
+
324
+ * For a Parameter, emit a ``get_attr`` node referring to that Parameter
325
+ * For a non-Parameter Tensor, store the Tensor away in a special
326
+ attribute referring to that attribute.
327
+
328
+ This method can be overridden to support more types.
329
+
330
+ Args:
331
+
332
+ a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
333
+
334
+
335
+ Returns:
336
+
337
+ The value ``a`` converted into the appropriate ``Argument``
338
+ """
339
+ # The base tracer is used to construct Graphs when there is no associated
340
+ # module hierarchy, so it can never create parameter references.
341
+ # The default tracer adds the ability to refer to parameters when
342
+ # tracing modules.
343
+ if isinstance(a, torch.nn.Parameter):
344
+ for n, p in self.root.named_parameters():
345
+ if a is p:
346
+ return self.create_node("get_attr", n, (), {})
347
+ raise NameError("parameter is not a member of this module")
348
+ elif isinstance(a, torch.Tensor):
349
+ for n_, p_ in self.root.named_buffers():
350
+ if a is p_:
351
+ return self.create_node("get_attr", n_, (), {})
352
+ elif isinstance(a, torch.nn.Module):
353
+ for n_, p_ in self.root.named_modules():
354
+ if a is p_:
355
+ return self.create_node("get_attr", n_, (), {})
356
+ # For NamedTuple instances that appear literally as args, we emit
357
+ # a node to construct the NamedTuple and use that Node as the argument.
358
+ if isinstance(a, tuple) and hasattr(a, "_fields"):
359
+ args = tuple(self.create_arg(elem) for elem in a)
360
+ return self.create_node("call_function", a.__class__, args, {})
361
+
362
+ # Tensors do not have a reliable string repr() from which they can be
363
+ # constructed (and we probably don't want to rely on that, either), so
364
+ # for any constant Tensor values we encounter, first search for if they
365
+ # are an attribute of some module in the module hierarchy. If so, emit
366
+ # a get_attr to retrieve that tensor. Otherwise, we'll store away the
367
+ # tensor value into a special attribute on the Module s.t. we can
368
+ # retrieve it with a get_attr.
369
+ if isinstance(a, (torch.Tensor, ScriptObject)):
370
+ qualname: Optional[str] = self.tensor_attrs.get(a)
371
+
372
+ # Tensor was not found in the Module hierarchy, stow it away in a
373
+ # special attribute and set the qualname to refer to that
374
+ if not qualname:
375
+ i = 0
376
+ while True:
377
+ qualname = f"_tensor_constant{i}"
378
+ if not hasattr(self.root, qualname):
379
+ break
380
+ i += 1
381
+ self.tensor_attrs[a] = qualname
382
+ setattr(self.root, qualname, a)
383
+
384
+ return self.create_node("get_attr", qualname, (), {})
385
+
386
+ if type(a) in _proxyable_classes:
387
+ # This is an instance of a proxyable class for which we did not
388
+ # witness its construction. Intern this as a constant attribute
389
+
390
+ # TODO: binary search
391
+ i = 0
392
+ while True:
393
+ qualname = f"_{a.__class__.__name__}_constant_{i}"
394
+ if not hasattr(self.root, qualname):
395
+ break
396
+ i += 1
397
+ setattr(self.root, qualname, a)
398
+
399
+ return self.create_node("get_attr", qualname, (), {})
400
+
401
+ return super().create_arg(a)
402
+
403
+ @compatibility(is_backward_compatible=True)
404
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
405
+ """
406
+ A method to specify whether a given ``nn.Module`` is a "leaf" module.
407
+
408
+ Leaf modules are the atomic units that appear in
409
+ the IR, referenced by ``call_module`` calls. By default,
410
+ Modules in the PyTorch standard library namespace (torch.nn)
411
+ are leaf modules. All other modules are traced through and
412
+ their constituent ops are recorded, unless specified otherwise
413
+ via this parameter.
414
+
415
+ Args:
416
+
417
+ m (Module): The module being queried about
418
+ module_qualified_name (str): The path to root of this module. For example,
419
+ if you have a module hierarchy where submodule ``foo`` contains
420
+ submodule ``bar``, which contains submodule ``baz``, that module will
421
+ appear with the qualified name ``foo.bar.baz`` here.
422
+ """
423
+ return (
424
+ (m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn"))
425
+ and not isinstance(m, torch.nn.Sequential)
426
+ )
427
+
428
+ @compatibility(is_backward_compatible=True)
429
+ def path_of_module(self, mod: torch.nn.Module) -> str:
430
+ """
431
+ Helper method to find the qualified name of ``mod`` in the Module hierarchy
432
+ of ``root``. For example, if ``root`` has a submodule named ``foo``, which has
433
+ a submodule named ``bar``, passing ``bar`` into this function will return
434
+ the string "foo.bar".
435
+
436
+ Args:
437
+
438
+ mod (str): The ``Module`` to retrieve the qualified name for.
439
+ """
440
+ # Prefer the O(1) algorithm
441
+ if self.submodule_paths:
442
+ path = self.submodule_paths.get(mod)
443
+ if path is None:
444
+ raise NameError("module is not installed as a submodule")
445
+ assert isinstance(path, str)
446
+ return path
447
+ # O(N^2) fallback in the case that we didn't store the submodule
448
+ # paths.
449
+ else:
450
+ for n, p in self.root.named_modules():
451
+ if mod is p:
452
+ return n
453
+ raise NameError("module is not installed as a submodule")
454
+
455
+ @compatibility(is_backward_compatible=True)
456
+ def call_module(
457
+ self,
458
+ m: torch.nn.Module,
459
+ forward: Callable[..., Any],
460
+ args: Tuple[Any, ...],
461
+ kwargs: Dict[str, Any],
462
+ ) -> Any:
463
+ """
464
+ Method that specifies the behavior of this ``Tracer`` when it encounters
465
+ a call to an ``nn.Module`` instance.
466
+
467
+ By default, the behavior is to check if the called module is a leaf module
468
+ via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to
469
+ ``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through
470
+ the operations in its ``forward`` function.
471
+
472
+ This method can be overridden to--for example--create nested traced
473
+ GraphModules, or any other behavior you would want while tracing across
474
+ ``Module`` boundaries.
475
+
476
+ Args:
477
+
478
+ m (Module): The module for which a call is being emitted
479
+ forward (Callable): The forward() method of the ``Module`` to be invoked
480
+ args (Tuple): args of the module callsite
481
+ kwargs (Dict): kwargs of the module callsite
482
+
483
+ Return:
484
+
485
+ The return value from the Module call. In the case that a ``call_module``
486
+ node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever
487
+ value was returned from the ``Module`` invocation.
488
+ """
489
+ module_qualified_name = self.path_of_module(m)
490
+ with ScopeContextManager(self.scope, Scope(module_qualified_name, type(m))) as _scope:
491
+ # module_stack is an ordered dict so writing then deleting the
492
+ # entry is equivalent to push/pop on a list
493
+ self.module_stack[_scope.module_path] = (module_qualified_name, _scope.module_type)
494
+ if not self.is_leaf_module(m, module_qualified_name):
495
+ ret_val = forward(*args, **kwargs)
496
+ else:
497
+ ret_val = self.create_proxy("call_module", module_qualified_name, args, kwargs)
498
+ key, _ = self.module_stack.popitem(last=True)
499
+ assert key == _scope.module_path, f" Unexpected key {key}"
500
+
501
+ return ret_val
502
+
503
+ @compatibility(is_backward_compatible=False)
504
+ def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
505
+ """
506
+ Method that specifies the behavior of this ``Tracer`` when we call getattr
507
+ on an ``nn.Module`` instance.
508
+
509
+ By default, the behavior is to return a proxy value for the attribute. It
510
+ also stores the proxy value in the ``parameter_proxy_cache``, so that future
511
+ calls will reuse the proxy rather than creating a new one.
512
+
513
+ This method can be overridden to --for example-- not return proxies when
514
+ querying parameters.
515
+
516
+ Args:
517
+
518
+ attr (str): The name of the attribute being queried
519
+ attr_val (Any): The value of the attribute
520
+ parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies
521
+
522
+ Return:
523
+
524
+ The return value from the getattr call.
525
+ """
526
+ def maybe_get_proxy_for_attr(
527
+ attr_val, collection_to_search, parameter_proxy_cache
528
+ ):
529
+ for n, p in collection_to_search:
530
+ if attr_val is p:
531
+ if n not in parameter_proxy_cache:
532
+ kwargs = {}
533
+ if (
534
+ "proxy_factory_fn"
535
+ in inspect.signature(self.create_proxy).parameters
536
+ ):
537
+ kwargs["proxy_factory_fn"] = (
538
+ None
539
+ if not self.param_shapes_constant
540
+ else lambda node: ParameterProxy(
541
+ self, node, n, attr_val
542
+ )
543
+ )
544
+ val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
545
+ parameter_proxy_cache[n] = val_proxy
546
+ return parameter_proxy_cache[n]
547
+ return None
548
+
549
+ if isinstance(attr_val, torch.nn.Parameter):
550
+ maybe_parameter_proxy = maybe_get_proxy_for_attr(
551
+ attr_val, self.root.named_parameters(), parameter_proxy_cache
552
+ )
553
+ if maybe_parameter_proxy is not None:
554
+ return maybe_parameter_proxy
555
+
556
+ if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
557
+ maybe_buffer_proxy = maybe_get_proxy_for_attr(
558
+ attr_val, self.root.named_buffers(), parameter_proxy_cache
559
+ )
560
+ if maybe_buffer_proxy is not None:
561
+ return maybe_buffer_proxy
562
+
563
+ return attr_val
564
+
565
+ # This method will be refactored
566
+ @compatibility(is_backward_compatible=False)
567
+ def create_args_for_root(self, root_fn, is_module, concrete_args=None):
568
+ """
569
+ Create ``placeholder`` nodes corresponding to the signature of the ``root``
570
+ Module. This method introspects root's signature and emits those
571
+ nodes accordingly, also supporting ``*args`` and ``**kwargs``.
572
+ """
573
+ # In some cases, a function or method has been decorated with a wrapper
574
+ # defined via ``functools.wraps``. In this case, the outer code object
575
+ # will likely not contain the actual parameters we care about, so unwrap
576
+ # the function to get to the innermost callable.
577
+ fn_for_analysis = inspect.unwrap(root_fn)
578
+ co = fn_for_analysis.__code__
579
+ total_args = co.co_argcount + co.co_kwonlyargcount
580
+ orig_args = list(co.co_varnames)
581
+ names_iter = iter(co.co_varnames)
582
+ args: List[Any] = []
583
+ skip_arg_idx = 0
584
+ if is_module:
585
+ if total_args == 0:
586
+ raise RuntimeError(
587
+ "``self`` argument cannot be part of *args expansion!"
588
+ )
589
+ skip_arg_idx = 1
590
+ next(names_iter) # skip self
591
+ args.append(self.root)
592
+
593
+ sig = inspect.signature(fn_for_analysis)
594
+
595
+
596
+ # This covers the very specific case where we are passing in flat
597
+ # concrete_args as a tuple, but our traced fn takes (*args, **kwargs).
598
+ # In this case, just take the concrete_args and pass them through.
599
+ name_idx = 0
600
+ if isinstance(concrete_args, tuple) and \
601
+ len(concrete_args) > 0 and \
602
+ (co.co_flags & HAS_VARSTUFF) and \
603
+ total_args == 1:
604
+ for concrete_arg in concrete_args:
605
+ out = self.create_proxy("placeholder", f"input_{name_idx}", (), {})
606
+ if isinstance(concrete_arg, PHBase):
607
+ if concrete_arg != PH:
608
+ # Transfer attrs in the case where you're using a placeholder other
609
+ # than the singleton PH (PH has no attributes to transfer).
610
+ # Proxies were created out of the placeholders.
611
+ # Transfer any metadata (put on the placeholders in the form of
612
+ # attributes set by the user) from the placeholder to the
613
+ # underlying nodes (the proxy is unwrapped by the user, but
614
+ # the metadata should hold).
615
+ _transfer_attrs(fr=concrete_arg, to=out.node)
616
+ args.append(out)
617
+ name_idx += 1
618
+ return root_fn, args
619
+
620
+ arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)]
621
+ if isinstance(concrete_args, tuple):
622
+ if len(arg_names) != len(concrete_args):
623
+ raise RuntimeError(
624
+ f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments"
625
+ )
626
+ concrete_args = dict(zip(arg_names, concrete_args))
627
+
628
+ def proxy_placeholder(name):
629
+ return self._proxy_placeholder(name, concrete_args, sig, fn_for_analysis)
630
+
631
+ args.extend(proxy_placeholder(names) for names in arg_names)
632
+
633
+ if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF:
634
+ # TODO: type annotations for *args and **kwargs
635
+ if co.co_flags & inspect.CO_VARARGS:
636
+ args.append(proxy_placeholder("*" + next(names_iter)))
637
+ if co.co_flags & inspect.CO_VARKEYWORDS:
638
+ args.append(proxy_placeholder("**" + next(names_iter)))
639
+ root_fn = _patch_function(root_fn, len(args))
640
+
641
+ flat_args, in_spec = pytree.tree_flatten(tuple(args))
642
+ if not all(child.is_leaf() for child in in_spec.children_specs):
643
+ # In the case that we have pytree-flattened inputs in
644
+ # `concrete_args`, generate a flattening wrapper around the
645
+ # original root function and return that.
646
+ self.graph._codegen = _PyTreeCodeGen(
647
+ _PyTreeInfo(orig_args[:total_args], in_spec, None)
648
+ )
649
+
650
+ def flatten_fn(*args):
651
+ tree_args = pytree.tree_unflatten(list(args), in_spec)
652
+ tree_out = root_fn(*tree_args)
653
+ out_args, out_spec = pytree.tree_flatten(tree_out)
654
+ assert isinstance(self.graph._codegen, _PyTreeCodeGen)
655
+ self.graph._codegen.pytree_info = (
656
+ self.graph._codegen.pytree_info._replace(out_spec=out_spec)
657
+ )
658
+ return out_args
659
+
660
+ return flatten_fn, flat_args
661
+ return root_fn, args
662
+
663
+ @compatibility(is_backward_compatible=True)
664
+ def trace(
665
+ self,
666
+ root: Union[torch.nn.Module, Callable[..., Any]],
667
+ concrete_args: Optional[Dict[str, Any]] = None,
668
+ ) -> Graph:
669
+ """
670
+ Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root``
671
+ can either be an ``nn.Module`` instance or a Python callable.
672
+
673
+ Note that after this call, ``self.root`` may be different from the ``root`` passed
674
+ in here. For example, when a free function is passed to ``trace()``, we will
675
+ create an ``nn.Module`` instance to use as the root and add embedded constants
676
+ to it.
677
+
678
+
679
+ Args:
680
+
681
+ root (Union[Module, Callable]): Either a ``Module`` or a function to be
682
+ traced through. Backwards-compatibility for this parameter is
683
+ guaranteed.
684
+ concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
685
+ not be treated as Proxies. This parameter is experimental and
686
+ its backwards-compatibility is *NOT* guaranteed.
687
+
688
+ Returns:
689
+
690
+ A ``Graph`` representing the semantics of the passed-in ``root``.
691
+ """
692
+ global _is_fx_tracing_flag
693
+ old_is_fx_tracing_flag = _is_fx_tracing_flag
694
+ _is_fx_tracing_flag = True
695
+ try:
696
+ if isinstance(root, torch.nn.Module):
697
+
698
+ # do real recompilation for _LazyGraphModule before retracing since the trace
699
+ # method can not trace the _lazy_forward method. Got error:
700
+ # https://gist.github.com/shunting314/75549c2e82ae07ac1139c94a3583d259
701
+ # without this.
702
+ from torch.fx._lazy_graph_module import _LazyGraphModule
703
+ _LazyGraphModule.force_recompile(root)
704
+
705
+ self.root = root
706
+
707
+ assert hasattr(
708
+ type(root), self.traced_func_name
709
+ ), f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}"
710
+
711
+ fn = getattr(type(root), self.traced_func_name)
712
+ self.root_module_name = root._get_name()
713
+ self.submodule_paths = {mod: name for name, mod in root.named_modules()}
714
+ else:
715
+ self.root = torch.nn.Module()
716
+ fn = root
717
+
718
+ tracer_cls: Optional[Type[Tracer]] = getattr(self, "__class__", None)
719
+ self.graph = Graph(tracer_cls=tracer_cls)
720
+ if hasattr(fn, '__code__'):
721
+ code = fn.__code__
722
+ self.graph._co_fields = {
723
+ 'co_name': code.co_name,
724
+ 'co_filename': code.co_filename,
725
+ 'co_firstlineno': code.co_firstlineno,
726
+ }
727
+
728
+ # When we encounter a Tensor value that's not a parameter, we look if it
729
+ # is some other attribute on the model. Construct a dict mapping Tensor
730
+ # values to the qualified name here for efficiency. This is used downstream
731
+ # in create_arg
732
+ self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
733
+
734
+ def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
735
+ for k, v in m.__dict__.items():
736
+ if isinstance(v, (torch.Tensor, ScriptObject)):
737
+ self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
738
+ for k, v in m.named_children():
739
+ collect_tensor_attrs(v, prefix_atoms + [k])
740
+
741
+ collect_tensor_attrs(self.root, [])
742
+
743
+ assert isinstance(fn, FunctionType)
744
+
745
+ fn_globals = fn.__globals__ # run before it gets patched
746
+ fn, args = self.create_args_for_root(
747
+ fn, isinstance(root, torch.nn.Module), concrete_args
748
+ )
749
+
750
+ parameter_proxy_cache: Dict[
751
+ str, Proxy
752
+ ] = {} # Reduce number of get_attr calls
753
+
754
+ # Method dispatch on parameters is not recorded unless it's directly used.
755
+ # Thus, we need to insert a proxy when __getattr__ requests a parameter.
756
+ @functools.wraps(_orig_module_getattr)
757
+ def module_getattr_wrapper(mod, attr):
758
+ attr_val = _orig_module_getattr(mod, attr)
759
+ return self.getattr(attr, attr_val, parameter_proxy_cache)
760
+
761
+ @functools.wraps(_orig_module_call)
762
+ def module_call_wrapper(mod, *args, **kwargs):
763
+ def forward(*args, **kwargs):
764
+ return _orig_module_call(mod, *args, **kwargs)
765
+
766
+ _autowrap_check(
767
+ patcher,
768
+ getattr(getattr(mod, "forward", mod), "__globals__", {}),
769
+ self._autowrap_function_ids,
770
+ )
771
+ return self.call_module(mod, forward, args, kwargs)
772
+
773
+ with _Patcher() as patcher:
774
+ # allow duplicate patches to support the case of nested calls
775
+ patcher.patch_method(
776
+ torch.nn.Module,
777
+ "__getattr__",
778
+ module_getattr_wrapper,
779
+ deduplicate=False,
780
+ )
781
+ patcher.patch_method(
782
+ torch.nn.Module, "__call__", module_call_wrapper, deduplicate=False
783
+ )
784
+ _patch_wrapped_functions(patcher)
785
+ _autowrap_check(patcher, fn_globals, self._autowrap_function_ids)
786
+ for module in self._autowrap_search:
787
+ _autowrap_check(
788
+ patcher, module.__dict__, self._autowrap_function_ids
789
+ )
790
+ self.create_node(
791
+ "output",
792
+ "output",
793
+ (self.create_arg(fn(*args)),),
794
+ {},
795
+ type_expr=fn.__annotations__.get("return", None),
796
+ )
797
+
798
+ self.submodule_paths = None
799
+ finally:
800
+ _is_fx_tracing_flag = old_is_fx_tracing_flag
801
+ return self.graph
802
+
803
+ def __deepcopy__(self, memo):
804
+ # _autowrap_search contains modules, which cannot be deepcopied.
805
+ new_tracer = Tracer.__new__(Tracer)
806
+
807
+ for k, v in self.__dict__.items():
808
+ if k in {'_autowrap_search'}:
809
+ new_obj = copy.copy(v)
810
+ else:
811
+ new_obj = copy.deepcopy(v, memo)
812
+
813
+ new_tracer.__dict__[k] = new_obj
814
+
815
+ return new_tracer
816
+
817
+ def _proxy_placeholder(self, name, concrete_args, sig, fn_for_analysis):
818
+ if concrete_args is not None and name in concrete_args:
819
+ cnt = 0
820
+
821
+ def replace_ph(x):
822
+ nonlocal cnt
823
+ cnt += 1
824
+ param = sig.parameters[name]
825
+ default = (
826
+ ()
827
+ if param.default is inspect.Parameter.empty
828
+ else (param.default,)
829
+ )
830
+ out = self.create_proxy(
831
+ "placeholder", f"{name}_{str(cnt)}", default, {}
832
+ )
833
+ if isinstance(x, PHBase):
834
+ if x != PH:
835
+ # Transfer attrs in the case where you're using a placeholder other
836
+ # than the singleton PH (PH has no attributes to transfer).
837
+ # Proxies were created out of the placeholders.
838
+ # Transfer any metadata (put on the placeholders in the form of
839
+ # attributes set by the user) from the placeholder to the
840
+ # underlying nodes (the proxy is unwrapped by the user, but
841
+ # the metadata should hold).
842
+ _transfer_attrs(fr=x, to=out.node)
843
+
844
+ return out
845
+ # Union[int, bool] == bool in Python <= 3.6
846
+ if (
847
+ type(x) == bool
848
+ or type(x) in base_types
849
+ and type(x) != torch.Tensor
850
+ ):
851
+ torch._assert(
852
+ out == x,
853
+ f"{name} has been specialized to have value {x} but got another value",
854
+ )
855
+ elif x is None:
856
+ args = (
857
+ out,
858
+ f"{name} has been specialized to have value None but got another value",
859
+ )
860
+ self.create_proxy("call_function", _assert_is_none, args, {})
861
+ else:
862
+ warnings.warn(
863
+ f"Was not able to add assertion to guarantee correct input {name} to "
864
+ f"specialized function. It is up to the user to make sure that your inputs match the "
865
+ f"inputs you specialized the function with."
866
+ )
867
+
868
+ return x
869
+
870
+ return pytree.tree_map(replace_ph, concrete_args[name])
871
+ if name[0] == "*":
872
+ default = ()
873
+ else:
874
+ param = sig.parameters[name]
875
+ default = () if param.default is inspect.Parameter.empty else (param.default,) # type: ignore[assignment]
876
+ return self.create_proxy(
877
+ "placeholder",
878
+ name,
879
+ default,
880
+ {},
881
+ type_expr=fn_for_analysis.__annotations__.get(name, None)
882
+ )
883
+
884
+
885
+ # Dictionary of (id(globals dict), function name) => globals_dict to patch for
886
+ # the purposes of the wrap() API.
887
+ # We key by the globals dict id and function name to ensure we're wrapping a given
888
+ # function only once.
889
+ _wrapped_fns_to_patch: Dict[Tuple[int, str], dict] = {}
890
+
891
+ # List of methods on classes to wrap (class type, function name)
892
+ # this currently only works for Tensor.* methods that aren't traced properly
893
+ _wrapped_methods_to_patch: List[Tuple[type, str]] = []
894
+
895
+ if os.environ.get("FX_PATCH_GETITEM") == "1":
896
+ # This change is needed to trace models like PositionalEmbedding from BERT:
897
+ # https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py
898
+ # but causes issues in quantization documented here:
899
+ # https://github.com/pytorch/pytorch/issues/50710
900
+ # once that is fixed we can make this the default behavior.
901
+ _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
902
+
903
+
904
+ def _find_proxy(*objects_to_search):
905
+ """
906
+ Recursively search a data structure for a Proxy() and return it;
907
+ return None if not found.
908
+ """
909
+ proxy = None
910
+
911
+ def find_proxy(x):
912
+ nonlocal proxy
913
+ if isinstance(x, Proxy):
914
+ proxy = x
915
+
916
+ map_aggregate(objects_to_search, find_proxy)
917
+ return proxy
918
+
919
+
920
+ def _create_wrapped_func(orig_fn):
921
+ @functools.wraps(orig_fn)
922
+ def wrapped(*args, **kwargs):
923
+ """
924
+ Given a closed-over ``orig_fn`` to invoke, search the args and kwargs for
925
+ a Proxy object. If there is one, emit a ``call_function`` node to preserve the
926
+ call to this leaf function directly. Otherwise, just return the results of
927
+ this function call, as this function is not being traced.
928
+ """
929
+ proxy = _find_proxy(args, kwargs)
930
+ if proxy is not None:
931
+ return_proxy = proxy.tracer.create_proxy(
932
+ "call_function", orig_fn, args, kwargs
933
+ )
934
+ return_proxy.node.meta["is_wrapped"] = True
935
+ return return_proxy
936
+ return orig_fn(*args, **kwargs)
937
+
938
+ return wrapped
939
+
940
+
941
+ def _create_wrapped_method(cls, name):
942
+ orig_fn = getattr(cls, name)
943
+
944
+ @functools.wraps(orig_fn)
945
+ def wrapped(*args, **kwargs):
946
+ """
947
+ Search the args and kwargs for a Proxy object. If there is one,
948
+ emit a ``call_method`` node to preserve the call to this method
949
+ directly. Otherwise, just return the results of this function
950
+ call, as this function is not being traced.
951
+ """
952
+ proxy = _find_proxy(args, kwargs)
953
+ if proxy is not None:
954
+ return proxy.tracer.create_proxy("call_method", name, args, kwargs)
955
+ return orig_fn(*args, **kwargs)
956
+
957
+ return wrapped
958
+
959
+
960
+ class _PatchedFn(NamedTuple):
961
+ frame_dict: Any
962
+ fn_name: str
963
+ orig_fn: Any
964
+
965
+ def revert(self):
966
+ raise NotImplementedError()
967
+
968
+
969
+ class _PatchedFnSetItem(_PatchedFn):
970
+ def revert(self):
971
+ self.frame_dict[self.fn_name] = self.orig_fn
972
+
973
+
974
+ class _PatchedFnDel(_PatchedFn):
975
+ def revert(self):
976
+ del self.frame_dict[self.fn_name]
977
+
978
+
979
+ class _PatchedFnSetAttr(_PatchedFn):
980
+ def revert(self):
981
+ setattr(self.frame_dict, self.fn_name, self.orig_fn)
982
+
983
+
984
+ class _Patcher:
985
+ def __init__(self):
986
+ super().__init__()
987
+ self.patches_made: List[_PatchedFn] = []
988
+ self.visited: Set[int] = set()
989
+
990
+ def patch(
991
+ self,
992
+ frame_dict: Dict[str, Any],
993
+ name: str,
994
+ new_fn: Callable,
995
+ deduplicate: bool = True,
996
+ ):
997
+ """
998
+ Replace frame_dict[name] with new_fn until we exit the context manager.
999
+ """
1000
+ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
1001
+ if name not in frame_dict and hasattr(builtins, name):
1002
+ self.patches_made.append(_PatchedFnDel(frame_dict, name, None))
1003
+ elif getattr(frame_dict[name], "__fx_already_patched", False):
1004
+ return # already patched, no need to do it again
1005
+ else:
1006
+ self.patches_made.append(
1007
+ _PatchedFnSetItem(frame_dict, name, frame_dict[name])
1008
+ )
1009
+ frame_dict[name] = new_fn
1010
+
1011
+ def patch_method(
1012
+ self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True
1013
+ ):
1014
+ """
1015
+ Replace cls.name with new_fn until we exit the context manager.
1016
+ """
1017
+ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
1018
+ orig_fn = getattr(cls, name)
1019
+ if getattr(orig_fn, "__fx_already_patched", False):
1020
+ return # already patched, no need to do it again
1021
+ self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn))
1022
+ setattr(cls, name, new_fn)
1023
+
1024
+ def visit_once(self, thing: Any):
1025
+ """Return True on the first call to with thing, otherwise false"""
1026
+ idx = id(thing)
1027
+ if idx in self.visited:
1028
+ return False
1029
+ self.visited.add(idx)
1030
+ return True
1031
+
1032
+ def __enter__(self):
1033
+ return self
1034
+
1035
+ def __exit__(self, exc_type, exc_val, exc_tb):
1036
+ """
1037
+ Undo all the changes made via self.patch() and self.patch_method()
1038
+ """
1039
+ while self.patches_made:
1040
+ # unpatch in reverse order to handle duplicates correctly
1041
+ self.patches_made.pop().revert()
1042
+ self.visited.clear()
1043
+
1044
+
1045
+ def _patch_wrapped_functions(patcher: _Patcher):
1046
+ """
1047
+ Go through ``_wrapped_fns_to_patch`` and, for each frame object, wrap
1048
+ the listed global functions in the `_create_wrapped_func` wrapper.
1049
+ """
1050
+ for (_, name), frame_dict in _wrapped_fns_to_patch.copy().items():
1051
+ if name not in frame_dict and hasattr(builtins, name):
1052
+ orig_fn = getattr(builtins, name)
1053
+ else:
1054
+ orig_fn = frame_dict[name]
1055
+ patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn))
1056
+
1057
+ for cls, name in _wrapped_methods_to_patch:
1058
+ patcher.patch_method(cls, name, _create_wrapped_method(cls, name))
1059
+
1060
+
1061
+ def _autowrap_check(
1062
+ patcher: _Patcher, frame_dict: Dict[str, Any], function_ids: Set[int]
1063
+ ):
1064
+ """
1065
+ Some functions, like `math.sqrt`, are common enough that we want to automatically wrap them as we see them.
1066
+ This method searches a scope for them and patches them if found.
1067
+ """
1068
+ if patcher.visit_once(frame_dict):
1069
+ for name, value in frame_dict.items():
1070
+ if (
1071
+ not name.startswith("_")
1072
+ and callable(value)
1073
+ and id(value) in function_ids
1074
+ ):
1075
+ patcher.patch(frame_dict, name, _create_wrapped_func(value))
1076
+
1077
+
1078
+ @compatibility(is_backward_compatible=True)
1079
+ def wrap(fn_or_name: Union[str, Callable]):
1080
+ """
1081
+ This function can be called at module-level scope to register fn_or_name as a "leaf function".
1082
+ A "leaf function" will be preserved as a CallFunction node in the FX trace instead of being
1083
+ traced through::
1084
+
1085
+ # foo/bar/baz.py
1086
+ def my_custom_function(x, y):
1087
+ return x * x + y * y
1088
+
1089
+ torch.fx.wrap('my_custom_function')
1090
+
1091
+ def fn_to_be_traced(x, y):
1092
+ # When symbolic tracing, the below call to my_custom_function will be inserted into
1093
+ # the graph rather than tracing it.
1094
+ return my_custom_function(x, y)
1095
+
1096
+ This function can also equivalently be used as a decorator::
1097
+
1098
+ # foo/bar/baz.py
1099
+ @torch.fx.wrap
1100
+ def my_custom_function(x, y):
1101
+ return x * x + y * y
1102
+
1103
+ A wrapped function can be thought of as a "leaf function", analogous to the concept of
1104
+ "leaf modules", that is, they are functions that are left as calls in the FX trace
1105
+ rather than traced through.
1106
+
1107
+ Args:
1108
+
1109
+ fn_or_name (Union[str, Callable]): The function or name of the global function to insert into the
1110
+ graph when it's called
1111
+ """
1112
+ if not callable(fn_or_name) and not isinstance(fn_or_name, str):
1113
+ raise RuntimeError(
1114
+ "Unsupported type for global function! Must be either a callable or "
1115
+ "string name"
1116
+ )
1117
+
1118
+ if callable(fn_or_name):
1119
+ assert not isinstance(fn_or_name, str) # to make mypy happy
1120
+ fn_name = fn_or_name.__name__
1121
+ else:
1122
+ assert isinstance(
1123
+ fn_or_name, str
1124
+ ), "fn_or_name must be a global function or string name"
1125
+ fn_name = fn_or_name
1126
+
1127
+ currentframe = inspect.currentframe()
1128
+ assert currentframe is not None
1129
+ f = currentframe.f_back
1130
+ assert f is not None
1131
+ if f.f_code.co_name != "<module>":
1132
+ raise NotImplementedError("wrap must be called at the top level of a module")
1133
+
1134
+ # consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search
1135
+ # semantics would be slightly different, but would add support `from x import wrapped_function`
1136
+ _wrapped_fns_to_patch[(id(f.f_globals), fn_name)] = f.f_globals
1137
+ return fn_or_name
1138
+
1139
+
1140
+ @compatibility(is_backward_compatible=True)
1141
+ def symbolic_trace(
1142
+ root: Union[torch.nn.Module, Callable[..., Any]],
1143
+ concrete_args: Optional[Dict[str, Any]] = None,
1144
+ ) -> GraphModule:
1145
+ """
1146
+ Symbolic tracing API
1147
+
1148
+ Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
1149
+ constructed by recording operations seen while tracing through ``root``.
1150
+
1151
+ ``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures.
1152
+
1153
+ For example::
1154
+
1155
+ def f(a, b):
1156
+ if b == True:
1157
+ return a
1158
+ else:
1159
+ return a*2
1160
+
1161
+ FX typically cannot trace through this due to the presence of control
1162
+ flow. However, we can use `concrete_args` to specialize on the value of
1163
+ `b` to trace through this::
1164
+
1165
+ f = fx.symbolic_trace(f, concrete_args={'b': False})
1166
+ assert f(3, False) == 6
1167
+
1168
+ Note that although you can still pass in different values of `b`, they will be ignored.
1169
+
1170
+ We can also use `concrete_args` to eliminate data-structure handling from
1171
+ our function. This will use pytrees to flatten your input. To avoid
1172
+ overspecializing, pass in `fx.PH` for values that shouldn't be
1173
+ specialized. For example::
1174
+
1175
+ def f(x):
1176
+ out = 0
1177
+ for v in x.values():
1178
+ out += v
1179
+ return out
1180
+ f = fx.symbolic_trace(f, concrete_args={'x': {'a': fx.PH, 'b': fx.PH, 'c': fx.PH}})
1181
+ assert f({'a': 1, 'b': 2, 'c': 4}) == 7
1182
+
1183
+
1184
+ Args:
1185
+ root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
1186
+ into a Graph representation.
1187
+ concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized
1188
+
1189
+ Returns:
1190
+ GraphModule: a Module created from the recorded operations from ``root``.
1191
+ """
1192
+ tracer = Tracer()
1193
+ graph = tracer.trace(root, concrete_args)
1194
+ name = (
1195
+ root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
1196
+ )
1197
+ return _make_graph_module(tracer.root, graph, name)
1198
+
1199
+
1200
+ @wrap
1201
+ def _assert_is_none(value, msg):
1202
+ assert value is None, msg
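
Taken together, the pieces above compose into the usual workflow: ``symbolic_trace`` for the default behavior, or a custom ``Tracer`` subclass when the leaf-module policy needs to change. A minimal sketch (``MyModel`` and ``CustomBlock`` are placeholder names, not part of this file):

    import torch
    import torch.fx

    class CustomLeafTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
            # keep hypothetical "CustomBlock" modules opaque as call_module nodes
            if type(m).__name__ == "CustomBlock":
                return True
            return super().is_leaf_module(m, module_qualified_name)

    # model = MyModel()                        # placeholder nn.Module
    # graph = CustomLeafTracer().trace(model)  # returns a torch.fx.Graph
    # gm = torch.fx.GraphModule(model, graph)  # runnable module backed by the graph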
venv/lib/python3.10/site-packages/torch/fx/annotate.py ADDED
@@ -0,0 +1,21 @@
1
+ from torch.fx.proxy import Proxy
2
+ from ._compatibility import compatibility
3
+
4
+ @compatibility(is_backward_compatible=False)
5
+ def annotate(val, type):
6
+ # val could be either a regular value (not tracing)
7
+ # or fx.Proxy (tracing)
8
+ if isinstance(val, Proxy):
9
+ if val.node.type:
10
+ raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
11
+ f" Existing type is {val.node.type} "
12
+ f"and new type is {type}. "
13
+ f"This could happen if you tried to annotate a function parameter "
14
+ f"value (in which case you should use the type slot "
15
+ f"on the function signature) or you called "
16
+ f"annotate on the same value twice")
17
+ else:
18
+ val.node.type = type
19
+ return val
20
+ else:
21
+ return val
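
Since ``annotate`` only sets ``node.type`` when given a ``Proxy`` and is a pass-through otherwise, it can be called unconditionally inside code that may or may not be traced. A minimal sketch:

    import torch
    from torch.fx import symbolic_trace
    from torch.fx.annotate import annotate

    def f(x):
        # while tracing, records torch.Tensor as the type of the node for x + 1
        y = annotate(x + 1, torch.Tensor)
        return y * 2

    gm = symbolic_trace(f)     # the annotated node now carries the type in gm.graph
    out = f(torch.ones(2))     # outside tracing, annotate simply returns its input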
venv/lib/python3.10/site-packages/torch/fx/config.py ADDED
@@ -0,0 +1,6 @@
1
+ # Whether to disable showing progress on compilation passes
2
+ # Need to add a new config, otherwise we will get a circular import if the dynamo config is imported here
3
+ disable_progress = True
4
+
5
+ # If True, this also shows the node names in each pass; for small models this is great, but for larger models it's quite noisy
6
+ verbose_progress = False
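
Both flags are plain module-level globals, so (assuming the pass infrastructure reads them at call time) they can be toggled before running passes:

    import torch.fx.config as fx_config

    fx_config.disable_progress = False  # show progress for compilation passes
    fx_config.verbose_progress = True   # also print node names; noisy for large models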
venv/lib/python3.10/site-packages/torch/fx/experimental/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/fx/experimental/_backward_state.py ADDED
@@ -0,0 +1,27 @@
1
+ import torch.fx
2
+
3
+
4
+ class BackwardState:
5
+ """
6
+ BackwardState is used to pass Python hooks from the forwards pass
7
+ into the backwards pass in Dynamo+Compiled Autograd.
8
+
9
+ It is created by TorchDynamo and has special handling there.
10
+ Dynamo will pass an empty BackwardState to the forwards, then populate
11
+ members on it (via setattr) only after the forwards graph is finished.
12
+ Later on, in Compiled Autograd we will inline and add the needed guards
13
+ on the BackwardState.
14
+
15
+ BackwardState is identified and has special handling in AOTAutograd.
16
+ During AOTAutograd:
17
+ 1) BackwardState is an input to the forwards graph
18
+ 2) It must only be used in the backwards
19
+ 3) It will be empty in the forwards
20
+ 4) In the forwards we add a wrapper to save it
21
+ 5) In the backwards it becomes an input
22
+ 6) There can only be one per graph
23
+
24
+ BackwardState requires CompiledAutograd.
25
+ """
26
+
27
+ proxy: torch.fx.Proxy
venv/lib/python3.10/site-packages/torch/fx/experimental/_sym_dispatch_mode.py ADDED
@@ -0,0 +1,58 @@
1
+ from typing import List, Optional, Type
2
+
3
+ __all__ = ["SymDispatchMode", "handle_sym_dispatch", "sym_function_mode"]
4
+
5
+ SYM_FUNCTION_MODE: Optional["SymDispatchMode"] = None
6
+
7
+
8
+ # SymDispatchMode gets invoked whenever an operation is processed on
9
+ # a PySymInt. When this occurs, you get called at __sym_dispatch__
10
+ # with the operation in question. This is symmetric to TorchDispatchMode
11
+ # but with some caveats:
12
+ #
13
+ # - In TorchDispatchMode, you get the same arguments as what a user
14
+ # invoked your API with; e.g., if you call torch.ops.aten.foo(a, b),
15
+ # you get (a, b) as args to your call. In SymDispatchMode, if
16
+ # you call a + b (where a and b are SymInts), you will get
17
+ # (a.node, b.node) as your args (these are PySymInts)
18
+ #
19
+ # - SymInt/PySymInt don't have FX proxy support (unlike, e.g., Tensor).
20
+ # So you have to manually call Tracer/create_node to write into
21
+ # the graph. See ProxySymDispatchMode for an example
22
+ #
23
+ class SymDispatchMode:
24
+ def __sym_dispatch__(self, func, types, args, kwargs):
25
+ raise NotImplementedError()
26
+
27
+ def __enter__(self):
28
+ global SYM_FUNCTION_MODE
29
+ old = SYM_FUNCTION_MODE
30
+ if hasattr(self, "inner"):
31
+ raise RuntimeError(
32
+ f"{self} has already been used as a mode. Please use a fresh version"
33
+ )
34
+ else:
35
+ self.inner = old
36
+ SYM_FUNCTION_MODE = self
37
+ return self
38
+
39
+ def __exit__(self, exc_type, exc_val, exc_tb):
40
+ global SYM_FUNCTION_MODE
41
+ SYM_FUNCTION_MODE = self.inner
42
+
43
+
44
+ def handle_sym_dispatch(func, args, kwargs):
45
+ global SYM_FUNCTION_MODE
46
+ mode = sym_function_mode()
47
+ assert mode
48
+ SYM_FUNCTION_MODE = mode.inner
49
+ try:
50
+ # TODO: properly compute types
51
+ types: List[Type] = []
52
+ return mode.__sym_dispatch__(func, types, args, kwargs)
53
+ finally:
54
+ SYM_FUNCTION_MODE = mode
55
+
56
+
57
+ def sym_function_mode():
58
+ return SYM_FUNCTION_MODE
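
A custom mode only needs to implement ``__sym_dispatch__``; while it runs, ``handle_sym_dispatch`` has temporarily popped the mode, so re-invoking ``func`` does not re-enter it. A minimal observing mode (a sketch, assuming ``func`` can simply be re-applied to the dispatched arguments to fall back to the default behavior):

    from torch.fx.experimental._sym_dispatch_mode import SymDispatchMode

    class PrintingSymDispatchMode(SymDispatchMode):
        def __sym_dispatch__(self, func, types, args, kwargs):
            # args/kwargs hold the symbolic operands described in the caveats above
            print(f"sym dispatch: {getattr(func, '__name__', func)}")
            return func(*args, **kwargs)

    # with PrintingSymDispatchMode():
    #     ...  # operations on SymInts executed here are reported before evaluating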
venv/lib/python3.10/site-packages/torch/fx/experimental/accelerator_partitioner.py ADDED
@@ -0,0 +1,1078 @@
1
+ import operator
2
+ from collections import deque
3
+ from typing import Dict, List, Set, NamedTuple, Tuple, Deque
4
+
5
+ import torch
6
+ from torch.fx.passes.graph_manipulation import get_size_of_all_nodes
7
+ from torch.fx.experimental.partitioner_utils import (
8
+ Partition,
9
+ Device,
10
+ PartitionerConfig,
11
+ get_partition_to_latency_mapping,
12
+ get_latency_of_partitioned_graph,
13
+ NodeLatency,
14
+ get_extra_size_of,
15
+ PartitionMode,
16
+ )
17
+ from torch.fx.graph_module import GraphModule
18
+ from torch.fx.node import Node, map_arg
19
+ from torch.fx.passes.split_module import split_module
20
+
21
+
22
+ class DAGNode:
23
+ """DAGNode class maintains useful information for a partition (submodule),
24
+ and its input submodules and output submodules.
25
+ """
26
+
27
+ def __init__(
28
+ self,
29
+ submodule_node: Node,
30
+ input_nodes: List[Node],
31
+ output_nodes: List[Node],
32
+ logical_device_ids: List[int],
33
+ size_bytes: int,
34
+ ) -> None:
35
+ self.submodule_node: Node = submodule_node
36
+ self.input_nodes: List[Node] = input_nodes
37
+ self.output_nodes: List[Node] = output_nodes
38
+ self.logical_device_ids: List[int] = logical_device_ids
39
+ self.size_bytes = size_bytes
40
+
41
+ def __str__(self) -> str:
42
+ return str(self.submodule_node)
43
+
44
+
45
+ class DAG:
46
+ """DAG class contains all the DAG nodes"""
47
+
48
+ def __init__(self) -> None:
49
+ self.nodes: List[DAGNode] = []
50
+
51
+ def create_node(
52
+ self,
53
+ submodule_node: Node,
54
+ input_nodes: List[Node],
55
+ output_nodes: List[Node],
56
+ logical_devices: List[int],
57
+ size_bytes: int,
58
+ ) -> None:
59
+ node = DAGNode(
60
+ submodule_node, input_nodes, output_nodes, logical_devices, size_bytes
61
+ )
62
+ self.nodes.append(node)
63
+
64
+
65
+ class PartitionResult(NamedTuple):
66
+ """NameTuple used for returning DAG and a new fx module"""
67
+
68
+ dag: DAG
69
+ module_with_submodules: GraphModule
70
+
71
+
72
+ """Followings are some helper functions for partition manipulation"""
73
+
74
+
75
+ def reset_partition_device(partitions):
76
+ for partition in partitions:
77
+ partition.logical_device_ids = []
78
+
79
+
80
+ def combine_two_partitions(
81
+ partition_0: Partition, partition_1: Partition, partitions: List[Partition]
82
+ ) -> None:
83
+ """Given a list of partitions and its two partitions,
84
+ combine these two partitions into a new one appending to the partitions
85
+ and remove the previous two partitions from the list of partitions
86
+ """
87
+ partition = Partition(len(partitions))
88
+ partition.nodes = partition_0.nodes.union(partition_1.nodes)
89
+ partition.recalculate_mem_size()
90
+ partitions.append(partition)
91
+ partitions.remove(partition_0)
92
+ partitions.remove(partition_1)
93
+ reorganize_partitions(partitions)
94
+ return
95
+
96
+
97
+ def set_parents_and_children(partitions: List[Partition]) -> None:
98
+ """Given a list of partitions, mark parents and children for each partition"""
99
+ # Go through all nodes in a partition.
100
+ # If a node's user is in other partition,
101
+ # then the other partition is this partition's children.
102
+ # This partition is the other partition's parent
103
+ for partition in partitions:
104
+ partition.children = set()
105
+ partition.parents = set()
106
+ for partition in partitions:
107
+ for node in partition.nodes:
108
+ # For each node in the current partition, find its users
109
+ users = node.users
110
+ for n in users:
111
+ # Find which partition the user node belongs to.
112
+ # Note that if the node itself also belongs to that partition,
113
+ # that partition is not a child of the current partition
114
+ for p in partitions:
115
+ if p != partition and n in p.nodes and node not in p.nodes:
116
+ partition.children.add(p)
117
+ p.parents.add(partition)
118
+ return
119
+
120
+
121
+ def reorganize_partitions(partitions: List[Partition]) -> None:
122
+ """Given a list of partitions, reorganize partition id,
123
+ its parents and its children for each partition
124
+ """
125
+ # Rearrange partition ids
126
+ for i, partition in enumerate(partitions):
127
+ partition.partition_id = i
128
+ set_parents_and_children(partitions)
129
+ return
130
+
131
+
132
+ def get_bfs_level_partition(partitions: List[Partition]) -> None:
133
+ """Given a list of partitions,
134
+ mark the bfs level for each partition
135
+ """
136
+ current_level: Set[Partition] = set()
137
+ visited: Set[Partition] = set()
138
+ for partition in partitions:
139
+ # If a partition has no parent, it should be in root level
140
+ if len(partition.parents) == 0:
141
+ current_level.add(partition)
142
+ next_level: Set[Partition] = set()
143
+ level = 0
144
+ # bfs
145
+ while current_level:
146
+ partition = current_level.pop()
147
+ partition.bfs_level = level
148
+ visited.add(partition)
149
+ children = partition.children
150
+ for child in children:
151
+ if child not in next_level:
152
+ next_level.add(child)
153
+ if not current_level:
154
+ current_level = next_level.copy()
155
+ next_level = set()
156
+ level += 1
157
+ return
158
+
159
+
160
+ def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
161
+ """Given a list of partitions,return node to partition mapping"""
162
+ node_to_partition: Dict[Node, int] = {}
163
+ for partition in partitions:
164
+ for node in partition.nodes:
165
+ node_to_partition[node] = partition.partition_id
166
+ return node_to_partition
167
+
168
+
169
+ def get_logical_id_to_device(devices: List[Device]) -> Dict[int, Device]:
170
+ """Get a mapping from device logical ID to Device object."""
171
+ logical_id_to_device: Dict[int, Device] = {}
172
+ for d in devices:
173
+ logical_id_to_device[d.logical_id] = d
174
+ return logical_id_to_device
175
+
176
+
177
+ def get_device_partition_stats(
178
+ partitions: List[Partition], devices: List[Device]
179
+ ) -> Tuple[Dict[Device, List[Partition]], Dict[Device, int], List[Partition]]:
180
+ """Given a list of partitions and a list of devices, returns:
181
+ 1. A mapping from device to partitions on it;
182
+ 2. A mapping from device to its remaining memory size;
183
+ 3. A list of partitions that do not have a device.
184
+ """
185
+ # logical id to device
186
+ logical_id_to_device = get_logical_id_to_device(devices)
187
+ # Track partitions on device
188
+ device_to_partitions: Dict[Device, List[Partition]] = {}
189
+ # Track device's left mem size
190
+ device_to_left_mem_bytes: Dict[Device, int] = {}
191
+ for d in devices:
192
+ device_to_partitions[d] = []
193
+ device_to_left_mem_bytes[d] = d.available_mem_bytes
194
+
195
+ # Deal with the partitions that already have a device
196
+ # and also collect all partitions without a device (no_device_partitions)
197
+ no_device_partitions = []
198
+ for partition in partitions:
199
+ if partition.logical_device_ids != []:
200
+ for logical_id in partition.logical_device_ids:
201
+ device = logical_id_to_device[logical_id]
202
+ device_to_partitions[device].append(partition)
203
+ device_to_left_mem_bytes[device] -= partition.used_mem_bytes
204
+ else:
205
+ no_device_partitions.append(partition)
206
+
207
+ return (
208
+ device_to_partitions,
209
+ device_to_left_mem_bytes,
210
+ no_device_partitions,
211
+ )
212
+
213
+
214
+ def get_device_to_partitions_mapping(
215
+ partitions: List[Partition], devices: List[Device]
216
+ ):
217
+ """Given a list of partitions and a list of devices,
218
+ map each partition into a device.
219
+ """
220
+
221
+ def calculate_extra_mem_bytes_needed_for(
222
+ partition: Partition, partitions: List[Partition]
223
+ ):
224
+ all_nodes: Set[Node] = set()
225
+ for p in partitions:
226
+ all_nodes = all_nodes.union(p.nodes)
227
+ if len(all_nodes) == 0:
228
+ return partition.used_mem_bytes
229
+ all_nodes = all_nodes.union(partition.nodes)
230
+ extra_size_needed = 0
231
+ for node in partition.nodes:
232
+ extra_size_needed += get_extra_size_of(node, all_nodes)
233
+ return extra_size_needed
234
+
235
+ def find_device_for(partition: Partition):
236
+ """Given a partition, find a logical device for the partition
237
+ The algorithm is to put the partition on the device
238
+ that has just enough mem left for that partition.
239
+ device_to_left_mem_bytes is a dictionary from a device to its remaining memory size,
240
+ sorted by the remaining memory size
241
+ """
242
+ for d in device_to_left_mem_bytes:
243
+ extra_size_needed = calculate_extra_mem_bytes_needed_for(
244
+ partition, device_to_partitions[d]
245
+ )
246
+ if extra_size_needed < device_to_left_mem_bytes[d]:
247
+ device_to_partitions[d].append(partition)
248
+ partition.logical_device_ids.append(d.logical_id)
249
+ device_to_left_mem_bytes[d] -= extra_size_needed
250
+ return True
251
+ return False
252
+
253
+ (
254
+ device_to_partitions,
255
+ device_to_left_mem_bytes,
256
+ no_device_partitions,
257
+ ) = get_device_partition_stats(partitions, devices)
258
+
259
+ # Find devices for all the partitions without a device
260
+ found_device = True
261
+ for partition in no_device_partitions:
262
+ device_to_left_mem_bytes = dict(sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1]))
263
+ found_device = find_device_for(partition)
264
+ if not found_device:
265
+ break
266
+ return found_device
267
+
268
+
269
+ def check_dependency(partition):
270
+ """Given a partition,check if there is a circular dependency on
271
+ this partition using bfs
272
+ """
273
+ visited: Set[Partition] = {partition}
274
+ queue: Deque[Partition] = deque([partition])
275
+ while queue:
276
+ p = queue.popleft()
277
+ for child in p.children:
278
+ if child == partition:
279
+ return True
280
+ else:
281
+ if child not in visited:
282
+ visited.add(child)
283
+ queue.append(child)
284
+ return False
285
+
286
+
287
+ class Partitioner:
288
+ """A fx module may not fit into one device.
289
+ Partitioner class helps partition one fx module into submodules (partitions),
290
+ so that the submodules can be executed across different accelerators.
291
+ The main function of this class is self.partition_graph.
292
+ It partitions the fx module based on the scheme specified in partitioner_config.
293
+ A DAG structure is returned
294
+ along with a new fx module with submodule nodes.
295
+ """
296
+
297
+ def __init__(self) -> None:
298
+ self.partitions: List[Partition] = []
299
+ self.node_to_partition: Dict[Node, int] = {}
300
+ self.devices: List[Device] = []
301
+
302
+ def partition_graph(
303
+ self,
304
+ fx_module: GraphModule,
305
+ torch_module: torch.nn.Module,
306
+ partitioner_config: PartitionerConfig,
307
+ ) -> PartitionResult:
308
+ """Given the fx module, torch module and partitioner_config,
309
+ find the partitions, perform the partitioning,
310
+ and then return a DAG and a new fx module with submodule nodes (partitions)
311
+ """
312
+ self.graph_module = fx_module
313
+ self.torch_module = torch_module
314
+ self.devices = partitioner_config.devices
315
+ if len(self.devices) == 0:
316
+ raise RuntimeError("No devices")
317
+ # Tag the size in bytes to all nodes in the graph_module.
318
+ get_size_of_all_nodes(self.graph_module)
319
+ # Check if there are op nodes in the fx module
320
+ nodes = self.graph_module.graph.nodes
321
+ if all(node.op in {"placeholder", "get_attr", "output"} for node in nodes):
322
+ raise RuntimeError("No Partition since no operations in the module")
323
+ # Calculate total size of the fx module
324
+ total_size_of_graph = 0
325
+ for node in nodes:
326
+ if node.op == "output":
327
+ break
328
+ total_size_of_graph += node.size_bytes.total_size
329
+ # Find the device with the max mem size
330
+ device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
331
+ # AOT based partition
332
+ if partitioner_config.mode == PartitionMode.aot_based:
333
+ self.aot_based_partition(
334
+ partitioner_config.node_to_partition_mapping,
335
+ partitioner_config.partition_to_logical_device_mapping,
336
+ )
337
+ # Single partition if the whole module can be fit into one device
338
+ elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
339
+ self.find_single_partition(
340
+ total_size_of_graph, logical_device_id=device_with_max_mem.logical_id
341
+ )
342
+ elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
343
+ raise RuntimeError("Devices have no enough memory for the module")
344
+ else:
345
+ # Sparse nn based partition
346
+ if partitioner_config.mode == PartitionMode.sparse_nn:
347
+ available_mem_bytes = self.devices[0].available_mem_bytes
348
+ if not all(
349
+ device.available_mem_bytes == available_mem_bytes
350
+ for device in self.devices
351
+ ):
352
+ raise RuntimeError("All devices must have same memory size!")
353
+ # sparse_nn_partition only support same memory size
354
+ # TODO: add different size support for sparse_nn_partition
355
+ self.sparse_nn_partition(available_mem_bytes)
356
+ # Cost aware partition
357
+ elif partitioner_config.mode == PartitionMode.cost_aware:
358
+ self.cost_aware_partition(
359
+ partitioner_config.transfer_rate_bytes_per_sec,
360
+ partitioner_config.node_to_latency_mapping,
361
+ )
362
+ # KL based partition
363
+ elif partitioner_config.mode == PartitionMode.kl_based:
364
+ self.kl_based_partition(
365
+ partitioner_config.transfer_rate_bytes_per_sec,
366
+ partitioner_config.node_to_latency_mapping,
367
+ )
368
+ else:
369
+ self.size_based_partition()
370
+
371
+ # Saturate host if possible.
372
+ if partitioner_config.saturate_host:
373
+ self.saturate_host()
374
+
375
+ # Partition the graph module based on the partition assignment.
376
+ module_with_submodules = self.do_partition()
377
+
378
+ # The DAG contains DAGNodes with info of each partition's input nodes, output nodes
379
+ # and how partitions are connected.
380
+ dag = self.dump_dag(module_with_submodules)
381
+ ret = PartitionResult(dag, module_with_submodules)
382
+ return ret
383
+
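+ # A minimal usage sketch for partition_graph (assuming the Device and PartitionerConfig
+ # constructors from torch.fx.experimental.partitioner_utils take
+ # (name, available_mem_bytes, logical_id) and (devices, mode=...) respectively;
+ # `model` is a placeholder nn.Module):
+ #
+ # traced = torch.fx.symbolic_trace(model)
+ # devices = [Device("dev_0", 125_000_000, 0), Device("dev_1", 125_000_000, 1)]
+ # config = PartitionerConfig(devices, mode=PartitionMode.size_based)
+ # result = Partitioner().partition_graph(traced, model, config)
+ # module_with_submodules, dag = result.module_with_submodules, result.dag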
384
+ def find_single_partition(
385
+ self, total_size_of_graph, logical_device_id: int = 0
386
+ ) -> None:
387
+ """Fit the whole fx module into one device"""
388
+ partition_0 = self.create_partition()
389
+ for node in self.graph_module.graph.nodes:
390
+ if node.op == "output":
391
+ # Skip the output node, but there can
392
+ # be nodes after the output in certain cases.
393
+ continue
394
+ partition_0.nodes.add(node)
395
+ partition_0.used_mem_bytes = total_size_of_graph
396
+ partition_0.logical_device_ids = [logical_device_id]
397
+ # Get the node to partition mapping
398
+ self.node_to_partition = get_node_to_partition_mapping(self.partitions)
399
+ return
400
+
401
+ def size_based_partition(self) -> None:
402
+ """This method is to partition the fx module based on memory size.
403
+ It uses a greedy approach. The result may not be the best.
404
+ The basic idea is:
405
+ Step 1:
406
+ Find a device which has enough memory to fit the current node, create an empty partition
407
+ with the size of that device.
408
+ Then keep adding the following nodes into the partition until the partition is full.
409
+ Step 2:
410
+ Repeat Step 1 until no device is left
411
+ Step 3:
412
+ If some nodes are left, create a partition for each remaining node (single node partition),
413
+ and then try to map those partitions onto logical devices with enough memory left.
414
+ """
415
+
416
+ def find_device_based_on_size(node) -> Device:
417
+ """Given a node, this function is to find a logical device
418
+ that could fit the node.
419
+ """
420
+ mem_size_needed = get_extra_size_of(node, set())
421
+ device = Device("", -1, -1)
422
+ for d in self.devices:
423
+ if (
424
+ d not in occupied_devices
425
+ and d.available_mem_bytes >= mem_size_needed
426
+ ):
427
+ device = d
428
+ break
429
+ if device.available_mem_bytes < 0:
430
+ raise RuntimeError(str(node) + "is too large to fit any device")
431
+ occupied_devices.append(device)
432
+ return device
433
+
434
+ # Track partition and its left mem size
435
+ partition_to_left_mem_bytes: Dict[Partition, int] = {}
436
+ # Track all the devices that have been used
437
+ occupied_devices: List[Device] = []
438
+ partition = self.create_partition()
439
+ for node in self.graph_module.graph.nodes:
440
+ if node.op in {"call_module", "call_method", "call_function"}:
441
+ # Check if there are devices left
442
+ if len(self.partitions) <= len(self.devices):
443
+ total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
444
+ # Check if the current partition is the very first partition
445
+ if partition.used_mem_bytes == 0:
446
+ # Find a device to fit the first node, return available mem size
447
+ device = find_device_based_on_size(node)
448
+ occupied_devices.append(device)
449
+ # Update partition and its left mem size
450
+ partition_to_left_mem_bytes[
451
+ partition
452
+ ] = device.available_mem_bytes
453
+ # Update available mem for the current partition
454
+ partition.logical_device_ids.append(device.logical_id)
455
+ else:
456
+ # The current partition is not the first partition
457
+ # Check if the current node can fit into current partition
458
+ if (
459
+ partition_to_left_mem_bytes[partition]
460
+ < total_size_of_input_nodes
461
+ ):
462
+ # Check if no device is left
463
+ if len(self.partitions) == len(self.devices):
464
+ # No device is left
465
+ # Put the previous partitions into a list (non_single_node_partitions)
466
+ non_single_node_partitions = self.partitions[:]
467
+ # Create the first single node partition for the current node
468
+ self.create_single_node_partition(node)
469
+ continue
470
+ # Some devices are still left
471
+ # Create a new partition with a mem size that is enough for the current node
472
+ device = find_device_based_on_size(node)
473
+ partition = self.create_partition()
474
+ total_size_of_input_nodes = get_extra_size_of(
475
+ node, partition.nodes
476
+ )
477
+ partition_to_left_mem_bytes[
478
+ partition
479
+ ] = device.available_mem_bytes
480
+ partition.logical_device_ids.append(device.logical_id)
481
+ partition.add_node(node)
482
+ partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
483
+ # Create single node partitions if no device is left
484
+ else:
485
+ self.create_single_node_partition(node)
486
+ reorganize_partitions(self.partitions)
487
+ # Get the node to partition mapping
488
+ self.node_to_partition = get_node_to_partition_mapping(self.partitions)
489
+ # Mapping all partitions into device
490
+ found_partition_to_device_mapping = get_device_to_partitions_mapping(
491
+ self.partitions, self.devices
492
+ )
493
+ if not found_partition_to_device_mapping:
494
+ raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
495
+ return
496
+
497
+ def saturate_host(self) -> None:
498
+ """Saturate host by assigning replicates to unused devices with enough memory.
499
+ It uses a greedy approach to find the next available set of devices to place all split
500
+ partitions: for each used device, it searches for an idle device with the minimal memory
501
+ size that can hold all the partitions located on that device; if the search is successful
502
+ for all used devices, it then assigns the new devices' logical ID to the corresponding
503
+ partition.
504
+ """
505
+ (
506
+ device_to_partitions,
507
+ device_to_left_mem_bytes,
508
+ no_device_partitions,
509
+ ) = get_device_partition_stats(self.partitions, self.devices)
510
+
511
+ assert (
512
+ len(no_device_partitions) == 0
513
+ ), f"Expect no_device_partitions has 0 device, but get {len(no_device_partitions)}"
514
+
515
+ # Devices that hold partitions
516
+ used_devices = [d for d in self.devices if len(device_to_partitions[d]) > 0]
517
+ # Track replicates of the assigned devices
518
+ replicated_device_to_used_device: Dict[Device, Device] = {}
519
+
520
+ while len(used_devices) * 2 + len(replicated_device_to_used_device) <= len(
521
+ self.devices
522
+ ):
523
+ # Success flag for this round
524
+ success = True
525
+ # Devices that have not been assigned
526
+ idle_devices = [
527
+ d
528
+ for d in self.devices
529
+ if d not in used_devices and d not in replicated_device_to_used_device
530
+ ]
531
+ # Temporary mapping from replicated device to original device
532
+ temp_replicate_mapping = {}
533
+
534
+ # Find a new device to replicate all partitions on a used device
535
+ for used_device in used_devices:
536
+ # Idle devices that have enough memory
537
+ available_devices = [
538
+ d
539
+ for d in idle_devices
540
+ if d.available_mem_bytes
541
+ >= used_device.available_mem_bytes
542
+ - device_to_left_mem_bytes[used_device]
543
+ ]
544
+ if len(available_devices) == 0:
545
+ success = False
546
+ break
547
+ new_device = min(available_devices, key=lambda d: d.available_mem_bytes)
548
+ idle_devices.remove(new_device)
549
+ temp_replicate_mapping[new_device] = used_device
550
+
551
+ if not success:
552
+ break
553
+ replicated_device_to_used_device.update(temp_replicate_mapping)
554
+
555
+ # Update logical device IDs assigned to the partitions
556
+ for (
557
+ replicate_device,
558
+ original_device,
559
+ ) in replicated_device_to_used_device.items():
560
+ logical_id = replicate_device.logical_id
561
+ for partition in device_to_partitions[original_device]:
562
+ partition.logical_device_ids.append(logical_id)
563
+ for p in self.partitions:
564
+ print(p.logical_device_ids)
565
+
566
+ def do_partition(self) -> GraphModule:
567
+ """Return a new fx module with submodule nodes (partitions)."""
568
+ module_with_submodules = split_module(
569
+ self.graph_module,
570
+ self.torch_module,
571
+ lambda node: self.node_to_partition[node],
572
+ )
573
+ return module_with_submodules
574
+
575
+ def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
576
+ """Return the dag structure and the new fx module with submodules."""
577
+ dag = DAG()
578
+ for node in module_with_submodules.graph.nodes:
579
+ if node.op == "output":
580
+ break
581
+ if node.op in {"placeholder", "get_attr"}:
582
+ continue
583
+ if node.target == operator.__getitem__:
584
+ continue
585
+ input_nodes: Dict[Node, None] = {}
586
+ map_arg(node.args, input_nodes.setdefault)
587
+ map_arg(node.kwargs, input_nodes.setdefault)
588
+ # When a node has two or more output nodes,
589
+ # it outputs its result to 'getitem' nodes.
590
+ # Those 'getitem' nodes are the output nodes for this node.
591
+ # Otherwise, the output node is this node itself.
592
+ if len(node.users) > 1:
593
+ output_nodes = list(node.users)
594
+ else:
595
+ output_nodes = [node]
596
+ partition_id = int(node.name.rsplit("_", 1)[-1])
597
+ device_ids = self.partitions[partition_id].logical_device_ids
598
+ size_bytes = self.partitions[partition_id].used_mem_bytes
599
+ dag.create_node(
600
+ node, list(input_nodes), output_nodes, device_ids, size_bytes
601
+ )
602
+ return dag
603
+
604
+ def create_partition(self) -> Partition:
605
+ """Create a partition and append it to self.partitions."""
606
+ partition_id = len(self.partitions)
607
+ partition = Partition(partition_id)
608
+ self.partitions.append(partition)
609
+ return partition
610
+
611
+ def create_single_node_partition(self, node):
612
+ """Create a partition for a single node"""
613
+ partition = self.create_partition()
614
+ partition.add_node(node)
615
+ return
616
+
617
+ def sparse_nn_partition(self, available_mem_bytes: int) -> None:
618
+ """This method partition a sparse nn module.
619
+ It is a size-based partition, but unlike size_based_partition,
620
+ it only works when all the devices have the same memory size (available_mem_bytes).
621
+ In the future, devices with different mem sizes will be supported like size_based_partition.
622
+ It first traverses all the nodes and creates the partitions based on that memory size.
623
+ If the current partition does not have enough memory left for a new op node
624
+ (call_module, call_method, call_function), a new partition is created.
625
+ When crossing the boundary between non-embedding nodes and embedding nodes,
626
+ a new partition is created regardless.
627
+ For example, if the current node is a non-embedding node but the next node is an
628
+ embedding node, a new partition is created for the next node.
629
+ After the partition, the partitions are combined as much as possible.
630
+ The rule is that a non-embedding partition only
631
+ combines with another non-embedding one.
632
+ The same applies to embedding partitions.
633
+ """
634
+
635
+ def combine_partitions_based_on_size(
636
+ partitions: List[Partition], available_mem_bytes: int
637
+ ) -> None:
638
+ """Combining small partitions together to keep as less partitions as possible.
639
+ Here is an example of the algorithm to do this:
640
+ Assume some partitions, we first sort them based on partition used memory size.
641
+ [(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
642
+ The available memory is 10.
643
+ step 1: self.find_partition_to_combine_based_on_size()
644
+ First, mark bfs level for each partition
645
+ Second, look at the smallest partition, partition_4: 10 - 1 = 9
646
+ This means any partition with used memory less than or equal to 9 could be combined with this partition
647
+ We go from the largest and select partition_0.
648
+ Check the bfs level for two partitions, if the level difference is less than 2,
649
+ it can be combined.
650
+ step 2: repeat step 1 until no partitions can be combined
651
+ """
652
+ find_combination = True
653
+ while find_combination:
654
+ # Sort partitions based on memory size
655
+ sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
656
+ # Mark bfs level
657
+ get_bfs_level_partition(self.partitions)
658
+ find_combination, partitions = find_partition_to_combine_based_on_size(
659
+ sorted_partitions, available_mem_bytes, partitions
660
+ )
661
+ return
662
+
663
+ def calculate_mem_bytes_needed(p1, p2):
664
+ """Given two partitions, calculate how many mem bytes
665
+ are needed if two partitions are combined
666
+ """
667
+ nodes = p1.nodes.union(p2.nodes)
668
+ mem_bytes_needed = 0
669
+ for node in nodes:
670
+ mem_bytes_needed += get_extra_size_of(node, nodes)
671
+ return mem_bytes_needed
672
+
673
+ def find_partition_to_combine_based_on_size(
674
+ sorted_partitions: List[Partition],
675
+ available_mem_bytes: int,
676
+ partitions: List[Partition],
677
+ ) -> Tuple[bool, List[Partition]]:
678
+ """step 1 in combine_partition_based_on_size()"""
679
+ find_combination = False
680
+ smallest_partition = sorted_partitions.pop(0)
681
+ for p in sorted_partitions[::-1]:
682
+ if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
683
+ # Calculate how many bytes needed if combined
684
+ mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
685
+ if mem_bytes_needed <= available_mem_bytes:
686
+ combine_two_partitions(p, smallest_partition, self.partitions)
687
+ partitions.remove(smallest_partition)
688
+ partitions.remove(p)
689
+ partitions.append(self.partitions[-1])
690
+ find_combination = True
691
+ break
692
+ return find_combination, partitions
693
+
694
+ def reset_partition_in_sparse_nn(partition, new_partition=True):
695
+ """If crossing the boundary between non-embedding nodes and
696
+ embedding nodes, create a new partition
697
+ """
698
+ if in_embedding_region:
699
+ embedding_partitions.append(partition)
700
+ else:
701
+ non_embedding_partitions.append(partition)
702
+ if new_partition:
703
+ partition = self.create_partition()
704
+ partition.left_mem_bytes = available_mem_bytes
705
+ return partition
706
+ return None
707
+
708
+ def is_embedding_node(node: Node) -> bool:
709
+ """Check if a node is an embedding node"""
710
+ if node.op == "call_module":
711
+ submodule = self.graph_module
712
+ for atom in str(node.target).split("."):
713
+ if not hasattr(submodule, atom):
714
+ raise RuntimeError(
715
+ f"Module {submodule} has no attribute {atom}"
716
+ )
717
+ submodule = getattr(submodule, atom)
718
+ if "Embedding" in str(submodule):
719
+ return True
720
+ return False
721
+
722
+ # Track embedding partitions and non-embedding partitions separately
723
+ embedding_partitions: List[Partition] = []
724
+ non_embedding_partitions: List[Partition] = []
725
+ # A Flag to check the boundary
726
+ in_embedding_region: bool = False
727
+ partition = self.create_partition()
728
+ for node in self.graph_module.graph.nodes:
729
+ if node.op in {"call_module", "call_method", "call_function"}:
730
+ # Check if crossing the boundary between embedding nodes and non embedding nodes
731
+ if is_embedding_node(node) != in_embedding_region:
732
+ # Crossing the boundary
733
+ # Check if the current partition is an empty partition
734
+ if partition.used_mem_bytes != 0:
735
+ # The current partition isn't an empty partition. Create a new one.
736
+ partition = reset_partition_in_sparse_nn(partition)
737
+ in_embedding_region = not in_embedding_region
738
+ total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
739
+ if (
740
+ total_size_of_input_nodes + partition.used_mem_bytes
741
+ > available_mem_bytes
742
+ ):
743
+ partition = reset_partition_in_sparse_nn(partition)
744
+ total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
745
+ if total_size_of_input_nodes > available_mem_bytes:
746
+ raise RuntimeError(
747
+ node.target + "is too large to fit into a device"
748
+ )
749
+ partition.add_node(node)
750
+ reset_partition_in_sparse_nn(partition, new_partition=False)
751
+ # Set parents and children for partitions
752
+ set_parents_and_children(self.partitions)
753
+ # Combining non-embedding partitions
754
+ combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
755
+ # Combining embedding partitions
756
+ combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
757
+ total_size_of_non_embedding_partitions = 0
758
+ for partition in non_embedding_partitions:
759
+ total_size_of_non_embedding_partitions += partition.used_mem_bytes
760
+ # Check if devices are enough for all partitions
761
+ if len(embedding_partitions) > len(self.devices):
762
+ msg = (
763
+ "Need "
764
+ + str(len(embedding_partitions))
765
+ + " devices, but only "
766
+ + str(len(self.devices))
767
+ + " provided"
768
+ )
769
+ raise RuntimeError(msg)
770
+ occupied_devices = []
771
+ for i, partition in enumerate(embedding_partitions):
772
+ # Check if all non-embedding partitions can fit into embedding partition devices
773
+ if (
774
+ total_size_of_non_embedding_partitions + partition.used_mem_bytes
775
+ > available_mem_bytes
776
+ ):
777
+ raise RuntimeError(
778
+ "partition_"
779
+ + str(partition.partition_id)
780
+ + "(embedding partition) and non embedding partitions can not fit into one device"
781
+ )
782
+ else:
783
+ # Add logical device to the partition
784
+ partition.logical_device_ids = [self.devices[i].logical_id]
785
+ occupied_devices.append(self.devices[i].logical_id)
786
+ # Add logical devices to the non_embedding_partitions
787
+ for partition in non_embedding_partitions:
788
+ partition.logical_device_ids = occupied_devices
789
+ # Get the node to partition mapping
790
+ self.node_to_partition = get_node_to_partition_mapping(self.partitions)
791
+ return
792
+
793
+ def cost_aware_partition(
794
+ self,
795
+ transfer_rate_bytes_per_sec: float,
796
+ node_to_latency_mapping: Dict[Node, NodeLatency],
797
+ ) -> None:
798
+ """This method is to partition the fx module based on the cost.
799
+ The cost is the total latency of running the whole fx module.
800
+ In partitioner_utils.py, the cost model is built.
801
+ The cost aware partition algorithm is:
802
+ #1. At the beginning, each node is its own partition.
803
+ Then we map all the partitions to the devices
804
+ and calculate the cost
805
+ #2. Then try to pre-combine any two of the partitions if the two
806
+ partitions can be combined.
807
+ (their bfs level difference is less than 2, or the two partitions are connected, and
808
+ a partition-to-device mapping can still be found)
809
+ See if any partition pair could reduce the current cost.
810
+ Choose the pair that shows the minimum cost and then combine them
811
+ #3. Repeat #2 until the cost cannot be reduced.
812
+ """
813
+
814
+ def try_combining_partitions(p0_index, p1_index, partitions) -> float:
815
+ """Given two partitions and a list of partitions, combine these two partitions
816
+ and see what is the cost of the modified partition list
817
+ """
818
+ p0 = partitions[p0_index]
819
+ p1 = partitions[p1_index]
820
+ """If two partitions' bfs level are less than 2 or two partitions are connected to each other,
821
+ then they can be combined
822
+ """
823
+ if (
824
+ (abs(p0.bfs_level - p1.bfs_level) <= 1)
825
+ or (p0 in p1.parents)
826
+ or p0 in (p1.children)
827
+ ):
828
+ combine_two_partitions(p0, p1, partitions)
829
+ # Check if a circular dependency exists after combining
830
+ if check_dependency(partitions[-1]):
831
+ return float("inf")
832
+ # Check if the modified partition list can be mapped to devices after combination
833
+ reset_partition_device(partitions)
834
+ found_device = get_device_to_partitions_mapping(
835
+ partitions, self.devices
836
+ )
837
+ if not found_device:
838
+ return float("inf")
839
+ # Calculate the new cost
840
+ partition_to_latency_mapping = get_partition_to_latency_mapping(
841
+ partitions, node_to_latency_mapping
842
+ )
843
+ cost = get_latency_of_partitioned_graph(
844
+ partitions,
845
+ partition_to_latency_mapping,
846
+ transfer_rate_bytes_per_sec,
847
+ )
848
+ return cost
849
+ # If the two partitions cannot be combined, the cost is inf
850
+ return float("inf")
851
+
852
+ def search_combination(
853
+ transfer_rate_bytes_per_sec, node_to_latency_mapping
854
+ ) -> bool:
855
+ """Given transfer rate between partitions and each node's latency,
856
+ find two partitions to combine so the cost of the partitions can
857
+ be reduced.
858
+ The algorithm is :
859
+ 1. Go through all the partition pairs and see
860
+ if any pair of partitions can be combined.
861
+ 2. Calculate the cost after the combination.
862
+ 3. Select the minimum cost and combine its corresponding partition pair.
863
+ """
864
+ partition_to_latency_mapping = get_partition_to_latency_mapping(
865
+ self.partitions, node_to_latency_mapping
866
+ )
867
+ cost = get_latency_of_partitioned_graph(
868
+ self.partitions,
869
+ partition_to_latency_mapping,
870
+ transfer_rate_bytes_per_sec,
871
+ )
872
+ if len(self.partitions) == 1:
873
+ return False
874
+ partition_pair: List[int] = []
875
+ for i in range(len(self.partitions) - 1):
876
+ for j in range(i + 1, len(self.partitions)):
877
+ # Try to combine the partition pair
878
+ # and see the new cost after combination
879
+ new_cost = try_combining_partitions(i, j, self.partitions[:])
880
+ if new_cost <= cost:
881
+ partition_pair = [i, j]
882
+ cost = new_cost
883
+ reorganize_partitions(self.partitions)
884
+ # If a partition pair is found, combine them
885
+ if len(partition_pair) != 0:
886
+ p0 = self.partitions[partition_pair[0]]
887
+ p1 = self.partitions[partition_pair[1]]
888
+ combine_two_partitions(p0, p1, self.partitions)
889
+ get_bfs_level_partition(self.partitions)
890
+ reset_partition_device(self.partitions)
891
+ get_device_to_partitions_mapping(self.partitions, self.devices)
892
+ return len(partition_pair) != 0
893
+
894
+ for node in self.graph_module.graph.nodes:
895
+ if node.op not in {"placeholder", "get_attr", "output"}:
896
+ self.create_single_node_partition(node)
897
+ # Set up parent partitions and children partitions for each partition
898
+ set_parents_and_children(self.partitions)
899
+ # Get bfs level for each partition
900
+ get_bfs_level_partition(self.partitions)
901
+ find_combination = True
902
+ while find_combination:
903
+ # Search for a pair partition to generate the minimum new cost,
904
+ # then combine them
905
+ find_combination = search_combination(
906
+ transfer_rate_bytes_per_sec, node_to_latency_mapping
907
+ )
908
+ # Make sure all partitions are set up correctly
909
+ reorganize_partitions(self.partitions)
910
+ # Set up node to partition mapping
911
+ self.node_to_partition = get_node_to_partition_mapping(self.partitions)
912
+ return
913
+
914
+ def kl_based_partition(
915
+ self,
916
+ transfer_rate_bytes_per_sec: float,
917
+ node_to_latency_mapping: Dict[Node, NodeLatency],
918
+ ) -> None:
919
+ """This function is a cost aware partition based
920
+ on the Kernighan-Lin algorithm.
921
+ First, the graph is partitioned using size_based_partition.
922
+ Then, each node is swapped with any other node in a different
923
+ partition, and at the same time, the cost is estimated after
924
+ the swapping.
925
+ For example, we have nodes n0, n1, n2, n3 and n4.
926
+ Using size_based_partition, n0 and n1 are in Partition p0.
927
+ n2, n3 and n4 are in Partition p1. The current cost is estimated.
928
+ We first try swapping n0 with n2 from the other partition.
929
+ Then we see that swapping n0 and n2 shows a lower cost
930
+ than the current cost and it is the minimum among other pairs like
931
+ (n0, None) (this means moving n0 to the other partition without swapping any node),
932
+ (n0, n3) and (n0, n4). We swap n0 and n2 and set the new cost
933
+ as the current cost.
934
+ Then we repeat this process for all the other nodes until all swapping pairs
935
+ are tried.
936
+ """
937
+
938
+ def swap_nodes(n0, n1, p0, p1):
939
+ # Either n0 or n1 could be None
940
+ # That means we simply move the node
941
+ # to another partition
942
+ if n0 is not None:
943
+ p0.remove_node(n0)
944
+ p1.add_node(n0)
945
+ if n1 is not None:
946
+ p0.add_node(n1)
947
+ p1.remove_node(n1)
948
+
949
+ def try_swap_nodes(
950
+ n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
951
+ ):
952
+ cost = float("inf")
953
+ swap_nodes(n0, n1, p0, p1)
954
+ # Reorganize partitions after swapping
955
+ reorganize_partitions(self.partitions)
956
+ # Check if there is a circular dependency after swapping
957
+ if (not check_dependency(p0)) and (not check_dependency(p1)):
958
+ reset_partition_device(self.partitions)
959
+ partition_to_latency_mapping = get_partition_to_latency_mapping(
960
+ self.partitions, node_to_latency_mapping
961
+ )
962
+ # Check if all partitions can be mapped to logical devices after swapping
963
+ found_device = get_device_to_partitions_mapping(
964
+ self.partitions, self.devices
965
+ )
966
+ if not found_device:
967
+ cost = float("inf")
968
+ else:
969
+ cost = get_latency_of_partitioned_graph(
970
+ self.partitions,
971
+ partition_to_latency_mapping,
972
+ transfer_rate_bytes_per_sec,
973
+ )
974
+ # Swap back and reset all partitions back to original
975
+ swap_nodes(n1, n0, p0, p1)
976
+ reorganize_partitions(self.partitions)
977
+ reset_partition_device(self.partitions)
978
+ get_device_to_partitions_mapping(self.partitions, self.devices)
979
+ return cost
980
+
981
+ def swap_node_to_partition(
982
+ node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
983
+ ):
984
+ """This function helps to swap one node from partition p0
985
+ with all the nodes in another partition p1
986
+ """
987
+ p1_nodes = list(p1.nodes) + [None]
988
+ min_cost = float("inf")
989
+ node_pair: List[Node] = []
990
+ for n1 in p1_nodes:
991
+ # Ignore the node if it is not an op node
992
+ if n1 is not None and n1.op in {"placeholder", "get_attr"}:
993
+ continue
994
+ # Try swapping node in p0 with n1 in p1
995
+ cost = try_swap_nodes(
996
+ node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
997
+ )
998
+ if cost < min_cost:
999
+ node_pair = [node, n1]
1000
+ min_cost = cost
1001
+ return min_cost, node_pair
1002
+
1003
+ # First use size_based_partition
1004
+ self.size_based_partition()
1005
+ partition_to_latency_mapping = get_partition_to_latency_mapping(
1006
+ self.partitions, node_to_latency_mapping
1007
+ )
1008
+ # Calculate the cost of the partitions
1009
+ cost = get_latency_of_partitioned_graph(
1010
+ self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
1011
+ )
1012
+ # Keep tracking the node pair that shows the better cost
1013
+ node_pair: List[Node] = []
1014
+ # Keep tracking the partition pair of node pair
1015
+ partition_pair: List[Partition] = []
1016
+ # Collect all the op nodes from the graph
1017
+ op_nodes = []
1018
+ for n in self.graph_module.graph.nodes:
1019
+ if n.op not in {"placeholder", "get_attr", "output"}:
1020
+ op_nodes.append(n)
1021
+ for node in op_nodes:
1022
+ # Find which partition the current node belongs to
1023
+ p0_index = self.node_to_partition[node]
1024
+ p0 = self.partitions[p0_index]
1025
+ # Go through all the other partitions to swap
1026
+ # with other nodes from those partitions
1027
+ for p1_index, _ in enumerate(self.partitions):
1028
+ if p0_index != p1_index:
1029
+ p1 = self.partitions[p1_index]
1030
+ new_cost, new_node_pair = swap_node_to_partition(
1031
+ node,
1032
+ p0,
1033
+ p1,
1034
+ node_to_latency_mapping,
1035
+ transfer_rate_bytes_per_sec,
1036
+ )
1037
+ # Update the cost
1038
+ # Track the swapped node pair and their partitions
1039
+ if new_cost < cost:
1040
+ cost = new_cost
1041
+ node_pair = new_node_pair
1042
+ partition_pair = [p0, p1]
1043
+ # Do the swapping after trying all the nodes from a partition
1044
+ if len(node_pair) != 0:
1045
+ swap_nodes(
1046
+ node_pair[0], node_pair[1], partition_pair[0], partition_pair[1]
1047
+ )
1048
+ reorganize_partitions(self.partitions)
1049
+ get_device_to_partitions_mapping(self.partitions, self.devices)
1050
+ reorganize_partitions(self.partitions)
1051
+ # Mapping the device to the partition
1052
+ get_device_to_partitions_mapping(self.partitions, self.devices)
1053
+ return
1054
+
1055
+ def aot_based_partition(
1056
+ self, node_to_partition_mapping, partition_to_logical_device_mapping
1057
+ ):
1058
+ """This function helps to rebuild the partitions given the nodes and its
1059
+ corresponding partition id
1060
+ """
1061
+ partition_id_to_partition_mapping: Dict[int, Partition] = {}
1062
+ self.node_to_partition = node_to_partition_mapping
1063
+ for node in self.node_to_partition:
1064
+ partition_id = self.node_to_partition[node]
1065
+ # If the requested partition has not been created, create the partition
1066
+ if partition_id not in partition_id_to_partition_mapping:
1067
+ partition = Partition(partition_id)
1068
+ self.partitions.append(partition)
1069
+ partition_id_to_partition_mapping[partition_id] = partition
1070
+ partition.logical_device_ids = partition_to_logical_device_mapping[
1071
+ partition_id
1072
+ ]
1073
+ else:
1074
+ partition = partition_id_to_partition_mapping[
1075
+ self.node_to_partition[node]
1076
+ ]
1077
+ # Add the current node into the partition
1078
+ partition.add_node(node)
venv/lib/python3.10/site-packages/torch/fx/experimental/const_fold.py ADDED
@@ -0,0 +1,289 @@
1
+ import re
2
+ from typing import Callable, Dict, Optional, Set, Union
3
+
4
+ import torch.fx
5
+ from torch.fx.node import map_arg
6
+ from torch.fx.passes.split_module import split_module
7
+
8
+
9
+ __all__ = ['FoldedGraphModule', 'get_unique_attr_name_in_module', 'split_const_subgraphs']
10
+
11
+ class FoldedGraphModule(torch.fx.GraphModule):
12
+ """
13
+ FoldedGraphModule is a GraphModule which also contains another
14
+ `const_subgraph_module` representing a subgraph which has all const attr
15
+ inputs and which can be run once before running the main standard
16
+ `graph`. The `const_output_names` are the ordered list of attr names to which
17
+ each respective output from the const_subgraph should be assigned (i.e. which
18
+ attr each output is set on).
19
+ """
20
+
21
+ def __init__(
22
+ self,
23
+ root: torch.nn.Module,
24
+ graph: torch.fx.Graph,
25
+ const_subgraph: Optional[torch.fx.Graph] = None,
26
+ fx_const_folded_attrs_name: Optional[str] = None,
27
+ device_for_folded_attrs: str = "cuda",
28
+ ):
29
+ super().__init__(root, graph)
30
+ self.const_subgraph_module = (
31
+ None
32
+ if const_subgraph is None
33
+ else torch.fx.GraphModule(root, const_subgraph)
34
+ )
35
+ self.has_folding_been_run = False
36
+ self.fx_const_folded_attrs_name = fx_const_folded_attrs_name
37
+ self.device_for_folded_attrs = device_for_folded_attrs
38
+
39
+ def __call__(self, *args, **kwargs):
40
+ if not self.has_folding_been_run:
41
+ self.run_folding()
42
+ return super().__call__(*args, **kwargs)
43
+
44
+ def run_folding(self):
45
+ # If there's no const subgraph module or attr output names to use, return
46
+ # early as there is no const folding to perform.
47
+ if (
48
+ self.const_subgraph_module is None
49
+ or self.fx_const_folded_attrs_name is None
50
+ ):
51
+ return
52
+
53
+ assert not self.has_folding_been_run
54
+ self.has_folding_been_run = True
55
+
56
+ # Actually run const folding subgraph. Note that single attr const fold
57
+ # subgraphs output a single Tensor while multiple outputs are returned as
58
+ # Tuple[Tensor,].
59
+ folded_attrs = self.const_subgraph_module()
60
+
61
+ def _create_param(i):
62
+ return torch.nn.Parameter(
63
+ i
64
+ if not isinstance(i, int)
65
+ else torch.Tensor([i]).to(device=self.device_for_folded_attrs),
66
+ requires_grad=i.requires_grad if isinstance(i, torch.Tensor) else False,
67
+ )
68
+
69
+ params = (
70
+ torch.nn.ParameterList([_create_param(i) for i in folded_attrs])
71
+ if isinstance(folded_attrs, tuple)
72
+ else _create_param(folded_attrs)
73
+ )
74
+ setattr(self, self.fx_const_folded_attrs_name, params)
75
+
76
+
77
+ def _inline_module(gm: torch.fx.GraphModule, inline_mod_name: str):
78
+ """
79
+ Given `gm` and some graph module which is called with target name `inline_mod_name`,
80
+ this helper will inline all of the nodes from that called graph module into `gm`.
81
+ """
82
+ # Fetch the inner graph module that we want to inline inside `gm`.
83
+ inline_mod = dict(gm.named_modules())[inline_mod_name]
84
+ assert isinstance(inline_mod, torch.fx.GraphModule)
85
+ call_mod_node_to_replace = None
86
+ for node in gm.graph.nodes:
87
+ if node.op == "call_module" and node.target == inline_mod_name:
88
+ call_mod_node_to_replace = node
89
+ break
90
+ assert call_mod_node_to_replace is not None
91
+
92
+ # Now actually do the swap. Note that we have to keep track of new nodes that are
93
+ # copied into `gm` -- we do this via replacement_mapping.
94
+ call_mod_args = call_mod_node_to_replace.args
95
+ replacement_mapping: Dict[torch.fx.Node, torch.fx.Node] = {}
96
+ ph_count = 0
97
+
98
+ def replacement_fn(node):
99
+ new_node = replacement_mapping[node]
100
+ new_node.meta = node.meta.copy()
101
+ return new_node
102
+
103
+ for inline_node in inline_mod.graph.nodes:
104
+ if inline_node.op == "placeholder":
105
+ replacement_mapping[inline_node] = call_mod_args[ph_count]
106
+ ph_count += 1
107
+ continue
108
+
109
+ if inline_node.op == "output":
110
+ outputs = inline_node.args[0]
111
+ output_replacements = map_arg(outputs, replacement_fn)
112
+ call_mod_node_to_replace.replace_all_uses_with(output_replacements)
113
+ continue
114
+
115
+ with gm.graph.inserting_before(call_mod_node_to_replace):
116
+ new_node = gm.graph.node_copy(inline_node, replacement_fn)
117
+ replacement_mapping[inline_node] = new_node
118
+
119
+ gm.graph.eliminate_dead_code()
120
+
121
+
122
+ def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str:
123
+ """
124
+ Make sure the name is unique (in a module) and can represent an attr.
125
+ """
126
+ # Delete all characters that are illegal in a Python identifier.
127
+ name = re.sub("[^0-9a-zA-Z_]+", "_", name)
128
+ if name[0].isdigit():
129
+ name = f"_{name}"
130
+ # Now make sure it is in fact unique to the module by incrementing suffix value.
131
+ while hasattr(mod_traced, name):
132
+ match = re.match(r"(.*)_(\d+)$", name)
133
+ if match is None:
134
+ name = name + "_1"
135
+ else:
136
+ base, num = match.group(1, 2)
137
+ name = f"{base}_{int(num) + 1}"
138
+
139
+ return name
140
+
141
+
142
+ def split_const_subgraphs(
143
+ module: Union[torch.nn.Module, torch.fx.GraphModule],
144
+ skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
145
+ device_for_folded_attrs: str = "cpu",
146
+ ) -> FoldedGraphModule:
147
+ """
148
+ Looks through `module` for any nodes that have all constant attribute inputs
149
+ and separates them out into their own constant subgraph, and returns a
150
+ FoldedGraphModule which runs that constant subgraph on the first run to set
151
+ attributes on the module prior to running the non-constant portion of the
152
+ graph.
153
+ """
154
+ if not isinstance(module, torch.fx.GraphModule):
155
+ mod_traced = torch.fx.symbolic_trace(module)
156
+ else:
157
+ mod_traced = module
158
+
159
+ # Build up a list of const_nodes, defined as nodes that are themselves
160
+ # get_attrs, or have all get_attr or other constant node inputs.
161
+ const_nodes: Set[torch.fx.Node] = set()
162
+ found_const_folding = False
163
+ for node in mod_traced.graph.nodes:
164
+ # Skip over placeholders/outputs because they can't be const folded and
165
+ # we don't want to add tags to them.
166
+ if node.op in {"placeholder", "output"}:
167
+ continue
168
+
169
+ # If the node itself is constant, or all of its inputs are constant,
170
+ # then tag it as constant.
171
+ if node.op != "get_attr" and not set(node.all_input_nodes).issubset(
172
+ const_nodes
173
+ ):
174
+ continue
175
+
176
+ # If provided skip folding function says to skip, then skip.
177
+ if skip_folding_node_fn and skip_folding_node_fn(node):
178
+ continue
179
+
180
+ # Skip folding side-effectful functions
181
+ if node.is_impure():
182
+ continue
183
+
184
+ # Must be a constant foldable node at this point.
185
+ const_nodes.add(node)
186
+ if node.op != "get_attr":
187
+ found_const_folding = True
188
+
189
+ # If we did not find any const folding then return early without a const fold subgraph.
190
+ if not found_const_folding:
191
+ return FoldedGraphModule(mod_traced, mod_traced.graph)
192
+
193
+ # Partition the module into two: submod_0 for constant folding subgraph, and
194
+ # submod_1 for the rest.
195
+ def mod_partition(node: torch.fx.Node):
196
+ return 0 if node in const_nodes else 1
197
+
198
+ split = split_module(mod_traced, module, mod_partition)
199
+
200
+ const_gm, non_const_gm = split.submod_0, split.submod_1
201
+ const_mod_name, non_const_mod_name = "submod_0", "submod_1"
202
+
203
+ # The module that a call_module node refers to gets copied to submodules during split.
204
+ # The path to the module also gets inlined, i.e. mod.a.b -> mod_a_b. Here we need to
205
+ # attach inlined modules to `split` as it's the owning module now.
206
+ for node in non_const_gm.graph.nodes:
207
+ if node.op == "call_module":
208
+ setattr(split, node.target, getattr(non_const_gm, node.target))
209
+ for node in const_gm.graph.nodes:
210
+ if node.op == "call_module":
211
+ setattr(split, node.target, getattr(const_gm, node.target))
212
+
213
+ # split_module currently does not use get_attrs for attrs. Instead it passes
214
+ # them in as args from the parent module, which used get_attrs. Here we set
215
+ # them as get_attrs inside const_gm, allowing for running folding without
216
+ # somehow a priori knowing the attrs that should be passed as args. We can
217
+ # unconditionally do this for all placeholders because we know all
218
+ # placeholders to const_gm must be constants accessible via get_attr.
219
+ call_const_gm_args = None
220
+ for node in split.graph.nodes:
221
+ if node.op == "call_module":
222
+ if node.target == const_mod_name:
223
+ call_const_gm_args = node.args
224
+ break
225
+ assert call_const_gm_args is not None
226
+
227
+ # Here we do the actual replacement of placeholders to get_attrs. Note that here we
228
+ # set the const_gm.graph into a new root_const_gm with split as the root module,
229
+ # because we are fetching attributes directly from the root module, instead of
230
+ # fetching them from const_gm. Example: The const_gm must have some format like:
231
+ # graph():
232
+ # %inp : [num_users=1] = placeholder[target=const_inp]
233
+ # %add : [num_users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {})
234
+ # return add
235
+ # We replace that with the following, which does not have any placeholders:
236
+ # graph():
237
+ # %inp_1 : [num_users=1] = get_attr[target=const_inp]
238
+ # %add : [num_users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {})
239
+ # return add
240
+ root_const_gm = torch.fx.GraphModule(split, const_gm.graph)
241
+ for node in root_const_gm.graph.nodes:
242
+ if node.op == "output":
243
+ multiple_outputs = isinstance(node.args[0], tuple)
244
+ continue
245
+ if node.op != "placeholder":
246
+ continue
247
+ in_node = next(n for n in call_const_gm_args if n.name == node.target)
248
+ assert in_node.op == "get_attr"
249
+ with root_const_gm.graph.inserting_before(node):
250
+ new_node = root_const_gm.graph.get_attr(in_node.target)
251
+ new_node.meta = node.meta.copy()
252
+ node.replace_all_uses_with(new_node)
253
+ root_const_gm.graph.erase_node(node)
254
+ assert "multiple_outputs" in locals()
255
+
256
+ # Now find the call to const_gm inside split, and replace it with a getattr to the
257
+ # folded tensor(s) that result from constant folding. Note that we don't need to
258
+ # worry about whether this is one or more tensors because the original graph
259
+ # correctly uses getitem to extract individual tensors if there are multiple folded.
260
+ fx_const_folded_attrs_name = get_unique_attr_name_in_module(
261
+ split, "_FX_CONST_FOLDED_ATTRS"
262
+ )
263
+ setattr(
264
+ split,
265
+ fx_const_folded_attrs_name,
266
+ torch.nn.ParameterList() if multiple_outputs else torch.nn.Parameter(), # type: ignore[possibly-undefined]
267
+ )
268
+ for node in split.graph.nodes:
269
+ if node.op == "call_module" and node.target == const_mod_name:
270
+ with node.graph.inserting_before(node):
271
+ folded_attrs = node.graph.get_attr(fx_const_folded_attrs_name)
272
+ folded_attrs.meta = node.meta.copy()
273
+ node.replace_all_uses_with(folded_attrs)
274
+ break
275
+
276
+ split.graph.eliminate_dead_code()
277
+
278
+ # Finally, inline the non-constant submod into the split submod. This is so that the
279
+ # original caller who may have passed in a graph module will get back out a graph
280
+ # module whose graph is traced to the same granularity.
281
+ _inline_module(split, non_const_mod_name)
282
+
283
+ return FoldedGraphModule(
284
+ split,
285
+ split.graph,
286
+ root_const_gm.graph,
287
+ fx_const_folded_attrs_name,
288
+ device_for_folded_attrs,
289
+ )
venv/lib/python3.10/site-packages/torch/fx/experimental/debug.py ADDED
@@ -0,0 +1,31 @@
1
+ import torch.fx as fx
2
+
3
+ def set_trace(gm: fx.GraphModule) -> fx.GraphModule:
4
+ """
5
+ Sets a breakpoint in `gm`'s generated python code. It drops into pdb when
6
+ `gm` gets run.
7
+
8
+ Args:
9
+ gm: graph module to insert breakpoint. It is then recompiled for it to
10
+ take effect.
11
+
12
+ Returns:
13
+ the `gm` with breakpoint inserted.
14
+ """
15
+ def insert_pdb(body):
16
+ return ["import pdb; pdb.set_trace()\n", *body]
17
+
18
+ with gm.graph.on_generate_code(
19
+ make_transformer=lambda cur_transform: (
20
+ # new code transformer to register
21
+ lambda body: (
22
+ insert_pdb(
23
+ cur_transform(body) if cur_transform
24
+ else body
25
+ )
26
+ )
27
+ )
28
+ ):
29
+ gm.recompile()
30
+
31
+ return gm
venv/lib/python3.10/site-packages/torch/fx/experimental/graph_gradual_typechecker.py ADDED
@@ -0,0 +1,914 @@
1
+ from functools import reduce
2
+ import torch
3
+ import operator
4
+ from torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise
5
+ from typing import Callable, Dict
6
+ from torch.fx.node import Target, Node
7
+ from torch.nn.modules.batchnorm import BatchNorm2d
8
+ from torch.nn.modules.conv import Conv2d
9
+ from torch.fx.experimental.refinement_types import Equality
10
+ import itertools
11
+
12
+ from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
13
+
14
+ import sympy
15
+
16
+ _INFERENCE_RULES: Dict[Target, Callable] = {}
17
+ _REFINEMENT_RULES: Dict[Target, Callable] = {}
18
+ _RULES: Dict[Target, Callable] = {}
19
+
20
+
21
+ def expand_to_tensor_dim(t, n):
22
+ """
23
+ Expand a type to the desired tensor dimension if possible
24
+ Raise an error otherwise.
25
+ - t is the given type
26
+ - n is a number of dimensions to expand to
27
+ """
28
+ if t == Dyn:
29
+ dims = [Dyn] * n
30
+ return TensorType(tuple(dims))
31
+ elif isinstance(t, TensorType):
32
+ if len(t.__args__) != n:
33
+ raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')
34
+ return t
35
+ else:
36
+ raise TypeError(f'Cannot match the type {t}')
37
+
38
+
39
+ def broadcast_types(t1, t2):
40
+ """
41
+ Applies broadcasting to both given types such that they
42
+ become consistent with eachother and returns two new
43
+ resulting types
44
+ """
45
+
46
+ # if either type is Dyn, do nothing since the types are already consistent
47
+ if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
48
+ return t1, t2
49
+
50
+ if isinstance(t1, TensorType) and isinstance(t2, TensorType):
51
+ s1 = len(t1.__args__)
52
+ s2 = len(t2.__args__)
53
+
54
+ new_t1 = list(t1.__args__)
55
+ new_t2 = list(t2.__args__)
56
+
57
+ # We make the types the same length which is the first requirement
58
+ # for consistency
59
+ if s1 > s2:
60
+ for i in range(s1 - s2):
61
+ new_t2.insert(0, 1)
62
+
63
+ elif s2 > s1:
64
+ for i in range(s2 - s1):
65
+ new_t1.insert(0, 1)
66
+
67
+ # we replace occurrences of "1" with each tensor with
68
+ # the corresponding type from the other tensor
69
+ for i, (x, y) in enumerate(zip(new_t1, new_t2)):
70
+ if x == 1:
71
+ new_t1[i] = y
72
+ elif y == 1:
73
+ new_t2[i] = x
74
+
75
+ # at this point our tensors should be consistent
76
+ # and we can apply the element-wise operation and find the right dimension
77
+ # for the output of the operation
78
+ (t1, t2) = TensorType(tuple(new_t1)), TensorType(tuple(new_t2))
79
+ return (t1, t2)
80
+ else:
81
+ raise TypeError(f'Cannot broadcast types {t1} and {t2}')
82
+
83
+ def register_inference_rule(call_target):
84
+ def register(fn):
85
+ if call_target in _INFERENCE_RULES:
86
+ raise RuntimeError(f'Inference rule already registered for {call_target}!')
87
+ _INFERENCE_RULES[call_target] = fn
88
+ return fn
89
+ return register
90
+
91
+ def register_refinement_rule(call_target):
92
+ def register(fn):
93
+ if call_target in _REFINEMENT_RULES:
94
+ raise RuntimeError(f'Refinement rule already registered for {call_target}!')
95
+ _REFINEMENT_RULES[call_target] = fn
96
+ return fn
97
+ return register
98
+
99
+ def register_algebraic_expressions_inference_rule(call_target):
100
+ def register(fn):
101
+ if call_target in _RULES:
102
+ raise RuntimeError(f'Rule already registered for {call_target}!')
103
+ _RULES[call_target] = fn
104
+ return fn
105
+ return register
106
+
107
+ @register_inference_rule(torch.add)
108
+ @register_inference_rule(operator.add)
109
+ def add_inference_rule(n: Node):
110
+ """
111
+ Apply the addition inference rule. This includes:
112
+ - scalar addition
113
+ - broadcasting semantics
114
+
115
+ Note that we always return the least precise type between
116
+ the operands (after applying broadcasting) to be the final type of the operation
117
+
118
+ Note that we do not modify the operand types themselves after applying broadcasting
119
+ to them. We only use them to calculate the final type
120
+ """
121
+ assert isinstance(n.args[0], Node)
122
+ assert isinstance(n.args[1], Node)
123
+ t1 = n.args[0].type
124
+ t2 = n.args[1].type
125
+
126
+ # handle scalar addition
127
+ if t1 == int and isinstance(t2, TensorType):
128
+ n.type = t2
129
+ return n.type
130
+
131
+ # handle scalar addition
132
+ elif t2 == int and isinstance(t1, TensorType):
133
+ n.type = t1
134
+ return n.type
135
+
136
+ # we bring the new types to the point where
137
+ # we can check for consistency
138
+ # any inconsistency would not have been caused
139
+ # by broadcasting at this point
140
+ (new_t1, new_t2) = broadcast_types(t1, t2)
141
+
142
+ if new_t1 != t1 or new_t2 != t2:
143
+ n.meta['broadcast'] = True
144
+ n.meta[str(n.args[0])] = new_t1
145
+ n.meta[str(n.args[1])] = new_t2
146
+
147
+ else:
148
+ n.meta['broadcast'] = False
149
+
150
+ new_t1 = t1 if not n.meta['broadcast'] else new_t1
151
+ new_t2 = t2 if not n.meta['broadcast'] else new_t2
152
+
153
+ # we check for consistency between the new types
154
+ if is_consistent(new_t1, new_t2):
155
+ # we return the less precise type because
156
+ # broadcasting may have happened
157
+ # for operands with shape [1,2,Dyn] and [1,2,1]
158
+ # we have to assign the node [1,2,Dyn]
159
+ if is_more_precise(new_t1, new_t2):
160
+ n.type = new_t2
161
+ else:
162
+ n.type = new_t1
163
+ return n.type
164
+ else:
165
+ raise TypeError(f'Cannot add arguments {n.args[0]} ({ n.args[0].type}) and {n.args[1]} ({ n.args[1].type}) in node {n}.'
166
+ f' Types should match ')
167
+
168
+ @register_inference_rule(getattr)
169
+ def get_attr_inference_rule(n: Node, traced):
170
+ """
171
+ The current getattr rule only handles the shape attribute
172
+ Can be extended to other attributes
173
+ The most representitive type we have is "Dyn" but the system
174
+ can be extended with more types, such as a type to represent shapes
175
+ """
176
+ attr_node = n.args[0]
177
+ attr_name = n.args[1]
178
+
179
+ if attr_name == "shape":
180
+ n.type = Dyn
181
+ else:
182
+ raise TypeError("Not yet implemented")
183
+
184
+ # TODO. We leave it like this till we add a type to represent tensor sizes
185
+ return n.type
186
+
187
+ @register_inference_rule(torch.transpose)
188
+ def transpose_inference_rule(n: Node):
189
+ """
190
+ We check that dimensions for the transpose operations
191
+ are within range of the tensor type of the node
192
+ """
193
+ if n.target == torch.transpose:
194
+ assert isinstance(n.args[0], Node)
195
+ t = n.args[0].type
196
+
197
+ assert isinstance(n.args[1], int)
198
+ assert isinstance(n.args[2], int)
199
+ dim1, dim2 = n.args[1], n.args[2]
200
+
201
+ if t == Dyn:
202
+ n.type = Dyn
203
+ return n.type
204
+
205
+ elif isinstance(t, TensorType):
206
+ if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):
207
+ new_type = list(t.__args__)
208
+ new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
209
+ final = TensorType(new_type)
210
+ n.type = get_greatest_upper_bound(n.type, final)
211
+ return n.type
212
+ else:
213
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
214
+ else:
215
+ raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
216
+
217
+
218
+ @register_inference_rule(torch.reshape)
219
+ def reshape_inference_rule(n: Node):
220
+ """
221
+ Without dynamism, the rule checks that the
222
+ product of the elements of the argument tensor
223
+ type is equal to the product of the elements
224
+ of the required shape. We gradualize this rule
225
+ by adding a case to handle fully dynamic input
226
+ as well as input where some of the tensor dimensions
227
+ are unknown. In this case we check for divisibility
228
+ """
229
+ assert isinstance(n.args[0], Node)
230
+ t1 = n.args[0].type
231
+
232
+ assert isinstance(n.args[1], list)
233
+ t2 = n.args[1]
234
+ t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])
235
+
236
+ # if we do not know the original tensor dimension,
237
+ # we return the required dimension
238
+ if t1 == Dyn:
239
+ n.type = t2_type
240
+ return t2_type
241
+
242
+ # if any of the dimensions are unknown,
243
+ # we check for divisibility
244
+ elif isinstance(t1, TensorType):
245
+ assert isinstance(t1, TensorType)
246
+ a = [e if e != Dyn else 1 for e in t1.__args__]
247
+ p1 = reduce(operator.mul, a)
248
+ p2 = reduce(operator.mul, t2)
249
+ if p1 % p2 == 0 or p2 % p1 == 0:
250
+ n.type = t2_type
251
+ return t2_type
252
+ else:
253
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
254
+ else:
255
+ raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
256
+
257
+ @register_inference_rule(BatchNorm2d)
258
+ def bn2d_inference_rule(n: Node, module_instance):
259
+ """
260
+ Given a BatchNorm2D instance and a node check the following conditions:
261
+ - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)
262
+ - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
263
+ - t is consistent with t'
264
+ - x_2 is consistent with the module's num_features
265
+ - x_2' is consistent with the module's num_features
266
+ output type: the more precise type of t and t'
267
+ """
268
+ assert isinstance(n.args[0], Node)
269
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
270
+ arg_type = n.args[0].type
271
+ n.type = expand_to_tensor_dim(n.type, 4)
272
+
273
+ # we check the conditions on the incoming argument
274
+ # and any existing annotation
275
+ # we also check for consistency between both annotations
276
+ if is_consistent(arg_type.__args__[1], module_instance.num_features) and \
277
+ is_consistent(n.type.__args__[1], module_instance.num_features) and \
278
+ is_consistent(arg_type, n.type):
279
+
280
+ # we choose the more precise type
281
+ # to be the node type
282
+ # so if an incoming argument has more type information
283
+ # we set this node's type to be the argument type
284
+ n.type = get_greatest_upper_bound(arg_type, n.type)
285
+ return n.type
286
+ else:
287
+ raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')
288
+
289
+
290
+ def calculate_out_dimension(d_in, module_instance, index):
291
+ """
292
+ For calculating h_in and w_out according to the conv2D documentation
293
+ """
294
+ padding = (module_instance.padding, module_instance.padding) \
295
+ if isinstance(module_instance.padding, int) else module_instance.padding
296
+ kernel_size = (module_instance.kernel_size, module_instance.kernel_size) \
297
+ if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size
298
+ stride = (module_instance.stride, module_instance.stride) \
299
+ if isinstance(module_instance.stride, int) else module_instance.stride
300
+ dilation = (module_instance.dilation, module_instance.dilation) \
301
+ if isinstance(module_instance.dilation, int) else module_instance.dilation
302
+
303
+ DIMENSION_TYPES = (int, sympy.Symbol)
304
+
305
+ if d_in == Dyn:
306
+ return Dyn
307
+
308
+ elif isinstance(d_in, DIMENSION_TYPES):
309
+ n = d_in + 2 * padding[index] - \
310
+ dilation[index] * \
311
+ (kernel_size[index] - 1) - 1
312
+
313
+ return (n // stride[0]) + 1
314
+
315
+ else:
316
+ raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn. Received {type(d_in)}')
317
+
318
+
319
+ def get_greatest_upper_bound(type1, type2):
320
+ """
321
+ Get the most precise type that's consistent with the given types
322
+ """
323
+ if type1 == Dyn:
324
+ return type2
325
+ elif type2 == Dyn:
326
+ return type1
327
+ elif isinstance(type1, TensorType) and isinstance(type2, TensorType):
328
+ if not is_consistent(type1, type2):
329
+ raise TypeError(f'Inconsistent types {type1}, {type2}')
330
+ gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)]
331
+ return TensorType(tuple(gub))
332
+
333
+
334
+ @register_inference_rule(Conv2d)
335
+ def conv2d_inference_rule(n: Node, module_instance):
336
+ """
337
+ Given a Conv2D instance and a node check the following conditions:
338
+ - the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W)
339
+ - the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
340
+ - x_2 is consistent with the module's in_channels
341
+ - let o = (x_1, out_channels, H_out, W_out)
342
+ then the output is the greatest upper bound of o and the existing node type t'.
343
+ """
344
+ assert isinstance(n.args[0], Node)
345
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
346
+ arg_type = n.args[0].type
347
+ curr_node_type = expand_to_tensor_dim(n.type, 4)
348
+
349
+ if is_consistent(arg_type.__args__[1], module_instance.in_channels):
350
+ w_in = arg_type.__args__[3]
351
+ h_in = arg_type.__args__[2]
352
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
353
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
354
+ new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))
355
+ gub = get_greatest_upper_bound(new_type, curr_node_type)
356
+ n.type = gub
357
+ return n.type
358
+ else:
359
+ raise TypeError(f'Cannot apply {module_instance} with input type { arg_type} and existing type {n.type} on {n}')
360
+
361
+
362
+ @register_inference_rule(torch.nn.ReLU)
363
+ def relu_inference_rule(n: Node, module_instance):
364
+ """
365
+ Input and output shapes should be equal.
366
+ """
367
+ assert isinstance(n.args[0], Node)
368
+
369
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
370
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
371
+
372
+ if isinstance(n.args[0].type, TensorType):
373
+ n.type = get_greatest_upper_bound(n.args[0].type, n.type)
374
+ return n.type
375
+
376
+
377
+ def maxpool2d_check(typ, module_instance):
378
+ """
379
+ Applies the maxpool2d shape information to the input
380
+ this affects the last two dimensions
381
+ """
382
+ new_type_list = list(typ.__args__)
383
+ if len(new_type_list) == 4 or len(new_type_list) == 3:
384
+ w_in = new_type_list[-1]
385
+ h_in = new_type_list[-2]
386
+
387
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
388
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
389
+
390
+ new_type_list[-1] = w_out
391
+ new_type_list[-2] = h_out
392
+ return TensorType(tuple(new_type_list))
393
+
394
+ else:
395
+ raise TypeError(f'Wrong size {typ} for {module_instance}')
396
+
397
+
398
+ @register_inference_rule(torch.nn.MaxPool2d)
399
+ def maxpool2d_inference_rule(n: Node, module_instance):
400
+ """
401
+ Given a MaxPool2D instance and a node check the following conditions:
402
+ - Input size matches size 3 or 4
403
+ - Current node type is consistent with the output type we will calculate
404
+ - Input size matches output size and the last two dimensions of the output
405
+ are w_out and h_out. The remaining dimensions are the same as the input
406
+ - Our final result is the greatest upper bound of the output we calculate
407
+ and the current node type.
408
+ """
409
+ assert isinstance(n.args[0], Node)
410
+
411
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
412
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
413
+ if isinstance(n.args[0].type, TensorType):
414
+ output = maxpool2d_check(n.args[0].type, module_instance)
415
+ n.type = get_greatest_upper_bound(output, n.type)
416
+ return n.type
417
+
418
+
419
+
420
+ def linear_check(tensor_type, module_instance):
421
+ """
422
+ Checks that an input tensor type satisfies the conditions for linear operation
423
+ and returns the output type based on in and out features given by module_instance
424
+ """
425
+ if len(tensor_type.__args__) >= 2:
426
+ if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):
427
+ new_type_args = list(tensor_type.__args__)
428
+ new_type_args[-1] = module_instance.out_features
429
+ return TensorType(tuple(new_type_args))
430
+ else:
431
+ raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')
432
+ else:
433
+ raise TypeError(f'Type {tensor_type} must have rank 2 or more.')
434
+
435
+
436
+ @register_inference_rule(torch.nn.Linear)
437
+ def linear_inference_rule(n: Node, module_instance):
438
+ """
439
+ Applies the shape information to the input then gets the greatest upper bound
440
+ of the resulting type and the existing type
441
+ """
442
+ assert isinstance(n.args[0], Node)
443
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
444
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
445
+ if isinstance(n.args[0].type, TensorType):
446
+ output_type = linear_check(n.args[0].type, module_instance)
447
+ n.type = get_greatest_upper_bound(output_type, n.type)
448
+ return n.type
449
+
450
+
451
+ def adaptiveavgpool2d_check(tensor_type, module_instance):
452
+ output_size = module_instance.output_size
453
+ if isinstance(output_size, int):
454
+ output_size = [output_size, output_size]
455
+ elif isinstance(output_size, tuple):
456
+ output_size = list(output_size)
457
+ if output_size[0] is None:
458
+ output_size[0] = output_size[1]
459
+ if output_size[1] is None:
460
+ output_size[1] = output_size[0]
461
+
462
+ new_type_list = list(tensor_type.__args__)
463
+
464
+ if len(tensor_type.__args__) == 4 or len(tensor_type.__args__) == 3:
465
+ new_type_list[-1] = output_size[1]
466
+ new_type_list[-2] = output_size[0]
467
+
468
+ return TensorType(tuple(new_type_list))
469
+
470
+ else:
471
+ raise TypeError(f'Tensor ranks must be 3 or 4. Got {tensor_type}')
472
+
473
+ @register_inference_rule(torch.nn.AdaptiveAvgPool2d)
474
+ def adaptiveavgpool2d_inference_rule(n: Node, module_instance):
475
+ """
476
+ The input and output sizes should be the same except for the last
477
+ two dimensions taken from the input, which represent width and height
478
+ """
479
+ assert isinstance(n.args[0], Node)
480
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
481
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
482
+ if isinstance(n.args[0].type, TensorType):
483
+ output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)
484
+ n.type = get_greatest_upper_bound(n.type, output_type)
485
+ return n.type
486
+
487
+ def flatten_check(tensor_type, start_dim, end_dim):
488
+ l = len(tensor_type.__args__)
489
+
490
+ start_dim = l if start_dim == -1 else abs(start_dim)
491
+ end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
492
+
493
+ if 0 <= start_dim <= (l - 1) and 0 <= end_dim <= l and start_dim < end_dim:
494
+ my_args = list(tensor_type.__args__)
495
+ lhs = my_args[0:start_dim]
496
+ rhs = my_args[end_dim:]
497
+ mid = my_args[start_dim:end_dim]
498
+ if Dyn in mid:
499
+ mid = [Dyn]
500
+ else:
501
+ mid = [reduce(operator.mul, my_args[start_dim:end_dim])]
502
+ new_type_list = lhs + mid + rhs
503
+ return TensorType(tuple(new_type_list))
504
+ else:
505
+ raise TypeError(f'Incompatible dimensions {start_dim}, {end_dim - 1} in type {tensor_type}')
506
+
507
+ @register_inference_rule(torch.flatten)
508
+ def flatten_inference_rule(n: Node):
509
+ """
510
+ Applies the flatten shape information to the input then gets the
511
+ greatest upper bound of the resulting type and the existing type
512
+ """
513
+ assert isinstance(n.args[0], Node)
514
+
515
+ # set the default start and end dims
516
+ start_dim = 1
517
+ end_dim = -1
518
+
519
+ if len(n.args) > 1:
520
+ assert isinstance(n.args[1], int)
521
+ start_dim = n.args[1]
522
+
523
+ if len(n.args) > 2:
524
+ assert isinstance(n.args[2], int)
525
+ end_dim = n.args[2]
526
+
527
+ if n.args[0].type == Dyn and isinstance(n.type, TensorType):
528
+ n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
529
+
530
+ if isinstance(n.args[0].type, TensorType):
531
+ output_type = flatten_check(n.args[0].type, start_dim, end_dim)
532
+ n.type = get_greatest_upper_bound(output_type , n.type)
533
+
534
+ return n.type
535
+
536
+ class GraphTypeChecker:
537
+ def __init__(self, env, traced):
538
+ self.env = env
539
+ self.traced = traced
540
+
541
+ def type_check(self):
542
+ """
543
+ A gradual type checker for graphs
544
+ Effect: every node's field type will be
545
+ populated with a type after type-checking is done
546
+ """
547
+ graph = self.traced.graph
548
+
549
+ # type check every node with gradual type rules
550
+ # if any node does not type check return false
551
+ for n in graph.nodes:
552
+ self.type_check_node(n)
553
+ return True
554
+
555
+ def type_check_node(self, n: Node):
556
+ """
557
+ Type check a given fx node.
558
+ Current operations:
559
+ - Reshape
560
+ - Transpose
561
+ - Add
562
+ - Relu
563
+ - conv2d
564
+ - batchnorm2d
565
+ - flatten
566
+ - maxpool2d
567
+ - adaptiveavgpool2d
568
+ - linear
569
+ """
570
+ if n.type is None:
571
+ n.type = Dyn
572
+
573
+ if n.op == 'placeholder':
574
+ return n.type
575
+
576
+ elif n.op == 'get_attr':
577
+ t = get_parameter(self.traced, n.target) # type: ignore[arg-type]
578
+ if isinstance(t.data, torch.Tensor):
579
+ n.type = TensorType(t.data.shape)
580
+ return n.type
581
+
582
+ elif n.op == 'call_function':
583
+ if n.target == getattr:
584
+ assert getattr in _INFERENCE_RULES
585
+ return _INFERENCE_RULES[n.target](n, self.traced)
586
+
587
+ elif n.target in _INFERENCE_RULES:
588
+ return _INFERENCE_RULES[n.target](n)
589
+ else:
590
+ raise RuntimeError(f'No inference rule registered for target {n.target}!')
591
+
592
+ elif n.op == 'call_module':
593
+ module_instance = self.traced.get_submodule(n.target)
594
+ if type(module_instance) in _INFERENCE_RULES:
595
+ return _INFERENCE_RULES[type(module_instance)](n, module_instance)
596
+ else:
597
+ raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
598
+
599
+ elif n.op == 'output':
600
+ def get_node_type(a):
601
+ return a.type
602
+ n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
603
+ return n.type
604
+
605
+ else:
606
+ raise NotImplementedError(f"Method {n.op} not yet implemented")
607
+
608
+
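A hedged usage sketch for the checker above. Passing an empty dict as env mirrors the constructor signature shown here; TinyNet is an illustrative assumption, and unannotated placeholders simply stay Dyn while module outputs pick up inferred tensor types.

import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker

class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(x))

traced = symbolic_trace(TinyNet())
GraphTypeChecker({}, traced).type_check()   # populates n.type on every node
for node in traced.graph.nodes:
    print(node.name, node.type)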
609
+ @register_refinement_rule(Conv2d)
610
+ def conv_refinement_rule(n: Node):
611
+ """
612
+ The equality constraints are between the first dimension of
613
+ the input and output
614
+ """
615
+ res = []
616
+ assert isinstance(n.args[0], Node)
617
+ arg_type = n.args[0].type
618
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
619
+ res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
620
+ return res
621
+
622
+
623
+ @register_refinement_rule(torch.nn.Linear)
624
+ def linear_refinement_rule(n: Node):
625
+ """
626
+ The equality constraints are between the first dimension of
627
+ the input and output
628
+ """
629
+ res = []
630
+ assert isinstance(n.args[0], Node)
631
+ arg_type = n.args[0].type
632
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
633
+ res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
634
+ return res
635
+
636
+ @register_refinement_rule(BatchNorm2d)
637
+ @register_refinement_rule(torch.nn.ReLU)
638
+ def all_eq(n: Node):
639
+ """
640
+ For operations where the input shape is equal to the output shape
641
+ """
642
+ res = []
643
+ assert isinstance(n.args[0], Node)
644
+ arg_type = n.args[0].type
645
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
646
+ args1 = arg_type.__args__
647
+ args2 = n.type.__args__
648
+ res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
649
+ return res
650
+
651
+
652
+ @register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
653
+ @register_refinement_rule(torch.nn.MaxPool2d)
654
+ def first_two_eq(n: Node):
655
+ """
656
+ For operations where the first two dimensions of the input and output shape
657
+ are equal
658
+ """
659
+ res = []
660
+ assert isinstance(n.args[0], Node)
661
+ arg_type = n.args[0].type
662
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
663
+ args1 = arg_type.__args__
664
+ args2 = n.type.__args__
665
+ res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
666
+ return res
667
+
668
+
669
+ @register_refinement_rule(torch.add)
670
+ @register_refinement_rule(operator.add)
671
+ def element_wise_eq(n: Node):
672
+ """
673
+ Handles element-wise operations, including broadcasting.
674
+ Note that after applying broadcasting to the arguments
675
+ we are able to determine if certain dimensions have not been broadcast
676
+ if they are symbolically equal.
677
+
678
+ in this case, we can establish equality between those dimensions and the
679
+ corresponding output dimensions.
680
+
681
+ Note that it takes two iterations for this result. One iteration to establish
682
+ equality between certain dimensions of the operands (requiring the whole solver
683
+ including unification) and another iteration to establish equality between the operands
684
+ and the resulting type, requiring another round of constraint generation and unification.
685
+ """
686
+ res = []
687
+ if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
688
+ arg_type1 = n.args[0].type
689
+ arg_type2 = n.args[1].type
690
+ if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):
691
+ args1, args2 = broadcast_types(arg_type1, arg_type2)
692
+ # by this point, we know that args1 and args2 are the same size.
693
+ a1 = args1.__args__
694
+ a2 = args2.__args__
695
+ a3 = n.type.__args__
696
+
697
+ # we would be here in the second iteration where we establish equality
698
+ # between operand type dimensions and the resulting type dimensions
699
+ r = []
700
+ for x, y, z in zip(a1, a2, a3):
701
+ if x == y:
702
+ r.append(Equality(x, z))
703
+ res = r
704
+ return res
705
+
706
+
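For intuition, here is the broadcasting behaviour this rule reasons about, shown with torch.broadcast_shapes rather than the internal broadcast_types helper:

import torch

a, b = (2, 3, 4), (1, 3, 4)
assert torch.broadcast_shapes(a, b) == (2, 3, 4)
# The trailing dimensions (3 and 4) agree between the operands, so the rule can
# equate them with the corresponding output dimensions; the leading dimension
# differs (2 vs 1), so no equality constraint is emitted for it.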
707
+ @register_refinement_rule(torch.flatten)
708
+ def flatten_refinement_rule(n: Node):
709
+ """
710
+ Generates equality constraints between the dimensions of the input and output
711
+ that will not be involved in the flatten operation
712
+ """
713
+ assert isinstance(n.args[0], Node)
714
+
715
+ eq_const = []
716
+
717
+ start_dim = 1
718
+ end_dim = -1
719
+
720
+ if len(n.args) > 1:
721
+ assert isinstance(n.args[1], int)
722
+ start_dim = n.args[1]
723
+
724
+ if len(n.args) > 2:
725
+ assert isinstance(n.args[2], int)
726
+ end_dim = n.args[2]
727
+
728
+ if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
729
+ l = len(n.type.__args__)
730
+ arg_type = n.args[0].type
731
+ start_dim = l if start_dim == -1 else start_dim
732
+ end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
733
+
734
+ for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
735
+ eq_const.append(Equality(t1, t2))
736
+
737
+ for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
738
+ eq_const.append(Equality(t1, t2))
739
+ return eq_const
740
+
741
+
742
+ @register_algebraic_expressions_inference_rule(Conv2d)
743
+ def conv_rule(n: Node, module_instance):
744
+ """
745
+ Represents the output in terms of an algebraic expression w.r.t.
746
+ the input when possible
747
+ """
748
+ assert isinstance(n.args[0], Node)
749
+ arg_type = n.args[0].type
750
+ if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
751
+ w_in = arg_type.__args__[3]
752
+ h_in = arg_type.__args__[2]
753
+ h_out = calculate_out_dimension(h_in, module_instance, 0)
754
+ w_out = calculate_out_dimension(w_in, module_instance, 1)
755
+ new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
756
+ n.type = new_type
757
+ return new_type
758
+
759
+ class Refine:
760
+ """
761
+ Symbolic shape inference.
762
+ Generates constraints over type variables.
763
+ Currently all constraints are equality constraints.
764
+ """
765
+ def __init__(self, traced):
766
+ self.constraints = []
767
+ self.traced = traced
768
+ self.symbol_iter = itertools.count(start=0, step=1)
769
+
770
+ def refine(self):
771
+ """
772
+ Generates constraints for
773
+ every node in the graph based on
774
+ the operation.
775
+ """
776
+ graph = self.traced.graph
777
+ for n in graph.nodes:
778
+ self.refine_node(n)
779
+ return True
780
+
781
+ def symbolic_relations(self):
782
+ """
783
+ Infers algebraic relations
784
+ """
785
+ graph = self.traced.graph
786
+ for n in graph.nodes:
787
+ self.infer_symbolic_relations(n)
788
+ return True
789
+
790
+ def replace_dyn_with_fresh_var(self, typ):
791
+ """
792
+ Replace all unknown types with fresh type variables.
793
+ """
794
+ if typ == Dyn:
795
+ new_symbol = Var(next(self.symbol_iter))
796
+ return new_symbol
797
+ elif isinstance(typ, TensorType):
798
+ new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]
799
+ return TensorType(tuple(new_args))
800
+ elif isinstance(typ, list):
801
+ return [self.replace_dyn_with_fresh_var(t) for t in typ]
802
+ elif isinstance(typ, tuple):
803
+ return tuple(self.replace_dyn_with_fresh_var(t) for t in typ)
804
+ else:
805
+ return typ
806
+
807
+
808
+ def convert_to_sympy_symbols(self, typ):
809
+ """
810
+ Replace all type variables with sympy symbols.
811
+ """
812
+ if isinstance(typ, Var):
813
+ return sympy.symbols(str(typ))
814
+ elif isinstance(typ, TensorType):
815
+ new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__]
816
+ return TensorType(tuple(new_args))
817
+ elif isinstance(typ, list):
818
+ return [self.convert_to_sympy_symbols(t) for t in typ]
819
+ elif isinstance(typ, tuple):
820
+ return tuple(self.convert_to_sympy_symbols(t) for t in typ)
821
+ else:
822
+ return typ
823
+
824
+ def refine_node(self, n: Node):
825
+ """
826
+ Returns a list of equality constraints for
827
+ call_module and call_function nodes.
828
+ Models the relation between input and output dimensions
829
+ using constraints in case they are both tensors.
830
+ All operations used in resnet50 are defined.
831
+ """
832
+ if n.type is None:
833
+ n.type = Dyn
834
+
835
+ n.type = self.replace_dyn_with_fresh_var(n.type)
836
+
837
+ if n.op == 'call_function':
838
+ if n.target in _REFINEMENT_RULES:
839
+ self.constraints += _REFINEMENT_RULES[n.target](n)
840
+ else:
841
+ pass
842
+
843
+ if n.op == 'call_module':
844
+ module_instance = self.traced.get_submodule(n.target)
845
+ if type(module_instance) in _REFINEMENT_RULES:
846
+ self.constraints += _REFINEMENT_RULES[type(module_instance)](n)
847
+ else:
848
+ pass
849
+
850
+ if n.op == 'output':
851
+ def get_node_type(a):
852
+ return a.type
853
+ n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
854
+ return n.type
855
+
856
+ else:
857
+ pass
858
+
859
+ def infer_symbolic_relations(self, n: Node):
860
+ n.type = self.convert_to_sympy_symbols(n.type)
861
+ if n.op == 'call_function':
862
+ if n.target in _RULES:
863
+ return _RULES[n.target](n)
864
+ else:
865
+ pass
866
+
867
+ if n.op == 'call_module':
868
+ module_instance = self.traced.get_submodule(n.target)
869
+ if type(module_instance) in _RULES:
870
+ return _RULES[type(module_instance)](n, module_instance)
871
+ else:
872
+ pass
873
+
874
+ if n.op == 'output':
875
+ def get_node_type(a):
876
+ return a.type
877
+ n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
878
+ return n.type
879
+
880
+ else:
881
+ pass
882
+
883
+ def get_parameter(traced, target: str):
884
+ """
885
+ Returns the parameter given by ``target`` if it exists,
886
+ otherwise throws an error.
887
+
888
+ See the docstring for ``get_submodule`` for a more detailed
889
+ explanation of this method's functionality as well as how to
890
+ correctly specify ``target``.
891
+
892
+ Args:
893
+ target: The fully-qualified string name of the Parameter
894
+ to look for. (See ``get_submodule`` for how to specify a
895
+ fully-qualified string.)
896
+
897
+ Returns:
898
+ torch.nn.Parameter: The Parameter referenced by ``target``
899
+
900
+ Raises:
901
+ AttributeError: If the target string references an invalid
902
+ path or resolves to something that is not an
903
+ ``nn.Parameter``
904
+ """
905
+ module_path, _, param_name = target.rpartition(".")
906
+
907
+ mod: torch.nn.Module = traced.get_submodule(module_path)
908
+
909
+ if not hasattr(mod, param_name):
910
+ raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")
911
+
912
+ param: torch.nn.Parameter = getattr(mod, param_name)
913
+
914
+ return param
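A small, hedged example of get_parameter on a traced module; the submodule name "0" follows nn.Sequential's default child naming and is an assumption of this sketch.

import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.graph_gradual_typechecker import get_parameter

net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, kernel_size=3))
traced = symbolic_trace(net)
weight = get_parameter(traced, "0.weight")   # fully-qualified: submodule "0", parameter "weight"
assert tuple(weight.shape) == (8, 3, 3, 3)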
venv/lib/python3.10/site-packages/torch/fx/experimental/merge_matmul.py ADDED
@@ -0,0 +1,171 @@
1
+ import torch
2
+
3
+ from torch.fx.node import Node
4
+ from torch.fx._symbolic_trace import symbolic_trace
5
+ from torch.fx.passes.tools_common import legalize_graph
6
+ import itertools
7
+ import operator
8
+
9
+ from typing import Dict, List, Tuple
10
+
11
+
12
+ def split_result_tensors(
13
+ result: torch.Tensor, inputs: List[torch.Tensor]
14
+ ) -> Tuple[torch.Tensor, ...]:
15
+ """
16
+ A free function for use in the merge_matmul graph transformation below that
17
+ splits the output from a merged matmul into the individual results for each
18
+ input tensor.
19
+
20
+ Arguments:
21
+ result: The merged matmul result tensor.
22
+ inputs: The list of inputs that were merged into one for the matmul.
23
+
24
+ Returns:
25
+ Tuple of matmul results, one for each input tensor.
26
+ """
27
+ # When fx tracer is running, x.shape[0] will be torch.fx.Attribute but we
28
+ # need an int even when tracing
29
+ if isinstance(result, torch.fx.Proxy):
30
+ splits = [0] * len(inputs)
31
+ else:
32
+ splits = [x.shape[0] for x in inputs]
33
+
34
+ return torch.split(result, splits)
35
+
36
+
37
+ def may_depend_on(a: Node, b: Node, search_depth: int = 6):
38
+ """
39
+ Determine if one node depends on another in a torch.fx.Graph.
40
+
41
+ Arguments:
42
+ a: The node that may have a dependency on b.
43
+ b: The node that a may have a dependency on.
44
+ search_depth: In the case of an indirect dependency, this function
45
+ searches up to this many nodes away in search of a
46
+ data dependency. If none is found, the function
47
+ makes the conservative assumption that there is a
48
+ dependency.
49
+
50
+ Returns:
51
+ True if a may depend on b, False if it definitely does not.
52
+ """
53
+ # Equivalence is defined as dependence.
54
+ if a == b:
55
+ return True
56
+
57
+ # If a has no inputs, it cannot depend on b.
58
+ if len(a.all_input_nodes) == 0:
59
+ return False
60
+
61
+ # If the search depth has been exhausted and no conclusion has been
62
+ # reached, assume that there is a data dependency.
63
+ if search_depth == 0:
64
+ return True
65
+
66
+ # Recursively check all inputs of a.
67
+ for inp in a.all_input_nodes:
68
+ if may_depend_on(inp, b, search_depth - 1):
69
+ return True
70
+
71
+ return False
72
+
73
+
74
+ def are_nodes_independent(nodes: List[Node]):
75
+ """
76
+ Check if all of the given nodes are pairwise-data independent.
77
+
78
+ Arguments:
79
+ nodes: The nodes to check for data dependencies.
80
+
81
+ Returns:
82
+ True if no pair of the given nodes has a data dependency, False otherwise.
83
+ """
84
+ # For each pair in nodes:
85
+ for i, j in itertools.combinations(nodes, 2):
86
+ if may_depend_on(i, j) or may_depend_on(j, i):
87
+ return False
88
+
89
+ return True
90
+
91
+
92
+ def merge_matmul(in_mod: torch.nn.Module):
93
+ """
94
+ A graph transformation that merges matrix multiplication operations that share the same right-hand
95
+ side operand into one large matrix multiplication.
96
+ ____ _________ _________
97
+ ---- | | | | M| A * C |
98
+ M| A | T| B | * K| C | = |---------|
99
+ ---- , | | | | T| B * C |
100
+ K ---- --------- ---------
101
+ K R R
102
+ """
103
+ gm = symbolic_trace(in_mod)
104
+
105
+ rhs_users: Dict[Node, List[Node]] = {}
106
+ lhs_users: Dict[Node, List[Node]] = {}
107
+
108
+ # Populate rhs_users and lhs_users - maps from LHS/RHS matrix multiply operands to
109
+ # the matmul of which they are the LHS/RHS.
110
+ for node in gm.graph.nodes:
111
+ if node.op != "call_function" or node.target is not torch.matmul:
112
+ continue
113
+
114
+ lhs, rhs = node.args
115
+
116
+ # TODO: Properly handle aliasing caused by get_attr. For now,
117
+ # use the attribute name as the operand if the node is a
118
+ # get_attr.
119
+ lhs = lhs.target if lhs.op == "get_attr" else lhs
120
+ rhs = rhs.target if rhs.op == "get_attr" else rhs
121
+
122
+ lhs_users.setdefault(lhs, []).append(node)
123
+ rhs_users.setdefault(rhs, []).append(node)
124
+
125
+ for rhs, mms in rhs_users.items():
126
+ # There must be at least two matmuls for a merge to make sense.
127
+ if len(mms) < 2:
128
+ continue
129
+
130
+ # All matmuls must not depend on each other directly or indirectly
131
+ # in order for the merge to be possible.
132
+ if not are_nodes_independent(mms):
133
+ continue
134
+
135
+ lhs_vals = [mm.args[0] for mm in mms]
136
+
137
+ # Merge the matmul.
138
+ # Collect a list of LHS operands and the single RHS operand.
139
+ lhs = [gm.graph.get_attr(l) if isinstance(l, str) else l for l in lhs_vals]
140
+ rhs = gm.graph.get_attr(rhs) if isinstance(rhs, str) else rhs
141
+
142
+ # Concatenate all the LHS operands.
143
+ merge_mm_cat = gm.graph.call_function(torch.cat, (lhs,), {})
144
+
145
+ # Multiply the concatenated LHS operands with the one RHS. This will produce
146
+ # the same results as all the individual matmuls involving rhs in the original graph,
147
+ # but they will all be concatenated together.
148
+ merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {})
149
+
150
+ # Split the result of the merged matmul using the shapes of the LHS operands
151
+ # to ascertain how large each chunk should be.
152
+ merge_mm_split = gm.graph.call_function(
153
+ split_result_tensors, (merge_mm, lhs), {}
154
+ )
155
+ merge_mm_res = [
156
+ gm.graph.call_function(operator.getitem, (merge_mm_split, out), {})
157
+ for out in range(len(lhs))
158
+ ]
159
+
160
+ # Replace all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul.
161
+ for old, new in zip(mms, merge_mm_res):
162
+ old.replace_all_uses_with(new)
163
+ gm.graph.erase_node(old)
164
+
165
+ # All of the new nodes created above were inserted at the end, so we need to sort
166
+ # the nodes topologically to make sure all definitions precede uses.
167
+ legalize_graph(gm)
168
+
169
+ gm.recompile()
170
+ gm.graph.lint()
171
+ return gm
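A hedged usage sketch of the pass above: two matmuls that share the same right-hand side are fused into one concatenated matmul followed by a split, without changing the numerics. The toy module and shapes are assumptions for illustration.

import torch
from torch.fx.experimental.merge_matmul import merge_matmul

class TwoMatmuls(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.rhs = torch.nn.Parameter(torch.randn(4, 5))

    def forward(self, a, b):
        return torch.matmul(a, self.rhs), torch.matmul(b, self.rhs)

mod = TwoMatmuls()
fused = merge_matmul(mod)                 # cat -> single matmul -> split

a, b = torch.randn(2, 4), torch.randn(3, 4)
for ref, out in zip(mod(a, b), fused(a, b)):
    assert torch.allclose(ref, out)       # results match the unmerged module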
venv/lib/python3.10/site-packages/torch/fx/experimental/meta_tracer.py ADDED
@@ -0,0 +1,268 @@
1
+ import torch
2
+ import torch.fx
3
+ import warnings
4
+ import functools
5
+ import builtins
6
+
7
+ from typing import Any, Callable, Dict, Optional, Union
8
+
9
+ def embedding_override(self, input):
10
+ return torch.empty(*input.shape, self.weight.shape[-1], device='meta')
11
+
12
+
13
+ def nn_layernorm_override(self, input):
14
+ return input
15
+
16
+
17
+ def torch_relu_override(x):
18
+ return x
19
+
20
+
21
+ def torch_nn_relu_override(self, x):
22
+ return x
23
+
24
+
25
+ def functional_relu_override(x, inplace=False):
26
+ assert not inplace, 'dont support inplace functional.relu for metatensor analysis'
27
+ return x
28
+
29
+
30
+ def torch_where_override(condition, x, y):
31
+ # torch.where returns the broadcasted tensor of condition, x, and y,
32
+ # so hack it by using addition
33
+ return condition.to(device='meta') + x.to(device='meta') + y.to(device='meta')
34
+
35
+
36
+ def torch_abs_override(input, *, out=None):
37
+ assert out is None, 'Dont support in-place abs for MetaTensor analysis'
38
+ return input
39
+
40
+ manual_meta_overrides : Dict[Callable, Callable] = {
41
+ torch.nn.Embedding: embedding_override,
42
+ torch.nn.LayerNorm: nn_layernorm_override,
43
+ torch.relu: torch_relu_override,
44
+ torch.nn.functional.relu: functional_relu_override,
45
+ torch.nn.ReLU: torch_nn_relu_override,
46
+ torch.where: torch_where_override,
47
+ torch.abs: torch_abs_override,
48
+ }
49
+
50
+ def gen_constructor_wrapper(target):
51
+ @functools.wraps(target)
52
+ def wrapper(*args, **kwargs):
53
+ proxy = None
54
+
55
+ def check_has_proxy(v):
56
+ if isinstance(v, torch.fx.Proxy):
57
+ nonlocal proxy
58
+ proxy = v
59
+ torch.fx.node.map_aggregate(args, check_has_proxy)
60
+ torch.fx.node.map_aggregate(kwargs, check_has_proxy)
61
+
62
+ if proxy is not None:
63
+ return proxy.tracer.create_proxy('call_function', target, args, kwargs)
64
+ else:
65
+ return target(*args, **kwargs)
66
+ return wrapper, target
67
+
68
+ class MetaProxy(torch.fx.Proxy):
69
+ def install_tensor_meta(self, tensor_meta):
70
+ self._tensor_meta = tensor_meta
71
+
72
+ def size(self, dim=None):
73
+ if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
74
+ return self._tensor_meta.size(*[dim] if dim else [])
75
+ return self.tracer.create_proxy('call_method', 'size', (self, dim) if dim else (self,), {})
76
+
77
+ def dim(self):
78
+ if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
79
+ return self._tensor_meta.dim()
80
+ return self.tracer.create_proxy('call_method', 'dim', (self,), {})
81
+
82
+ @property
83
+ def shape(self):
84
+ if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
85
+ return self._tensor_meta.shape
86
+ return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'shape'), {})
87
+
88
+ @property
89
+ def dtype(self):
90
+ if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
91
+ return self._tensor_meta.dtype
92
+ return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'dtype'), {})
93
+
94
+ @property
95
+ def device(self):
96
+ # Hack so we can track when devices are used. During meta-tensor propagation,
97
+ # replace these values with a constant 'meta'
98
+ return MetaDeviceAttribute(self, 'device')
99
+
100
+ def __getattr__(self, k):
101
+ if k == '_tensor_meta':
102
+ return self.__getattribute__(k)
103
+ # note: not added to the graph yet, if this is a method call
104
+ # we peephole optimize to the method invocation
105
+ return MetaAttribute(self, k)
106
+
107
+ class MetaAttribute(MetaProxy):
108
+ def __init__(self, root, attr: str):
109
+
110
+ self.root = root
111
+ self.attr = attr
112
+ self.tracer = root.tracer
113
+ self._node = None
114
+
115
+ @property
116
+ def node(self):
117
+ # the node for attributes is added lazily, since most will just be method calls
118
+ # which do not rely on the getitem call
119
+ if self._node is None:
120
+ self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
121
+ return self._node
122
+
123
+ def __call__(self, *args, **kwargs):
124
+ return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
125
+
126
+ class MetaDeviceAttribute(MetaAttribute):
127
+ pass
128
+
129
+ def proxys_to_metas(v):
130
+ if isinstance(v, MetaDeviceAttribute):
131
+ return 'meta'
132
+ if isinstance(v, torch.fx.Proxy):
133
+ assert isinstance(v, MetaProxy), f'Expected MetaProxy but got {type(v)}'
134
+ assert hasattr(v, '_tensor_meta'), 'MetaProxy does not have an associated meta'
135
+ return v._tensor_meta
136
+ return v
137
+
138
+ class MetaTracer(torch.fx.Tracer):
139
+ allow_insert_stateless_mods : bool = True
140
+
141
+ _TORCH_METHODS_TO_PATCH = ['arange', 'zeros', 'ones', 'full_like', 'eye']
142
+
143
+ def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
144
+ rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
145
+
146
+ if kind == 'placeholder' and target in self.meta_args:
147
+ rv.install_tensor_meta(self.meta_args[target])
148
+ return rv
149
+
150
+ if target in self.orig_fns:
151
+ # NOTE: tensor constructors in PyTorch define the `device` argument as
152
+ # *kwargs-only*. That is why this works. If you add methods to
153
+ # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
154
+ # this will break and you will likely see issues where we cannot infer
155
+ # the size of the output.
156
+ if 'device' in kwargs:
157
+ kwargs['device'] = 'meta'
158
+
159
+ try:
160
+ args_metas = torch.fx.node.map_aggregate(args, proxys_to_metas)
161
+ kwargs_metas = torch.fx.node.map_aggregate(kwargs, proxys_to_metas)
162
+
163
+ if kind == 'call_function':
164
+ meta_target = manual_meta_overrides.get(target, target)
165
+ meta_out = meta_target(*args_metas, **kwargs_metas)
166
+ elif kind == 'call_method':
167
+ meta_out = getattr(args_metas[0], target)(*args_metas[1:], **kwargs_metas)
168
+ elif kind == 'call_module':
169
+ assert hasattr(self, 'orig_forward')
170
+ self._disable_module_getattr = True
171
+ try:
172
+ mod = self.root.get_submodule(target)
173
+ mod_type = type(mod)
174
+ if mod_type in manual_meta_overrides:
175
+ meta_out = manual_meta_overrides[mod_type](mod, *args_metas, **kwargs_metas)
176
+ else:
177
+ meta_out = self.orig_forward(*args_metas, **kwargs_metas)
178
+ finally:
179
+ self._disable_module_getattr = False
180
+ elif kind == 'get_attr':
181
+ self._disable_module_getattr = True
182
+ try:
183
+ attr_itr = self.root
184
+ atoms = target.split('.')
185
+ for atom in atoms:
186
+ attr_itr = getattr(attr_itr, atom)
187
+ assert isinstance(attr_itr, torch.Tensor)
188
+ meta_out = attr_itr.to(device='meta')
189
+ finally:
190
+ self._disable_module_getattr = False
191
+ else:
192
+ return rv
193
+
194
+ # TODO
195
+ assert isinstance(rv, torch.fx.Proxy), 'Dont support composite output yet'
196
+ rv.install_tensor_meta(meta_out)
197
+ except Exception as e:
198
+ warnings.warn(f'Could not compute metadata for {kind} target {target}: {e}')
199
+
200
+ return rv
201
+
202
+ def getattr(self, attr, attr_val, parameter_proxy_cache):
203
+ if getattr(self, '_disable_module_getattr', False):
204
+ return attr_val
205
+ else:
206
+ return super().getattr(attr, attr_val, parameter_proxy_cache)
207
+
208
+ def call_module(self, m, forward, args, kwargs):
209
+ self.orig_forward = forward
210
+ return super().call_module(m, forward, args, kwargs)
211
+
212
+ def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str:
213
+ """
214
+ Helper method which tries to insert a module that was not declared as submodule.
215
+ """
216
+ idx = 0
217
+ mod_name = mod.__class__.__name__.lower()
218
+ path = f"{mod_name}_{idx}"
219
+ while hasattr(self.root, path):
220
+ path = f"{mod_name}_{idx}"
221
+ idx += 1
222
+
223
+ self.root.add_module(path, mod)
224
+ return path
225
+
226
+ def path_of_module(self, mod: torch.nn.Module) -> str:
227
+ try:
228
+ return super().path_of_module(mod)
229
+ except NameError as e:
230
+ if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
231
+ path = self._insert_module_as_submodule(mod)
232
+ self.prev_module = path
233
+ return path
234
+ raise
235
+
236
+ def proxy(self, node):
237
+ return MetaProxy(node, self)
238
+
239
+ def trace(self, root, meta_args : Dict[str, torch.Tensor], concrete_args=None):
240
+ assert isinstance(meta_args, dict)
241
+ self.meta_args = meta_args
242
+
243
+ self.patched_torch_methods = {
244
+ target: gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
245
+ }
246
+ self.orig_fns = set()
247
+
248
+ for name, (wrapper, orig) in self.patched_torch_methods.items():
249
+ setattr(torch, name, wrapper)
250
+ self.orig_fns.add(orig)
251
+
252
+ try:
253
+ graph = super().trace(root, concrete_args)
254
+ graph._tracer_extras = {'meta_args': meta_args}
255
+ return graph
256
+ finally:
257
+ for name, (_, orig) in self.patched_torch_methods.items():
258
+ setattr(torch, name, orig)
259
+
260
+
261
+ def symbolic_trace(root : Union[torch.nn.Module, Callable[..., Any]],
262
+ meta_args : Optional[Dict[str, torch.Tensor]] = None,
263
+ concrete_args: Optional[Dict[str, Any]] = None) -> torch.fx.GraphModule:
264
+ tracer = MetaTracer()
265
+ graph = tracer.trace(root, meta_args, concrete_args)
266
+ name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
267
+ gm = torch.fx.GraphModule(tracer.root, graph, name)
268
+ return gm
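A hedged usage sketch of the tracer above: meta tensors let shape-dependent control flow be resolved at trace time without real data. The module and shapes are assumptions; the MetaProxy answers x.shape from the recorded meta tensor, so the branch below is taken during tracing.

import torch
from torch.fx.experimental import meta_tracer

class ShapeAware(torch.nn.Module):
    def forward(self, x):
        if x.shape[-1] > 4:              # resolved at trace time from the meta tensor
            return torch.relu(x)
        return x

gm = meta_tracer.symbolic_trace(
    ShapeAware(),
    meta_args={'x': torch.empty(2, 8, device='meta')},
)
print(gm.graph)                          # contains relu, since 8 > 4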
venv/lib/python3.10/site-packages/torch/fx/experimental/normalize.py ADDED
@@ -0,0 +1,162 @@
1
+ import operator
2
+ from typing import Any, Callable, Dict, Tuple, Optional
3
+
4
+ import torch
5
+ import torch.fx
6
+ import torch.fx as fx
7
+ from torch.fx import Transformer, Proxy
8
+ from torch.fx.node import Argument, Target, Node, map_aggregate
9
+ from torch.fx.operator_schemas import (
10
+ normalize_module,
11
+ normalize_function,
12
+ create_type_hint,
13
+ )
14
+
15
+ from .schema_type_annotation import AnnotateTypesWithSchema
16
+
17
+
18
+ class NormalizeArgs(Transformer):
19
+ """
20
+ Normalize arguments to Python targets. This means that
21
+ `args/kwargs` will be matched up to the module/functional's
22
+ signature and rewritten to exclusively kwargs in positional order
23
+ if `normalize_to_only_use_kwargs` is true. Also populates default
24
+ values. Does not support positional-only parameters or varargs
25
+ parameters (*args, **kwargs).
26
+
27
+ If the nodes have 'type' metadata, it will use it to disambiguate
28
+ overloads. Otherwise, it will throw an error.
29
+
30
+ Example usage:
31
+ m = torchvision.models.resnet18()
32
+ traced = torch.fx.symbolic_trace(m)
33
+ traced = NormalizeArgs(traced).transform()
34
+ """
35
+
36
+ def __init__(
37
+ self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True
38
+ ):
39
+ super().__init__(module)
40
+ self.node_map: Dict[Proxy, Node] = {}
41
+ self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs
42
+
43
+ def run_node(self, n: Node) -> Any:
44
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
45
+
46
+ def get_type(arg):
47
+ if isinstance(arg, fx.Node):
48
+ return n.meta["type"] if "type" in n.meta else None
49
+ return type(arg)
50
+
51
+ arg_types = map_aggregate(n.args, get_type)
52
+ assert isinstance(arg_types, tuple)
53
+ arg_types = tuple([create_type_hint(i) for i in arg_types])
54
+ kwarg_types = {k: get_type(v) for k, v in kwargs.items()}
55
+ if n.op == "call_function":
56
+ out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types)
57
+ else:
58
+ out = super().run_node(n)
59
+ if n.op != "output":
60
+ self.node_map[out] = n
61
+ out.node.meta = n.meta
62
+ out.node.type = n.type
63
+ return out
64
+
65
+ def call_function(
66
+ self,
67
+ target: Target,
68
+ args: Tuple[Argument, ...],
69
+ kwargs: Dict[str, Any],
70
+ arg_types: Optional[Tuple[Any, ...]] = None,
71
+ kwarg_types: Optional[Dict[str, Any]] = None,
72
+ ):
73
+ assert callable(target)
74
+ new_args_and_kwargs = normalize_function(
75
+ target,
76
+ args, # type: ignore[arg-type]
77
+ kwargs,
78
+ arg_types, # type: ignore[arg-type]
79
+ kwarg_types,
80
+ self.normalize_to_only_use_kwargs,
81
+ )
82
+ if new_args_and_kwargs:
83
+ new_args, new_kwargs = new_args_and_kwargs
84
+ return self.tracer.create_proxy(
85
+ "call_function", target, new_args, new_kwargs
86
+ )
87
+ else:
88
+ return super().call_function(target, args, kwargs)
89
+
90
+ def call_module(
91
+ self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
92
+ ):
93
+ assert isinstance(target, str)
94
+ new_args_and_kwargs = normalize_module(
95
+ self.module,
96
+ target,
97
+ args, # type: ignore[arg-type]
98
+ kwargs,
99
+ self.normalize_to_only_use_kwargs,
100
+ )
101
+ if new_args_and_kwargs:
102
+ new_args, new_kwargs = new_args_and_kwargs
103
+ return super().call_module(target, new_args, new_kwargs)
104
+ else:
105
+ return super().call_module(target, args, kwargs)
106
+
107
+
108
+ class NormalizeOperators(AnnotateTypesWithSchema):
109
+ """
110
+ Normalize callsites that are different ways of "spelling" the same
111
+ invocation into a single, canonical call. Currently supports:
112
+
113
+ 1. Normalize operators (e.g. operator.add) to the `torch` ops they
114
+ ultimately invoke (e.g. torch.add) when it is possible to statically
115
+ reason that
116
+
117
+ Example usage:
118
+
119
+ m = torchvision.models.resnet18()
120
+
121
+ traced = torch.fx.symbolic_trace(m)
122
+
123
+ traced = NormalizeOperators(traced).transform()
124
+ """
125
+
126
+ binary_magic_method_remap: Dict[
127
+ Callable[[Any, Any], Any], Callable[[Any, Any], Any]
128
+ ] = {
129
+ torch.add: operator.add,
130
+ torch.mul: operator.mul,
131
+ torch.sub: operator.sub,
132
+ torch.div: operator.truediv,
133
+ torch.floor_divide: operator.floordiv,
134
+ torch.remainder: operator.mod,
135
+ torch.eq: operator.eq,
136
+ torch.ne: operator.ne,
137
+ torch.lt: operator.lt,
138
+ torch.le: operator.le,
139
+ torch.gt: operator.gt,
140
+ torch.ge: operator.ge,
141
+ }
142
+
143
+ def call_function(
144
+ self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
145
+ ):
146
+ # Normalize operators according to the magic methods implemented on tensors here:
147
+ # https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950
148
+
149
+ assert callable(target)
150
+
151
+ if target in self.binary_magic_method_remap:
152
+ if len(args) != 2:
153
+ return super().call_function(target, args, kwargs)
154
+ lhs, rhs = args
155
+
156
+ return super().call_function(
157
+ target=self.binary_magic_method_remap[target],
158
+ args=(lhs, rhs),
159
+ kwargs={},
160
+ )
161
+
162
+ return super().call_function(target, args, kwargs)
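A hedged, self-contained variant of the docstring example above that does not need torchvision; the positional `inplace` argument is rewritten into keyword form by NormalizeArgs.

import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.normalize import NormalizeArgs

class M(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x, False)   # positional `inplace`

normalized = NormalizeArgs(symbolic_trace(M())).transform()
print(normalized.graph)   # relu(input=x, inplace=False): args matched to the signature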
venv/lib/python3.10/site-packages/torch/fx/experimental/partitioner_utils.py ADDED
@@ -0,0 +1,317 @@
1
+ from enum import Enum
2
+ from typing import NamedTuple, Dict, List, Set
3
+
4
+ from torch.fx.node import Node, map_arg
5
+
6
+
7
+ class Partition:
8
+ """Partition class contains all the information about an individual partition.
9
+ It also provides the necessary methods for manipulating the partition.
10
+ """
11
+
12
+ def __init__(self, partition_id: int) -> None:
13
+ self.nodes: Set[Node] = set()
14
+ self.partition_id = partition_id
15
+ self.parents: Set[Partition] = set()
16
+ self.children: Set[Partition] = set()
17
+ self.bfs_level: int = -1
18
+ self.used_mem_bytes: int = 0
19
+ self.logical_device_ids: List[int] = []
20
+
21
+ def __str__(self):
22
+ return str(self.partition_id)
23
+
24
+ def recalculate_mem_size(self):
25
+ self.used_mem_bytes = 0
26
+ for node in self.nodes:
27
+ self.used_mem_bytes += get_extra_size_of(node, self.nodes)
28
+
29
+ def add_node(self, node):
30
+ input_nodes: Dict[Node, None] = {}
31
+ map_arg(node.args, input_nodes.setdefault)
32
+ map_arg(node.kwargs, input_nodes.setdefault)
33
+ # Add current node's input nodes if they are placeholder or constants
34
+ for n in input_nodes:
35
+ if n.op in {"placeholder", "get_attr"}:
36
+ self.nodes.add(n)
37
+ self.nodes.add(node)
38
+ self.recalculate_mem_size()
39
+
40
+ def remove_node(self, node):
41
+ # Remove a node only if the node is in the partition
42
+ if node in self.nodes:
43
+ self.nodes.remove(node)
44
+ # Collect the node's input nodes
45
+ input_nodes: Dict[Node, None] = {}
46
+ map_arg(node.args, input_nodes.setdefault)
47
+ map_arg(node.kwargs, input_nodes.setdefault)
48
+ # Check if an input node is a placeholder or get_attr,
49
+ # and this input node is not used by some other nodes in this partition,
50
+ # then remove this input node
51
+ for input_node in input_nodes:
52
+ if all(
53
+ n not in self.nodes for n in input_node.users
54
+ ) and input_node.op in {"placeholder", "get_attr"}:
55
+ self.nodes.remove(input_node)
56
+ self.recalculate_mem_size()
57
+
58
+
59
+ class Device(NamedTuple):
60
+ name: str
61
+ available_mem_bytes: int
62
+ logical_id: int
63
+
64
+
65
+ class NodeLatency(NamedTuple):
66
+ # Latency due to the memory bandwidth
67
+ mem_latency_sec: float
68
+ # Latency due to the computation
69
+ computer_latency_sec: float
70
+
71
+
72
+ class PartitionLatency(NamedTuple):
73
+ # Sum of all nodes' memory latency on the critical path
74
+ mem_latency_sec: float
75
+ # Sum of all nodes' compute latency on the critical path
76
+ computer_latency_sec: float
77
+ # Latency of the critical path
78
+ overall_latency_sec: float
79
+
80
+
81
+ class PartitionMode(Enum):
82
+ size_based = 0
83
+ sparse_nn = 1
84
+ cost_aware = 2
85
+ kl_based = 3
86
+ aot_based = 4
87
+
88
+
89
+ class PartitionerConfig(NamedTuple):
90
+ devices: List[Device]
91
+ mode: PartitionMode = PartitionMode.size_based
92
+ transfer_rate_bytes_per_sec: float = 0.0
93
+ node_to_latency_mapping: Dict[Node, NodeLatency] = {}
94
+ node_to_partition_mapping: Dict[Node, int] = {}
95
+ partition_to_logical_device_mapping: Dict[int, List[int]] = {}
96
+ # Saturate host by replicating partitions to the remaining idle devices.
97
+ saturate_host: bool = False
98
+
99
+
100
+ def get_extra_size_of(node: Node, nodes: Set[Node]) -> int:
101
+ """Given a node and a set of nodes,
102
+ this function returns the extra size needed
103
+ if this node is included in this set.
104
+ """
105
+ # Find all its input nodes
106
+ input_nodes: Dict[Node, None] = {}
107
+ map_arg(node.args, input_nodes.setdefault)
108
+ map_arg(node.kwargs, input_nodes.setdefault)
109
+ # Calculate total size of related nodes
110
+ total_size_of_input_nodes = 0
111
+ for n in input_nodes:
112
+ # Make sure this node hasn't been in this set yet
113
+ if n not in nodes:
114
+ size_bytes = getattr(n, "size_bytes", None)
115
+ if size_bytes:
116
+ total_size_of_input_nodes += size_bytes.output_size
117
+ else:
118
+ raise RuntimeError("node has no size_bytes attr")
119
+ # Don't forget the op node itself
120
+ size_bytes = getattr(node, "size_bytes", None)
121
+ if size_bytes:
122
+ total_size_of_input_nodes += size_bytes.total_size
123
+ else:
124
+ raise RuntimeError("node has no size_bytes attr")
125
+ return total_size_of_input_nodes
126
+
127
+
128
+ def get_latency_of_one_partition(
129
+ partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency]
130
+ ) -> PartitionLatency:
131
+ """Given a partition and its nodes' latency, return a PartitionLatency for this partition"""
132
+
133
+ def get_top_nodes(partition: Partition) -> List[Node]:
134
+ """Given a partition, return a list of nodes on the top bfs level"""
135
+ top_nodes: List[Node] = []
136
+ for node in partition.nodes:
137
+ # Skip placeholder and get_attr nodes
138
+ if node.op in {"placeholder", "get_attr"}:
139
+ continue
140
+ input_nodes: Dict[Node, None] = {}
141
+ map_arg(node.args, input_nodes.setdefault)
142
+ map_arg(node.kwargs, input_nodes.setdefault)
143
+ # If a node has no input nodes in this partition,
144
+ # or its input nodes in this partition are placeholders and get_attrs
145
+ # this node is on the top bfs level in this partition
146
+ if not any(
147
+ n in partition.nodes and n.op not in {"placeholder", "get_attr"}
148
+ for n in input_nodes
149
+ ):
150
+ top_nodes.append(node)
151
+ return top_nodes
152
+
153
+ def dfs_helper(node: Node, partition_latency) -> PartitionLatency:
154
+ """Given a top node of a partition, this function returns
155
+ the latency of the critical path in the partition
156
+ """
157
+ node_latency = node_to_latency_mapping[node]
158
+ # Calculate the current overall latency of the partition
159
+ overall_latency_sec = partition_latency.overall_latency_sec + max(
160
+ node_latency.computer_latency_sec, node_latency.mem_latency_sec
161
+ )
162
+ # Update the mem latency of this path
163
+ mem_latency_sec = (
164
+ partition_latency.mem_latency_sec + node_latency.mem_latency_sec
165
+ )
166
+ # Update the compute latency of this path
167
+ computer_latency_sec = (
168
+ partition_latency.computer_latency_sec + node_latency.computer_latency_sec
169
+ )
170
+ # Get all users of this node that are in this partition
171
+ users = set(node.users).intersection(partition.nodes)
172
+ if users:
173
+ max_latency = PartitionLatency(
174
+ mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
175
+ )
176
+ for n in users:
177
+ # Get new partition latency recursively
178
+ new_partition_latency = dfs_helper(
179
+ n,
180
+ PartitionLatency(
181
+ mem_latency_sec, computer_latency_sec, overall_latency_sec
182
+ ),
183
+ )
184
+ if (
185
+ new_partition_latency.overall_latency_sec
186
+ > max_latency.overall_latency_sec
187
+ ):
188
+ max_latency = new_partition_latency
189
+ return max_latency
190
+ # If there is no user, the node is at bottom of the partition
191
+ return PartitionLatency(
192
+ mem_latency_sec, computer_latency_sec, overall_latency_sec
193
+ )
194
+
195
+ # Main part starts
196
+ # Get all top level nodes of this partition
197
+ top_nodes = get_top_nodes(partition)
198
+ critical_path_latency = PartitionLatency(
199
+ mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
200
+ )
201
+ # Go through all top nodes and find the largest latency (critical pass latency)
202
+ for node in top_nodes:
203
+ partition_latency = dfs_helper(
204
+ node,
205
+ PartitionLatency(
206
+ mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
207
+ ),
208
+ )
209
+ if (
210
+ partition_latency.overall_latency_sec
211
+ > critical_path_latency.overall_latency_sec
212
+ ):
213
+ critical_path_latency = partition_latency
214
+ return critical_path_latency
215
+
216
+
217
+ def get_partition_to_latency_mapping(
218
+ partitions: List[Partition], node_to_latency_mapping: Dict[Node, NodeLatency]
219
+ ) -> Dict[Partition, PartitionLatency]:
220
+ """Given all the partitions and node_to_latency_mapping dictionary,
221
+ return a mapping dictionary of each partition to its overall latency
222
+ """
223
+ partition_to_latency_mapping: Dict[Partition, PartitionLatency] = {}
224
+ # Go through each partition and get its latency
225
+ for partition in partitions:
226
+ partition_latency = get_latency_of_one_partition(
227
+ partition, node_to_latency_mapping
228
+ )
229
+ partition_to_latency_mapping[partition] = partition_latency
230
+ return partition_to_latency_mapping
231
+
232
+
233
+ def get_comm_latency_between(
234
+ parent_partition: Partition,
235
+ child_partition: Partition,
236
+ transfer_rate_bytes_per_sec: float,
237
+ ):
238
+ """Given two partitions (parent and child),
239
+ calculate the communication latency between the two.
240
+ """
241
+ # If two partitions are on the same device, the comm latency is 0.
242
+ if (
243
+ parent_partition.logical_device_ids != []
244
+ and child_partition.logical_device_ids != []
245
+ and parent_partition.logical_device_ids == child_partition.logical_device_ids
246
+ ):
247
+ return 0.0
248
+ # Keep tracking the communication size between parent and child
249
+ comm_size = 0
250
+ # Keep tracking all the counted node
251
+ visited_nodes = set()
252
+ # Go through all nodes in the child partition
253
+ # If a node has input nodes from the parent partition,
254
+ # the output size of those input nodes will be counted
255
+ # and added to comm_size
256
+ for node in child_partition.nodes:
257
+ input_nodes: Dict[Node, None] = {}
258
+ map_arg(node.args, input_nodes.setdefault)
259
+ map_arg(node.kwargs, input_nodes.setdefault)
260
+ for n in input_nodes:
261
+ if n in parent_partition.nodes and n not in visited_nodes:
262
+ size_bytes = getattr(n, "size_bytes", None)
263
+ if size_bytes is not None:
264
+ comm_size += size_bytes.output_size
265
+ visited_nodes.add(n)
266
+ return comm_size / transfer_rate_bytes_per_sec
267
+
268
+
269
+ def get_latency_of_partitioned_graph(
270
+ partitions: List[Partition],
271
+ partition_to_latency_mapping: Dict[Partition, PartitionLatency],
272
+ transfer_rate_bytes_per_sec: float,
273
+ ):
274
+ """Given all partitions in a graph, find the critical path among all partitions
275
+ and return its latency as the latency of the whole graph
276
+ """
277
+
278
+ def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:
279
+ """This function helps to recursively get the latency of a path of partitions"""
280
+ # Update latency by adding current partition's latency
281
+ latency_so_far_sec += partition_to_latency_mapping[
282
+ partition
283
+ ].overall_latency_sec
284
+ children = partition.children
285
+ if partition.children:
286
+ max_latency_sec = 0.0
287
+ for child in partition.children:
288
+ # Calculate latency between
289
+ comm_latency_sec = get_comm_latency_between(
290
+ partition, child, transfer_rate_bytes_per_sec
291
+ )
292
+ new_latency_sec = dfs_helper(
293
+ child, latency_so_far_sec + comm_latency_sec
294
+ )
295
+ if new_latency_sec > max_latency_sec:
296
+ max_latency_sec = new_latency_sec
297
+ return max_latency_sec
298
+ return latency_so_far_sec
299
+
300
+ def get_top_partitions(partitions: List[Partition]) -> List[Partition]:
301
+ """This function is to return all the partitions without parents
302
+ as the starting points of all the paths
303
+ """
304
+ top_partitions = []
305
+ for partition in partitions:
306
+ # If a partition has no parents, then it is a top partition
307
+ if len(partition.parents) == 0:
308
+ top_partitions.append(partition)
309
+ return top_partitions
310
+
311
+ top_partitions = get_top_partitions(partitions)
312
+ critical_path_latency_sec = 0.0
313
+ for partition in top_partitions:
314
+ latency_sec = dfs_helper(partition, 0.0)
315
+ if latency_sec > critical_path_latency_sec:
316
+ critical_path_latency_sec = latency_sec
317
+ return critical_path_latency_sec
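A hedged sketch of how these plain data types fit together; the device sizes and transfer rate are made-up illustration values.

from torch.fx.experimental.partitioner_utils import (
    Device, NodeLatency, PartitionMode, PartitionerConfig,
)

devices = [
    Device(name="dev_0", available_mem_bytes=2 ** 30, logical_id=0),
    Device(name="dev_1", available_mem_bytes=2 ** 30, logical_id=1),
]
config = PartitionerConfig(
    devices=devices,
    mode=PartitionMode.sparse_nn,
    transfer_rate_bytes_per_sec=1e9,
)
# A node_to_latency_mapping would map each fx Node to a NodeLatency, e.g.
# NodeLatency(mem_latency_sec=1e-4, computer_latency_sec=2e-4), and is what
# get_partition_to_latency_mapping / get_latency_of_partitioned_graph consume.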
venv/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py ADDED
@@ -0,0 +1,1122 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ import contextlib
9
+ import functools
10
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
11
+ import torch
12
+ import torch.utils._pytree as pytree
13
+ from torch.fx import Tracer, GraphModule
14
+ from torch.fx.graph_module import _assign_attr
15
+ from weakref import WeakKeyDictionary
16
+ from collections import defaultdict
17
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode, unset_fake_temporarily, is_fake
18
+ from torch._dispatch.python import enable_python_dispatcher, enable_pre_dispatch
19
+ import torch.fx as fx
20
+ from torch.fx.node import _side_effectful_need_to_be_preserved_pre_dispatch
21
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata
22
+ from contextlib import contextmanager, nullcontext
23
+ import inspect
24
+ from dataclasses import dataclass
25
+ import weakref
26
+ import operator
27
+ from torch.utils._stats import count
28
+ import logging
29
+
30
+ from torch.overrides import TorchFunctionMode
31
+
32
+ from torch.utils._python_dispatch import (
33
+ TorchDispatchMode,
34
+ _disable_infra_mode,
35
+ _push_mode,
36
+ _unset_infra_mode,
37
+ )
38
+
39
+ from ._backward_state import BackwardState
40
+ from .sym_node import SymNode
41
+ from ._sym_dispatch_mode import SymDispatchMode
42
+ from torch.fx import Proxy
43
+ import torch.fx.traceback as fx_traceback
44
+ from torch import SymInt, SymFloat, SymBool
45
+ from torch.utils.weak import WeakTensorKeyDictionary, WeakIdKeyDictionary, _WeakHashRef
46
+
47
+ __all__ = ["PythonKeyTracer", "dispatch_trace", "make_fx", "DecompositionInterpreter", "py_sym_types", "get_innermost_proxy_mode"]
48
+
49
+ aten = torch.ops.aten
50
+ prim = torch.ops.prim
51
+
52
+ log = logging.getLogger(__name__)
53
+ not_implemented_log = torch._logging.getArtifactLogger(__name__, "not_implemented")
54
+
55
+ CURRENT_DECOMPOSITION_TABLE: Dict[torch._ops.OperatorBase, Callable] = {}
56
+
57
+ CONSTANT_NUMEL_LIMIT = 1
58
+
59
+ # We currently convert all SymInt to proxies before we use them.
60
+ # This could plausibly be handled at the Dynamo level.
61
+ pytree.register_pytree_node(
62
+ torch.Size,
63
+ lambda xs: (list(xs), None),
64
+ lambda xs, _: tuple(xs),
65
+ flatten_with_keys_fn=lambda xs: (
66
+ [(pytree.SequenceKey(i), x) for i, x in enumerate(xs)],
67
+ None,
68
+ ),
69
+ )
70
+ def fake_signature(fn, nargs):
71
+ """FX gets confused by varargs, de-confuse it"""
72
+ argnames = ",".join(f"arg{i}" for i in range(nargs))
73
+ return eval(f"lambda {argnames}: fn({argnames})", {"fn": fn})
74
+
75
+ @contextmanager
76
+ def decompose(decomposition_table):
77
+ global CURRENT_DECOMPOSITION_TABLE
78
+ old_decomposition_table = CURRENT_DECOMPOSITION_TABLE
79
+ CURRENT_DECOMPOSITION_TABLE = decomposition_table
80
+ try:
81
+ yield CURRENT_DECOMPOSITION_TABLE
82
+ finally:
83
+ CURRENT_DECOMPOSITION_TABLE = old_decomposition_table
84
+
85
+ # ensure we cannot collide with other properties
86
+ proxy_slot = object()
87
+ no_default = object()
88
+
89
+ py_sym_types = (SymInt, SymFloat, SymBool)
90
+
91
+ def is_sym_node(node):
92
+ assert hasattr(node, 'meta'), "All nodes traced with proxy_tensor should have meta"
93
+ return "val" in node.meta and isinstance(node.meta['val'], py_sym_types)
94
+
95
+ def set_proxy_slot(obj, tracer, proxy):
96
+ if isinstance(obj, torch.Tensor):
97
+ # We DO want to clobber proxies whenever we run an inplace operation
98
+ # on a tensor, and it affects the metadata on the proxy.
99
+ tracer.tensor_tracker[obj] = proxy
100
+ elif isinstance(obj, torch.ScriptObject):
101
+ # We DO want to clobber proxies, with a similar rationale as for tensors.
102
+ tracer.script_object_tracker[obj] = proxy
103
+ else:
104
+ # NB: Never clobber pre-existing proxy. Although the proxies
105
+ # are in principle equivalent, when we do graph partitioning
106
+ # we need there not to be spurious dependencies on tangent inputs.
107
+ # This works because primals get their SymInts set first, and
108
+ # THEN later we allocate tangent inputs. Make sure if a SymInt
109
+ # is derivable from a primal that we use that.
110
+ assert isinstance(obj, py_sym_types), type(obj)
111
+ if obj not in tracer.symnode_tracker:
112
+ tracer.symnode_tracker[obj] = proxy
113
+
114
+ def has_proxy_slot(obj, tracer):
115
+ assert isinstance(obj, (torch.Tensor, SymNode)), type(obj)
116
+ return get_proxy_slot(obj, tracer, False, lambda _: True)
117
+
118
+ # the default argument is what to return if the slot is not set.
119
+ # the transform argument is handy if you need to extract a subfield from
120
+ # the successfully looked up result (but NOT the default.)
121
+ def get_proxy_slot(obj, tracer, default=no_default, transform=lambda x: x):
122
+ if isinstance(obj, torch.Tensor):
123
+ tracker = tracer.tensor_tracker
124
+ elif isinstance(obj, torch.ScriptObject):
125
+ tracker = tracer.script_object_tracker
126
+ else:
127
+ assert isinstance(obj, py_sym_types), type(obj)
128
+ tracker = tracer.symnode_tracker
129
+
130
+ if obj not in tracker:
131
+ if default is no_default:
132
+ raise RuntimeError(f"{obj} is not tracked with proxy for {tracer}")
133
+ return default
134
+ return transform(tracker[obj])
135
+
136
+ def snapshot_fake(val):
137
+ return val.detach()
138
+
139
+ def extract_val(val):
140
+ if is_fake(val):
141
+ return snapshot_fake(val)
142
+ elif isinstance(val, py_sym_types):
143
+ return val
144
+ elif isinstance(val, torch.ScriptObject):
145
+ return val
146
+ elif isinstance(val, BackwardState):
147
+ return val
148
+ elif isinstance(val, (list, tuple)):
149
+ return val.__class__([extract_val(x) for x in val])
150
+ elif isinstance(val, torch.Tensor):
151
+ if not val.is_sparse:
152
+ # NB: Kinda hacky, but we should try to get val as the metadata
153
+ # everywhere
154
+ # TODO: This doesn't properly track storages. A more robust
155
+ # approach would be to maintain a per-trace FakeTensorMode and
156
+ # from_real_tensor to create fake values (don't forget to
157
+ # snapshot_fake)
158
+ fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=True)
159
+ with fake_tensor_mode:
160
+ return torch.empty_strided(val.shape, val.stride(), device=val.device, dtype=val.dtype)
161
+ else:
162
+ return None
163
+ elif isinstance(val, (int, float, bool)):
164
+ return val
165
+
166
+ # What invariants do we have for the 'val' set on the FX node? It has accurate
167
+ # metadata... but only for metadata that exists "below" all other subsystems
168
+ # (most notably autograd, but also vmap, functorch transforms, etc). This means
169
+ # you can get the dtype, shape, stride, storage, but you CANNOT get requires_grad,
170
+ # grad_fn, _base (_base actually may be set due to recursive call to
171
+ # ADInplaceOrView, but you shouldn't rely on it.)
172
+ def set_meta(proxy, val):
173
+ proxy.node.meta['val'] = extract_val(val)
174
+ # Best effort tensor_meta setting; prefer using val!
175
+ if is_fake(val):
176
+ proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val)
177
+ elif isinstance(val, torch.Tensor) and not val.is_sparse:
178
+ proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val)
179
+ return proxy
180
+
181
+ def thunkify(f, *args, **kwargs):
182
+ """
183
+ Delays computation of f until it's called again
184
+ Also caches the result
185
+ """
186
+ return functools.lru_cache(1)(functools.partial(f, *args, **kwargs))
187
+
188
+ def track_tensor(tensor, proxy, *, constant, tracer):
189
+ def try_set_proxy_slot(outer_s, proxy_callable, *args):
190
+ assert callable(proxy_callable)
191
+ if isinstance(outer_s, SymInt):
192
+ set_proxy_slot(outer_s, tracer, thunkify(proxy_callable, outer_s, *args))
193
+ # The basic idea is that we need to associate each tensor/SymInt
194
+ # with a Proxy. How do we setup this association? We just store
195
+ # the proxy on the proxy slot of the object, keyed on the tracer
196
+ # (so that if we have multiple tracers at the same time, they
197
+ # don't clobber each other.)
198
+ for i, s in enumerate(tensor.shape):
199
+ try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_size.int(proxy, i), x), i)
200
+
201
+ for i, s in enumerate(tensor.stride()):
202
+ try_set_proxy_slot(s, lambda x, i: set_meta(torch.ops.aten.sym_stride.int(proxy, i), x), i)
203
+
204
+ try_set_proxy_slot(tensor.numel(), lambda x: set_meta(torch.ops.aten.sym_numel.default(proxy), x))
205
+ try_set_proxy_slot(tensor.storage_offset(), lambda x: set_meta(torch.ops.aten.sym_storage_offset.default(proxy), x))
206
+ set_proxy_slot(tensor, tracer, _ProxyTensor(proxy, constant))
207
+
208
+ def track_tensor_tree(inner_res, proxy_res, *, constant, tracer):
209
+ def wrap_with_proxy(e, proxy, constant):
210
+ if isinstance(e, torch.Tensor):
211
+ track_tensor(e, proxy, tracer=tracer, constant=constant)
212
+ set_meta(proxy, e)
213
+ elif isinstance(e, py_sym_types):
214
+ # NB: eagerly set meta here, so that the numbering is in order
215
+ set_meta(proxy, e)
216
+ set_proxy_slot(e, tracer, lambda: proxy)
217
+ elif isinstance(e, torch.ScriptObject):
218
+ set_proxy_slot(e, tracer, proxy)
219
+ set_meta(proxy, e)
220
+ elif isinstance(e, (tuple, list)):
221
+ if isinstance(proxy, fx.Proxy):
222
+ set_meta(proxy, e)
223
+
224
+ # example use case: allreduce_ returns ([tensor], work)
225
+ for idx, ee in enumerate(e):
226
+ wrap_with_proxy(ee, proxy[idx], get_constant(idx))
227
+ elif isinstance(e, dict):
228
+ # In theory we could support const-prop when proxy-tensor-tracing
229
+ # operators that return dicts of tensors, but we have no use case
230
+ # for it today (since the only op we currently trace that can
231
+ # return a dict is triton_kernel_wrapper_functional/mutation,
232
+ # which does not participate in const-prop)
233
+ assert constant is None
234
+
235
+ if isinstance(proxy, fx.Proxy):
236
+ set_meta(proxy, e)
237
+
238
+ # example use case: triton_kernel_wrapper takes arguments as kwargs
239
+ for key, val in e.items():
240
+ wrap_with_proxy(val, proxy[key], None)
241
+ elif isinstance(e, BackwardState):
242
+ set_meta(proxy, e)
243
+ e.proxy = proxy
244
+ else:
245
+ # intentionally pass on primitives
246
+ pass
247
+
248
+
249
+ def get_constant(idx):
250
+ if constant is None:
251
+ return None
252
+ else:
253
+ return constant[idx]
254
+
255
+ wrap_with_proxy(inner_res, proxy_res, constant)
256
+
257
+ return inner_res
258
+
259
+
260
+ def maybe_disable_fake_tensor_mode():
261
+ # TODO: figure out if this API generally makes sense and bake it into the
262
+ # library
263
+ return unset_fake_temporarily()
264
+
265
+
266
+ @dataclass
267
+ class _ProxyTensor:
268
+ proxy: Proxy
269
+ constant: Optional[torch.Tensor]
270
+
271
+
272
+ def fetch_sym_proxy(tracer):
273
+ def inner(e):
274
+ n = e.node
275
+ if n.constant is not None:
276
+ return n.constant
277
+ if e.node.expr.is_number:
278
+ if isinstance(e, SymBool):
279
+ return bool(e.node.expr)
280
+ elif isinstance(e, SymInt):
281
+ return int(e.node.expr)
282
+ return float(e.node.expr)
283
+ else:
284
+ # NB: we REQUIRE all symints to be tracked
285
+ return get_proxy_slot(e, tracer)()
286
+ return inner
287
+
288
+
289
+ def fetch_object_proxy(tracer):
290
+ return lambda t: get_proxy_slot(t, tracer, t)
291
+
292
+ HANDLED_TYPES = (torch.Tensor, torch.nn.Parameter, FakeTensor)
293
+
294
+ def proxy_call(proxy_mode, func, pre_dispatch, args, kwargs):
295
+ unrecognized_types = []
296
+
297
+ def can_handle_tensor(x):
298
+ r = type(x) in HANDLED_TYPES or has_proxy_slot(x, proxy_mode.tracer)
299
+ if proxy_mode._allow_fake_constant:
300
+ r = r or type(x) in (torch._subclasses.FakeTensor,)
301
+ if not r:
302
+ unrecognized_types.append(type(x))
303
+ return r
304
+
305
+ # If there are any tensor subclasses, we need to handle those tensor subclasses first
306
+ # TODO: we could use types to test this
307
+ if not pytree.tree_all_only(torch.Tensor, can_handle_tensor, (args, kwargs)):
308
+ not_implemented_log.debug("ProxyTensorMode tensors without proxy had unrecognized subclasses: %s", unrecognized_types)
309
+ return NotImplemented
310
+
311
+ r = maybe_handle_decomp(proxy_mode, func, args, kwargs)
312
+ if r is not NotImplemented:
313
+ return r
314
+
315
+ # For pre-autograd tracing, we do not want to run CompositeImplicit decomps.
316
+ if not pre_dispatch and func not in [
317
+ torch.ops.aten.size.default, torch.ops.aten.stride.default, torch.ops.aten.storage_offset.default
318
+ ]:
319
+ with proxy_mode:
320
+ r = func.decompose(*args, **kwargs)
321
+ if r is not NotImplemented:
322
+ return r
323
+
324
+ tracer = proxy_mode.tracer
325
+ f_args, f_kwargs = pytree.tree_map_only((torch.Tensor, torch.ScriptObject), fetch_object_proxy(tracer), (args, kwargs))
326
+
327
+ # If there are SymInts, we also should not consider this constant.
328
+ # However, fake tensor handling of SymInts is sufficiently broken that
329
+ # I couldn't write a test for this case
330
+ all_constant = (
331
+ pytree.tree_all_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs))
332
+ # TODO: maybe constant SymInts should also be allowed? Not sure if
333
+ # this can happen
334
+ and pytree.tree_all_only((SymInt, SymFloat, SymBool), lambda _: False, (args, kwargs))
335
+ )
336
+
337
+ if torch.Tag.data_dependent_output in func.tags:
338
+ # Check if all of the Tensor inputs are constants
339
+ if all_constant:
340
+ const_args, const_kwargs = pytree.tree_map_only(
341
+ _ProxyTensor, lambda t: t.constant, (f_args, f_kwargs)
342
+ )
343
+ with maybe_disable_fake_tensor_mode():
344
+ return func(*const_args, **const_kwargs)
345
+ # If any of the Tensor inputs are "real" (not FakeTensor), we may
346
+ # incorrectly burn in constants by allowing this access. Raise
347
+ # an error in this case
348
+ if proxy_mode._error_on_data_dependent_ops and pytree.tree_all_only(torch.Tensor, lambda t: not is_fake(t), (args, kwargs)):
349
+ raise RuntimeError(
350
+ f"It appears that you're trying to get value out of a tracing tensor with {func} - erroring out! "
351
+ "It's likely that this is caused by data-dependent control flow or similar. "
352
+ "It may be possible to trace this with dynamic shapes; try setting tracing_mode='symbolic' "
353
+ "in your make_fx call."
354
+ )
355
+ proxy_args, proxy_kwargs = pytree.tree_map_only(
356
+ (SymInt, SymFloat, SymBool),
357
+ fetch_sym_proxy(proxy_mode.tracer),
358
+ pytree.tree_map_only(_ProxyTensor, lambda e: e.proxy, (f_args, f_kwargs))
359
+ )
360
+
361
+ # When we trace through a torch.tensor invocation, you never actually
362
+ # see a torch.ops.aten.tensor call. Instead, the way this function is
363
+ # implemented internally is that we allocate a plain tensor (this is
364
+ # *guaranteed* to be a plain tensor, we disable all modes when doing
365
+ # so), and then call at::lift_fresh on it (to give modes a chance to do
366
+ # their stuff). Furthermore, the tensor argument to lift_fresh is guaranteed
367
+ # to be freshly allocated, so we want lift_fresh to be a no-op (directly
368
+ # returning the input argument).
369
+ #
370
+ # Here is the basic problem: when we trace this sequence of executions
371
+ # into an FX graph, what happens to this call sequence? Traditionally,
372
+ # tensor constants get interned as buffers on the FX GraphModule. But
373
+ # this is dangerous. Consider:
374
+ #
375
+ # x = torch.tensor(1)
376
+ # x.add_(2)
377
+ #
378
+ # Naively, this traces into:
379
+ #
380
+ # t = self._tensor_constant0 # initialized to torch.tensor(1)
381
+ # x = torch.ops.aten.lift_fresh(t)
382
+ # x.add_(2)
383
+ #
384
+ # If lift_fresh returns t directly, the subsequent add_ call will
385
+ # modify the tensor constant. Really, the problem is we've violated
386
+ # the invariant that the argument to lift is fresh. So we should
387
+ # preserve the invariant by replacing lift_fresh with lift_fresh_copy:
388
+ #
389
+ # t = self._tensor_constant0 # initialized to torch.tensor(1)
390
+ # x = torch.ops.aten.lift_fresh_copy(t)
391
+ # x.add_(2)
392
+ #
393
+ # This is what the overload modification does.
394
+ if func is torch.ops.aten.lift_fresh.default:
395
+ func = torch.ops.aten.lift_fresh_copy.default
396
+
397
+
398
+ proxy_out = proxy_mode.tracer.create_proxy('call_function', func, proxy_args, proxy_kwargs,
399
+ name=proxy_mode.tracer.graph._target_to_str(func.overloadpacket.__name__))
400
+
401
+ # This makes DCE marginally less likely to DCE inplace operations.
402
+ # It is not strictly necessary
403
+ # Kind of a hacky way to test if an op is in-place or not
404
+ if func.overloadpacket.__name__[-1] == "_" and func.overloadpacket.__name__[0] != "_":
405
+ if isinstance(args[0], List):
406
+ # e.g., c10d::allreduce_ returns a list of tensors as the first element
407
+ # in the output.
408
+ for i, a in enumerate(args[0]):
409
+ a.proxy = proxy_out[0][i]
410
+ else:
411
+ args[0].proxy = proxy_out
412
+
413
+ out = func(*args, **kwargs)
414
+
415
+ # In some circumstances, we will be tracing in a situation where a tensor
416
+ # is *statically* known to be a constant (currently, this only happens if
417
+ # you run torch.tensor; deterministic factory functions like torch.arange
418
+ # don't get this treatment). When the tensor in question is small, it's
419
+ # helpful to do constant propagation in case we call item() (in which
420
+ # case we can return the constant value that is known, rather than give
421
+ # an error.) The logic here tests if constant propagation is possible
422
+ # (because all of the inputs are constant). If so, we disable fake tensor
423
+ # mode (if it is on) and do true compute on the constant.
424
+ #
425
+ # It's worth highlighting that we're making a policy decision here.
426
+ # There is a potential that the tensor is actually quite large, and we
427
+ # don't actually want to run the compute. The tensor being quite large
428
+ # is one of the reasons why factory functions don't get this treatment
429
+ # (since they can be quite large; if a parameter is initialized to a
430
+ # constant value it will be!) Similarly, there is also a potential
431
+ # to run an operator that blows up the size of a small tensor; we don't
432
+ # protect against this case, but we could force, e.g., only single
433
+ # element constant computation by testing the numel of the result before
434
+ # propagating const-ness. Similarly, we don't require the constant to
435
+ # live on CPU, but we could.
436
+ any_constant = pytree.tree_any_only(_ProxyTensor, lambda t: t.constant is not None, (f_args, f_kwargs))
437
+
438
+ constant = None
439
+
440
+ # If this is a lift, the input tensor is guaranteed to be a
441
+ # constant, so we keep a copy of the original argument around so
442
+ # we can query it if we're asked to item() it at some later point
443
+ if func is torch.ops.aten.lift_fresh_copy.default and out.numel() <= CONSTANT_NUMEL_LIMIT:
444
+ with maybe_disable_fake_tensor_mode():
445
+ constant = args[0].clone()
446
+ elif (
447
+ torch.Tag.nondeterministic_seeded not in func.tags
448
+ and all_constant
449
+ and any_constant
450
+ and pytree.tree_all_only(torch.Tensor, lambda t: t.numel() <= CONSTANT_NUMEL_LIMIT, out)
451
+ ):
452
+ # NB: do NOT include factories as constants
453
+ with maybe_disable_fake_tensor_mode():
454
+ const_args, const_kwargs = pytree.tree_map_only(
455
+ _ProxyTensor, lambda t: t.constant, (f_args, f_kwargs)
456
+ )
457
+ constant = func(*const_args, **const_kwargs)
458
+ else:
459
+ constant = None
460
+
461
+ track_tensor_tree(out, proxy_out, constant=constant, tracer=tracer)
462
+ return out
463
+
464
+ class _SymNodeDict:
465
+ """
466
+ Wrapper around a dictionary that will hash SymInts with their nodes
467
+ """
468
+ def __init__(self):
469
+ self.sym_node_dict = {}
470
+
471
+ def __setitem__(self, key: py_sym_types, value: Any):
472
+ self.sym_node_dict[key.node] = value
473
+
474
+ def __getitem__(self, key: py_sym_types):
475
+ return self.sym_node_dict[key.node]
476
+
477
+ def __contains__(self, key: py_sym_types):
478
+ return key.node in self.sym_node_dict
479
+
480
+ def get(self, key: py_sym_types, default: Any = None):
481
+ return self.sym_node_dict.get(key.node, default)
482
+
483
+ class PythonKeyTracer(Tracer):
484
+ def __init__(self):
485
+ super().__init__(autowrap_modules=())
486
+ self.tensor_tracker = WeakTensorKeyDictionary()
487
+ self.symnode_tracker = _SymNodeDict() # type: ignore[var-annotated]
488
+ self.script_object_tracker = WeakIdKeyDictionary(dict=None, ref_type=_WeakHashRef)
489
+
490
+ # In general, we don't want to make modules leaves. In principle, users of
491
+ # this tracer might want to override this in order to turn a couple specific
492
+ # modules into leaves in the traced graph.
493
+ def call_module(
494
+ self, m: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any]
495
+ ) -> Any:
496
+ return forward(*args, **kwargs)
497
+
498
+ # We don't want to turn getattr calls into proxies. So we just return the actual value.
499
+ def getattr(self, attr, attr_val, parameter_proxy_cache):
500
+ return attr_val
501
+
502
+ def create_arg(self, a: Any):
503
+ if isinstance(a, torch.nn.Parameter):
504
+ for n, p in self.root.named_parameters():
505
+ if a is p:
506
+ return self.create_node('get_attr', n, (), {})
507
+ qualname: Optional[str] = None
508
+
509
+ if not qualname:
510
+ i = 0
511
+ while True:
512
+ qualname = f'_param_constant{i}'
513
+ if not hasattr(self.root, qualname):
514
+ break
515
+ i += 1
516
+ setattr(self.root, qualname, a)
517
+
518
+ return self.create_node('get_attr', qualname, (), {})
519
+ elif isinstance(a, (SymInt, SymFloat, SymBool)):
520
+ assert a.node.constant is not None
521
+ return a.node.constant
522
+ return super().create_arg(a)
523
+
524
+ def unwrap_proxy(self, e):
525
+ if isinstance(e, torch.Tensor):
526
+ return get_proxy_slot(e, self, e, lambda e: e.proxy)
527
+ elif isinstance(e, (torch.SymInt, torch.SymFloat, torch.SymBool)):
528
+ return get_proxy_slot(e, self, e, lambda e: e())
529
+ elif isinstance(e, torch.ScriptObject):
530
+ return get_proxy_slot(e, self, e)
531
+ else:
532
+ return e
533
+
534
+
535
+ @torch._disable_dynamo
536
+ def dispatch_trace(
537
+ root: Union[torch.nn.Module, Callable],
538
+ tracer: Tracer,
539
+ concrete_args: Optional[Tuple[Any, ...]] = None,
540
+ ) -> GraphModule:
541
+ graph = tracer.trace(root, concrete_args)
542
+ from torch._inductor.fx_passes.dedupe_symint_uses import dedupe_symints
543
+ dedupe_symints(graph)
544
+ name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
545
+ return fx._lazy_graph_module._make_graph_module(tracer.root, graph, name)
546
+
547
+
548
+ def wrap_key(f, tensors, tracer, pre_dispatch: bool):
549
+ flat_tensors, tensors_spec = pytree.tree_flatten(tensors)
550
+
551
+ @functools.wraps(f)
552
+ def wrapped(*proxies):
553
+ flat_proxies, proxies_spec = pytree.tree_flatten(proxies)
554
+ assert len(flat_proxies) == len(flat_tensors)
555
+ with disable_proxy_modes_tracing() as m:
556
+ assert isinstance(m, ProxyTorchDispatchMode)
557
+ track_tensor_tree(flat_tensors, flat_proxies, constant=None, tracer=tracer)
558
+
559
+ out = f(*tensors)
560
+ out = pytree.tree_map_only(
561
+ torch.Tensor,
562
+ lambda t: get_proxy_slot(t, tracer, t, lambda x: x.proxy),
563
+ out
564
+ )
565
+ out = pytree.tree_map_only(
566
+ (SymInt, SymFloat, SymBool),
567
+ lambda t: get_proxy_slot(t, tracer)(),
568
+ out
569
+ )
570
+ return out
571
+
572
+ return wrapped
573
+
574
+ ORIGINAL_ATEN = None
575
+ @contextmanager
576
+ def set_original_aten_op(func):
577
+ global ORIGINAL_ATEN
578
+ if ORIGINAL_ATEN is None and fx_traceback.has_preserved_node_meta():
579
+ ORIGINAL_ATEN = func
580
+ fx_traceback.current_meta['original_aten'] = func
581
+ try:
582
+ yield
583
+ finally:
584
+ ORIGINAL_ATEN = None
585
+ fx_traceback.current_meta['original_aten'] = None
586
+ else:
587
+ yield
588
+
589
+
590
+
591
+ # This mode is **only** used for pre_dispatch tracing.
592
+ # In particular, we need to make sure that autograd/autocast APIs
593
+ # that do not desugar into dispatcher operators stay in the graph.
594
+ class PreDispatchTorchFunctionMode(TorchFunctionMode):
595
+
596
+ def __init__(self, tracer):
597
+ self.tracer = tracer
598
+
599
+ def __torch_function__(self, func, types, args=(), kwargs=None):
600
+ kwargs = kwargs or {}
601
+ if func in _side_effectful_need_to_be_preserved_pre_dispatch:
602
+ # It's for passing the export verifier which needs to verify the meta['val']
603
+ # TODO(tmanlaibaatar): we should systematically couple it with the export verifier,
604
+ # instead of hardcoding it here.
605
+ node = self.tracer.create_node("call_function", func, args, {})
606
+ if func is torch._C._set_grad_enabled:
607
+ node.meta['val'] = None
608
+ return node
609
+ # Don't actually run the function! We just want to trace the calls
610
+ # into a graph. We don't actually want to change global autograd state.
611
+ return func(*args, **kwargs)
612
+
613
+
614
+ class ProxyTorchDispatchMode(TorchDispatchMode):
615
+ def __init__(self, tracer, tracing_mode, pre_dispatch=False, _allow_fake_constant=False, _error_on_data_dependent_ops=True):
616
+ dk = torch._C.DispatchKey.PreDispatch if pre_dispatch else None
617
+ super().__init__(dk)
618
+ self.tracer = tracer
619
+ self.tracing_mode = tracing_mode
620
+ self.enable_tracing = True
621
+ self.pre_dispatch = pre_dispatch
622
+ self._allow_fake_constant = _allow_fake_constant
623
+ self._error_on_data_dependent_ops = _error_on_data_dependent_ops
624
+ self.sym_mode = ProxySymDispatchMode(tracer)
625
+ self.trace_state = {}
626
+ self._managers = []
627
+ # Indicates to our torch_dispatch dispatching infra that
628
+ # this is an "infra" mode with lower dispatching precedence.
629
+ self._mode_key = torch._C._TorchDispatchModeKey.PROXY
630
+ # Every time we enter a mode, we maintain a stack telling us what the previous
631
+ # ProxyTorchDispatchMode state was (if there was any).
632
+ # This lets us properly reset the state on exit.
633
+ self.enter_stack: List[Optional[ProxyTorchDispatchMode]] = []
634
+
635
+ @count
636
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
637
+ with self.sym_mode.enable(False), set_original_aten_op(func):
638
+ return self.inner_torch_dispatch(func, types, args, kwargs)
639
+
640
+ def __enter__(self):
641
+ # sym mode first, then us...
642
+ m = self.sym_mode.enable(True)
643
+ self._managers.append(m)
644
+ m.__enter__()
645
+ # Stash and store the previous proxy mode (there may or may not be one)
646
+ maybe_prev_proxy_mode = _unset_infra_mode(torch._C._TorchDispatchModeKey.PROXY)
647
+ self.enter_stack.append(maybe_prev_proxy_mode)
648
+ return super().__enter__()
649
+
650
+ def __exit__(self, exc_type, exc_value, traceback):
651
+ m = self._managers.pop()
652
+ # ...exit us first, then sym mode
653
+ b = super().__exit__(exc_type, exc_value, traceback)
654
+
655
+ # Re-enable the previous proxy mode, if there was one.
656
+ mb_previous_proxy_mode = self.enter_stack.pop()
657
+ if mb_previous_proxy_mode is not None:
658
+ _push_mode(mb_previous_proxy_mode)
659
+
660
+ if not b:
661
+ return m.__exit__(exc_type, exc_value, traceback)
662
+ else:
663
+ return m.__exit__(None, None, None)
664
+
665
+
666
+ def inner_torch_dispatch(self, func, types, args=(), kwargs=None):
667
+ if not self.enable_tracing:
668
+ return func(*args, **kwargs)
669
+
670
+ if func in [prim.device.default]:
671
+ return func(*args, **kwargs)
672
+
673
+ return proxy_call(self, func, self.pre_dispatch, args, kwargs)
674
+
675
+
676
+ class ProxySymDispatchMode(SymDispatchMode):
677
+ def __init__(self, tracer):
678
+ super().__init__()
679
+ self.tracer = tracer
680
+ # When false, we don't trace operations. If you do this, you MUST
681
+ # call track_tensor/track_tensor_tree on all results of the operation
682
+ # to ensure we can adequately track the results
683
+ self.enable_tracing = True
684
+
685
+ @contextmanager
686
+ def enable(self, b):
687
+ old = self.enable_tracing
688
+ self.enable_tracing = b
689
+ try:
690
+ yield
691
+ finally:
692
+ self.enable_tracing = old
693
+
694
+ def _compute_proxy(self, func, args, out: Union[SymInt, SymFloat, SymBool]):
695
+ n_args = tuple(
696
+ get_proxy_slot(a, self.tracer)().node if isinstance(a, py_sym_types) else a
697
+ for a in args
698
+ )
699
+
700
+ # func doesn't have a __torch_function__ that Proxy can interpose, so
701
+ # we gotta do it manually
702
+ n_out = self.tracer.create_node("call_function", func, n_args, {})
703
+ p_out = fx.Proxy(n_out, self.tracer)
704
+ set_meta(p_out, out)
705
+ return p_out
706
+
707
+ def __sym_dispatch__(self, func, types, args, kwargs):
708
+ if not self.enable_tracing:
709
+ return func(*args, **kwargs)
710
+
711
+ # Peephole optimize multiply by one
712
+ # NB: be careful not to trigger guards here!
713
+ if func == operator.mul:
714
+ if isinstance(args[1], int) and args[1] == 1:
715
+ return args[0]
716
+ elif isinstance(args[0], int) and args[0] == 1:
717
+ return args[1]
718
+
719
+ # For speed, we assume there are no nested data structures
720
+ # (otherwise we could use tree_map)
721
+ # We also assume there are no keyword arguments.
722
+ assert not kwargs
723
+ out = func(*args, **kwargs)
724
+
725
+ # If func returned a constant, we don't need to trace; we have
726
+ # determined that the result is constant (no matter if the inputs
727
+ # were symbolic) and it is no longer necessary to trace the
728
+ # computation. This could occur if func triggered some guards.
729
+ if isinstance(out, py_sym_types):
730
+ # Delays tracing out the proxies on this op until we actually need it
731
+ p_out_thunk = thunkify(self._compute_proxy, func=func, args=args, out=out)
732
+ set_proxy_slot(out, self.tracer, p_out_thunk)
733
+
734
+ return out
735
+
736
+
737
+ # TODO: I'm not sure what the point of this class is; you can just
738
+ # make_fx through a regular Interpreter
739
+ class DecompositionInterpreter(torch.fx.Interpreter):
740
+ def __init__(self, module: torch.fx.GraphModule, new_graph: torch.fx.Graph, decomposition_table=None, **kwargs):
741
+ super().__init__(module, **kwargs)
742
+ self.new_graph = new_graph
743
+ self.tracer = torch.fx.proxy.GraphAppendingTracer(self.new_graph)
744
+ # Blegh
745
+ self.tracer.tensor_tracker = WeakTensorKeyDictionary() # type: ignore[attr-defined]
746
+ self.tracer.symnode_tracker = weakref.WeakKeyDictionary() # type: ignore[attr-defined]
747
+ self.decomposition_table = decomposition_table
748
+ if self.decomposition_table is None:
749
+ self.decomposition_table = {}
750
+ self.mode = ProxyTorchDispatchMode(self.tracer, tracing_mode="real")
751
+
752
+ def placeholder(self, target, args, kwargs):
753
+ out = super().placeholder(target, args, kwargs)
754
+ proxy = torch.fx.Proxy(self.new_graph.placeholder(target), self.tracer)
755
+ track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
756
+ # TODO handle case where the first character of target is '*'
757
+ return out
758
+
759
+ def get_attr(self, target, args, kwargs):
760
+ out = super().get_attr(target, args, kwargs)
761
+ proxy = torch.fx.Proxy(self.new_graph.get_attr(target), self.tracer)
762
+ track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
763
+ return out
764
+
765
+ # call_function, call_method, call_module get traced automatically by the outer mode.
766
+
767
+ def output(self, target, args, kwargs):
768
+ out = super().output(target, args, kwargs)
769
+
770
+ def unwrap(e):
771
+ return get_proxy_slot(e, self.tracer, e, lambda x: x.proxy.node)
772
+ self.new_graph.output(pytree.tree_map(unwrap, out))
773
+ return out
774
+
775
+ def run(self, *args, **kwargs):
776
+ # Should enter the mode at least once for being able to restore it later
777
+ # See: https://github.com/pytorch/pytorch/pull/82549#discussion_r934782025
778
+ with decompose(self.decomposition_table), self.mode:
779
+ return super().run(*args, **kwargs)
780
+
781
+
782
+ def wrapper_and_args_for_make_fx(func, args, kwargs):
783
+ # make_fx doesn't support kwargs, so we need to do this flattening
784
+ # and then unflatten the args before calling func
785
+ flat_args, spec = pytree.tree_flatten((args, kwargs))
786
+
787
+ def wrapped(flat_args):
788
+ fn_args, fn_kwargs = pytree.tree_unflatten(flat_args, spec)
789
+ return func(*fn_args, **fn_kwargs)
790
+ return wrapped, flat_args
791
+
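+ # Illustrative sketch (hypothetical `fn` and tensor `x`): flatten kwargs into a
+ # single positional list so make_fx can trace a function originally called with
+ # kwargs.
+ #
+ #   wrapped, all_args = wrapper_and_args_for_make_fx(fn, (x,), {"alpha": 2})
+ #   gm = make_fx(wrapped)(all_args)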
792
+ @contextmanager
793
+ def disable_autocast_cache():
794
+ old_value = torch.is_autocast_cache_enabled()
795
+ torch.set_autocast_cache_enabled(False)
796
+ try:
797
+ yield
798
+ finally:
799
+ torch.set_autocast_cache_enabled(old_value)
800
+
801
+
802
+ class _ModuleStackTracer(PythonKeyTracer):
803
+ r"""Customized version of PythonKeyTracer that retains module stack
804
+ information in node.meta["nn_module_stack"].
805
+
806
+ FX symbolic trace actually does this already, but it relies on `self.root`
807
+ being the actual module being traced. Since make_fx traces a lambda of our
808
+ creation, things don't work properly.
809
+
810
+ So for this version we hold onto a reference to the original module
811
+ (scope_root) and use that to match the path. Also when we see,
812
+ A
813
+ / \
814
+ B C
815
+ \ /
816
+ D
817
+ we want to record the path as A.B.D by recording only one path.
818
+ See Note [Preserving the nn module stack metadata during export non-strict mode] # noqa: W605
819
+ """
820
+
821
+ def __init__(self, scope_root):
822
+ super().__init__()
823
+ self.scope_root = scope_root
824
+ self.proxy_paths = WeakKeyDictionary()
825
+ self.proxy_modules = WeakKeyDictionary()
826
+ self.counter = 0
827
+
828
+ self.module_id_cache = defaultdict(list)
829
+ for name, mod in self.scope_root.named_modules(remove_duplicate=False):
830
+ self.module_id_cache[id(mod)].append(name)
831
+
832
+ self_ = self
833
+
834
+ class AttrProxy:
835
+ def __init__(self, base, path):
836
+ self.__class__ = type(
837
+ base.__class__.__name__,
838
+ (self.__class__, base.__class__),
839
+ {},
840
+ )
841
+ self.__dict__ = base.__dict__
842
+ self.__class__.__module__ = base.__class__.__module__
843
+ self.__class__.__qualname__ = base.__class__.__qualname__
844
+ self_.proxy_paths[self] = path
845
+ self_.proxy_modules[self] = base
846
+
847
+ def __getattr__(self, name):
848
+ assert isinstance(self, torch.nn.Module)
849
+ attr_val = super().__getattr__(name)
850
+ if isinstance(attr_val, AttrProxy):
851
+ attr_val = self_.proxy_modules[attr_val]
852
+ elif not isinstance(attr_val, torch.nn.Module):
853
+ return attr_val
854
+ return AttrProxy(attr_val, self_.proxy_paths[self] + "." + name)
855
+
856
+ @property
857
+ def _modules(self):
858
+ assert "_modules" in self.__dict__
859
+ submodules = self.__dict__["_modules"]
860
+ assert isinstance(submodules, dict)
861
+ return {
862
+ key: AttrProxy(value, self_.proxy_paths[self] + "." + str(key))
863
+ for key, value in submodules.items()
864
+ }
865
+
866
+ self.proxy_type = AttrProxy
867
+
868
+ def path_of_module(self, mod: torch.nn.Module) -> str:
869
+ """
870
+ Use tracked access path during tracing instead of the default BFS behavior.
871
+ Still use all the possible module paths to verify the result.
872
+ """
873
+ if mod is self.scope_root:
874
+ return ""
875
+
876
+ if isinstance(mod, self.proxy_type):
877
+ return self.proxy_paths[mod]
878
+
879
+ return Tracer.path_of_module(self, mod)
880
+
881
+ def getattr(self, attr, attr_val, parameter_proxy_cache):
882
+ if not isinstance(attr_val, torch.nn.Module) or isinstance(attr_val, torch.fx.GraphModule):
883
+ return super().getattr(attr, attr_val, parameter_proxy_cache)
884
+ if isinstance(attr_val, self.proxy_type):
885
+ return attr_val
886
+ return self.proxy_type(attr_val, attr)
887
+
888
+ def trace(self, root, concrete_args):
889
+ res = super().trace(root, concrete_args)
890
+ # Since we are making AttrProxy mimic the original
891
+ # submodule, when someone registers a module directly
892
+ # to the tracer while tracing, the proxy object gets registered
893
+ # first. So we need to replace the proxy modules with the real ones.
894
+ # This can happen during HOO tracing.
895
+ proxy_module_names_to_be_replaced = []
896
+ for name, module in self.root.named_modules():
897
+ if module in self.proxy_modules:
898
+ proxy_module_names_to_be_replaced.append((name, module))
899
+
900
+ def _delete_proxy_attr(obj, target):
901
+ # Copied from fx/graph_module.py
902
+ # Customized it for proxy type
903
+ atoms = target.split(".")
904
+ path, target_submod = atoms[:-1], atoms[-1]
905
+ assert isinstance(obj, torch.nn.Module)
906
+ mod = obj
907
+
908
+ # Get the parent module
909
+ for item in path:
910
+
911
+ if not hasattr(mod, item):
912
+ return False
913
+
914
+ mod = getattr(mod, item)
915
+
916
+ if not isinstance(mod, (self.proxy_type, torch.nn.Module)):
917
+ return False
918
+
919
+ if not hasattr(mod, target_submod):
920
+ return False
921
+
922
+ # At least the leaf module should be proxy type.
923
+ if not isinstance(getattr(mod, target_submod), self.proxy_type):
924
+ return False
925
+
926
+ delattr(mod, target_submod)
927
+ return True
928
+
929
+ for (proxy_module_name, proxy_module) in proxy_module_names_to_be_replaced:
930
+ _delete_proxy_attr(self.root, proxy_module_name)
931
+ actual_module = self.proxy_modules[proxy_module]
932
+ _assign_attr(actual_module, self.root, proxy_module_name)
933
+
934
+ return res
935
+
936
+
937
+ def call_module(self, m, forward, args, kwargs):
938
+ """PythonKeyTracer overrides call_module to avoid the scope handling,
939
+ but we actually want it.
940
+ """
941
+ from torch._dynamo import OptimizedModule
942
+ # FIXME (tmanlaibaatar)
943
+ # When we call torch.compile inside HOO, we will end up
944
+ # invoking a module that is not registered on the root. For
945
+ # now, we just inline them. But once we start supporting
946
+ # mark_strict in export, we do need to properly handle this.
947
+ # Right now, it doesn't matter because current non-strict
948
+ # use cases don't need to work with HOO.
949
+ if isinstance(m, (OptimizedModule, GraphModule)):
950
+ return forward(*args, **kwargs)
951
+ return Tracer.call_module(self, m, forward, args, kwargs)
952
+
953
+
954
+ def is_leaf_module(self, m, module_qualified_name):
955
+ return False
956
+
957
+
958
+ def make_fx(f,
959
+ decomposition_table=None,
960
+ tracing_mode="real",
961
+ _allow_non_fake_inputs=False,
962
+ *,
963
+ pre_dispatch=False,
964
+ record_module_stack=False,
965
+ _allow_fake_constant=False,
966
+ _error_on_data_dependent_ops=True):
967
+ assert tracing_mode in ["real", "fake", "symbolic"]
968
+
969
+ if decomposition_table is None:
970
+ decomposition_table = {}
971
+
972
+ if torch.ops.aten.sym_numel.default not in decomposition_table:
973
+ decomposition_table = {
974
+ **decomposition_table,
975
+ torch.ops.aten.sym_numel.default: torch._decomp.decompositions.sym_numel
976
+ }
977
+
978
+ @functools.wraps(f)
979
+ def wrapped(*args):
980
+ # Avoid importing sympy at a module level
981
+ from .symbolic_shapes import ShapeEnv
982
+
983
+ phs = pytree.tree_map(lambda _: fx.PH, args) # type: ignore[attr-defined]
984
+
985
+ if hasattr(f, "_orig_mod") and record_module_stack:
986
+ scope_root = f._orig_mod
987
+ fx_tracer = _ModuleStackTracer(scope_root)
988
+ else:
989
+ fx_tracer = PythonKeyTracer()
990
+ fake_tensor_mode: Any = nullcontext()
991
+ if tracing_mode == "real":
992
+ fake_tensor_mode = nullcontext()
993
+ elif tracing_mode == "fake":
994
+ import torch._dynamo
995
+ fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(args)
996
+ if fake_tensor_mode is None:
997
+ fake_tensor_mode = FakeTensorMode(
998
+ allow_fallback_kernels=True,
999
+ allow_non_fake_inputs=_allow_non_fake_inputs,
1000
+ shape_env=ShapeEnv(),
1001
+ static_shapes=True,
1002
+ )
1003
+ elif tracing_mode == "symbolic":
1004
+ import torch._dynamo
1005
+ fake_tensor_mode = torch._dynamo.utils.detect_fake_mode(args)
1006
+ if fake_tensor_mode is None:
1007
+ shape_env = ShapeEnv()
1008
+ fake_tensor_mode = FakeTensorMode(
1009
+ allow_fallback_kernels=False,
1010
+ allow_non_fake_inputs=_allow_non_fake_inputs,
1011
+ shape_env=shape_env)
1012
+ else:
1013
+ shape_env = fake_tensor_mode.shape_env
1014
+ assert shape_env is not None, "shape_env should be set if tracing with 'symbolic'"
1015
+
1016
+ else:
1017
+ raise AssertionError(f"Unexpected tracing type: {tracing_mode}")
1018
+
1019
+ python_dispatcher_mode: Any = nullcontext()
1020
+ pre_dispatch_mode: Any = nullcontext()
1021
+ # pre-autograd tracing uses per-dispatch-key modes,
1022
+ # which requires the python dispatcher
1023
+ if tracing_mode == "symbolic" or pre_dispatch:
1024
+ python_dispatcher_mode = enable_python_dispatcher()
1025
+ if pre_dispatch:
1026
+ pre_dispatch_mode = enable_pre_dispatch()
1027
+
1028
+ proxy_function_mode: Any = nullcontext()
1029
+ if pre_dispatch:
1030
+ proxy_function_mode = PreDispatchTorchFunctionMode(fx_tracer)
1031
+
1032
+ proxy_mode = ProxyTorchDispatchMode(fx_tracer,
1033
+ tracing_mode,
1034
+ pre_dispatch=pre_dispatch,
1035
+ _allow_fake_constant=_allow_fake_constant,
1036
+ _error_on_data_dependent_ops=_error_on_data_dependent_ops)
1037
+
1038
+ arg_count = 0
1039
+
1040
+ def wrap_fake(x):
1041
+ nonlocal arg_count
1042
+ # TODO: it would be nice to line these up with the names
1043
+ # FX will choose for the placeholders, but we don't
1044
+ # actually know what the names will be at this point yet
1045
+ # NB: the Source here is actually meaningless
1046
+ from torch._dynamo.source import ConstantSource
1047
+ source = ConstantSource(f"input{arg_count}")
1048
+ if isinstance(x, torch.Tensor):
1049
+ arg_count += 1
1050
+ return fake_tensor_mode.from_tensor(x, source=source) # type: ignore[attr-defined]
1051
+ # NB: don't match on bools
1052
+ elif type(x) is int and tracing_mode == "symbolic":
1053
+ return shape_env.create_symintnode(shape_env.create_symbol(x, source, positive=None), hint=x, source=source)
1054
+
1055
+ return x
1056
+
1057
+ sym_mode = proxy_mode.sym_mode
1058
+
1059
+ wrap_fn_map = {
1060
+ "real": lambda x: x,
1061
+ "fake": wrap_fake,
1062
+ "symbolic": wrap_fake,
1063
+ }
1064
+ args = pytree.tree_map(wrap_fn_map[tracing_mode], args)
1065
+
1066
+ if not hasattr(inspect.unwrap(f), '__code__') or inspect.unwrap(f).__code__.co_flags & inspect.CO_VARARGS:
1067
+ # FX doesn't support varargs, so we gotta fake up a wrapper
1068
+ # TODO: Would be nice to fix this at the source...
1069
+ func = fake_signature(f, len(phs))
1070
+ else:
1071
+ func = f
1072
+
1073
+ # We disable the autocast cache as the autocast cache causes type conversions on parameters to
1074
+ # check a cache, which introduces untracked tensors into the graph
1075
+ #
1076
+ # We also disable tracing by any other tensor proxy-based tracers except the current. The
1077
+ # purpose of `make_fx` is to produce graphmodules as a side effect; its internal execution is
1078
+ # thus irrelevant to any external functional trace.
1079
+ with decompose(decomposition_table), fake_tensor_mode, python_dispatcher_mode, pre_dispatch_mode, proxy_function_mode, \
1080
+ sym_mode, proxy_mode, disable_autocast_cache():
1081
+ t = dispatch_trace(wrap_key(func, args, fx_tracer, pre_dispatch), tracer=fx_tracer, concrete_args=tuple(phs))
1082
+
1083
+ # TODO: kind of a bad way to do it, should maybe figure out a better way
1084
+ if tracing_mode == "symbolic":
1085
+ t.shape_env = shape_env # type: ignore[assignment]
1086
+ return t
1087
+
1088
+ return wrapped
1089
+
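+ # Illustrative sketch (hypothetical `f`): typical make_fx usage.
+ #
+ #   def f(x):
+ #       return x.cos() + x.sin()
+ #   gm = make_fx(f, tracing_mode="symbolic")(torch.randn(4))
+ #   print(gm.graph)   # ATen-level FX graph, with symbolic shapes when requested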
1090
+
1091
+ def get_torch_dispatch_modes():
1092
+ return torch.utils._python_dispatch._get_current_dispatch_mode_stack()
1093
+
1094
+
1095
+ def get_innermost_proxy_mode():
1096
+ return torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY)
1097
+
1098
+
1099
+ @contextlib.contextmanager
1100
+ def disable_proxy_modes_tracing():
1101
+ return _disable_infra_mode(torch._C._TorchDispatchModeKey.PROXY)
1102
+
1103
+
1104
+ def maybe_handle_decomp(proxy_mode, op, args, kwargs):
1105
+ if op in CURRENT_DECOMPOSITION_TABLE:
1106
+ with proxy_mode:
1107
+ return CURRENT_DECOMPOSITION_TABLE[op](*args, **kwargs)
1108
+ return NotImplemented
1109
+
1110
+
1111
+ def get_isolated_graphmodule(func, args, kwargs, tracing_mode="real"):
1112
+ """A helper function used to get the GraphModule for the given func.
1113
+
1114
+ It's expected to be used in the ProxyTensor tracing context.
1115
+ It detaches the args and kwargs from the current tracer so that the trace of
1116
+ the current graph module can be created without any side-effects.
1117
+ """
1118
+ wrapped, all_args = wrapper_and_args_for_make_fx(func, args, kwargs)
1119
+
1120
+ with disable_proxy_modes_tracing():
1121
+ gm = make_fx(wrapped, tracing_mode=tracing_mode)(all_args)
1122
+ return gm
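+ # Illustrative sketch (hypothetical tensors `x`, `y`): grab a standalone graph for
+ # an op while an outer proxy trace is active.
+ #
+ #   gm = get_isolated_graphmodule(torch.add, (x, y), {})
+ #   print(gm.graph)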
venv/lib/python3.10/site-packages/torch/fx/experimental/recording.py ADDED
@@ -0,0 +1,458 @@
1
+ import functools
2
+ import itertools
3
+ from dataclasses import dataclass
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.utils._pytree as pytree
8
+
9
+
10
+ __all__ = [
11
+ "ShapeEnvEvent",
12
+ "record_shapeenv_event",
13
+ "replay_shape_env_events",
14
+ "FakeTensorMeta",
15
+ "shape_env_check_state_equal",
16
+ "NotEqualError",
17
+ ]
18
+
19
+ # [Note: Recording ShapeEnv Events]
20
+ # =================================
21
+ #
22
+ # What is a ShapeEnv event?
23
+ # -------------------------
24
+ # A ShapeEnv event is any function call (ShapeEnv method or independent
25
+ # function) that modifies the state of the ShapeEnv instance. Such calls
26
+ # are recorded alongside their positional and keyword arguments, so that
27
+ # they may be replayed on a different ShapeEnv instance.
28
+ #
29
+ # See [Note: ShapeEnv State Equality] for what is considered the state
30
+ # of a ShapeEnv instance.
31
+ #
32
+ # What is it for?
33
+ # ---------------
34
+ # Recording ShapeEnv events is used for reconstructing the ShapeEnv at an
35
+ # arbitrary point in time.
36
+ #
37
+ # Being able to arbitrarily replay events like so is useful, mainly for
38
+ # translation validation bisection, i.e. if a ValidationException has been
39
+ # raised, finding the earliest point in time where the translation validation
40
+ # fails.
41
+ #
42
+ # Besides that, it also allows us to inspect the given instance and,
43
+ # for example, check the guards that would actually be issued at that point.
44
+ #
45
+ # What kind of arguments can be stored in an event?
46
+ # -------------------------------------------------
47
+ # There's no specific rule for what cannot be used as an argument.
48
+ # That said, pay special attention to the following cases:
49
+ #
50
+ # 1. Tensor inputs: there are some tests that check whether the inputs
51
+ # were garbage collected after execution. These will fail if there's
52
+ # an event that is holding a reference to those inputs.
53
+ #
54
+ # 2. ShapeEnv arguments: if there is an argument of ShapeEnv type, that
55
+ # will be automatically replaced by the new given ShapeEnv instance.
56
+ #
57
+ # 3. SymTypes arguments: they also hold references to ShapeEnv. So,
58
+ # whenever we see them, we create a new instance, replacing the
59
+ # ShapeEnv reference.
60
+ #
61
+ # 4. FX nodes: specifically, FX nodes from the FX graph for symbolic
62
+ # shapes. That argument must be replaced when replaying the event at
63
+ # ShapeEnvEvent.run, since it has to reference a node from the given
64
+ # instance, and not from the recorded instance.
65
+
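+ # Illustrative sketch (assumes the recorded events are kept on ShapeEnv.events, as
+ # done by the decorator below): record events on one ShapeEnv, then rebuild an
+ # equivalent instance by replaying them.
+ #
+ #   env = ShapeEnv()                                # the constructor is event 0
+ #   ...                                             # symbols/guards created while tracing
+ #   new_env = replay_shape_env_events(env.events)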
66
+
67
+ # Event class for reconstructing ShapeEnv at arbitrary time.
68
+ #
69
+ # Represents a method call that mutates ShapeEnv in a way that affects the
70
+ # issued guards, when ShapeEnv.produce_guards is called.
71
+ @dataclass
72
+ class ShapeEnvEvent:
73
+ # ShapeEnv method.
74
+ f: Callable
75
+
76
+ # Arguments and keyword arguments called with.
77
+ args: Optional[List[Any]] = None
78
+ kwargs: Optional[Dict[str, Any]] = None
79
+
80
+ # List of tracked_fakes at the time the method was called.
81
+ tracked_fakes: Optional[List[Any]] = None
82
+
83
+ # Name of the captured event.
84
+ # Used for special handling of particular methods.
85
+ name: Optional[str] = None
86
+
87
+ # Replay itself, but using shape_env as self.
88
+ def run(self, shape_env=None) -> Any:
89
+ from torch.fx.experimental.symbolic_shapes import (
90
+ is_symbolic,
91
+ ShapeEnv,
92
+ SymTypes,
93
+ )
94
+
95
+ # Special handling for the constructor event.
96
+ if self.f is ShapeEnv:
97
+ assert shape_env is None and self.args is None and self.kwargs is not None
98
+ return ShapeEnv(**self.kwargs)
99
+
100
+ assert shape_env is not None
101
+ args = list(self.args or list())
102
+ kwargs = dict(self.kwargs or dict())
103
+
104
+ # Replace any argument of type ShapeEnv by the given one.
105
+ args, kwargs = pytree.tree_map_only(
106
+ ShapeEnv, lambda _: shape_env, (args, kwargs)
107
+ )
108
+
109
+ # Replace any argument of type SymTypes by a new instance,
110
+ # replacing its ShapeEnv reference.
111
+ args, kwargs = pytree.tree_map_only(
112
+ lambda x: isinstance(x, SymTypes) and is_symbolic(x),
113
+ lambda a: type(a)(a.node.with_shape_env(shape_env)),
114
+ (args, kwargs),
115
+ )
116
+
117
+ # Converts FX nodes using the mapping argument.
118
+ def maybe_convert_node(x: Any) -> Any:
119
+ if not isinstance(x, torch.fx.Node):
120
+ # Don't do anything to x if it's not an FX node.
121
+ return x
122
+
123
+ # If, at some point, we created an FX node, it means that translation validation is on.
124
+ # It also means we are building an FX graph for symbolic shapes at shape_env.graph, and
125
+ # we are tracking node names at shape_env.name_to_node.
126
+ assert hasattr(shape_env, "name_to_node")
127
+ name_to_node = shape_env.name_to_node # type: ignore[attr-defined]
128
+ assert x.name in name_to_node
129
+ return name_to_node[x.name]
130
+
131
+ # Replaces the value of a specific argument by the result of fn.
132
+ def replacearg(index: int, key: str, fn: Callable):
133
+ if index < len(args):
134
+ args[index] = fn(args[index])
135
+ if key in kwargs:
136
+ kwargs[key] = fn(kwargs[key])
137
+
138
+ if self.is_create_fx_call_function():
139
+ # ShapeEnv.create_fx_call_function:
140
+ # "args" parameter is a tuple of FX nodes from the FX graph of the old ShapeEnv.
141
+ # They must be replaced, since a "call_function" FX node with this tuple as argument
142
+ # will be added to the FX graph of the new shape_env.
143
+ replacearg(
144
+ index=2,
145
+ key="args",
146
+ fn=lambda args: tuple(maybe_convert_node(a) for a in args),
147
+ )
148
+ if self.is_evaluate_expr() or self.is_defer_runtime_assert():
149
+ # ShapeEnv.evaluate_expr and ShapeEnv.defer_runtime_assert:
150
+ # "fx_node" parameter is an (optional) FX node that represents the evaluate expression.
151
+ # They must be replaced, since it will be part of a "call_function" FX node for
152
+ # torch._assert, which will be added to the FX graph of the new shape_env.
153
+ replacearg(index=3, key="fx_node", fn=maybe_convert_node)
154
+
155
+ # Actually call the method with the converted arguments.
156
+ return self.f(*args, **kwargs)
157
+
158
+ def __str__(self) -> str:
159
+ name = self.name if self.name is not None else self.f.__name__
160
+ return f"event: {name} ({self.args}, {self.kwargs})"
161
+
162
+ def is_create_fx_call_function(self) -> bool:
163
+ return self.name == "_create_fx_call_function"
164
+
165
+ def is_evaluate_expr(self) -> bool:
166
+ return self.name == "evaluate_expr"
167
+
168
+ def is_defer_runtime_assert(self) -> bool:
169
+ return self.name == "defer_runtime_assert"
170
+
171
+
172
+ # Extracts a ShapeEnv instance inside args and kwargs.
173
+ # Specifically, it looks for:
174
+ # 1. ShapeEnv arguments
175
+ # 2. SymInt, SymFloat, or SymBool arguments
176
+ # If we find more than one object of any of the above types, we
177
+ # also check that the ShapeEnv instance is the same for all of them.
178
+ def _extract_shape_env_and_assert_equal(args, kwargs):
179
+ from torch.fx.experimental.symbolic_shapes import is_symbolic, ShapeEnv, SymTypes
180
+
181
+ def assert_equal(old: Optional[ShapeEnv], new: ShapeEnv) -> ShapeEnv:
182
+ if old is not None:
183
+ assert old is new, "call with different ShapeEnv"
184
+ return new
185
+
186
+ shape_env = None
187
+ for val in itertools.chain(args, kwargs.values()):
188
+ if isinstance(val, ShapeEnv):
189
+ shape_env = assert_equal(shape_env, val)
190
+ if isinstance(val, SymTypes) and is_symbolic(val):
191
+ shape_env = assert_equal(shape_env, val.node.shape_env)
192
+
193
+ return shape_env
194
+
195
+
196
+ # Decorator for recording the given function as a replayable event.
197
+ #
198
+ # This decorator should be used at every function that mutates the state of
199
+ # ShapeEnv in some way that affects the resulting issued guards (i.e. when
200
+ # ShapeEnv.produce_guards is called).
201
+ #
202
+ # save_tracked_fakes: saves a snapshot of the TrackedFake list.
203
+ # This is used when calling ShapeEnv.produce_guards at arbitrary points in time.
204
+ #
205
+ # When to save the list of TrackedFake?
206
+ # =====================================
207
+ # We should save the list of TrackedFake whenever the translation validation
208
+ # bisection may actually stop and call the produce_guards method at the moment
209
+ # right after the recorded function was played. In other words, since the
210
+ # bisection bisects through torch._assert calls, we should save in all methods
211
+ # that add a torch._assert call to the symbolic shapes FX graph.
212
+ #
213
+ # At the moment, there are 2 methods that save the list:
214
+ # - ShapeEnv.evaluate_expr
215
+ # - ShapeEnv.defer_runtime_assert
216
+ def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
217
+ def decorator(fn: Callable) -> Callable:
218
+ assert callable(fn)
219
+ name = fn.__name__
220
+
221
+ @functools.wraps(fn)
222
+ def wrapper(*args, **kwargs):
223
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
224
+
225
+ if isinstance(args[0], ShapeEnv) and args[0].is_recording: # type: ignore[has-type]
226
+ # If ShapeEnv is already recording an event, call the wrapped
227
+ # function directly.
228
+ #
229
+ # NB: here, we skip the check of whether all ShapeEnv instances
230
+ # are equal, in favor of a faster dispatch.
231
+ return fn(*args, **kwargs)
232
+
233
+ # Retrieve an instance of ShapeEnv.
234
+ # Assumption: the collection of args and kwargs may not reference
235
+ # different ShapeEnv instances.
236
+ self = _extract_shape_env_and_assert_equal(args, kwargs)
237
+
238
+ # If we are calling this function without any ShapeEnv instance
239
+ # alive in its arguments, we don't record and call the original.
240
+ if self is None:
241
+ return fn(*args, **kwargs)
242
+
243
+ # Otherwise, start recording and call the function.
244
+ with self._recording():
245
+ # Take a snapshot of the current tracked_fakes.
246
+ tracked_fakes = (
247
+ self._snapshot_tracked_fakes() if save_tracked_fakes else None
248
+ )
249
+ # Record the event for 'fn'.
250
+ event = ShapeEnvEvent(
251
+ fn, list(args), kwargs, tracked_fakes, name=fn.__name__
252
+ )
253
+ self.events.append(event)
254
+ # Play the event on this ShapeEnv.
255
+ return event.run(self)
256
+
257
+ return wrapper
258
+
259
+ return decorator
260
+
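+ # Illustrative sketch (simplified signature): how a ShapeEnv method is made
+ # replayable; the real decorations live in torch/fx/experimental/symbolic_shapes.py.
+ #
+ #   class ShapeEnv:
+ #       @record_shapeenv_event(save_tracked_fakes=True)
+ #       def evaluate_expr(self, expr, ...):
+ #           ...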
261
+
262
+ # Replays the ShapeEnvEvents list.
263
+ # It assumes the first event is the constructor call.
264
+ #
265
+ # fn: transforms an old FX node into one corresponding to the newly created ShapeEnv.
266
+ def replay_shape_env_events(events):
267
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
268
+
269
+ constructor_event = events[0]
270
+ assert constructor_event.f == ShapeEnv
271
+
272
+ # Constructs the new ShapeEnv.
273
+ shape_env = constructor_event.run()
274
+
275
+ for event in events[1:]:
276
+ try:
277
+ # Actually replays each event.
278
+ # We need to call create_mapping_fn every time, since the node list might
279
+ # change after each event is replayed.
280
+ event.run(shape_env)
281
+ except Exception as e:
282
+ raise RuntimeError(f"failed when running event: {event}") from e
283
+
284
+ return shape_env
285
+
286
+
287
+ # FakeTensor metadata.
288
+ # This is to be used in place of FakeTensor placeholders when calling
289
+ # ShapeEnv.produce_guards.
290
+ @dataclass
291
+ class FakeTensorMeta:
292
+ tensor_size: Tuple[Union[int, torch.SymInt], ...]
293
+ tensor_stride: Tuple[Union[int, torch.SymInt], ...]
294
+ tensor_storage_offset: Union[int, torch.SymInt]
295
+ is_nested: bool
296
+
297
+ def size(self) -> Tuple[Union[int, torch.SymInt], ...]:
298
+ return self.tensor_size
299
+
300
+ def stride(self) -> Tuple[Union[int, torch.SymInt], ...]:
301
+ return self.tensor_stride
302
+
303
+ def storage_offset(self) -> Union[int, torch.SymInt]:
304
+ return self.tensor_storage_offset
305
+
306
+ def dim(self) -> int:
307
+ return len(self.tensor_size)
308
+
309
+ @staticmethod
310
+ def from_fake(fake) -> "FakeTensorMeta":
311
+ return FakeTensorMeta(
312
+ fake.size(), fake.stride(), fake.storage_offset(), fake.is_nested
313
+ )
314
+
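+ # Illustrative sketch (hypothetical FakeTensor `ft`): a lightweight stand-in that
+ # still answers the size/stride/storage_offset queries produce_guards makes.
+ #
+ #   meta = FakeTensorMeta.from_fake(ft)
+ #   meta.size(), meta.stride(), meta.storage_offset(), meta.dim()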
315
+
316
+ # [Note: ShapeEnv State Equality]
317
+ # ===============================
318
+ #
319
+ # What is considered ShapeEnv state?
320
+ # ----------------------------------
321
+ # We consider the state of a ShapeEnv instance to be everything that
322
+ # is not in the inline tuple inside the remove_nonstate_variables function.
323
+ # That is: the fields within ShapeEnv that modify the flow of execution
324
+ # of the program.
325
+ #
326
+ # So, for example: the replacements field might influence how an
327
+ # expression is simplified. That, in turn, may result in a guard being
328
+ # statically known (i.e. not added).
329
+ #
330
+ # On the other hand, var_to_stack only changes what is printed
331
+ # on the screen, i.e. it is used only for debugging purposes. Therefore, we
332
+ # should not consider it when comparing states.
333
+ #
334
+ # What to do on NotEqualError?
335
+ # ----------------------------
336
+ # Here are a few possible causes for getting a NotEqualError raised:
337
+ #
338
+ # 1. New field that does not belong in the ShapeEnv state.
339
+ # For example: log field of type ShapeEnvLoggerAdapter. Different
340
+ # ShapeEnv instances will always have different ShapeEnvLoggerAdapter
341
+ # instances, i.e. equality comparison would fail.
342
+ # Solution: add it to the inlined tuple inside remove_nonstate_variables
343
+ # function inside check_equal method.
344
+ #
345
+ # 2. New field that is not directly comparable across instances.
346
+ # For example: guards field of type List[ShapeGuard]. More specifically,
347
+ # the ShapeGuard type holds an expression and stack information
348
+ # for debugging purposes. When replaying the event on a new ShapeEnv
349
+ # instance, the stack would be different, which would trigger this error.
350
+ # Solution: add a special case to the map_value function inside
351
+ # check_equal function.
352
+ #
353
+ # 3. Mutation of ShapeEnv on some not recorded function.
354
+ # If a mutation of the state of ShapeEnv happens inside a function
355
+ # that is not recorded (or that no caller in the stack is recorded),
356
+ # then, the replayed ShapeEnv won't catch that.
357
+ # Solution: decorate the function with record_shape_env_event.
358
+
359
+
360
+ # Checks whether the state of two ShapeEnv are equal w.r.t. the guards
361
+ # returned by ShapeEnv.produce_guards.
362
+ def shape_env_check_state_equal(env1, env2, non_state_variable_names, map_value):
363
+ # Collect and remove variables that don't necessarily represent the state
364
+ # of a ShapeEnv. Note: we copy the dictionary so that we don't modify the
365
+ # instance itself.
366
+ env1_vars = vars(env1).copy()
367
+ env2_vars = vars(env2).copy()
368
+
369
+ for v in non_state_variable_names:
370
+ if v in env1_vars:
371
+ env1_vars.pop(v)
372
+ if v in env2_vars:
373
+ env2_vars.pop(v)
374
+
375
+ # Function for transforming the mismatched values into string.
376
+ # Needed, since dict and set entries order might not be the same every time.
377
+ def value_to_str(value: Any) -> str:
378
+ if isinstance(value, dict):
379
+ return (
380
+ "{"
381
+ + ", ".join(f"{k}: {value[k]}" for k in sorted(value.keys(), key=str))
382
+ + "}"
383
+ )
384
+ if isinstance(value, set):
385
+ return "{" + ", ".join(f"{v}" for v in sorted(value)) + "}"
386
+ return str(value)
387
+
388
+ # Compares env1_vars with env2_vars.
389
+ # Here, we allow the value of each field to be mapped, so that we appropriately
390
+ # compare the two values.
391
+ def compare_vars(
392
+ map_value: Callable[[str, Any], Any]
393
+ ) -> List[Tuple[str, str, str]]:
394
+ env1_set, env2_set = set(env1_vars), set(env2_vars)
395
+
396
+ # First, compare the set of keys in each vars dictionary.
397
+ if env1_set != env2_set:
398
+ raise NotEqualError(
399
+ "field set mismatch:",
400
+ [
401
+ (
402
+ "found unique fields:",
403
+ str(sorted(env1_set - env2_set)),
404
+ str(sorted(env2_set - env1_set)),
405
+ ),
406
+ ],
407
+ )
408
+
409
+ # Then, sort the keys, and compare the mapped values of each key.
410
+ sorted_keys = list(env1_set)
411
+ sorted_keys.sort()
412
+
413
+ mapped_dict = [
414
+ (k, map_value(k, env1_vars[k]), map_value(k, env2_vars[k]))
415
+ for k in sorted_keys
416
+ ]
417
+
418
+ # Return a list of tuples representing the fields that did not match
419
+ # alongside their respective mapped values.
420
+ return [
421
+ (f"{k}: values don't match.", value_to_str(val1), value_to_str(val2))
422
+ for k, val1, val2 in mapped_dict
423
+ if val1 != val2
424
+ ]
425
+
426
+ # Accumulate the mismatching fields.
427
+ errors = compare_vars(map_value)
428
+
429
+ if len(errors) > 0:
430
+ raise NotEqualError("field values don't match:", errors)
431
+
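+ # Illustrative sketch (assumed non-state field names): comparing a replayed
+ # ShapeEnv against the original while ignoring debug-only fields.
+ #
+ #   shape_env_check_state_equal(
+ #       env1, env2,
+ #       non_state_variable_names=("log", "var_to_stack"),
+ #       map_value=lambda field, value: value,
+ #   )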
432
+
433
+ class NotEqualError(Exception):
434
+ def __init__(
435
+ self,
436
+ msg: str,
437
+ mismatched: List[Tuple[str, str, str]],
438
+ ) -> None:
439
+ details = "\n".join(
440
+ [
441
+ "\n".join(
442
+ [
443
+ f"==> {inner_msg}",
444
+ f" > Left: {str1}",
445
+ f" > Right: {str2}",
446
+ ]
447
+ )
448
+ for inner_msg, str1, str2 in mismatched
449
+ ]
450
+ )
451
+
452
+ super().__init__(
453
+ f"""\
454
+ ShapeEnv not equal: {msg}
455
+
456
+ {details}
457
+ """
458
+ )
venv/lib/python3.10/site-packages/torch/fx/experimental/refinement_types.py ADDED
@@ -0,0 +1,16 @@
1
+ class Equality:
2
+ def __init__(self, lhs, rhs):
3
+ self.lhs = lhs
4
+ self.rhs = rhs
5
+
6
+ def __str__(self):
7
+ return f'{self.lhs} = {self.rhs}'
8
+
9
+ def __repr__(self):
10
+ return f'{self.lhs} = {self.rhs}'
11
+
12
+ def __eq__(self, other):
13
+ if isinstance(other, Equality):
14
+ return self.lhs == other.lhs and self.rhs == other.rhs
15
+ else:
16
+ return False
venv/lib/python3.10/site-packages/torch/fx/experimental/rewriter.py ADDED
@@ -0,0 +1,121 @@
1
+ import ast
2
+ import inspect
3
+ import textwrap
4
+ import copy
5
+ import functools
6
+ from types import FunctionType
7
+ from typing import cast, Union, Callable, Dict, Optional, Any
8
+ from torch.fx._symbolic_trace import Tracer
9
+ from torch.fx.graph import Graph
10
+ from torch._sources import normalize_source_lines
11
+ import torch
12
+
13
+ class AST_Rewriter(ast.NodeTransformer):
14
+ """
15
+ Take a FunctionType object representing a `forward` method, then
16
+ perform an AST rewrite to swap out nodes that are not symbolically
17
+ traceable with a callsite to the FX alternative.
18
+
19
+ To support swapping out an AST node, define a new `visit` method on
20
+ that node. For more details, see:
21
+ https://docs.python.org/3/library/ast.html#ast.NodeTransformer
22
+ """
23
+
24
+ def rewrite(self, fn: FunctionType):
25
+
26
+ # Normalize the source lines
27
+ sourcelines, _ = inspect.getsourcelines(fn)
28
+ sourcelines = normalize_source_lines(sourcelines)
29
+ source = ''.join(sourcelines)
30
+ normalized_str = textwrap.dedent(source)
31
+
32
+ # Rewrite the original AST
33
+ source_ast = ast.parse(normalized_str)
34
+ dest_ast = ast.fix_missing_locations(self.visit(source_ast))
35
+
36
+ # Pull out the compiled function from the newly-created Module
37
+ code = compile(dest_ast, "", "exec")
38
+ globals_dict = copy.copy(fn.__globals__)
39
+ keys_before = set(globals_dict.keys())
40
+ exec(code, globals_dict)
41
+ new_keys = list(set(globals_dict.keys()) - keys_before)
42
+ assert len(new_keys) == 1
43
+ fn_compiled = globals_dict[new_keys[0]]
44
+
45
+ # return the compiled function with the original globals
46
+ def change_func_globals(f, globals):
47
+ """Based on https://stackoverflow.com/a/13503277/2988730 (@unutbu)"""
48
+ # __globals__ is a private member of the function class
49
+ # so we have to copy the function, f, and all of its members, except f.__globals__
50
+ g = FunctionType(
51
+ f.__code__,
52
+ globals,
53
+ name=f.__name__,
54
+ argdefs=f.__defaults__,
55
+ closure=f.__closure__,
56
+ )
57
+ g = functools.update_wrapper(g, f)
58
+ g.__kwdefaults__ = copy.copy(f.__kwdefaults__)
59
+ return g
60
+ # Return the correct FunctionType object
61
+ return change_func_globals(fn_compiled, globals=fn.__globals__)
62
+
63
+ def visit_Assert(self, node):
64
+ """
65
+ Swap out the Assert node (Python's `assert`) with a callsite to the
66
+ symbolically-traceable torch._assert function
67
+ """
68
+ # Create the Call node
69
+ n = ast.parse('torch._assert()', mode='eval')
70
+ assert isinstance(n, ast.Expression)
71
+ call_node = n.body
72
+ assert isinstance(call_node, ast.Call)
73
+ msg = node.msg if node.msg else ast.Constant(value="", kind=None)
74
+ call_node.args = [node.test, msg]
75
+
76
+ # Ensure that the new node conforms to the Python AST grammar
77
+ expr_wrapper = ast.Expr(value=call_node)
78
+
79
+ # Return the new Call node to signify that we want to use it as
80
+ # a replacement for the original _assert node
81
+ return ast.copy_location(expr_wrapper, node)
82
+
83
+ def visit_AnnAssign(self, node):
84
+ """
85
+ Swap out Python's AnnAssign with an Assign node where the annotation function is called.
86
+ Example:
87
+ Original:
88
+ y: Tensor_Type(1,2,3, Dyn) = f2(x)
89
+ Output:
90
+ y = annotate(f2(x),Tensor_Type((1,2,3,Dyn)))
91
+ """
92
+ return ast.Assign(targets=[node.target], value=ast.Call(
93
+ func=ast.Name(id='annotate', ctx=ast.Load()),
94
+ args=[node.value, node.annotation], keywords=[]))
95
+
96
+
97
+ class RewritingTracer(Tracer):
98
+ def trace(self, root: Union[torch.nn.Module, Callable], concrete_args: Optional[Dict[str, Any]] = None) -> Graph:
99
+ return super().trace(_rewrite(root), concrete_args)
100
+
101
+
102
+ def _rewrite(fn: Union[torch.nn.Module, Callable]) -> Union[torch.nn.Module, Callable]:
103
+ if isinstance(fn, torch.nn.Module):
104
+ # Rewrite this module's `forward` as well as the `forward`s of
105
+ # all of this module's recursive descendants. Return the new,
106
+ # rewritten module hierarchy.
107
+ def rewrite_module(m : torch.nn.Module):
108
+ class RewrittenModule(torch.nn.Module):
109
+ def __init__(self, orig):
110
+ super().__init__()
111
+ for k, v in orig.__dict__.items():
112
+ if isinstance(v, torch.nn.Module):
113
+ self.__dict__[k] = copy.copy(rewrite_module(v))
114
+ else:
115
+ self.__dict__[k] = copy.copy(v)
116
+ RewrittenModule.forward = AST_Rewriter().rewrite(cast(FunctionType, m.forward))
117
+ return RewrittenModule(m)
118
+ return rewrite_module(fn)
119
+ else:
120
+ # Rewrite this single free function
121
+ return AST_Rewriter().rewrite(cast(FunctionType, fn))
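A hedged usage sketch: tracing a module whose forward contains a plain `assert`, which the rewriter swaps for `torch._assert` so symbolic tracing can proceed (the module itself is made up):

import torch
from torch.fx import GraphModule
from torch.fx.experimental.rewriter import RewritingTracer

class WithAssert(torch.nn.Module):
    def forward(self, x):
        assert x.dim() == 2, "expected a 2-D input"   # rewritten to torch._assert(...)
        return x.relu()

tracer = RewritingTracer()
graph = tracer.trace(WithAssert())
gm = GraphModule(tracer.root, graph)   # the assert appears as a torch._assert call_function node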
venv/lib/python3.10/site-packages/torch/fx/experimental/schema_type_annotation.py ADDED
@@ -0,0 +1,111 @@
1
+ import torch
2
+ import torch.fx
3
+ import inspect
4
+ from typing import Any, Dict, Optional, Tuple
5
+ from torch.fx.node import Argument, Target
6
+ from torch._jit_internal import boolean_dispatched
7
+ from torch.fx.operator_schemas import _torchscript_type_to_python_type
8
+
9
+ from torch.fx import Transformer
10
+
11
+ class AnnotateTypesWithSchema(Transformer):
12
+ """
13
+ Use Python function signatures to annotate types for `Nodes` within an FX graph.
14
+ This pulls out Python function signatures for:
15
+
16
+ 1. Standard `torch.nn` Module calls
17
+ 2. `torch.nn.functional` calls
18
+ 3. Attribute fetches via `get_attr`
19
+
20
+ Example usage:
21
+
22
+ m = torchvision.models.resnet18()
23
+
24
+ traced = torch.fx.symbolic_trace(m)
25
+
26
+ traced = AnnotateTypesWithSchema(traced).transform()
27
+
28
+ """
29
+ def __init__(self, module : torch.nn.Module, annotate_functionals : bool = True,
30
+ annotate_modules : bool = True, annotate_get_attrs : bool = True):
31
+ super().__init__(module)
32
+ self.annotate_functionals = annotate_functionals
33
+ self.annotate_modules = annotate_modules
34
+ self.annotate_get_attrs = annotate_get_attrs
35
+
36
+ def call_function(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
37
+ python_ret_type = None
38
+ if self.annotate_functionals and target.__module__ == 'torch.nn.functional':
39
+ target_for_analysis = target
40
+ if target in boolean_dispatched:
41
+ # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
42
+ # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
43
+ # branches of the dispatch have exactly the same signature. If they do, use the `true`
44
+ # branch signature for analysis. Otherwise, leave this un-normalized
45
+ assert not isinstance(target, str)
46
+ dispatched = boolean_dispatched[target]
47
+ if_true, if_false = dispatched['if_true'], dispatched['if_false']
48
+ # TODO: can we emit the union of these? What are the implications on TorchScript
49
+ # compilation?
50
+ if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
51
+ return super().call_function(target, args, kwargs)
52
+ target_for_analysis = if_true
53
+
54
+ python_ret_type = self._extract_python_return_type(target_for_analysis)
55
+
56
+ return_proxy = super().call_function(target, args, kwargs)
57
+ return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
58
+ return return_proxy
59
+
60
+ def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
61
+ python_ret_type = None
62
+ assert isinstance(target, str)
63
+ submod = self.fetch_attr(target)
64
+ if self.annotate_modules and hasattr(submod.__class__, '__name__'):
65
+ classname = submod.__class__.__name__
66
+ if getattr(torch.nn, classname, None) == submod.__class__:
67
+ python_ret_type = self._extract_python_return_type(submod.forward)
68
+ return_proxy = super().call_module(target, args, kwargs)
69
+ return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
70
+ return return_proxy
71
+
72
+ def get_attr(self, target : torch.fx.node.Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
73
+ attr_proxy = super().get_attr(target, args, kwargs)
74
+
75
+ if self.annotate_get_attrs:
76
+ module_itr = self.module
77
+ assert isinstance(target, str)
78
+ atoms = target.split('.')
79
+ for i, atom in enumerate(atoms):
80
+ if not hasattr(module_itr, atom):
81
+ raise RuntimeError(f'Node referenced nonexistent target {".".join(atoms[:i])}!')
82
+ module_itr = getattr(module_itr, atom)
83
+
84
+ maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
85
+ if maybe_inferred_ts_type.success():
86
+ python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
87
+ attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type
88
+
89
+ return attr_proxy
90
+
91
+ def _extract_python_return_type(self, target : Target) -> Optional[Any]:
92
+ """
93
+ Given a Python call target, try to extract the Python return annotation
94
+ if it is available, otherwise return None
95
+
96
+ Args:
97
+
98
+ target (Callable): Python callable to get return annotation for
99
+
100
+ Returns:
101
+
102
+ Optional[Any]: Return annotation from the `target`, or None if it was
103
+ not available.
104
+ """
105
+ assert callable(target)
106
+ try:
107
+ sig = inspect.signature(target)
108
+ except (ValueError, TypeError):
109
+ return None
110
+
111
+ return sig.return_annotation if sig.return_annotation is not inspect.Signature.empty else None
venv/lib/python3.10/site-packages/torch/fx/experimental/unification/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # mypy: disable-error-code=attr-defined
2
+ from .core import unify, reify # noqa: F403
3
+ from .more import unifiable # noqa: F403
4
+ from .variable import var, isvar, vars, variables, Var # noqa: F403
venv/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py ADDED
@@ -0,0 +1,6 @@
1
+ from functools import partial
2
+ from .multipledispatch import dispatch # type: ignore[import]
3
+
4
+ namespace = {} # type: ignore[var-annotated]
5
+
6
+ dispatch = partial(dispatch, namespace=namespace)
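This gives the unification package its own dispatch namespace, so registrations here do not collide with any global multipledispatch state. A minimal sketch of how the wrapped decorator is used (the `double` function is made up):

from torch.fx.experimental.unification.dispatch import dispatch

@dispatch(int)
def double(x):
    return x * 2

@dispatch(list)            # a second implementation registered under the same name
def double(x):
    return [v * 2 for v in x]

assert double(3) == 6 and double([1, 2]) == [2, 4]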
venv/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py ADDED
@@ -0,0 +1,395 @@
1
+ import collections
2
+ import operator
3
+ from functools import reduce
4
+ from collections.abc import Mapping
5
+
6
+ __all__ = ('merge', 'merge_with', 'valmap', 'keymap', 'itemmap',
7
+ 'valfilter', 'keyfilter', 'itemfilter',
8
+ 'assoc', 'dissoc', 'assoc_in', 'update_in', 'get_in')
9
+
10
+
11
+ def _get_factory(f, kwargs):
12
+ factory = kwargs.pop('factory', dict)
13
+ if kwargs:
14
+ raise TypeError(f"{f.__name__}() got an unexpected keyword argument '{kwargs.popitem()[0]}'")
15
+ return factory
16
+
17
+
18
+ def merge(*dicts, **kwargs):
19
+ """ Merge a collection of dictionaries
20
+
21
+ >>> merge({1: 'one'}, {2: 'two'})
22
+ {1: 'one', 2: 'two'}
23
+
24
+ Later dictionaries have precedence
25
+
26
+ >>> merge({1: 2, 3: 4}, {3: 3, 4: 4})
27
+ {1: 2, 3: 3, 4: 4}
28
+
29
+ See Also:
30
+ merge_with
31
+ """
32
+ if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
33
+ dicts = dicts[0]
34
+ factory = _get_factory(merge, kwargs)
35
+
36
+ rv = factory()
37
+ for d in dicts:
38
+ rv.update(d)
39
+ return rv
40
+
41
+
42
+ def merge_with(func, *dicts, **kwargs):
43
+ """ Merge dictionaries and apply function to combined values
44
+
45
+ A key may occur in more than one dict, and all values mapped from the key
46
+ will be passed to the function as a list, such as func([val1, val2, ...]).
47
+
48
+ >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20})
49
+ {1: 11, 2: 22}
50
+
51
+ >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30}) # doctest: +SKIP
52
+ {1: 1, 2: 2, 3: 30}
53
+
54
+ See Also:
55
+ merge
56
+ """
57
+ if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
58
+ dicts = dicts[0]
59
+ factory = _get_factory(merge_with, kwargs)
60
+
61
+ result = factory()
62
+ for d in dicts:
63
+ for k, v in d.items():
64
+ if k not in result:
65
+ result[k] = [v]
66
+ else:
67
+ result[k].append(v)
68
+ return valmap(func, result, factory)
69
+
70
+
71
+ def valmap(func, d, factory=dict):
72
+ """ Apply function to values of dictionary
73
+
74
+ >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
75
+ >>> valmap(sum, bills) # doctest: +SKIP
76
+ {'Alice': 65, 'Bob': 45}
77
+
78
+ See Also:
79
+ keymap
80
+ itemmap
81
+ """
82
+ rv = factory()
83
+ rv.update(zip(d.keys(), map(func, d.values())))
84
+ return rv
85
+
86
+
87
+ def keymap(func, d, factory=dict):
88
+ """ Apply function to keys of dictionary
89
+
90
+ >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
91
+ >>> keymap(str.lower, bills) # doctest: +SKIP
92
+ {'alice': [20, 15, 30], 'bob': [10, 35]}
93
+
94
+ See Also:
95
+ valmap
96
+ itemmap
97
+ """
98
+ rv = factory()
99
+ rv.update(zip(map(func, d.keys()), d.values()))
100
+ return rv
101
+
102
+
103
+ def itemmap(func, d, factory=dict):
104
+ """ Apply function to items of dictionary
105
+
106
+ >>> accountids = {"Alice": 10, "Bob": 20}
107
+ >>> itemmap(reversed, accountids) # doctest: +SKIP
108
+ {10: "Alice", 20: "Bob"}
109
+
110
+ See Also:
111
+ keymap
112
+ valmap
113
+ """
114
+ rv = factory()
115
+ rv.update(map(func, d.items()))
116
+ return rv
117
+
118
+
119
+ def valfilter(predicate, d, factory=dict):
120
+ """ Filter items in dictionary by value
121
+
122
+ >>> iseven = lambda x: x % 2 == 0
123
+ >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
124
+ >>> valfilter(iseven, d)
125
+ {1: 2, 3: 4}
126
+
127
+ See Also:
128
+ keyfilter
129
+ itemfilter
130
+ valmap
131
+ """
132
+ rv = factory()
133
+ for k, v in d.items():
134
+ if predicate(v):
135
+ rv[k] = v
136
+ return rv
137
+
138
+
139
+ def keyfilter(predicate, d, factory=dict):
140
+ """ Filter items in dictionary by key
141
+
142
+ >>> iseven = lambda x: x % 2 == 0
143
+ >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
144
+ >>> keyfilter(iseven, d)
145
+ {2: 3, 4: 5}
146
+
147
+ See Also:
148
+ valfilter
149
+ itemfilter
150
+ keymap
151
+ """
152
+ rv = factory()
153
+ for k, v in d.items():
154
+ if predicate(k):
155
+ rv[k] = v
156
+ return rv
157
+
158
+
159
+ def itemfilter(predicate, d, factory=dict):
160
+ """ Filter items in dictionary by item
161
+
162
+ >>> def isvalid(item):
163
+ ... k, v = item
164
+ ... return k % 2 == 0 and v < 4
165
+
166
+ >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
167
+ >>> itemfilter(isvalid, d)
168
+ {2: 3}
169
+
170
+ See Also:
171
+ keyfilter
172
+ valfilter
173
+ itemmap
174
+ """
175
+ rv = factory()
176
+ for item in d.items():
177
+ if predicate(item):
178
+ k, v = item
179
+ rv[k] = v
180
+ return rv
181
+
182
+
183
+ def assoc(d, key, value, factory=dict):
184
+ """ Return a new dict with new key value pair
185
+
186
+ New dict has d[key] set to value. Does not modify the initial dictionary.
187
+
188
+ >>> assoc({'x': 1}, 'x', 2)
189
+ {'x': 2}
190
+ >>> assoc({'x': 1}, 'y', 3) # doctest: +SKIP
191
+ {'x': 1, 'y': 3}
192
+ """
193
+ d2 = factory()
194
+ d2.update(d)
195
+ d2[key] = value
196
+ return d2
197
+
198
+
199
+ def dissoc(d, *keys, **kwargs):
200
+ """ Return a new dict with the given key(s) removed.
201
+
202
+ New dict has d[key] deleted for each supplied key.
203
+ Does not modify the initial dictionary.
204
+
205
+ >>> dissoc({'x': 1, 'y': 2}, 'y')
206
+ {'x': 1}
207
+ >>> dissoc({'x': 1, 'y': 2}, 'y', 'x')
208
+ {}
209
+ >>> dissoc({'x': 1}, 'y') # Ignores missing keys
210
+ {'x': 1}
211
+ """
212
+ factory = _get_factory(dissoc, kwargs)
213
+ d2 = factory()
214
+
215
+ if len(keys) < len(d) * .6:
216
+ d2.update(d)
217
+ for key in keys:
218
+ if key in d2:
219
+ del d2[key]
220
+ else:
221
+ remaining = set(d)
222
+ remaining.difference_update(keys)
223
+ for k in remaining:
224
+ d2[k] = d[k]
225
+ return d2
226
+
227
+
228
+ def assoc_in(d, keys, value, factory=dict):
229
+ """ Return a new dict with new, potentially nested, key value pair
230
+
231
+ >>> purchase = {'name': 'Alice',
232
+ ... 'order': {'items': ['Apple', 'Orange'],
233
+ ... 'costs': [0.50, 1.25]},
234
+ ... 'credit card': '5555-1234-1234-1234'}
235
+ >>> assoc_in(purchase, ['order', 'costs'], [0.25, 1.00]) # doctest: +SKIP
236
+ {'credit card': '5555-1234-1234-1234',
237
+ 'name': 'Alice',
238
+ 'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}}
239
+ """
240
+ return update_in(d, keys, lambda x: value, value, factory)
241
+
242
+
243
+ def update_in(d, keys, func, default=None, factory=dict):
244
+ """ Update value in a (potentially) nested dictionary
245
+
246
+ inputs:
247
+ d - dictionary on which to operate
248
+ keys - list or tuple giving the location of the value to be changed in d
249
+ func - function to operate on that value
250
+
251
+ If keys == [k0,..,kX] and d[k0]..[kX] == v, update_in returns a copy of the
252
+ original dictionary with v replaced by func(v), but does not mutate the
253
+ original dictionary.
254
+
255
+ If k0 is not a key in d, update_in creates nested dictionaries to the depth
256
+ specified by the keys, with the innermost value set to func(default).
257
+
258
+ >>> inc = lambda x: x + 1
259
+ >>> update_in({'a': 0}, ['a'], inc)
260
+ {'a': 1}
261
+
262
+ >>> transaction = {'name': 'Alice',
263
+ ... 'purchase': {'items': ['Apple', 'Orange'],
264
+ ... 'costs': [0.50, 1.25]},
265
+ ... 'credit card': '5555-1234-1234-1234'}
266
+ >>> update_in(transaction, ['purchase', 'costs'], sum) # doctest: +SKIP
267
+ {'credit card': '5555-1234-1234-1234',
268
+ 'name': 'Alice',
269
+ 'purchase': {'costs': 1.75, 'items': ['Apple', 'Orange']}}
270
+
271
+ >>> # updating a value when k0 is not in d
272
+ >>> update_in({}, [1, 2, 3], str, default="bar")
273
+ {1: {2: {3: 'bar'}}}
274
+ >>> update_in({1: 'foo'}, [2, 3, 4], inc, 0)
275
+ {1: 'foo', 2: {3: {4: 1}}}
276
+ """
277
+ ks = iter(keys)
278
+ k = next(ks)
279
+
280
+ rv = inner = factory()
281
+ rv.update(d)
282
+
283
+ for key in ks:
284
+ if k in d:
285
+ d = d[k]
286
+ dtemp = factory()
287
+ dtemp.update(d)
288
+ else:
289
+ d = dtemp = factory()
290
+
291
+ inner[k] = inner = dtemp
292
+ k = key
293
+
294
+ if k in d:
295
+ inner[k] = func(d[k])
296
+ else:
297
+ inner[k] = func(default)
298
+ return rv
299
+
300
+
301
+ def get_in(keys, coll, default=None, no_default=False):
302
+ """ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
303
+
304
+ If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
305
+ ``no_default`` is specified, then it raises KeyError or IndexError.
306
+
307
+ ``get_in`` is a generalization of ``operator.getitem`` for nested data
308
+ structures such as dictionaries and lists.
309
+
310
+ >>> transaction = {'name': 'Alice',
311
+ ... 'purchase': {'items': ['Apple', 'Orange'],
312
+ ... 'costs': [0.50, 1.25]},
313
+ ... 'credit card': '5555-1234-1234-1234'}
314
+ >>> get_in(['purchase', 'items', 0], transaction)
315
+ 'Apple'
316
+ >>> get_in(['name'], transaction)
317
+ 'Alice'
318
+ >>> get_in(['purchase', 'total'], transaction)
319
+ >>> get_in(['purchase', 'items', 'apple'], transaction)
320
+ >>> get_in(['purchase', 'items', 10], transaction)
321
+ >>> get_in(['purchase', 'total'], transaction, 0)
322
+ 0
323
+ >>> get_in(['y'], {}, no_default=True)
324
+ Traceback (most recent call last):
325
+ ...
326
+ KeyError: 'y'
327
+
328
+ See Also:
329
+ itertoolz.get
330
+ operator.getitem
331
+ """
332
+ try:
333
+ return reduce(operator.getitem, keys, coll)
334
+ except (KeyError, IndexError, TypeError):
335
+ if no_default:
336
+ raise
337
+ return default
338
+
339
+
340
+ def getter(index):
341
+ if isinstance(index, list):
342
+ if len(index) == 1:
343
+ index = index[0]
344
+ return lambda x: (x[index],)
345
+ elif index:
346
+ return operator.itemgetter(*index)
347
+ else:
348
+ return lambda x: ()
349
+ else:
350
+ return operator.itemgetter(index)
351
+
352
+
353
+ def groupby(key, seq):
354
+ """ Group a collection by a key function
355
+
356
+ >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
357
+ >>> groupby(len, names) # doctest: +SKIP
358
+ {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
359
+
360
+ >>> iseven = lambda x: x % 2 == 0
361
+ >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
362
+ {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
363
+
364
+ Non-callable keys imply grouping on a member.
365
+
366
+ >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
367
+ ... {'name': 'Bob', 'gender': 'M'},
368
+ ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
369
+ {'F': [{'gender': 'F', 'name': 'Alice'}],
370
+ 'M': [{'gender': 'M', 'name': 'Bob'},
371
+ {'gender': 'M', 'name': 'Charlie'}]}
372
+
373
+ Not to be confused with ``itertools.groupby``
374
+
375
+ See Also:
376
+ countby
377
+ """
378
+ if not callable(key):
379
+ key = getter(key)
380
+ d = collections.defaultdict(lambda: [].append) # type: ignore[var-annotated]
381
+ for item in seq:
382
+ d[key(item)](item)
383
+ rv = {}
384
+ for k, v in d.items():
385
+ rv[k] = v.__self__ # type: ignore[var-annotated, attr-defined]
386
+ return rv
387
+
388
+
389
+ def first(seq):
390
+ """ The first element in a sequence
391
+
392
+ >>> first('ABC')
393
+ 'A'
394
+ """
395
+ return next(iter(seq))
venv/lib/python3.10/site-packages/torch/fx/experimental/unify_refinements.py ADDED
@@ -0,0 +1,120 @@
1
+ from torch.fx.experimental.graph_gradual_typechecker import Refine
2
+ from torch.fx.tensor_type import TensorType
3
+ from torch.fx.experimental.unification import Var, unify # type: ignore[attr-defined]
4
+
5
+
6
+ def infer_symbolic_types_single_pass(traced):
7
+ """
8
+ Calls our symbolic inferencer once.
9
+ """
10
+ r = Refine(traced)
11
+ r.refine()
12
+ mgu = unify_eq(r.constraints)
13
+ substitute_all_types(traced.graph, mgu)
14
+
15
+ def infer_symbolic_types(traced):
16
+ """
17
+ Calls our symbolic inferencer twice.
18
+ This is useful when one pass is not enough
19
+ to infer all the information such as the case
20
+ for braodcasting.
21
+ """
22
+ r = Refine(traced)
23
+ r.refine()
24
+ mgu = unify_eq(r.constraints)
25
+ substitute_all_types(traced.graph, mgu)
26
+
27
+ r = Refine(traced)
28
+ r.refine()
29
+ mgu = unify_eq(r.constraints)
30
+ substitute_all_types(traced.graph, mgu)
31
+
32
+ r.symbolic_relations()
33
+
34
+ def convert_eq(list_of_eq):
35
+ """
36
+ Convert equality constraints in the right format
37
+ to be used by unification library.
38
+ """
39
+ lhs = []
40
+ rhs = []
41
+ for eq in list_of_eq:
42
+ lhs.append(eq.lhs)
43
+ rhs.append(eq.rhs)
44
+ return tuple(lhs), tuple(rhs)
45
+
46
+
47
+ def unify_eq(list_of_eq):
48
+ """
49
+ Apply unification to a set of
50
+ equality constraints
51
+ """
52
+ lhs, rhs = convert_eq(list_of_eq)
53
+ return unify(lhs, rhs)
54
+
55
+
56
+ def substitute_solution_one_type(mapping, t):
57
+ """
58
+ Apply the most general unifier to a type
59
+ """
60
+ if isinstance(t, Var):
61
+ if t in mapping.keys():
62
+ return mapping[t]
63
+ else:
64
+ return t
65
+
66
+ elif isinstance(t, TensorType):
67
+ new_type = []
68
+ for typ in t.__args__:
69
+ if typ in mapping.keys():
70
+ new_type.append(mapping[typ])
71
+ else:
72
+ new_type.append(typ)
73
+ return TensorType(tuple(new_type))
74
+
75
+ elif isinstance(t, list):
76
+ new_type = []
77
+ for typ in t:
78
+ new_type.append(substitute_solution_one_type(mapping, typ))
79
+ return new_type
80
+
81
+ elif isinstance(t, tuple):
82
+ new_type = []
83
+ for typ in t:
84
+ new_type.append(substitute_solution_one_type(mapping, typ))
85
+ return tuple(new_type)
86
+
87
+ else:
88
+ return t
89
+
90
+
91
+ def substitute_all_types(graph, mapping):
92
+ """
93
+ Apply the most general unifier to all types in a graph
94
+ till reaching a fixed point. If the input and output graph
95
+ are the same, we converge.
96
+ """
97
+ flag = True
98
+ while flag:
99
+ flag = False
100
+ for k in mapping:
101
+ old_mapping_val = mapping[k]
102
+ if mapping[k] in mapping.keys():
103
+ new_key = mapping[k]
104
+ mapping[k] = mapping[new_key]
105
+ if old_mapping_val != mapping[k]:
106
+ flag = True
107
+
108
+ for n in graph.nodes:
109
+ n.type = substitute_solution_one_type(mapping, n.type)
110
+
111
+ def check_for_type_equality(g1, g2):
112
+ """
113
+ A check equality to be used in fixed points.
114
+ We do not use graph equality but instead type
115
+ equality.
116
+ """
117
+ for n, m in zip(g1.nodes, g2.nodes):
118
+ if n.type != m.type:
119
+ return False
120
+ return True
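A hedged sketch of the unification plumbing these helpers wrap, using hand-written constraints rather than a traced graph (variable names are illustrative):

from torch.fx.experimental.refinement_types import Equality
from torch.fx.experimental.unification import Var
from torch.fx.experimental.unify_refinements import unify_eq, substitute_solution_one_type
from torch.fx.tensor_type import TensorType

s1, s2 = Var('s1'), Var('s2')
# Constraints: s1 must equal 4, and s2 must equal s1.
mgu = unify_eq([Equality(s1, 4), Equality(s2, s1)])   # most general unifier, roughly {s1: 4, s2: 4}
resolved = substitute_solution_one_type(mgu, TensorType((s1, s2)))
# `resolved` is a TensorType with both dimensions resolved to 4.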
venv/lib/python3.10/site-packages/torch/fx/graph.py ADDED
@@ -0,0 +1,1653 @@
1
+ from collections import defaultdict
2
+ from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
3
+ import torch.utils._pytree as pytree
4
+ from . import _pytree as fx_pytree
5
+ from ._compatibility import compatibility
6
+
7
+ import contextlib
8
+ from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
9
+ from dataclasses import dataclass
10
+ from contextlib import contextmanager
11
+ import copy
12
+ import enum
13
+ import torch
14
+ import keyword
15
+ import re
16
+ import builtins
17
+ import math
18
+ import warnings
19
+ import inspect
20
+
21
+ __all__ = ["PythonCode", "CodeGen", "Graph"]
22
+
23
+ if TYPE_CHECKING:
24
+ from .graph_module import GraphModule # noqa: F401
25
+ from ._symbolic_trace import Tracer # noqa: F401
26
+
27
+
28
+ # Mapping of builtins to their `typing` equivalent.
29
+ _origin_type_map = {
30
+ list: List,
31
+ dict: Dict,
32
+ set: Set,
33
+ frozenset: FrozenSet,
34
+ tuple: Tuple,
35
+ }
36
+
37
+
38
+ # Signature for functions that transform the body (`list[str]`) of the
39
+ # generated code
40
+ TransformCodeFunc = Callable[[List[str]], List[str]]
41
+
42
+
43
+ class _CustomBuiltin(NamedTuple):
44
+ """Additional objs that we add to every graph's globals.
45
+
46
+ The repr() for some standard library objects is not valid Python code without
47
+ an import. For common objects of this sort, we bundle them in the globals of
48
+ every FX graph.
49
+ """
50
+ # How to import this object from the standard library.
51
+ import_str: str
52
+ # The actual object, produced from that import string.
53
+ obj: Any
54
+
55
+ _custom_builtins: Dict[str, _CustomBuiltin] = {}
56
+
57
+
58
+ def _register_custom_builtin(name: str, import_str: str, obj: Any):
59
+ _custom_builtins[name] = _CustomBuiltin(import_str, obj)
60
+
61
+
62
+ _register_custom_builtin('inf', 'from math import inf', math.inf)
63
+ _register_custom_builtin('nan', 'from math import nan', math.nan)
64
+ _register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
65
+ _register_custom_builtin('torch', 'import torch', torch)
66
+ _register_custom_builtin('device', 'from torch import device', torch.device)
67
+ _register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
68
+ _register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)
69
+
70
+
71
+ def _is_magic(x: str) -> bool:
72
+ return x.startswith('__') and x.endswith('__')
73
+
74
+
75
+ def _snake_case(s: str) -> str:
76
+ """
77
+ Transforms the given string ``s`` to a Python-style variable name
78
+
79
+ Examples:
80
+ ``mod.snake_case`` -> ``mod.snake_case``
81
+ ``mod.pascalCase``-> ``mod.pascal_case``
82
+ ``mod.ALL_CAPS`` -> ``mod.all_caps``
83
+ """
84
+ chars = []
85
+ prev_lower = False
86
+ for c in s:
87
+ if prev_lower and c.isupper():
88
+ chars.append('_')
89
+ chars.append(c.lower())
90
+ prev_lower = c.islower()
91
+ return ''.join(chars)
92
+
93
+
94
+ def _is_from_torch(obj: Any) -> bool:
95
+ module_name = getattr(obj, '__module__', None)
96
+ if module_name is not None:
97
+ base_module = module_name.partition('.')[0]
98
+ return (
99
+ base_module == 'torch' and
100
+ not module_name.startswith("torch._dynamo.") and
101
+ not module_name.startswith("torch._inductor.")
102
+ )
103
+
104
+ name = getattr(obj, '__name__', None)
105
+ # exclude torch because torch.torch.torch.torch works. idk mang
106
+ if name is not None and name != 'torch':
107
+ for guess in [torch, torch.nn.functional]:
108
+ if getattr(guess, name, None) is obj:
109
+ return True
110
+
111
+ return False
112
+
113
+
114
+ class _Namespace:
115
+ """A context for associating names uniquely with objects.
116
+
117
+ The following invariants are enforced:
118
+ - Each object gets a single name.
119
+ - Each name is unique within a given namespace.
120
+ - Names generated do not shadow builtins, unless the object is indeed that builtin.
121
+ """
122
+ def __init__(self):
123
+ self._obj_to_name: Dict[Any, str] = {}
124
+ self._unassociated_names = set()
125
+ self._used_names: Set[str] = set()
126
+ self._base_count: Dict[str, int] = defaultdict(int)
127
+
128
+ self._illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
129
+ self._name_suffix_regex = re.compile(r"(.*)_(\d+)$")
130
+
131
+ def create_name(self, candidate: str, obj: Optional[Any]) -> str:
132
+ """Create a unique name.
133
+
134
+ Arguments:
135
+ candidate: used as the basis for the unique name, relevant to the user.
136
+ obj: If not None, an object that will be associated with the unique name.
137
+ """
138
+ if obj is not None and obj in self._obj_to_name:
139
+ return self._obj_to_name[obj]
140
+
141
+ # delete all characters that are illegal in a Python identifier
142
+ candidate = self._illegal_char_regex.sub('_', candidate)
143
+
144
+ if not candidate:
145
+ candidate = '_unnamed'
146
+
147
+ if candidate[0].isdigit():
148
+ candidate = f'_{candidate}'
149
+
150
+ match = self._name_suffix_regex.match(candidate)
151
+ if match is None:
152
+ base = candidate
153
+ num = None
154
+ else:
155
+ base, num_str = match.group(1, 2)
156
+ num = int(num_str)
157
+
158
+ candidate = base if num is None else f'{base}_{num}'
159
+ if not num:
160
+ num = self._base_count[base]
161
+
162
+ while candidate in self._used_names or self._is_illegal_name(candidate, obj):
163
+ num += 1
164
+ candidate = f'{base}_{num}'
165
+
166
+ self._used_names.add(candidate)
167
+ self._base_count[base] = num
168
+ if obj is None:
169
+ self._unassociated_names.add(candidate)
170
+ else:
171
+ self._obj_to_name[obj] = candidate
172
+ return candidate
173
+
174
+ def associate_name_with_obj(self, name: str, obj: Any):
175
+ """Associate a unique name with an object.
176
+
177
+ Neither `name` nor `obj` should be associated already.
178
+ """
179
+ assert obj not in self._obj_to_name
180
+ assert name in self._unassociated_names
181
+ self._obj_to_name[obj] = name
182
+ self._unassociated_names.remove(name)
183
+
184
+ def _is_illegal_name(self, name: str, obj: Any) -> bool:
185
+ # 1. keywords are never allowed as names.
186
+ if name in keyword.kwlist:
187
+ return True
188
+
189
+ # 2. Can't shadow a builtin name, unless you *are* that builtin.
190
+ if name in builtins.__dict__:
191
+ return obj is not builtins.__dict__[name]
192
+
193
+ # 3. Can't shadow our custom builtins either
194
+ if name in _custom_builtins:
195
+ return obj is not _custom_builtins[name].obj
196
+
197
+ return False
198
+
199
+ def _rename_object(self, obj: Any, name: str):
200
+ assert obj in self._obj_to_name
201
+ self._obj_to_name[obj] = name
202
+ self._used_names.add(name)
203
+
204
+ dtype_abbrs = {
205
+ torch.bfloat16: 'bf16',
206
+ torch.float64: 'f64',
207
+ torch.float32: 'f32',
208
+ torch.float16: 'f16',
209
+ torch.float8_e4m3fn: 'f8e4m3fn',
210
+ torch.float8_e5m2: 'f8e5m2',
211
+ torch.float8_e4m3fnuz: 'f8e4m3fnuz',
212
+ torch.float8_e5m2fnuz: 'f8e5m2fnuz',
213
+ torch.complex32: 'c32',
214
+ torch.complex64: 'c64',
215
+ torch.complex128: 'c128',
216
+ torch.int8: 'i8',
217
+ torch.int16: 'i16',
218
+ torch.int32: 'i32',
219
+ torch.int64: 'i64',
220
+ torch.bool: 'b8',
221
+ torch.uint8: 'u8',
222
+ torch.uint32: 'u32',
223
+ torch.uint64: 'u64',
224
+ }
225
+
226
+ @compatibility(is_backward_compatible=True)
227
+ @dataclass
228
+ class PythonCode:
229
+ """
230
+ Represents all the information necessary to exec or save a graph as Python code.
231
+ """
232
+ # Python source code for the forward function definition.
233
+ src: str
234
+ # Values in global scope during execution of `src`.
235
+ globals: Dict[str, Any]
236
+ # Optional mapping from the forward function's line number to
237
+ # node index.
238
+ _lineno_map: Optional[Dict[int, Optional[int]]]
239
+
240
+
241
+ def _format_target(base: str, target: str) -> str:
242
+ elems = target.split('.')
243
+ r = base
244
+ for e in elems:
245
+ if not e.isidentifier():
246
+ r = f'getattr({r}, "{e}")'
247
+ else:
248
+ r = f'{r}.{e}'
249
+ return r
250
+
251
+ class _InsertPoint:
252
+ def __init__(self, graph, new_insert):
253
+ self.graph = graph
254
+ self.orig_insert, graph._insert = graph._insert, new_insert
255
+
256
+ def __enter__(self):
257
+ pass
258
+
259
+ def __exit__(self, type, value, tb):
260
+ self.graph._insert = self.orig_insert
261
+
262
+ class _node_list:
263
+ def __init__(self, graph: 'Graph', direction: str = '_next'):
264
+ assert direction in ['_next', '_prev']
265
+ self.graph = graph
266
+ self.direction = direction
267
+
268
+ def __len__(self):
269
+ return self.graph._len
270
+
271
+ def __iter__(self):
272
+ root = self.graph._root
273
+ if self.direction == "_next":
274
+ cur = root._next
275
+ while cur is not root:
276
+ if not cur._erased:
277
+ yield cur
278
+ cur = cur._next
279
+ else:
280
+ assert self.direction == "_prev"
281
+ cur = root._prev
282
+ while cur is not root:
283
+ if not cur._erased:
284
+ yield cur
285
+ cur = cur._prev
286
+
287
+ def __reversed__(self):
288
+ return _node_list(self.graph, '_next' if self.direction == '_prev' else '_prev')
289
+
290
+ class _PyTreeInfo(NamedTuple):
291
+ """
292
+ Contains extra info stored when we're using Pytrees
293
+ """
294
+ orig_args: List[str]
295
+ in_spec: pytree.TreeSpec
296
+ out_spec: Optional[pytree.TreeSpec]
297
+
298
+ @dataclass(frozen=True)
299
+ class _ParsedStackTrace:
300
+ """
301
+ Represents the top-most frame of a parsed stack trace
302
+ """
303
+ file: str
304
+ lineno: str
305
+ name: str
306
+ code: str
307
+
308
+ # get File:lineno code from stack_trace
309
+ def _parse_stack_trace(stack_trace: str):
310
+ if stack_trace is None:
311
+ return None
312
+ pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")
313
+ lines = stack_trace.strip().split('\n')
314
+ # stacktrace should have innermost frame last, so we
315
+ # iterate backwards to find the first line that starts
316
+ # with 'File '
317
+ summary_str = ""
318
+ for idx in range(len(lines) - 2, -1, -1):
319
+ line = lines[idx].strip()
320
+ matches = pattern.match(line)
321
+ if matches:
322
+ file = matches.group(1)
323
+ lineno = matches.group(2)
324
+ name = matches.group(3)
325
+ # next line should be the code
326
+ code = lines[idx + 1].strip()
327
+ return _ParsedStackTrace(file, lineno, name, code)
328
+ return None
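For illustration, a small sketch of the parsing above on a made-up two-line trace fragment:

from torch.fx.graph import _parse_stack_trace

trace = 'File "model.py", line 10, in forward\n    return self.linear(x)'
frame = _parse_stack_trace(trace)
# frame.file == 'model.py', frame.lineno == '10', frame.name == 'forward',
# frame.code == 'return self.linear(x)'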
329
+
330
+ @compatibility(is_backward_compatible=False)
331
+ class CodeGen:
332
+ def __init__(self):
333
+ self._body_transformer: Optional[TransformCodeFunc] = None
334
+ self._func_name: str = "forward"
335
+
336
+ def gen_fn_def(self, free_vars: List[str], maybe_return_annotation: str) -> str:
337
+ """
338
+ Given the free variables and a return annotation, generates the beginning of the FX function.
339
+ By default, `gen_fn_def(['a', 'b'], '') == 'def {self._func_name}(a, b):'`
340
+ """
341
+ # If the original function didn't have self as its first argument, we
342
+ # would have added it.
343
+ if len(free_vars) == 0 or free_vars[0] != 'self':
344
+ free_vars.insert(0, 'self')
345
+ return f"def {self._func_name}({', '.join(free_vars)}){maybe_return_annotation}:"
346
+
347
+ def generate_output(self, output_args: Argument) -> str:
348
+ """
349
+ Given the output arguments, generates the return statement of the FX function.
350
+ Note: The returned statement should not be indented.
351
+ """
352
+ return f'return {repr(output_args)}'
353
+
354
+ def process_inputs(self, *args: Any) -> Any:
355
+ """
356
+ Transforms the inputs so that the graph can take them as arguments, as
357
+ non-default codegen may result in the inputs to the function being
358
+ different from the inputs to the graph.
359
+
360
+ If the graph was directly runnable, this invariant should hold true
361
+ `f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)`
362
+ """
363
+ return args
364
+
365
+ def process_outputs(self, outputs: Any) -> Any:
366
+ """
367
+ Transforms the outputs of the graph to be identical to the codegen.
368
+
369
+ See ``process_inputs`` for more details.
370
+ """
371
+ return outputs
372
+
373
+ def additional_globals(self) -> List[Tuple[str, Any]]:
374
+ """
375
+ If your codegen uses extra global values, add tuples of (identifier, reference to the value) here.
376
+ For example, return [('List', typing.List)] if you need ``List`` in the global context.
377
+ """
378
+ return []
379
+
380
+ def _gen_python_code(
381
+ self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False,
382
+ ) -> PythonCode:
383
+ free_vars: List[str] = []
384
+ body: List[str] = []
385
+ globals_: Dict[str, Any] = {}
386
+ wrapped_fns: Dict[str, None] = {}
387
+
388
+ # Wrap string in list to pass by reference
389
+ maybe_return_annotation : List[str] = ['']
390
+
391
+ def add_global(name_hint: str, obj: Any):
392
+ """Add an obj to be tracked as a global.
393
+
394
+ We call this for names that reference objects external to the
395
+ Graph, like functions or types.
396
+
397
+ Returns: the global name that should be used to reference 'obj' in generated source.
398
+ """
399
+ if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device
400
+ # HACK: workaround for how torch custom ops are registered. We
401
+ # can't import them like normal modules so they must retain their
402
+ # fully qualified name.
403
+ return _get_qualified_name(obj)
404
+
405
+ # normalize the name hint to get a proper identifier
406
+ global_name = namespace.create_name(name_hint, obj)
407
+
408
+ if global_name in globals_:
409
+ assert globals_[global_name] is obj
410
+ return global_name
411
+ globals_[global_name] = obj
412
+ return global_name
413
+
414
+ # Pre-fill the globals table with registered builtins.
415
+ for name, (_, obj) in _custom_builtins.items():
416
+ add_global(name, obj)
417
+
418
+ def type_repr(o : Any):
419
+ if o == ():
420
+ # Empty tuple is used for empty tuple type annotation Tuple[()]
421
+ return '()'
422
+
423
+ typename = _type_repr(o)
424
+
425
+ if hasattr(o, '__origin__'):
426
+ # This is a generic type, e.g. typing.List[torch.Tensor]
427
+ origin_type = _origin_type_map.get(o.__origin__, o.__origin__)
428
+ origin_typename = add_global(_type_repr(origin_type), origin_type)
429
+
430
+ if hasattr(o, '__args__'):
431
+ # Assign global names for each of the inner type variables.
432
+ args = [type_repr(arg) for arg in o.__args__]
433
+
434
+ if len(args) == 0:
435
+ # Bare type, such as `typing.Tuple` with no subscript
436
+ # This code-path used in Python < 3.9
437
+ return origin_typename
438
+
439
+ return f'{origin_typename}[{",".join(args)}]'
440
+ else:
441
+ # Bare type, such as `typing.Tuple` with no subscript
442
+ # This code-path used in Python 3.9+
443
+ return origin_typename
444
+
445
+ # Common case: this is a regular module name like 'foo.bar.baz'
446
+ return add_global(typename, o)
447
+
448
+ def _get_repr(arg: Any) -> str:
449
+ # Handle NamedTuples (if it has `_fields`) via add_global.
450
+ if isinstance(arg, tuple) and hasattr(arg, '_fields'):
451
+ qualified_name = _get_qualified_name(type(arg))
452
+ global_name = add_global(qualified_name, type(arg))
453
+ return f"{global_name}{repr(tuple(arg))}"
454
+ elif isinstance(arg, torch._ops.OpOverload):
455
+ qualified_name = _get_qualified_name(arg)
456
+ global_name = add_global(qualified_name, arg)
457
+ return f"{global_name}"
458
+ elif isinstance(arg, enum.Enum):
459
+ cls = arg.__class__
460
+ clsname = add_global(cls.__name__, cls)
461
+ return f"{clsname}.{arg.name}"
462
+ return repr(arg)
463
+
464
+ def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str:
465
+ args_s = ', '.join(_get_repr(a) for a in args)
466
+ kwargs_s = ', '.join(f'{k} = {_get_repr(v)}' for k, v in kwargs.items())
467
+ if args_s and kwargs_s:
468
+ return f'{args_s}, {kwargs_s}'
469
+ return args_s or kwargs_s
470
+
471
+ # Run through reverse nodes and record the first instance of a use
472
+ # of a given node. This represents the *last* use of the node in the
473
+ # execution order of the program, which we will use to free unused
474
+ # values
475
+ node_to_last_use : Dict[Node, Node] = {}
476
+ user_to_last_uses : Dict[Node, List[Node]] = {}
477
+
478
+ def register_last_uses(n : Node, user : Node):
479
+ if n not in node_to_last_use:
480
+ node_to_last_use[n] = user
481
+ user_to_last_uses.setdefault(user, []).append(n)
482
+
483
+ for node in reversed(nodes):
484
+ map_arg(node.args, lambda n: register_last_uses(n, node))
485
+ map_arg(node.kwargs, lambda n: register_last_uses(n, node))
486
+
487
+ def delete_unused_values(user : Node):
488
+ """
489
+ Delete values after their last use. This ensures that values that are
490
+ not used in the remainder of the code are freed and the memory usage
491
+ of the code is optimal.
492
+ """
493
+ if user.op == 'placeholder':
494
+ return
495
+ if user.op == 'output':
496
+ body.append('\n')
497
+ return
498
+ nodes_to_delete = user_to_last_uses.get(user, [])
499
+ if len(nodes_to_delete):
500
+ to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])
501
+ body.append(f'; {to_delete_str}\n')
502
+ else:
503
+ body.append('\n')
504
+
505
+ prev_stacktrace = None
506
+
507
+ def append_stacktrace_summary(node : Node):
508
+ """
509
+ Append a summary of the stacktrace to the generated code. This is
510
+ useful for debugging.
511
+ """
512
+ nonlocal prev_stacktrace
513
+
514
+ if node.op not in {'placeholder', 'output'}:
515
+ if node.stack_trace:
516
+ if node.stack_trace != prev_stacktrace:
517
+ prev_stacktrace = node.stack_trace
518
+ summary_str = ""
519
+
520
+ parsed_stack_trace = _parse_stack_trace(node.stack_trace)
521
+
522
+ if parsed_stack_trace is not None:
523
+ lineno = parsed_stack_trace.lineno
524
+ code = parsed_stack_trace.code
525
+ name = parsed_stack_trace.name
526
+ summary_str = f'File: {parsed_stack_trace.file}:{lineno} in {name}, code: {code}'
527
+
528
+ body.append(f'\n# {summary_str}\n')
529
+ elif prev_stacktrace != "":
530
+ prev_stacktrace = ""
531
+ body.append('\n# No stacktrace found for following nodes\n')
532
+
533
+ def stringify_shape(shape : torch.Size) -> str:
534
+ return f"[{', '.join(str(x) for x in shape)}]"
535
+
536
+ def emit_node(node : Node):
537
+ maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}'
538
+
539
+ if verbose:
540
+ # override annotation with more detailed information
541
+ from torch._subclasses.fake_tensor import FakeTensor
542
+ from torch.fx.experimental.proxy_tensor import py_sym_types
543
+ from torch.fx.passes.shape_prop import TensorMetadata
544
+
545
+ meta_val = node.meta.get('val', node.meta.get('tensor_meta', None))
546
+
547
+ # use string as annotation, to make it valid python code
548
+ if isinstance(meta_val, FakeTensor):
549
+ maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
550
+ elif isinstance(meta_val, py_sym_types):
551
+ maybe_type_annotation = f': "Sym({meta_val})"'
552
+ elif isinstance(meta_val, TensorMetadata):
553
+ maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
554
+
555
+ if node.op == 'placeholder':
556
+ assert isinstance(node.target, str)
557
+ maybe_default_arg = '' if not node.args else f' = {_get_repr(node.args[0])}'
558
+ free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}')
559
+ raw_name = node.target.replace('*', '')
560
+ if raw_name != repr(node):
561
+ body.append(f'{repr(node)} = {raw_name}\n')
562
+ return
563
+ elif node.op == 'call_method':
564
+ assert isinstance(node.target, str)
565
+ body.append(
566
+ f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.target)}'
567
+ f'({_format_args(node.args[1:], node.kwargs)})')
568
+ return
569
+ elif node.op == 'call_function':
570
+ assert callable(node.target)
571
+ # pretty print operators
572
+ if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in magic_methods:
573
+ assert isinstance(node.args, tuple)
574
+ body.append(f'{repr(node)}{maybe_type_annotation} = '
575
+ f'{magic_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}')
576
+ return
577
+
578
+ # pretty print inplace operators; required for jit.script to work properly
579
+ # not currently supported in normal FX graphs, but generated by torchdynamo
580
+ if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in inplace_methods:
581
+ body.append(f'{inplace_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}; '
582
+ f'{repr(node)}{maybe_type_annotation} = {_get_repr(node.args[0])}')
583
+ return
584
+
585
+ qualified_name = _get_qualified_name(node.target)
586
+ global_name = add_global(qualified_name, node.target)
587
+ # special case for getattr: node.args could be 2-argument or 3-argument
588
+ # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value
589
+ if global_name == 'getattr' and \
590
+ isinstance(node.args, tuple) and \
591
+ isinstance(node.args[1], str) and \
592
+ node.args[1].isidentifier() and \
593
+ len(node.args) == 2:
594
+ body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.args[1])}')
595
+ return
596
+ body.append(f'{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})')
597
+ if node.meta.get('is_wrapped', False):
598
+ wrapped_fns.setdefault(global_name)
599
+ return
600
+ elif node.op == 'call_module':
601
+ assert isinstance(node.target, str)
602
+ body.append(f'{repr(node)}{maybe_type_annotation} = '
603
+ f'{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})')
604
+ return
605
+ elif node.op == 'get_attr':
606
+ assert isinstance(node.target, str)
607
+ body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}')
608
+ return
609
+ elif node.op == 'output':
610
+ if node.type is not None:
611
+ maybe_return_annotation[0] = f" -> {type_repr(node.type)}"
612
+ body.append(self.generate_output(node.args[0]))
613
+ return
614
+ raise NotImplementedError(f'node: {node.op} {node.target}')
615
+
616
+ for i, node in enumerate(nodes):
617
+ # NOTE: emit_node does not emit a string with newline. It depends
618
+ # on delete_unused_values to append one
619
+ if verbose:
620
+ append_stacktrace_summary(node)
621
+ # emit a counter comment to keep track of
622
+ # node index, which will be deleted later
623
+ # after going through _body_transformer
624
+ body.append(f"# COUNTER: {i}\n")
625
+ emit_node(node)
626
+ delete_unused_values(node)
627
+
628
+ if len(body) == 0:
629
+ # If the Graph has no non-placeholder nodes, no lines for the body
630
+ # have been emitted. To continue to have valid Python code, emit a
631
+ # single pass statement
632
+ body.append('pass\n')
633
+
634
+
635
+
636
+ if len(wrapped_fns) > 0:
637
+ wrap_name = add_global('wrap', torch.fx.wrap)
638
+ wrap_stmts = '\n'.join([f'{wrap_name}("{name}")' for name in wrapped_fns])
639
+ else:
640
+ wrap_stmts = ''
641
+
642
+ if self._body_transformer:
643
+ body = self._body_transformer(body)
644
+
645
+ for name, value in self.additional_globals():
646
+ add_global(name, value)
647
+
648
+ prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0])
649
+
650
+ # remove counter and generate lineno to node index mapping
651
+ lineno_map: Dict[int, Optional[int]] = {}
652
+ prologue_len = prologue.count('\n') + 1
653
+ new_lines: List[str] = []
654
+ cur_idx = None
655
+ for line in ''.join(body).split('\n'):
656
+ counter = re.search(r"# COUNTER: (\d+)", line)
657
+ if counter and counter.group(1) is not None:
658
+ cur_idx = int(counter.group(1))
659
+ else:
660
+ lineno_map[len(new_lines) + prologue_len] = cur_idx
661
+ new_lines.append(line)
662
+
663
+ code = "\n".join(new_lines).lstrip('\n')
664
+ code = '\n'.join(' ' + line for line in code.split('\n'))
665
+
666
+ fn_code = f"""
667
+ {wrap_stmts}
668
+
669
+ {prologue}
670
+ {code}"""
671
+ return PythonCode(fn_code, globals_, _lineno_map=lineno_map)
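A hedged sketch of customizing generation through the hooks above: a CodeGen subclass that prepends a banner comment to the generated prologue, installed on a traced module by replacing the graph's `_codegen` attribute (the attribute initialized in `Graph.__init__` below; treat it as internal):

import torch
from torch.fx import symbolic_trace
from torch.fx.graph import CodeGen

class BannerCodeGen(CodeGen):
    def gen_fn_def(self, free_vars, maybe_return_annotation):
        prologue = super().gen_fn_def(free_vars, maybe_return_annotation)
        return "# produced by BannerCodeGen\n" + prologue   # comment line above `def forward(...)`

gm = symbolic_trace(torch.nn.Linear(4, 2))
gm.graph._codegen = BannerCodeGen()
gm.recompile()                 # regenerate gm.code with the custom codegen
print(gm.code)                 # the banner comment now precedes the forward definition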
672
+
673
+
674
+ # Ideally, we'd like to refactor all of the pytree logic into this codegen
675
+ # class. Unfortunately, there are 3 areas we currently need extra logic in FX.
676
+ # 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`.
677
+ # 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec.
678
+ # Since we can't access .graph within the FX forward, we need to copy the attribute to the module.
679
+ # 3. We currently can't register the pytree imports with `add_global` - not sure why.
680
+ class _PyTreeCodeGen(CodeGen):
681
+ def __init__(self, pytree_info: _PyTreeInfo):
682
+ super().__init__()
683
+ self.pytree_info: _PyTreeInfo = pytree_info
684
+
685
+ def process_inputs(self, *inputs: Any) -> Any:
686
+ flat_args = pytree.arg_tree_leaves(*inputs)
687
+ return flat_args
688
+
689
+ def process_outputs(self, out: Any) -> Any:
690
+ if self.pytree_info is None or self.pytree_info.out_spec is None:
691
+ return out
692
+ if not isinstance(out, (list, tuple)):
693
+ out = [out]
694
+ assert self.pytree_info.out_spec is not None
695
+ return pytree.tree_unflatten(out, self.pytree_info.out_spec)
696
+
697
+ def gen_fn_def(self, free_vars, maybe_return_annotation):
698
+ # Given a user function/model:
699
+ # myargs = [myargs0, myargs1]
700
+ # mykwargs = {'mykwargs0': ..., 'mykwargs1': ...}
701
+ # def forward(self, mypos, *myargs, mykey=None, **mykwargs):
702
+ #
703
+ # The generated code flattens all keywords into positional arguments for `forward()`
704
+ # e.g forward(self, mypos, myargs0, myargs1, mykey, mykwargs0, mykwargs1):
705
+ #
706
+ # Within `forward`, `tree_flatten_spec``still parses args and kwargs separately
707
+ # e.g. tree_flatten_spec(([mypos, myargs0, myargs1],
708
+ # {'mykey':mykey, 'mykwargs0':mykwargs0, 'mykwargs1':mykwargs1}),
709
+ # self._in_spec)
710
+ #
711
+ # If the user function/model does not have keywords, the dict is suppressed from tree_flatten_spec
712
+ # e.g. tree_flatten_spec([mypos, myargs0, myargs1]), self._in_spec)
713
+ if self.pytree_info is None:
714
+ return super().gen_fn_def(free_vars, maybe_return_annotation)
715
+
716
+ fn_args = self.pytree_info.orig_args
717
+ has_orig_self = (fn_args[0] == 'self') if len(fn_args) > 0 else False
718
+ if has_orig_self:
719
+ free_vars.insert(0, 'self')
720
+ fn_definition = super().gen_fn_def(fn_args[:], maybe_return_annotation)
721
+
722
+ if len(free_vars) > 0: # pytree has placeholders in it
723
+ # when kwargs is present, in_spec is tuple(args, kwargs)
724
+ has_args_kwargs_tuple = self.pytree_info.in_spec.type == tuple and \
725
+ self.pytree_info.in_spec.num_children == 2 and \
726
+ self.pytree_info.in_spec.children_specs[0].type == tuple and \
727
+ self.pytree_info.in_spec.children_specs[1].type == dict
728
+ fn_kwargs = '{}'
729
+ fn_signature = f"[{', '.join(fn_args)}], self._in_spec"
730
+ if has_args_kwargs_tuple:
731
+ count_args = self.pytree_info.in_spec.children_specs[0].num_children
732
+ fn_args = self.pytree_info.orig_args[:count_args]
733
+ fn_kwargs = '{' + ', '.join(f"'{k}':{v}" for k, v in zip(
734
+ self.pytree_info.in_spec.children_specs[1].context,
735
+ self.pytree_info.orig_args[count_args:])) + '}'
736
+ fn_signature = f"([{', '.join(fn_args)}], {fn_kwargs}), self._in_spec"
737
+
738
+ # in Python, `var1: annotation1, var2: annotation2 = function_call()` is invalid.
739
+ # we need to split it to two lines:
740
+ # one for annotation: `var1: annotation1; var2: annotation2;` (note the semicolon)
741
+ # one for code: `var1, var2, = function_call()`
742
+ without_annotation = [x.split(":")[0] for x in free_vars]
743
+ has_annotation = [x + "; " for x in free_vars if ":" in x]
744
+ if len(has_annotation) > 0:
745
+ fn_definition += "\n " + "".join(has_annotation) + "\n"
746
+ fn_definition += f"""
747
+ {', '.join(without_annotation)}, = fx_pytree.tree_flatten_spec({fn_signature})"""
748
+ return fn_definition
749
+
750
+ def generate_output(self, output_args):
751
+ if self.pytree_info and self.pytree_info.out_spec:
752
+ return f'return pytree.tree_unflatten({repr(output_args)}, self._out_spec)'
753
+ else:
754
+ return super().generate_output(output_args)
755
+
756
+ @compatibility(is_backward_compatible=True)
757
+ class Graph:
758
+ """
759
+ ``Graph`` is the main data structure used in the FX Intermediate Representation.
760
+ It consists of a series of ``Node`` s, each representing callsites (or other
761
+ syntactic constructs). The list of ``Node`` s, taken together, constitute a
762
+ valid Python function.
763
+
764
+ For example, the following code
765
+
766
+ .. code-block:: python
767
+
768
+ import torch
769
+ import torch.fx
770
+
771
+ class MyModule(torch.nn.Module):
772
+ def __init__(self):
773
+ super().__init__()
774
+ self.param = torch.nn.Parameter(torch.rand(3, 4))
775
+ self.linear = torch.nn.Linear(4, 5)
776
+
777
+ def forward(self, x):
778
+ return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)
779
+
780
+ m = MyModule()
781
+ gm = torch.fx.symbolic_trace(m)
782
+
783
+ Will produce the following Graph::
784
+
785
+ print(gm.graph)
786
+
787
+ .. code-block:: text
788
+
789
+ graph(x):
790
+ %linear_weight : [num_users=1] = self.linear.weight
791
+ %add_1 : [num_users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
792
+ %linear_1 : [num_users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
793
+ %relu_1 : [num_users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
794
+ %sum_1 : [num_users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
795
+ %topk_1 : [num_users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
796
+ return topk_1
797
+
798
+ For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
799
+ """
800
+
801
+ @compatibility(is_backward_compatible=True)
802
+ def __init__(self, owning_module: Optional["GraphModule"] = None, tracer_cls: Optional[Type["Tracer"]] = None,
803
+ tracer_extras: Optional[Dict[str, Any]] = None):
804
+ """
805
+ Construct an empty Graph.
806
+ """
807
+ self._root : Node = Node(self, '', 'root', '', (), {})
808
+ self._used_names : Dict[str, int] = {} # base name -> number
809
+ self._insert = self._root.prepend
810
+ self._len = 0
811
+ self._graph_namespace = _Namespace()
812
+ self._owning_module = owning_module
813
+ self._tracer_cls = tracer_cls
814
+ self._tracer_extras = tracer_extras
815
+ self._codegen = CodeGen()
816
+ self._co_fields : Dict[str, Any] = {}
817
+
818
+ @property
819
+ def owning_module(self):
820
+ return self._owning_module
821
+
822
+ @owning_module.setter
823
+ def owning_module(self, mod: Optional["GraphModule"]):
824
+ self._owning_module = mod
825
+
826
+ @property
827
+ def nodes(self) -> _node_list:
828
+ """
829
+ Get the list of Nodes that constitute this Graph.
830
+
831
+ Note that this ``Node`` list representation is a doubly-linked list. Mutations
832
+ during iteration (e.g. delete a Node, add a Node) are safe.
833
+
834
+ Returns:
835
+
836
+ A doubly-linked list of Nodes. Note that ``reversed`` can be called on
837
+ this list to switch iteration order.
838
+ """
839
+ return _node_list(self)
840
+
841
+ @compatibility(is_backward_compatible=True)
842
+ def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':
843
+ """
844
+ Copy all nodes from a given graph into ``self``.
845
+
846
+ Args:
847
+
848
+ g (Graph): The source graph from which to copy Nodes.
849
+
850
+ val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping
851
+ from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed
852
+ in with values in it already to override copying of certain values.
853
+
854
+ Returns:
855
+
856
+ The value in ``self`` that is now equivalent to the output value in ``g``,
857
+ if ``g`` had an ``output`` node. ``None`` otherwise.
858
+ """
859
+ for node in g.nodes:
860
+ if node in val_map:
861
+ continue
862
+ if node.op == 'output':
863
+ rv = map_arg(node.args[0], lambda n: val_map[n])
864
+ return rv if not return_output_node else (rv, node)
865
+ val_map[node] = self.node_copy(node, lambda n : val_map[n])
866
+ return None
867
+
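# Illustrative usage sketch (not part of the original file): copying every node
# from one Graph into another with ``graph_copy`` and wiring up the copied output.
import torch
import torch.fx

src = torch.fx.Graph()
x = src.placeholder('x')
y = src.call_function(torch.relu, (x,))
src.output(y)

dst = torch.fx.Graph()
val_map = {}
out_val = dst.graph_copy(src, val_map)   # val_map now maps src nodes -> dst nodes
dst.output(out_val)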
868
+ def __deepcopy__(self, memo=None) -> 'Graph':
869
+ """
870
+ Explicitly implement __deepcopy__ to prevent excessive recursion depth
871
+ from the default implementation. This uses graph_copy to copy the nodes
872
+ in an iterative way, rather than recursive. It also populates the
873
+ memoization table to prevent unnecessary copies (e.g. references to
874
+ # nodes or other parts of the Graph from a custom GraphModule implementation).
875
+ """
876
+ memo = memo if memo else {}
877
+ g = Graph(tracer_cls=self._tracer_cls)
878
+ output_vals = g.graph_copy(self, val_map=memo, return_output_node=True)
879
+ g._codegen = copy.deepcopy(self._codegen)
880
+ assert isinstance(output_vals, tuple)
881
+ output_val, old_output_node = output_vals
882
+ new_output_node = g.output(output_val, type_expr=getattr(old_output_node, 'type', None))
883
+ new_output_node.meta = copy.copy(old_output_node.meta)
884
+ return g
885
+
886
+ @compatibility(is_backward_compatible=True)
887
+ def create_node(self, op: str, target: 'Target',
888
+ args: Optional[Tuple['Argument', ...]] = None,
889
+ kwargs: Optional[Dict[str, 'Argument']] = None,
890
+ name: Optional[str] = None,
891
+ type_expr: Optional[Any] = None) -> Node:
892
+ """
893
+ Create a ``Node`` and add it to the ``Graph`` at the current insert-point.
894
+ Note that the current insert-point can be set via :meth:`Graph.inserting_before`
895
+ and :meth:`Graph.inserting_after`.
896
+
897
+ Args:
898
+ op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr',
899
+ 'call_module', 'placeholder', or 'output'. The semantics of these opcodes are
900
+ described in the ``Graph`` docstring.
901
+
902
+ args (Optional[Tuple[Argument, ...]]): is a tuple of arguments to this node.
903
+
904
+ kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node
905
+
906
+ name (Optional[str]): an optional string name for the ``Node``.
907
+ This will influence the name of the value assigned to in the
908
+ Python generated code.
909
+
910
+ type_expr (Optional[Any]): an optional type annotation representing the
911
+ Python type the output of this node will have.
912
+
913
+ Returns:
914
+
915
+ The newly-created and inserted node.
916
+ """
917
+ assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output')
918
+ args = () if args is None else args
919
+ kwargs = {} if kwargs is None else kwargs
920
+ assert isinstance(args, tuple), "args must be a tuple"
921
+ assert isinstance(kwargs, dict), "kwargs must be a dict"
922
+
923
+ candidate = name if name is not None else self._target_to_str(target)
924
+ name = self._graph_namespace.create_name(candidate, None)
925
+ n = Node(self, name, op, target, args, kwargs, type_expr)
926
+
927
+ self._graph_namespace.associate_name_with_obj(name, n)
928
+
929
+ self._insert(n)
930
+ self._len += 1
931
+ return n
932
+
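# Illustrative sketch (not part of the original file): create_node is the
# low-level entry point; placeholder()/call_function()/output() defined below
# are thin wrappers around it. This builds the same graph those helpers would.
import torch
import torch.fx

g = torch.fx.Graph()
a = g.create_node('placeholder', 'a')
b = g.create_node('call_function', torch.neg, args=(a,))
g.create_node('output', 'output', args=(b,))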
933
+ @compatibility(is_backward_compatible=False)
934
+ def process_inputs(self, *args):
935
+ """
936
+ Processes args so that they can be passed to the FX graph.
937
+ """
938
+ return self._codegen.process_inputs(*args)
939
+
940
+ @compatibility(is_backward_compatible=False)
941
+ def process_outputs(self, out):
942
+ return self._codegen.process_outputs(out)
943
+
944
+
945
+ @compatibility(is_backward_compatible=True)
946
+ def erase_node(self, to_erase : Node) -> None:
947
+ """
948
+ Erases a ``Node`` from the ``Graph``. Throws an exception if
949
+ there are still users of that node in the ``Graph``.
950
+
951
+ Args:
952
+
953
+ to_erase (Node): The ``Node`` to erase from the ``Graph``.
954
+ """
955
+ if len(to_erase.users) > 0:
956
+ raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
957
+ f'users in the graph: {to_erase.users}!')
958
+ if to_erase.graph != self:
959
+ raise RuntimeError(f"Attempting to remove {to_erase} from wrong graph!")
960
+ if to_erase._erased:
961
+ warnings.warn(f"erase_node({to_erase}) on an already erased node")
962
+ return
963
+
964
+ to_erase._remove_from_list()
965
+ to_erase._erased = True # iterators may retain handles to erased nodes
966
+ self._len -= 1
967
+
968
+ # Null out this Node's argument nodes so that the Nodes referred to
969
+ # can update their ``users`` accordingly
970
+ new_args = map_arg(to_erase.args, lambda n: None)
971
+ assert isinstance(new_args, tuple)
972
+ to_erase.args = new_args
973
+ new_kwargs = map_arg(to_erase.kwargs, lambda n: None)
974
+ assert isinstance(new_kwargs, dict)
975
+ to_erase.kwargs = new_kwargs
976
+
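# Illustrative sketch (not part of the original file): a node can only be erased
# once it has no users, so uses are typically rerouted first. `g` is an assumed
# Graph containing torch.neg calls.
for node in list(g.nodes):
    if node.op == 'call_function' and node.target is torch.neg:
        node.replace_all_uses_with(node.args[0])  # reroute users to the neg input
        g.erase_node(node)                        # now safe: no remaining users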
977
+ @compatibility(is_backward_compatible=True)
978
+ def inserting_before(self, n: Optional[Node] = None):
979
+ """Set the point at which create_node and companion methods will insert into the graph.
980
+ When used within a 'with' statement, this will temporarily set the insert point and
981
+ then restore it when the with statement exits::
982
+
983
+ with g.inserting_before(n):
984
+ ... # inserting before node n
985
+ ... # insert point restored to what it was previously
986
+ g.inserting_before(n) # set the insert point permanently
987
+
988
+ Args:
989
+
990
+ n (Optional[Node]): The node before which to insert. If None this will insert before
991
+ the beginning of the entire graph.
992
+
993
+ Returns:
994
+ A resource manager that will restore the insert point on ``__exit__``.
995
+ """
996
+ if n is None:
997
+ return self.inserting_after(self._root)
998
+ assert n.graph == self, "Node to insert before is not in graph."
999
+ return _InsertPoint(self, n.prepend)
1000
+
1001
+ @compatibility(is_backward_compatible=True)
1002
+ def inserting_after(self, n: Optional[Node] = None):
1003
+ """Set the point at which create_node and companion methods will insert into the graph.
1004
+ When used within a 'with' statement, this will temporarily set the insert point and
1005
+ then restore it when the with statement exits::
1006
+
1007
+ with g.inserting_after(n):
1008
+ ... # inserting after node n
1009
+ ... # insert point restored to what it was previously
1010
+ g.inserting_after(n) # set the insert point permanently
1011
+
1012
+ Args:
1013
+
1014
+ n (Optional[Node]): The node after which to insert. If None this will insert after
1015
+ the beginning of the entire graph.
1016
+
1017
+ Returns:
1018
+ A resource manager that will restore the insert point on ``__exit__``.
1019
+ """
1020
+ if n is None:
1021
+ return self.inserting_before(self._root)
1022
+ assert n.graph == self, "Node to insert after is not in graph."
1023
+ return _InsertPoint(self, n.append)
1024
+
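# Illustrative sketch (not part of the original file): temporarily moving the
# insert point so a new node lands directly after an existing node `n` of
# graph `g` (both assumed to exist already).
with g.inserting_after(n):
    new_node = g.call_function(torch.relu, (n,))
# the previous insert point is restored once the `with` block exits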
1025
+ @compatibility(is_backward_compatible=True)
1026
+ def placeholder(self, name: str, type_expr: Optional[Any] = None,
1027
+ default_value : Any = inspect.Signature.empty) -> Node:
1028
+ """
1029
+ Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents
1030
+ a function input.
1031
+
1032
+ Args:
1033
+
1034
+ name (str): A name for the input value. This corresponds to the name
1035
+ of the positional argument to the function this ``Graph`` represents.
1036
+
1037
+ type_expr (Optional[Any]): an optional type annotation representing the
1038
+ Python type the output of this node will have. This is needed in some
1039
+ cases for proper code generation (e.g. when the function is used
1040
+ subsequently in TorchScript compilation).
1041
+
1042
+ default_value (Any): The default value this function argument should take
1043
+ on. NOTE: to allow for `None` as a default value, `inspect.Signature.empty`
1044
+ should be passed as this argument to specify that the parameter does _not_
1045
+ have a default value.
1046
+
1047
+ .. note::
1048
+ The same insertion point and type expression rules apply for this method
1049
+ as ``Graph.create_node``.
1050
+ """
1051
+ args = () if default_value is inspect.Signature.empty else (default_value,)
1052
+ return self.create_node('placeholder', name, args=args, type_expr=type_expr)
1053
+
1054
+ @compatibility(is_backward_compatible=True)
1055
+ def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node:
1056
+ """
1057
+ Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the
1058
+ fetch of an attribute from the ``Module`` hierarchy.
1059
+
1060
+ Args:
1061
+
1062
+ qualified_name (str): the fully-qualified name of the attribute to be retrieved.
1063
+ For example, if the traced Module has a submodule named ``foo``, which has a
1064
+ submodule named ``bar``, which has an attribute named ``baz``, the qualified
1065
+ name ``foo.bar.baz`` should be passed as ``qualified_name``.
1066
+
1067
+ type_expr (Optional[Any]): an optional type annotation representing the
1068
+ Python type the output of this node will have.
1069
+
1070
+
1071
+ Returns:
1072
+
1073
+ The newly-created and inserted ``get_attr`` node.
1074
+
1075
+ .. note::
1076
+ The same insertion point and type expression rules apply for this method
1077
+ as ``Graph.create_node``.
1078
+ """
1079
+ def _get_attr_reference_exists(mod: torch.nn.Module, qualified_name: str) -> bool:
1080
+ module_path, _, name = qualified_name.rpartition(".")
1081
+
1082
+ try:
1083
+ submod: torch.nn.Module = mod.get_submodule(module_path)
1084
+ except AttributeError:
1085
+ warnings.warn(f"Failed to fetch module {module_path}!")
1086
+ return False
1087
+
1088
+ if not hasattr(submod, name):
1089
+ return False
1090
+
1091
+ res = getattr(submod, name)
1092
+
1093
+ if (not isinstance(res, torch.nn.Module)
1094
+ and not isinstance(res, torch.nn.Parameter)
1095
+ and name not in submod._buffers):
1096
+ return False
1097
+
1098
+ return True
1099
+
1100
+ if (self.owning_module and
1101
+ not _get_attr_reference_exists(self.owning_module, qualified_name)):
1102
+ warnings.warn("Attempted to insert a get_attr Node with no "
1103
+ "underlying reference in the owning "
1104
+ "GraphModule! Call "
1105
+ "GraphModule.add_submodule to add the "
1106
+ "necessary submodule, "
1107
+ "GraphModule.add_parameter to add the "
1108
+ "necessary Parameter, or "
1109
+ "nn.Module.register_buffer to add the "
1110
+ "necessary buffer", stacklevel=2)
1111
+ return self.create_node('get_attr', qualified_name, type_expr=type_expr)
1112
+
1113
+ @compatibility(is_backward_compatible=True)
1114
+ def call_module(self,
1115
+ module_name: str,
1116
+ args: Optional[Tuple['Argument', ...]] = None,
1117
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1118
+ type_expr: Optional[Any] = None) -> Node:
1119
+ """
1120
+ Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node
1121
+ represents a call to the forward() function of a ``Module`` in the ``Module``
1122
+ hierarchy.
1123
+
1124
+ Args:
1125
+
1126
+ module_name (str): The qualified name of the ``Module`` in the ``Module``
1127
+ hierarchy to be called. For example, if the traced ``Module`` has a
1128
+ submodule named ``foo``, which has a submodule named ``bar``, the
1129
+ qualified name ``foo.bar`` should be passed as ``module_name`` to
1130
+ call that module.
1131
+
1132
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1133
+ to the called method. Note that this should *not* include a ``self`` argument.
1134
+
1135
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1136
+ to the called method
1137
+
1138
+ type_expr (Optional[Any]): an optional type annotation representing the
1139
+ Python type the output of this node will have.
1140
+
1141
+ Returns:
1142
+
1143
+ The newly-created and inserted ``call_module`` node.
1144
+
1145
+ .. note::
1146
+ The same insertion point and type expression rules apply for this method
1147
+ as :meth:`Graph.create_node`.
1148
+ """
1149
+ if (self.owning_module and
1150
+ self.owning_module.get_submodule(module_name) is None):
1151
+ warnings.warn("Attempted to insert a call_module Node with "
1152
+ "no underlying reference in the owning "
1153
+ "GraphModule! Call "
1154
+ "GraphModule.add_submodule to add the "
1155
+ "necessary submodule")
1156
+ return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)
1157
+
1158
+ @compatibility(is_backward_compatible=True)
1159
+ def call_method(self,
1160
+ method_name: str,
1161
+ args: Optional[Tuple['Argument', ...]] = None,
1162
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1163
+ type_expr: Optional[Any] = None) -> Node:
1164
+ """
1165
+ Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node
1166
+ represents a call to a given method on the 0th element of ``args``.
1167
+
1168
+ Args:
1169
+
1170
+ method_name (str): The name of the method to apply to the self argument.
1171
+ For example, if args[0] is a ``Node`` representing a ``Tensor``,
1172
+ then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``.
1173
+
1174
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1175
+ to the called method. Note that this *should* include a ``self`` argument.
1176
+
1177
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1178
+ to the called method
1179
+
1180
+ type_expr (Optional[Any]): an optional type annotation representing the
1181
+ Python type the output of this node will have.
1182
+
1183
+ Returns:
1184
+
1185
+ The newly created and inserted ``call_method`` node.
1186
+
1187
+ .. note::
1188
+ The same insertion point and type expression rules apply for this method
1189
+ as :meth:`Graph.create_node`.
1190
+ """
1191
+ return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)
1192
+
1193
+ @compatibility(is_backward_compatible=True)
1194
+ def call_function(self,
1195
+ the_function: Callable[..., Any],
1196
+ args: Optional[Tuple['Argument', ...]] = None,
1197
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1198
+ type_expr: Optional[Any] = None) -> Node:
1199
+ """
1200
+ Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node
1201
+ represents a call to a Python callable, specified by ``the_function``.
1202
+
1203
+ Args:
1204
+
1205
+ the_function (Callable[..., Any]): The function to be called. Can be any PyTorch
1206
+ operator, Python function, or member of the ``builtins`` or ``operator``
1207
+ namespaces.
1208
+
1209
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1210
+ to the called function.
1211
+
1212
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1213
+ to the called function
1214
+
1215
+ type_expr (Optional[Any]): an optional type annotation representing the
1216
+ Python type the output of this node will have.
1217
+
1218
+ Returns:
1219
+
1220
+ The newly created and inserted ``call_function`` node.
1221
+
1222
+ .. note::
1223
+ The same insertion point and type expression rules apply for this method
1224
+ as :meth:`Graph.create_node`.
1225
+ """
1226
+ return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr)
1227
+
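# Illustrative sketch (not part of the original file): assembling a Graph by
# hand out of placeholder/call_function/call_method/output nodes and running it
# through a GraphModule.
import torch
import torch.fx

g = torch.fx.Graph()
x = g.placeholder('x')
relu = g.call_function(torch.relu, (x,))
summed = g.call_method('sum', (relu,), {'dim': -1})
g.output(summed)

gm = torch.fx.GraphModule(torch.nn.Module(), g)
print(gm(torch.randn(2, 3)))  # same result as torch.relu(x).sum(dim=-1)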
1228
+ @compatibility(is_backward_compatible=True)
1229
+ def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node:
1230
+ """
1231
+ Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from
1232
+ the graph of node to the graph of self. Example::
1233
+
1234
+ # Copying all the nodes in `g` into `new_graph`
1235
+ g : torch.fx.Graph = ...
1236
+ new_graph = torch.fx.Graph()
1237
+ value_remap = {}
1238
+ for node in g.nodes:
1239
+ value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n])
1240
+
1241
+ Args:
1242
+
1243
+ node (Node): The node to copy into ``self``.
1244
+
1245
+ arg_transform (Callable[[Node], Argument]): A function that transforms
1246
+ ``Node`` arguments in node's ``args`` and ``kwargs`` into the
1247
+ equivalent argument in ``self``. In the simplest case, this should
1248
+ retrieve a value out of a table mapping Nodes in the original
1249
+ graph to ``self``.
1250
+ """
1251
+ args = map_arg(node.args, arg_transform)
1252
+ kwargs = map_arg(node.kwargs, arg_transform)
1253
+ assert isinstance(args, tuple)
1254
+ assert isinstance(kwargs, dict)
1255
+ result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type)
1256
+ result_node.meta = copy.copy(node.meta)
1257
+ return result_node
1258
+
1259
+ @compatibility(is_backward_compatible=True)
1260
+ def output(self, result: 'Argument', type_expr: Optional[Any] = None):
1261
+ """
1262
+ Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents
1263
+ a ``return`` statement in Python code. ``result`` is the value that should
1264
+ be returned.
1265
+
1266
+ Args:
1267
+
1268
+ result (Argument): The value to be returned.
1269
+
1270
+ type_expr (Optional[Any]): an optional type annotation representing the
1271
+ Python type the output of this node will have.
1272
+
1273
+ .. note::
1274
+
1275
+ The same insertion point and type expression rules apply for this method
1276
+ as ``Graph.create_node``.
1277
+ """
1278
+ return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr)
1279
+
1280
+ def _target_to_str(self, target : Target) -> str:
1281
+ if callable(target):
1282
+ op = target.__name__
1283
+ else:
1284
+ assert isinstance(target, str)
1285
+ op = target
1286
+ if _is_magic(op):
1287
+ op = op[2:-2]
1288
+ op = _snake_case(op)
1289
+ return op
1290
+
1291
+ @compatibility(is_backward_compatible=True)
1292
+ def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode:
1293
+ """
1294
+ Turn this ``Graph`` into valid Python code.
1295
+
1296
+ Args:
1297
+
1298
+ root_module (str): The name of the root module on which to look-up
1299
+ qualified name targets. This is usually 'self'.
1300
+
1301
+ Returns:
1302
+
1303
+ A PythonCode object, consisting of two fields:
1304
+ src: the Python source code representing the object
1305
+ globals: a dictionary of global names in `src` -> the objects that they reference.
1306
+ """
1307
+ # NOTE: [Graph Namespaces]
1308
+ #
1309
+ # There are two types of symbols in generated Python source code:
1310
+ # locals and globals.
1311
+ # Locals are locally defined by the output of a node in the Graph.
1312
+ # Globals are references to external objects, like functions or types.
1313
+ #
1314
+ # When generating Python code, we need to make sure to name things
1315
+ # appropriately. In particular:
1316
+ # - All names should be unique, to avoid weird shadowing bugs.
1317
+ # - These names need to be consistent, e.g. an object should always be
1318
+ # referenced by the same name.
1319
+ #
1320
+ # To do this, we create a new namespace just for this source. All names
1321
+ # that get printed must come from this namespace.
1322
+ #
1323
+ # Why can't we re-use node.name? Because it was generated within the
1324
+ # namespace `self._graph_namespace`. In order to provide uniqueness
1325
+ # over both locals (node.name) *and* globals, we create a completely
1326
+ # new namespace to put all identifiers in.
1327
+ namespace = _Namespace()
1328
+
1329
+ # Override Node's repr to generate a valid name within our namespace.
1330
+ # Since repr() is designed to produce a valid Python expression, it
1331
+ # makes sense to re-use it. This way, it's easy to print something like
1332
+ # Tuple[Node, Node] by simply calling repr() on it. Node's __repr__ is
1333
+ # implemented cooperatively to allow this.
1334
+ def node_repr(n: Node):
1335
+ return namespace.create_name(n.name, n)
1336
+
1337
+ @contextmanager
1338
+ def override_node_repr(graph: Graph):
1339
+ orig_repr_fns = {}
1340
+ for node in graph.nodes:
1341
+ orig_repr_fns[node] = node._repr_fn
1342
+ node._repr_fn = node_repr
1343
+ try:
1344
+ yield None
1345
+ finally:
1346
+ # restore the original repr functions
1347
+ for node in graph.nodes:
1348
+ node._repr_fn = orig_repr_fns[node]
1349
+
1350
+ with override_node_repr(self):
1351
+ return self._python_code(root_module, namespace, verbose=verbose)
1352
+
1353
+ def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
1354
+ return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose)
1355
+
1356
+
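# Illustrative sketch (not part of the original file): generating the Python
# source for a graph directly; `gm` is an assumed, already-traced GraphModule.
code = gm.graph.python_code(root_module='self')
print(code.src)      # the generated forward() source
print(code.globals)  # external names referenced by that source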
1357
+ def __str__(self) -> str:
1358
+ """
1359
+ Return a human-readable (not machine-readable) string representation
1360
+ of this Graph
1361
+ """
1362
+ placeholder_names : List[str] = []
1363
+ # This is a one-element array just so ``format_node`` can modify the closed
1364
+ # over value
1365
+ maybe_return_typename : List[str] = ['']
1366
+
1367
+ node_strs = [node.format_node(placeholder_names) for node in self.nodes]
1368
+ param_str = ', '.join(placeholder_names)
1369
+ s = f'graph({param_str}){maybe_return_typename[0]}:'
1370
+ for node_str in node_strs:
1371
+ if node_str:
1372
+ s += '\n ' + node_str
1373
+ return s
1374
+
1375
+ @compatibility(is_backward_compatible=True)
1376
+ def print_tabular(self):
1377
+ """
1378
+ Prints the intermediate representation of the graph in tabular
1379
+ format. Note that this API requires the ``tabulate`` module to be
1380
+ installed.
1381
+ """
1382
+ try:
1383
+ from tabulate import tabulate
1384
+ except ImportError:
1385
+ print("`print_tabular` relies on the library `tabulate`, "
1386
+ "which could not be found on this machine. Run `pip "
1387
+ "install tabulate` to install the library.")
1388
+ raise
1389
+
1390
+ node_specs = [[n.op, n.name, n.target, n.args, n.kwargs]
1391
+ for n in self.nodes]
1392
+ print(tabulate(node_specs,
1393
+ headers=['opcode', 'name', 'target', 'args', 'kwargs']))
1394
+
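# Illustrative sketch (not part of the original file): printing the IR of a
# traced module in tabular form (requires the `tabulate` package). MyModule
# here refers to the example class from the Graph docstring above.
gm = torch.fx.symbolic_trace(MyModule())
gm.graph.print_tabular()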
1395
+ @compatibility(is_backward_compatible=True)
1396
+ def lint(self):
1397
+ """
1398
+ Runs various checks on this Graph to make sure it is well-formed. In
1399
+ particular:
1400
+ - Checks Nodes have correct ownership (owned by this graph)
1401
+ - Checks Nodes appear in topological order
1402
+ - If this Graph has an owning GraphModule, checks that targets
1403
+ exist in that GraphModule
1404
+ """
1405
+
1406
+ # Check topo order
1407
+ def check_arg(arg : Node, n : Optional[Node] = None) -> None:
1408
+ context_str = f' of Node \'{n}\' ' if n else ' '
1409
+ if arg.graph is not self:
1410
+ raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, '
1411
+ f'but was used as an argument! If you are copying nodes from another graph, make '
1412
+ f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}')
1413
+ if arg not in seen_values:
1414
+ raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been '
1415
+ f'defined! Please check that Nodes in the graph are topologically ordered\n{self}')
1416
+
1417
+ seen_names : Set[str] = set()
1418
+ seen_values : Set[Node] = set()
1419
+ for node in self.nodes:
1420
+ if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']:
1421
+ raise RuntimeError(f'Node {node} had unknown opcode {node.op}!')
1422
+ if node.graph is not self:
1423
+ raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!')
1424
+ map_arg(node.args, lambda arg: check_arg(arg, node))
1425
+ map_arg(node.kwargs, lambda arg: check_arg(arg, node))
1426
+ seen_values.add(node)
1427
+
1428
+ if node.name in seen_names:
1429
+ raise RuntimeError(f'Node redefined name {node.name}!')
1430
+ seen_names.add(node.name)
1431
+
1432
+ # Check targets are legit
1433
+ if self.owning_module:
1434
+ for node in self.nodes:
1435
+ if node.op == 'call_function':
1436
+ if not callable(node.target):
1437
+ raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
1438
+ 'a Callable is expected')
1439
+ else:
1440
+ if not isinstance(node.target, str):
1441
+ raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
1442
+ 'a str is expected')
1443
+ if node.op in ['get_attr', 'call_module']:
1444
+ target_atoms = node.target.split('.')
1445
+ m_itr = self.owning_module
1446
+ for i, atom in enumerate(target_atoms):
1447
+ new_m_itr = getattr(m_itr, atom, None)
1448
+ seen_qualname = '.'.join(target_atoms[:i])
1449
+ if new_m_itr is None:
1450
+ raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute '
1451
+ f'{atom} of {seen_qualname}')
1452
+ if (node.op == "call_module"
1453
+ and not isinstance(new_m_itr, torch.nn.Module)):
1454
+ raise RuntimeError(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
1455
+ 'not reference an nn.Module')
1456
+ elif (node.op == "get_attr"
1457
+ and not isinstance(new_m_itr, torch.nn.Module)
1458
+ and not isinstance(new_m_itr, torch.nn.Parameter)
1459
+ and atom not in m_itr._buffers):
1460
+ warnings.warn(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
1461
+ 'not reference an nn.Module, nn.Parameter, or buffer, which is '
1462
+ 'what \'get_attr\' Nodes typically target')
1463
+ else:
1464
+ m_itr = new_m_itr
1465
+
1466
+ @compatibility(is_backward_compatible=True)
1467
+ def eliminate_dead_code(self):
1468
+ """
1469
+ Remove all dead code from the graph, based on each node's number of
1470
+ users, and whether the nodes have any side effects. The graph must be
1471
+ topologically sorted before calling.
1472
+
1473
+ Returns:
1474
+ bool: Whether the graph was changed as a result of the pass.
1475
+
1476
+ Example:
1477
+
1478
+ Before dead code is eliminated, `a` from `a = x + 1` below has no users
1479
+ and thus can be eliminated from the graph without having an effect.
1480
+
1481
+ .. code-block:: python
1482
+
1483
+ def forward(self, x):
1484
+ a = x + 1
1485
+ return x + self.attr_1
1486
+
1487
+ After dead code is eliminated, `a = x + 1` has been removed, and the rest
1488
+ of `forward` remains.
1489
+
1490
+ .. code-block:: python
1491
+
1492
+ def forward(self, x):
1493
+ return x + self.attr_1
1494
+
1495
+ .. warning::
1496
+
1497
+ Dead code elimination has some heuristics to avoid removing
1498
+ side-effectful nodes (see Node.is_impure) but in general coverage
1499
+ is very bad, so you should assume that this method is not sound
1500
+ to call unless you know that your FX graph consists entirely
1501
+ of functional operations.
1502
+ """
1503
+ # Lint the graph first to make sure it is topologically sorted, otherwise
1504
+ # DCE below will not behave as expected.
1505
+ self.lint()
1506
+
1507
+ # Reverse iterate so that when we remove a node, any nodes used as an
1508
+ # input to that node have an updated user count that no longer reflects
1509
+ # the removed node.
1510
+ changed = False
1511
+ for node in reversed(self.nodes):
1512
+ if not node.is_impure() and len(node.users) == 0:
1513
+ self.erase_node(node)
1514
+ changed = True
1515
+
1516
+ return changed
1517
+
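# Illustrative sketch (not part of the original file): `a` below has no users,
# so dead-code elimination removes the corresponding add node.
import torch.fx

def fn(x):
    a = x + 1   # dead: never used
    return x * 2

gm = torch.fx.symbolic_trace(fn)
changed = gm.graph.eliminate_dead_code()  # True: the `x + 1` node is removed
gm.recompile()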
1518
+ @compatibility(is_backward_compatible=False)
1519
+ def set_codegen(self, codegen: CodeGen):
1520
+ self._codegen = codegen
1521
+
1522
+ @compatibility(is_backward_compatible=False)
1523
+ def on_generate_code(
1524
+ self,
1525
+ make_transformer: Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]
1526
+ ):
1527
+ """Register a transformer function when python code is generated
1528
+
1529
+ Args:
1530
+ make_transformer (Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]):
1531
+ a function that returns a code transformer to be registered.
1532
+ This function is called by `on_generate_code` to obtain the
1533
+ code transformer.
1534
+
1535
+ This function is also given as its input the currently
1536
+ registered code transformer (or None if nothing is registered),
1537
+ in case it is not desirable to overwrite it. This is useful to
1538
+ chain code transformers together.
1539
+
1540
+ Returns:
1541
+ a context manager that, when used in a `with` statement, will automatically
1542
+ restore the previously registered code transformer.
1543
+
1544
+ Example:
1545
+
1546
+ .. code-block:: python
1547
+
1548
+
1549
+ gm: fx.GraphModule = ...
1550
+
1551
+ # This is a code transformer we want to register. This code
1552
+ # transformer prepends a pdb import and trace statement at the very
1553
+ # beginning of the generated torch.fx code to allow for manual
1554
+ # debugging with the PDB library.
1555
+ def insert_pdb(body):
1556
+ return ["import pdb; pdb.set_trace()\\n", *body]
1557
+
1558
+ # Registers `insert_pdb`, and overwrites the current registered
1559
+ # code transformer (given by `_` to the lambda):
1560
+ gm.graph.on_generate_code(
1561
+ lambda _: insert_pdb
1562
+ )
1563
+
1564
+ # Or alternatively, registers a code transformer which first
1565
+ # runs `body` through existing registered transformer, then
1566
+ # through `insert_pdb`:
1567
+ gm.graph.on_generate_code(
1568
+ lambda current_trans: (
1569
+ lambda body: insert_pdb(
1570
+ current_trans(body) if current_trans
1571
+ else body
1572
+ )
1573
+ )
1574
+ )
1575
+
1576
+ gm.recompile()
1577
+ gm(*inputs) # drops into pdb
1578
+
1579
+
1580
+ This function can also be used as a context manager, with the benefit of
1581
+ automatically restoring the previously registered code transformer:
1582
+
1583
+ .. code-block:: python
1584
+
1585
+ # ... continue from previous example
1586
+
1587
+ with gm.graph.on_generate_code(lambda _: insert_pdb):
1588
+ # do more stuff with `gm`...
1589
+ gm.recompile()
1590
+ gm(*inputs) # drops into pdb
1591
+
1592
+ # now previous code transformer is restored (but `gm`'s code with pdb
1593
+ # remains - that means you can run `gm` with pdb here too, until you
1594
+ # run next `recompile()`).
1595
+ """
1596
+ on_gen_code_old = self._codegen._body_transformer
1597
+ self._codegen._body_transformer = make_transformer(on_gen_code_old)
1598
+
1599
+ @contextlib.contextmanager
1600
+ def on_generate_code_context_manager():
1601
+ try:
1602
+ yield
1603
+ finally:
1604
+ self._codegen._body_transformer = on_gen_code_old
1605
+
1606
+ return on_generate_code_context_manager()
1607
+
1608
+
1609
+ reflectable_magic_methods = {
1610
+ 'add': '{} + {}',
1611
+ 'sub': '{} - {}',
1612
+ 'mul': '{} * {}',
1613
+ 'floordiv': '{} // {}',
1614
+ 'truediv': '{} / {}',
1615
+ 'div': '{} / {}',
1616
+ 'mod': '{} % {}',
1617
+ 'pow': '{} ** {}',
1618
+ 'lshift': '{} << {}',
1619
+ 'rshift': '{} >> {}',
1620
+ 'and_': '{} & {}',
1621
+ 'or_': '{} | {}',
1622
+ 'xor': '{} ^ {}',
1623
+ 'getitem': '{}[{}]',
1624
+ 'matmul': '{} @ {}',
1625
+ }
1626
+
1627
+ magic_methods = dict({
1628
+ 'eq': '{} == {}',
1629
+ 'ne': '{} != {}',
1630
+ 'lt': '{} < {}',
1631
+ 'gt': '{} > {}',
1632
+ 'le': '{} <= {}',
1633
+ 'ge': '{} >= {}',
1634
+ 'pos': '+{}',
1635
+ 'neg': '-{}',
1636
+ 'invert': '~{}'}, **reflectable_magic_methods)
1637
+
1638
+ inplace_methods = {
1639
+ 'iadd': '{} += {}',
1640
+ 'iand': '{} &= {}',
1641
+ 'ifloordiv': '{} //= {}',
1642
+ 'ilshift': '{} <<= {}',
1643
+ 'imod': '{} %= {}',
1644
+ 'imul': '{} *= {}',
1645
+ 'imatmul': '{} @= {}',
1646
+ 'ior': '{} |= {}',
1647
+ 'ipow': '{} **= {}',
1648
+ 'irshift': '{} >>= {}',
1649
+ 'isub': '{} -= {}',
1650
+ 'itruediv': '{} /= {}',
1651
+ 'ixor': '{} ^= {}',
1652
+ 'setitem': '{}[{}] = {}',
1653
+ }
venv/lib/python3.10/site-packages/torch/fx/graph_module.py ADDED
@@ -0,0 +1,884 @@
1
+ import contextlib
2
+ import copy
3
+ import itertools
4
+ import linecache
5
+ import os
6
+ import sys
7
+ import traceback
8
+ import warnings
9
+ from pathlib import Path
10
+ from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.overrides
15
+ from torch.nn.modules.module import _addindent
16
+ from torch.package import Importer, PackageExporter, PackageImporter, sys_importer
17
+
18
+ from ._compatibility import compatibility
19
+ from .graph import _custom_builtins, _is_from_torch, _PyTreeCodeGen, Graph, PythonCode
20
+
21
+ __all__ = [
22
+ "reduce_graph_module",
23
+ "reduce_package_graph_module",
24
+ "reduce_deploy_graph_module",
25
+ "GraphModule",
26
+ ]
27
+
28
+ _USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes"
29
+
30
+ # Normal exec loses the source code; however, we can work with
31
+ # the linecache module to recover it.
32
+ # Using _exec_with_source will add it to our local cache
33
+ # and then tools like TorchScript will be able to get source info.
34
+ class _EvalCacheLoader:
35
+ def __init__(self):
36
+ self.eval_cache = {}
37
+ self.next_id = 0
38
+
39
+ def cache(self, src: str, globals: Dict[str, Any], co_fields=None):
40
+ """Store the source in a private cache, and add a lazy entry in linecache
41
+ that allows the source to be retrieved by 'filename'.
42
+
43
+ Args:
44
+ src (str): The module source to cache
45
+ globals (dict): The module globals
46
+
47
+ Returns:
48
+ str: The cache key (and dummy filename) generated for src.
49
+ """
50
+
51
+ key = self._get_key()
52
+ if co_fields:
53
+ key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}"
54
+ self.eval_cache[key] = src
55
+
56
+ # Don't mutate globals so that this loader is only used
57
+ # to populate linecache, and doesn't interact with other modules
58
+ # that might check `__loader__`
59
+ globals_copy = globals.copy()
60
+ globals_copy["__file__"] = key
61
+ globals_copy["__name__"] = key
62
+ globals_copy["__loader__"] = self
63
+ linecache.lazycache(key, globals_copy)
64
+
65
+ return key
66
+
67
+ # Part of the loader protocol (PEP 302)
68
+ # linecache will use this method when trying to find source code
69
+ def get_source(self, module_name) -> Optional[str]:
70
+ if module_name in self.eval_cache:
71
+ return self.eval_cache[module_name]
72
+ return None
73
+
74
+ def _get_key(self):
75
+ key = f"<eval_with_key>.{self.next_id}"
76
+ self.next_id += 1
77
+ return key
78
+
79
+
80
+ _loader = _EvalCacheLoader()
81
+
82
+
83
+ def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None):
84
+ key = _loader.cache(src, globals, co_fields)
85
+ exec(compile(src, key, "exec"), globals)
86
+
87
+
88
+ def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None):
89
+ return _method_from_src(
90
+ method_name="forward", src=src, globals=globals, co_fields=co_fields
91
+ )
92
+
93
+
94
+ def _method_from_src(
95
+ method_name: str, src: str, globals: Dict[str, Any], co_fields=None
96
+ ) -> Callable:
97
+ # avoid mutating the passed in dict
98
+ globals_copy = globals.copy()
99
+ _exec_with_source(src, globals_copy, co_fields)
100
+ fn = globals_copy[method_name]
101
+ del globals_copy[method_name]
102
+ return fn
103
+
104
+
105
+ def _format_import_statement(name: str, obj: Any, importer: Importer) -> str:
106
+ if name in _custom_builtins:
107
+ return _custom_builtins[name].import_str
108
+ if _is_from_torch(name):
109
+ return "import torch"
110
+ module_name, attr_name = importer.get_name(obj)
111
+ return f"from {module_name} import {attr_name} as {name}"
112
+
113
+
114
+ def _format_import_block(globals: Dict[str, Any], importer: Importer):
115
+ import_strs: Set[str] = set()
116
+ for name, obj in globals.items():
117
+ import_strs.add(_format_import_statement(name, obj, importer))
118
+ # Sort the imports so we have a stable import block that allows us to
119
+ # hash the graph module and get a consistent key for use in a cache.
120
+ return "\n".join(sorted(import_strs))
121
+
122
+
123
+ @compatibility(is_backward_compatible=True)
124
+ def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:
125
+ # BC: attribute name was changed from `code` to `_code` to facilitate
126
+ # making `code` into a property and adding a docstring to it
127
+ fn_src = body.get("_code") or body["code"]
128
+ forward = _forward_from_src(import_block + fn_src, {})
129
+ return _deserialize_graph_module(forward, body)
130
+
131
+
132
+ @compatibility(is_backward_compatible=True)
133
+ def reduce_package_graph_module(
134
+ importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str
135
+ ) -> torch.nn.Module:
136
+ forward = importer.import_module(generated_module_name).forward
137
+ return _deserialize_graph_module(forward, body)
138
+
139
+
140
+ @compatibility(is_backward_compatible=True)
141
+ def reduce_deploy_graph_module(
142
+ importer: PackageImporter, body: Dict[Any, Any], import_block: str
143
+ ) -> torch.nn.Module:
144
+ ns = {}
145
+ ns["__builtins__"] = importer.patched_builtins
146
+ fn_src = body.get("_code")
147
+ assert fn_src is not None
148
+ forward = _forward_from_src(import_block + fn_src, ns)
149
+ return _deserialize_graph_module(forward, body)
150
+
151
+
152
+ # We create a dummy class here because symbolic_trace pulls the forward()
153
+ # function off of the class, rather than the instance. This class is used
154
+ # in _deserialize_graph_module() below.
155
+ class _CodeOnlyModule(torch.nn.Module):
156
+ def __init__(self, body):
157
+ super().__init__()
158
+ self.__dict__ = body
159
+
160
+
161
+ def _deserialize_graph_module(forward, body: Dict[Any, Any], graph_module_cls=None) -> torch.nn.Module:
162
+ """
163
+ Deserialize a GraphModule given the dictionary of the original module,
164
+ using the code to reconstruct the graph. We delete the actual graph before
165
+ saving the dictionary so that changes to the in-memory graph format do not
166
+ get serialized.
167
+ """
168
+
169
+ # Try to retrieve the forward source in a backward-compatible way
170
+ _CodeOnlyModule.forward = forward
171
+
172
+ tracer_cls = body.get("_tracer_cls")
173
+ if tracer_cls is None:
174
+ from ._symbolic_trace import Tracer
175
+
176
+ tracer_cls = Tracer
177
+
178
+ graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule")
179
+
180
+ # This is a workaround for a mypy linter issue related to
181
+ # passing base class as an argument - https://github.com/python/mypy/issues/5865.
182
+ cls_tracer: Any = tracer_cls
183
+
184
+ class KeepModules(cls_tracer):
185
+ # we shouldn't trace into any of the submodules,
186
+ # because they were not traced in the original GraphModule
187
+ def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
188
+ return True
189
+
190
+ com = _CodeOnlyModule(body)
191
+
192
+ tracer_extras = body.get("_tracer_extras", {})
193
+ graph = KeepModules().trace(com, **tracer_extras)
194
+
195
+ # Manually set Tracer class on the reconstructed Graph, to avoid
196
+ # referencing the private local subclass KeepModules.
197
+ graph._tracer_cls = tracer_cls
198
+ from ._lazy_graph_module import _make_graph_module
199
+ gm = _make_graph_module(com, graph, class_name=graphmodule_cls_name, graph_module_cls=graph_module_cls)
200
+
201
+ # The GraphModule constructor only retains attributes referenced by the graph.
202
+ # In this case, our goal is return a GraphModule as close to identical as the one
203
+ # put into the package. If any additional attributes were present in body,
204
+ # we should keep them.
205
+ for k, v in body.items():
206
+ if not hasattr(gm, k):
207
+ setattr(gm, k, v)
208
+ return gm
209
+
210
+
211
+ # copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
212
+ # This installs empty Modules where none exist yet if they are subpaths of target
213
+ def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
214
+ *prefix, field = target.split(".")
215
+ for item in prefix:
216
+ f = getattr(from_module, item)
217
+ t = getattr(to_module, item, None)
218
+ if f is t:
219
+ # we have already installed one of its parents
220
+ # (e.g. target = root.linear.weight, but we have already installed root.linear)
221
+ # once we install a parent, we no longer need to copy the children
222
+ # since all the needed properties will already be present
223
+ return
224
+
225
+ if t is None:
226
+ t = torch.nn.Module()
227
+ setattr(to_module, item, t)
228
+ from_module, to_module = f, t
229
+
230
+ orig = getattr(from_module, field)
231
+ # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
232
+ # So, we register it as a named buffer in the target module.
233
+ if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
234
+ to_module.register_buffer(field, orig)
235
+ else:
236
+ setattr(to_module, field, orig)
237
+
238
+
239
+ # Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'
240
+ # This installs empty Modules where none exist yet if they are subpaths of target
241
+ def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
242
+ *prefix, field = target.split(".")
243
+ for item in prefix:
244
+ t = getattr(to_module, item, None)
245
+
246
+ if t is None:
247
+ t = torch.nn.Module()
248
+ setattr(to_module, item, t)
249
+ to_module = t
250
+
251
+ # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
252
+ # So, we register it as a named buffer in the target module.
253
+ if isinstance(from_obj, torch.Tensor) and not isinstance(
254
+ from_obj, torch.nn.Parameter
255
+ ):
256
+ to_module.register_buffer(field, from_obj)
257
+ else:
258
+ setattr(to_module, field, from_obj)
259
+
260
+
261
+ class _WrappedCall:
262
+ def __init__(self, cls, cls_call):
263
+ self.cls = cls
264
+ self.cls_call = cls_call
265
+
266
+ # Previously, if an error occurred when valid
267
+ # symbolically-traced code was run with an invalid input, the
268
+ # user would see the source of the error as coming from
269
+ # `File "<eval_with_key_N>"`, where N is some number. We use
270
+ # this function to generate a more informative error message. We
271
+ # return the traceback itself, a message explaining that the
272
+ # error occurred in a traced Module's generated forward
273
+ # function, and five lines of context surrounding the faulty
274
+ # line
275
+ @staticmethod
276
+ def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
277
+ # auxiliary variables (for readability)
278
+ err_lineno = frame_summary.lineno
279
+ assert err_lineno is not None
280
+ line = frame_summary.line
281
+ assert line is not None
282
+ err_line_len = len(line)
283
+ all_src_lines = linecache.getlines(frame_summary.filename)
284
+
285
+ # constituent substrings of the error message
286
+ tb_repr = traceback.format_exc()
287
+ custom_msg = (
288
+ "Call using an FX-traced Module, "
289
+ f"line {err_lineno} of the traced Module's "
290
+ "generated forward function:"
291
+ )
292
+ before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
293
+ marker = "~" * err_line_len + "~~~ <--- HERE"
294
+ err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])
295
+
296
+ # joined message
297
+ return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])
298
+
299
+ def __call__(self, obj, *args, **kwargs):
300
+ try:
301
+ if self.cls_call is not None:
302
+ return self.cls_call(obj, *args, **kwargs)
303
+ else:
304
+ return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
305
+ except Exception as e:
306
+ assert e.__traceback__
307
+ topmost_framesummary: traceback.FrameSummary = (
308
+ traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1]
309
+ ) # type: ignore[arg-type]
310
+ if "eval_with_key" in topmost_framesummary.filename:
311
+ print(
312
+ _WrappedCall._generate_error_message(topmost_framesummary),
313
+ file=sys.stderr,
314
+ )
315
+ raise e.with_traceback(None) # noqa: TRY200
316
+ else:
317
+ raise e
318
+
319
+ @compatibility(is_backward_compatible=True)
320
+ class GraphModule(torch.nn.Module):
321
+ """
322
+ GraphModule is an nn.Module generated from an fx.Graph. Graphmodule has a
323
+ ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated
324
+ from that ``graph``.
325
+
326
+ .. warning::
327
+
328
+ When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
329
+ regenerated. However, if you edit the contents of the ``graph`` without reassigning
330
+ the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
331
+ code.
332
+ """
333
+
334
+ def __new__(cls: "Type[GraphModule]", *args, **kwargs):
335
+ # each instance of a graph module needs its own forward method
336
+ # so create a new singleton class for each instance.
337
+ # it is a subclass of the user-defined class, the only difference
338
+ # is an extra layer to install the forward method
339
+
340
+ # address issue described at https://github.com/pytorch/pytorch/issues/63883
341
+ # in other words, traverse class hierarchy to fix the redundant class definition problem
342
+ for t in cls.__mro__:
343
+ c = t.__qualname__.split(".")[-1]
344
+ if c != "GraphModuleImpl":
345
+ cls = t
346
+ break
347
+
348
+ class GraphModuleImpl(cls): # type: ignore[misc, valid-type]
349
+ pass
350
+
351
+ return super().__new__(GraphModuleImpl)
352
+
353
+ @compatibility(is_backward_compatible=True)
354
+ def __init__(
355
+ self,
356
+ root: Union[torch.nn.Module, Dict[str, Any]],
357
+ graph: Graph,
358
+ class_name: str = "GraphModule",
359
+ ):
360
+ """
361
+ Construct a GraphModule.
362
+
363
+ Args:
364
+
365
+ root (Union[torch.nn.Module, Dict[str, Any]):
366
+ ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
367
+ In the case that ``root`` is a Module, any references to Module-based objects (via qualified
368
+ name) in the Graph's Nodes' ``target`` field will be copied over from the respective place
369
+ within ``root``'s Module hierarchy into the GraphModule's module hierarchy.
370
+ In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be
371
+ looked up directly in the dict's keys. The object mapped to by the Dict will be copied
372
+ over into the appropriate place within the GraphModule's module hierarchy.
373
+
374
+ graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation
375
+
376
+ class_name (str): ``name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all
377
+ error messages will report as originating from ``GraphModule``. It may be helpful to set this
378
+ to ``root``'s original name or a name that makes sense within the context of your transform.
379
+ """
380
+ super().__init__()
381
+ self.__class__.__name__ = class_name
382
+ if isinstance(root, torch.nn.Module):
383
+ if hasattr(root, "training"):
384
+ self.training = root.training
385
+
386
+ # When we pickle/unpickle graph module, we don't want to drop any module or attributes.
387
+ if isinstance(root, _CodeOnlyModule):
388
+ for k, _ in root.named_children():
389
+ _copy_attr(root, self, k)
390
+
391
+ for k, _ in root.named_buffers():
392
+ _copy_attr(root, self, k)
393
+
394
+ for k, _ in root.named_parameters():
395
+ _copy_attr(root, self, k)
396
+
397
+ for node in graph.nodes:
398
+ if node.op in ["get_attr", "call_module"]:
399
+ assert isinstance(node.target, str)
400
+ _copy_attr(root, self, node.target)
401
+ elif isinstance(root, dict):
402
+ targets_to_copy = []
403
+ for node in graph.nodes:
404
+ if node.op in ["get_attr", "call_module"]:
405
+ assert isinstance(node.target, str)
406
+ if node.target not in root:
407
+ raise RuntimeError(
408
+ "Node "
409
+ + str(node)
410
+ + " referenced target "
411
+ + node.target
412
+ + " but that target was not provided in ``root``!"
413
+ )
414
+ targets_to_copy.append(node.target)
415
+ # Sort targets in ascending order of the # of atoms.
416
+ # This will ensure that less deeply nested attributes are assigned
417
+ # before more deeply nested attributes. For example, foo.bar
418
+ # will be assigned before foo.bar.baz. Otherwise, we might assign
419
+ # the user-provided ``foo.bar`` and wipe out the previously-assigned
420
+ # ``foo.bar.baz``
421
+ targets_to_copy.sort(key=lambda t: t.count("."))
422
+ for target_to_copy in targets_to_copy:
423
+ _assign_attr(root[target_to_copy], self, target_to_copy)
424
+ else:
425
+ raise RuntimeError("Unsupported type " + str(root) + " passed for root!")
426
+
427
+ self.graph = graph
428
+
429
+ # Store the Tracer class responsible for creating a Graph separately as part of the
430
+ # GraphModule state, except when the Tracer is defined in a local namespace.
431
+ # Locally defined Tracers are not pickleable. This is needed because torch.package will
432
+ # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
433
+ # to re-create the Graph during deserialization.
434
+ self._tracer_cls = None
435
+ if (
436
+ self.graph._tracer_cls
437
+ and "<locals>" not in self.graph._tracer_cls.__qualname__
438
+ ):
439
+ self._tracer_cls = self.graph._tracer_cls
440
+
441
+ self._tracer_extras = {}
442
+ if self.graph._tracer_extras:
443
+ self._tracer_extras = self.graph._tracer_extras
444
+
445
+ # Dictionary to store metadata
446
+ self.meta: Dict[str, Any] = {}
447
+ self._replace_hook = None
448
+
449
+ # TorchScript breaks trying to compile the graph setter because of the
450
+ # continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
451
+ #
452
+ # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
453
+ __jit_unused_properties__ = ["graph"]
454
+
455
+ @property
456
+ def graph(self) -> Graph:
457
+ """
458
+ Return the ``Graph`` underlying this ``GraphModule``
459
+ """
460
+ return self._graph
461
+
462
+ @graph.setter
463
+ def graph(self, g: Graph) -> None:
464
+ """
465
+ Set the underlying ``Graph`` for this ``GraphModule``. This will internally
466
+ recompile the ``GraphModule`` so that the generated ``forward()`` function
467
+ corresponds to ``g``
468
+ """
469
+ assert isinstance(g, Graph), f"Expected a Graph instance, but got {type(g)}"
470
+ self._graph = g
471
+ g.owning_module = self
472
+ self.recompile()
473
+
474
+ @compatibility(is_backward_compatible=False)
475
+ def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"):
476
+ """Dumps out module to ``folder`` with ``module_name`` so that it can be
477
+ imported with ``from <folder> import <module_name>``
478
+
479
+ Args:
480
+
481
+ folder (Union[str, os.PathLike]): The folder to write the code out to
482
+
483
+ module_name (str): Top-level name to use for the ``Module`` while
484
+ writing out the code
485
+ """
486
+ folder = Path(folder)
487
+ Path(folder).mkdir(exist_ok=True)
488
+ torch.save(self.state_dict(), folder / "state_dict.pt")
489
+ tab = " " * 4
490
+ custom_builtins = "\n".join([v.import_str for v in _custom_builtins.values()])
491
+ model_str = f"""
492
+ import torch
493
+ {custom_builtins}
494
+
495
+ from torch.nn import *
496
+ class {module_name}(torch.nn.Module):
497
+ def __init__(self):
498
+ super().__init__()
499
+ """
500
+
501
+ def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
502
+ safe_reprs = [
503
+ nn.Linear,
504
+ nn.Conv1d,
505
+ nn.Conv2d,
506
+ nn.Conv3d,
507
+ nn.BatchNorm1d,
508
+ nn.BatchNorm2d,
509
+ nn.BatchNorm3d,
510
+ ]
511
+ if type(module) in safe_reprs:
512
+ return f"{module.__repr__()}"
513
+ else:
514
+ return None
515
+
516
+ blobified_modules = []
517
+ for module_name, module in self.named_children():
518
+ module_str = _gen_model_repr(module_name, module)
519
+ if module_str is None:
520
+ module_file = folder / f"{module_name}.pt"
521
+ torch.save(module, module_file)
522
+ blobified_modules.append(module_name)
523
+ module_repr = module.__repr__().replace("\r", " ").replace("\n", " ")
524
+ module_str = f"torch.load(r'{module_file}') # {module_repr}"
525
+ model_str += f"{tab*2}self.{module_name} = {module_str}\n"
526
+
527
+ for buffer_name, buffer in self._buffers.items():
528
+ if buffer is None:
529
+ continue
530
+ model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"
531
+
532
+ for param_name, param in self._parameters.items():
533
+ if param is None:
534
+ continue
535
+ model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"
536
+
537
+ model_str += (
538
+ f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
539
+ )
540
+ model_str += f"{_addindent(self.code, 4)}\n"
541
+
542
+ module_file = folder / "module.py"
543
+ module_file.write_text(model_str)
544
+
545
+ init_file = folder / "__init__.py"
546
+ init_file.write_text("from .module import *")
547
+
548
+ if len(blobified_modules) > 0:
549
+ warnings.warn(
550
+ "Was not able to save the following children modules as reprs -"
551
+ f"saved as pickled files instead: {blobified_modules}"
552
+ )
553
+
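+ # Illustrative usage sketch (not part of the original source). Assuming ``gm``
+ # is an existing GraphModule, dumping it to a folder produces an importable
+ # package whose top-level class re-creates the module:
+ #
+ #     gm.to_folder("exported_module", module_name="ExportedModule")
+ #     # later, from a directory containing exported_module/:
+ #     from exported_module import ExportedModule
+ #     rebuilt = ExportedModule()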
554
+ @compatibility(is_backward_compatible=True)
555
+ def add_submodule(self, target: str, m: torch.nn.Module) -> bool:
556
+ """
557
+ Adds the given submodule to ``self``.
558
+
559
+ This installs empty Modules where none exist yet if they are
560
+ subpaths of ``target``.
561
+
562
+ Args:
563
+ target: The fully-qualified string name of the new submodule
564
+ (See example in ``nn.Module.get_submodule`` for how to
565
+ specify a fully-qualified string.)
566
+ m: The submodule itself; the actual object we want to
567
+ install in the current Module
568
+
569
+ Return:
570
+ bool: Whether or not the submodule could be inserted. For
571
+ this method to return True, each object in the chain
572
+ denoted by ``target`` must either a) not exist yet,
573
+ or b) reference an ``nn.Module`` (not a parameter or
574
+ other attribute)
575
+ """
576
+ *prefix, field = target.split(".")
577
+ mod: torch.nn.Module = self
578
+
579
+ for item in prefix:
580
+
581
+ submod = getattr(mod, item, None)
582
+
583
+ if submod is None:
584
+ submod = torch.nn.Module()
585
+ setattr(mod, item, submod)
586
+
587
+ if not isinstance(submod, torch.nn.Module):
588
+ return False
589
+
590
+ mod = submod
591
+
592
+ mod.add_module(field, m)
593
+ return True
594
+
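+ # Illustrative sketch (not part of the original source): installing a submodule
+ # at a dotted path creates any missing intermediate Modules along the way.
+ # ``gm`` is assumed to be an existing GraphModule:
+ #
+ #     ok = gm.add_submodule("backbone.head.linear", torch.nn.Linear(16, 4))
+ #     assert ok
+ #     assert isinstance(gm.get_submodule("backbone.head.linear"), torch.nn.Linear)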
595
+ @compatibility(is_backward_compatible=True)
596
+ def delete_submodule(self, target: str) -> bool:
597
+ """
598
+ Deletes the given submodule from ``self``.
599
+
600
+ The module will not be deleted if ``target`` is not a valid
601
+ target.
602
+
603
+ Args:
604
+ target: The fully-qualified string name of the new submodule
605
+ (See example in ``nn.Module.get_submodule`` for how to
606
+ specify a fully-qualified string.)
607
+
608
+ Returns:
609
+ bool: Whether or not the target string referenced a
610
+ submodule we want to delete. A return value of ``False``
611
+ means that the ``target`` was not a valid reference to
612
+ a submodule.
613
+ """
614
+ atoms = target.split(".")
615
+ path, target_submod = atoms[:-1], atoms[-1]
616
+ mod: torch.nn.Module = self
617
+
618
+ # Get the parent module
619
+ for item in path:
620
+
621
+ if not hasattr(mod, item):
622
+ return False
623
+
624
+ mod = getattr(mod, item)
625
+
626
+ if not isinstance(mod, torch.nn.Module):
627
+ return False
628
+
629
+ if not hasattr(mod, target_submod):
630
+ return False
631
+
632
+ if not isinstance(getattr(mod, target_submod), torch.nn.Module):
633
+ return False
634
+
635
+ delattr(mod, target_submod)
636
+ return True
637
+
638
+ @compatibility(is_backward_compatible=True)
639
+ def delete_all_unused_submodules(self) -> None:
640
+ """
641
+ Deletes all unused submodules from ``self``.
642
+
643
+ A Module is considered "used" if any one of the following is
644
+ true:
645
+ 1. It has children that are used
646
+ 2. Its forward is called directly via a ``call_module`` node
647
+ 3. It has a non-Module attribute that is used from a
648
+ ``get_attr`` node
649
+
650
+ This method can be called to clean up an ``nn.Module`` without
651
+ manually calling ``delete_submodule`` on each unused submodule.
652
+ """
653
+ used: List[str] = []
654
+
655
+ for node in self.graph.nodes:
656
+
657
+ if node.op == "call_module" or node.op == "get_attr":
658
+
659
+ # A list of strings representing the different parts
660
+ # of the path. For example, `foo.bar.baz` gives us
661
+ # ["foo", "bar", "baz"]
662
+ fullpath = node.target.split(".")
663
+
664
+ # If we're looking at multiple parts of a path,
665
+ # join them with a dot. Otherwise, return that single
666
+ # element without doing anything to it.
667
+ def join_fn(x: str, y: str) -> str:
668
+ return ".".join([x, y] if y else [x])
669
+
670
+ # Progressively collect all the names of intermediate
671
+ # modules. For example, if we have the target
672
+ # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and
673
+ # `foo.bar.baz` to the list.
674
+ used.extend(itertools.accumulate(fullpath, join_fn))
675
+
676
+ # For a `call_module` node, also register all recursive submodules
677
+ # as used
678
+ if node.op == "call_module":
679
+ try:
680
+ submod = self.get_submodule(node.target)
681
+
682
+ for submod_name, _ in submod.named_modules():
683
+ if submod_name != "":
684
+ used.append(".".join([node.target, submod_name]))
685
+ except AttributeError:
686
+ # Node referenced nonexistent submodule, don't need to
687
+ # worry about GCing anything
688
+ pass
689
+
690
+ to_delete = [name for name, _ in self.named_modules() if name not in used]
691
+
692
+ for name in to_delete:
693
+ self.delete_submodule(name)
694
+
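+ # Illustrative sketch (not part of the original source): the intermediate path
+ # names above are built with ``itertools.accumulate``, e.g. for a target
+ # "foo.bar.baz":
+ #
+ #     import itertools
+ #     parts = "foo.bar.baz".split(".")
+ #     list(itertools.accumulate(parts, lambda x, y: f"{x}.{y}"))
+ #     # -> ['foo', 'foo.bar', 'foo.bar.baz']
+ #
+ # so "foo" and "foo.bar" are marked as used along with the leaf module.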
695
+ @property
696
+ def code(self) -> str:
697
+ """
698
+ Return the Python code generated from the ``Graph`` underlying this
699
+ ``GraphModule``.
700
+ """
701
+ if not hasattr(self, "_code"):
702
+ raise RuntimeError(
703
+ "Code has not been generated! Please report a bug to PyTorch"
704
+ )
705
+ return self._code
706
+
707
+ @compatibility(is_backward_compatible=True)
708
+ def recompile(self) -> PythonCode:
709
+ """
710
+ Recompile this GraphModule from its ``graph`` attribute. This should be
711
+ called after editing the contained ``graph``, otherwise the generated
712
+ code of this ``GraphModule`` will be out of date.
713
+ """
714
+ if isinstance(self._graph._codegen, _PyTreeCodeGen):
715
+ self._in_spec = self._graph._codegen.pytree_info.in_spec
716
+ self._out_spec = self._graph._codegen.pytree_info.out_spec
717
+ python_code = self._graph.python_code(root_module="self")
718
+ self._code = python_code.src
719
+ self._lineno_map = python_code._lineno_map
720
+
721
+ cls = type(self)
722
+ co_fields = self._graph._co_fields if hasattr(self._graph, "_co_fields") else {}
723
+ cls.forward = _forward_from_src(self._code, python_code.globals, co_fields)
724
+
725
+ # Determine whether this class explicitly defines a __call__ implementation
726
+ # to wrap. If it does, save it in order to have wrapped_call invoke it.
727
+ # If it does not, wrapped_call can use a dynamic call to super() instead.
728
+ # In most cases, super().__call__ should be torch.nn.Module.__call__.
729
+ # We do not want to hold a reference to Module.__call__ here; doing so will
730
+ # bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
731
+ cls_call = cls.__call__ if "__call__" in vars(cls) else None
732
+
733
+ if "_wrapped_call" not in vars(cls):
734
+ cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined]
735
+
736
+ def call_wrapped(self, *args, **kwargs):
737
+ return self._wrapped_call(self, *args, **kwargs)
738
+
739
+ cls.__call__ = call_wrapped # type: ignore[method-assign]
740
+
741
+ return python_code
742
+
743
+ # Passing Tracer as argument allows subclasses extending fx.GraphModule
744
+ # to define their own Tracer (extending fx.Tracer).
745
+ def __reduce_deploy__(self, importer: Importer):
746
+ dict_without_graph = self.__dict__.copy()
747
+ dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
748
+ del dict_without_graph["_graph"]
749
+
750
+ python_code = self.recompile()
751
+ import_block = _format_import_block(python_code.globals, importer)
752
+ return (reduce_deploy_graph_module, (dict_without_graph, import_block))
753
+
754
+ def __reduce_package__(self, exporter: PackageExporter):
755
+ dict_without_graph = self.__dict__.copy()
756
+ dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
757
+ del dict_without_graph["_graph"]
758
+
759
+ generated_module_name = f"fx-generated._{exporter.get_unique_id()}"
760
+ python_code = self.recompile()
761
+ import_block = _format_import_block(python_code.globals, exporter.importer)
762
+ module_code = import_block + self.code
763
+ exporter.save_source_string(generated_module_name, module_code)
764
+ return (
765
+ reduce_package_graph_module,
766
+ (dict_without_graph, generated_module_name),
767
+ )
768
+
769
+ def __reduce__(self):
770
+ """
771
+ Serialization of GraphModule. We serialize only the generated code, not
772
+ the underlying ``Graph``. This is because ``Graph`` does not have on-disk
773
+ backward-compatibility guarantees, whereas Python source code does.
774
+ On the deserialization side, we symbolically trace through the generated
775
+ code to regenerate the underlying ``Graph``
776
+ """
777
+ dict_without_graph = self.__dict__.copy()
778
+
779
+ python_code = self.recompile()
780
+ import_block = _format_import_block(python_code.globals, sys_importer)
781
+ del dict_without_graph["_graph"]
782
+ return (reduce_graph_module, (dict_without_graph, import_block))
783
+
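+ # Illustrative sketch (not part of the original source): because __reduce__
+ # stores generated source rather than the Graph itself, a pickled GraphModule
+ # is symbolically re-traced on load to rebuild its graph, as described in the
+ # docstring above. ``gm`` is assumed to be an existing GraphModule:
+ #
+ #     import pickle
+ #     restored = pickle.loads(pickle.dumps(gm))
+ #     assert isinstance(restored, torch.fx.GraphModule)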
784
+ def _deepcopy_init(self):
785
+ return GraphModule.__init__
786
+
787
+ # because __reduce__ is defined for serialization,
788
+ # we need to define deepcopy otherwise it will call __reduce__
789
+ # and cause symbolic tracing to occur every time we try to copy the object
790
+ def __deepcopy__(self, memo):
791
+ res = type(self).__new__(type(self))
792
+ memo[id(self)] = res
793
+ fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo))
794
+ self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["_graph"])
795
+ # Hooks are lost during `GraphModule.__init__`, so we need to copy them over
+ # explicitly. Note that right now we only copy state_dict-related hooks; to
+ # reduce BC-related issues, forward/backward-related hooks can be copied in
+ # the future as well if needed.
799
+ extra_preserved_attrs = [
800
+ "_state_dict_hooks",
801
+ "_load_state_dict_pre_hooks",
802
+ "_load_state_dict_post_hooks",
803
+ "_replace_hook",
804
+ ]
805
+ for attr in extra_preserved_attrs:
806
+ if attr in self.__dict__:
807
+ setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo))
808
+ res.meta = copy.deepcopy(getattr(self, "meta", {}), memo)
809
+ if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
810
+ for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
811
+ setattr(res, attr_name, attr)
812
+ return res
813
+
814
+ def __copy__(self):
815
+ from ._lazy_graph_module import _make_graph_module
816
+ res = _make_graph_module(self, self.graph)
817
+ res.meta = getattr(self, "meta", {})
818
+ return res
819
+
820
+ @compatibility(is_backward_compatible=False)
821
+ def print_readable(self, print_output=True):
822
+ """
823
+ Return the Python code generated for the current GraphModule and its children GraphModules.
824
+ """
825
+ verbose_python_code = self._graph.python_code(root_module="self", verbose=True)
826
+ module_code = verbose_python_code.src
827
+ module_code = module_code.lstrip("\n")
828
+ module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code
829
+ module_code = _addindent(module_code, 4)
830
+
831
+ submodule_code_list = [""]
832
+ for submodule in self.children():
833
+ if isinstance(submodule, GraphModule):
834
+ submodule_code_list.append(submodule.print_readable(print_output=False))
835
+ submodule_code = "\n".join(submodule_code_list)
836
+ submodule_code = _addindent(submodule_code, 4)
837
+
838
+ output = module_code + submodule_code
839
+ if print_output:
840
+ print(module_code + submodule_code)
841
+ return output
842
+
843
+ def __str__(self) -> str:
844
+ orig_str = super().__str__()
845
+ print_readable_reminder = (
846
+ "# To see more debug info, please use `graph_module.print_readable()`"
847
+ )
848
+ return "\n".join([orig_str, self._code, print_readable_reminder])
849
+
850
+ def _replicate_for_data_parallel(self):
851
+ new_gm = self.__copy__()
852
+ new_gm._is_replica = True
853
+ return new_gm
854
+
855
+ @contextlib.contextmanager
856
+ def _set_replace_hook(self, f):
857
+ """
858
+ Takes a callable which will be called every time we replace a node
+ with a new node, or change a node's name. The callable takes three
+ arguments: the old node being changed, the name of the new node, and
+ the user node which consumes the old node being replaced.
862
+ """
863
+ assert callable(f), "Replace hook must be a callable."
864
+ prev, self._replace_hook = self._replace_hook, f
865
+ try:
866
+ yield
867
+ finally:
868
+ self._replace_hook = prev
869
+
870
+
871
+ # workarounds for issues in __torch_function__
872
+
873
+ # WAR for __torch_function__ not handling tensor lists,
874
+ # fix is in https://github.com/pytorch/pytorch/pull/34725
875
+ # orig_cat = torch.cat
876
+ # def patched_cat(*args, **kwargs):
877
+ # tensors = args[0]
878
+ # for t in tensors:
879
+ # if isinstance(t, Proxy):
880
+ # return t.__torch_function__(patched_cat, (), args, kwargs)
881
+ # return orig_cat(*args, **kwargs)
882
+ # patched_cat.__module__ = 'torch'
883
+ # patched_cat.__name__ = 'cat'
884
+ # torch.cat = patched_cat
venv/lib/python3.10/site-packages/torch/fx/immutable_collections.py ADDED
@@ -0,0 +1,112 @@
1
+ from typing import Any, Dict, Iterable, List, Tuple
2
+
3
+ from torch.utils._pytree import (
4
+ _dict_flatten,
5
+ _dict_flatten_with_keys,
6
+ _dict_unflatten,
7
+ _list_flatten,
8
+ _list_flatten_with_keys,
9
+ _list_unflatten,
10
+ Context,
11
+ register_pytree_node,
12
+ )
13
+
14
+ from ._compatibility import compatibility
15
+
16
+
17
+ __all__ = ["immutable_list", "immutable_dict"]
18
+
19
+ _help_mutation = """\
20
+ If you are attempting to modify the kwargs or args of a torch.fx.Node object,
21
+ instead create a new copy of it and assign the copy to the node:
22
+ new_args = ... # copy and mutate args
23
+ node.args = new_args
24
+ """
25
+
26
+
27
+ def _no_mutation(self, *args, **kwargs):
28
+ raise NotImplementedError(
29
+ f"'{type(self).__name__}' object does not support mutation. {_help_mutation}",
30
+ )
31
+
32
+
33
+ def _create_immutable_container(base, mutable_functions):
34
+ container = type("immutable_" + base.__name__, (base,), {})
35
+ for attr in mutable_functions:
36
+ setattr(container, attr, _no_mutation)
37
+ return container
38
+
39
+
40
+ immutable_list = _create_immutable_container(
41
+ list,
42
+ [
43
+ "__delitem__",
44
+ "__iadd__",
45
+ "__imul__",
46
+ "__setitem__",
47
+ "append",
48
+ "clear",
49
+ "extend",
50
+ "insert",
51
+ "pop",
52
+ "remove",
53
+ ],
54
+ )
55
+ immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),))
56
+ immutable_list.__hash__ = lambda self: hash(tuple(self))
57
+
58
+ compatibility(is_backward_compatible=True)(immutable_list)
59
+
60
+ immutable_dict = _create_immutable_container(
61
+ dict,
62
+ [
63
+ "__delitem__",
64
+ "__setitem__",
65
+ "clear",
66
+ "pop",
67
+ "popitem",
68
+ "update",
69
+ ],
70
+ )
71
+ immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),))
72
+ immutable_dict.__hash__ = lambda self: hash(tuple(self.items()))
73
+ compatibility(is_backward_compatible=True)(immutable_dict)
74
+
75
+
76
+ # Register immutable collections for PyTree operations
77
+ def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
78
+ return _dict_flatten(d)
79
+
80
+
81
+ def _immutable_dict_unflatten(
82
+ values: Iterable[Any],
83
+ context: Context,
84
+ ) -> Dict[Any, Any]:
85
+ return immutable_dict(_dict_unflatten(values, context))
86
+
87
+
88
+ def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
89
+ return _list_flatten(d)
90
+
91
+
92
+ def _immutable_list_unflatten(
93
+ values: Iterable[Any],
94
+ context: Context,
95
+ ) -> List[Any]:
96
+ return immutable_list(_list_unflatten(values, context))
97
+
98
+
99
+ register_pytree_node(
100
+ immutable_dict,
101
+ _immutable_dict_flatten,
102
+ _immutable_dict_unflatten,
103
+ serialized_type_name="torch.fx.immutable_collections.immutable_dict",
104
+ flatten_with_keys_fn=_dict_flatten_with_keys,
105
+ )
106
+ register_pytree_node(
107
+ immutable_list,
108
+ _immutable_list_flatten,
109
+ _immutable_list_unflatten,
110
+ serialized_type_name="torch.fx.immutable_collections.immutable_list",
111
+ flatten_with_keys_fn=_list_flatten_with_keys,
112
+ )
venv/lib/python3.10/site-packages/torch/fx/interpreter.py ADDED
@@ -0,0 +1,512 @@
1
+ from .graph_module import GraphModule
2
+ from ._lazy_graph_module import _make_graph_module
3
+ from .graph import Graph
4
+ from .node import Argument, Node, Target, map_arg, map_aggregate
5
+ from .proxy import Proxy
6
+ from ._symbolic_trace import Tracer
7
+ from ._compatibility import compatibility
8
+ from . import config
9
+ import torch.fx.traceback as fx_traceback
10
+ import torch
11
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
12
+ import inspect
13
+ from contextlib import contextmanager
14
+ from torch.hub import tqdm
15
+
16
+ __all__ = ['Interpreter', 'Transformer']
17
+
18
+ @compatibility(is_backward_compatible=True)
19
+ class Interpreter:
20
+ """
21
+ An Interpreter executes an FX graph Node-by-Node. This pattern
22
+ can be useful for many things, including writing code
23
+ transformations as well as analysis passes.
24
+
25
+ Methods in the Interpreter class can be overridden to customize
26
+ the behavior of execution. The map of overrideable methods
27
+ in terms of call hierarchy::
28
+
29
+ run()
30
+ +-- run_node
31
+ +-- placeholder()
32
+ +-- get_attr()
33
+ +-- call_function()
34
+ +-- call_method()
35
+ +-- call_module()
36
+ +-- output()
37
+
38
+ Example:
39
+
40
+ Suppose we want to swap all instances of ``torch.neg`` with
41
+ ``torch.sigmoid`` and vice versa (including their ``Tensor``
42
+ method equivalents). We could subclass Interpreter like so::
43
+
44
+ class NegSigmSwapInterpreter(Interpreter):
45
+ def call_function(self, target : Target,
46
+ args : Tuple, kwargs : Dict) -> Any:
47
+ if target == torch.sigmoid:
48
+ return torch.neg(*args, **kwargs)
49
+ return super().call_function(target, args, kwargs)
50
+
51
+ def call_method(self, target : Target,
52
+ args : Tuple, kwargs : Dict) -> Any:
53
+ if target == 'neg':
54
+ call_self, *args_tail = args
55
+ return call_self.sigmoid(*args_tail, **kwargs)
56
+ return super().call_method(target, args, kwargs)
57
+
58
+ def fn(x):
59
+ return torch.sigmoid(x).neg()
60
+
61
+ gm = torch.fx.symbolic_trace(fn)
62
+ input = torch.randn(3, 4)
63
+ result = NegSigmSwapInterpreter(gm).run(input)
64
+ torch.testing.assert_close(result, torch.neg(input).sigmoid())
65
+
66
+ Args:
67
+ module (torch.nn.Module): The module to be executed
68
+ garbage_collect_values (bool): Whether to delete values after their last
69
+ use within the Module's execution. This ensures optimal memory usage during
70
+ execution. This can be disabled to, for example, examine all of the intermediate
71
+ values in the execution by looking at the ``Interpreter.env`` attribute.
72
+ graph (Optional[Graph]): If passed, the interpreter will execute this
73
+ graph instead of `module.graph`, using the provided `module`
74
+ argument to satisfy any requests for state.
75
+ """
76
+ @compatibility(is_backward_compatible=True)
77
+ def __init__(self, module: torch.nn.Module, garbage_collect_values: bool = True, graph: Optional[Graph] = None):
78
+ self.module = module
79
+ self.submodules = dict(self.module.named_modules())
80
+ if graph is not None:
81
+ self.graph = graph
82
+ else:
83
+ self.graph = self.module.graph
84
+ self.env : Dict[Node, Any] = {}
85
+ self.name = "Interpreter"
86
+ self.garbage_collect_values = garbage_collect_values
87
+ self.extra_traceback = True
88
+
89
+ if self.garbage_collect_values:
90
+ # Run through reverse nodes and record the first instance of a use
91
+ # of a given node. This represents the *last* use of the node in the
92
+ # execution order of the program, which we will use to free unused
93
+ # values
94
+ node_to_last_use : Dict[Node, Node] = {}
95
+ self.user_to_last_uses : Dict[Node, List[Node]] = {}
96
+
97
+ def register_last_uses(n : Node, user : Node):
98
+ if n not in node_to_last_use:
99
+ node_to_last_use[n] = user
100
+ self.user_to_last_uses.setdefault(user, []).append(n)
101
+
102
+ for node in reversed(self.graph.nodes):
103
+ map_arg(node.args, lambda n: register_last_uses(n, node))
104
+ map_arg(node.kwargs, lambda n: register_last_uses(n, node))
105
+
106
+ @compatibility(is_backward_compatible=True)
107
+ def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any:
108
+ """
109
+ Run `module` via interpretation and return the result.
110
+
111
+ Args:
112
+ *args: The arguments to the Module to run, in positional order
113
+ initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
114
+ This is a dict mapping `Node` to any value. This can be used, for example, to
115
+ pre-populate results for certain `Nodes` so as to do only partial evaluation within
116
+ the interpreter.
117
+ enable_io_processing (bool): If true, we process the inputs and outputs with graph's process_inputs and
118
+ process_outputs function first before using them.
119
+
120
+ Returns:
121
+ Any: The value returned from executing the Module
122
+ """
123
+ self.env = initial_env if initial_env is not None else {}
124
+
125
+ # Positional function args are consumed left-to-right by
126
+ # `placeholder` nodes. Use an iterator to keep track of
127
+ # position and extract those values.
128
+ if enable_io_processing:
129
+ args = self.graph.process_inputs(*args)
130
+ self.args_iter : Iterator[Any] = iter(args)
131
+ pbar = tqdm(total=len(self.graph.nodes),
132
+ desc=f"{self.name}: {str(list(self.graph.nodes)) if config.verbose_progress else ''}",
133
+ initial=0, position=0, leave=True, disable=config.disable_progress, delay=0)
134
+
135
+ for node in self.graph.nodes:
136
+ pbar.update(1)
137
+ if node in self.env:
138
+ # Short circuit if we have this value. This could
139
+ # be used, for example, for partial evaluation
140
+ # where the caller has pre-populated `env` with
141
+ # values for a subset of the program.
142
+ continue
143
+
144
+ try:
145
+ self.env[node] = self.run_node(node)
146
+ except Exception as e:
147
+ if self.extra_traceback:
148
+ msg = f"While executing {node.format_node()}"
149
+ msg = f'{e.args[0]}\n\n{msg}' if e.args else str(msg)
150
+ msg += f"\nOriginal traceback:\n{node.stack_trace}"
151
+ e.args = (msg,) + e.args[1:]
152
+ if isinstance(e, KeyError):
153
+ raise RuntimeError(*e.args) from e
154
+ raise
155
+
156
+ if self.garbage_collect_values:
157
+ for to_delete in self.user_to_last_uses.get(node, []):
158
+ del self.env[to_delete]
159
+
160
+ if node.op == 'output':
161
+ output_val = self.env[node]
162
+ return self.graph.process_outputs(output_val) if enable_io_processing else output_val
163
+
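+ # Illustrative sketch (not part of the original source): pre-populating
+ # ``initial_env`` lets the interpreter skip nodes whose values are already
+ # known, enabling partial evaluation. ``gm`` and ``some_node`` (a Node taken
+ # from ``gm.graph``) are assumed:
+ #
+ #     interp = Interpreter(gm)
+ #     cached = {some_node: torch.zeros(3, 4)}
+ #     out = interp.run(torch.randn(3, 4), initial_env=cached)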
164
+ @compatibility(is_backward_compatible=True)
165
+ def boxed_run(self, args_list):
166
+ """
167
+ Run `module` via interpretation and return the result. This uses the "boxed"
168
+ calling convention, where you pass a list of arguments, which will be cleared
169
+ by the interpreter. This ensures that input tensors are promptly deallocated.
170
+ """
171
+ args_iter = iter(args_list)
172
+ env = {}
173
+ for n in self.graph.nodes:
174
+ if n.op == "placeholder":
175
+ env[n] = next(args_iter)
176
+ args_list.clear()
177
+ return self.run(initial_env=env)
178
+
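+ # Illustrative sketch (not part of the original source): the boxed convention
+ # takes a list of inputs and clears it, dropping the caller's references so
+ # input tensors can be freed early. ``gm`` is assumed:
+ #
+ #     inputs = [torch.randn(3, 4)]
+ #     out = Interpreter(gm).boxed_run(inputs)
+ #     assert inputs == []   # the argument list was consumed and cleared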
179
+ @contextmanager
180
+ def _set_current_node(self, node):
181
+ with fx_traceback.set_current_meta(node):
182
+ yield
183
+
184
+ @compatibility(is_backward_compatible=True)
185
+ def run_node(self, n : Node) -> Any:
186
+ """
187
+ Run a specific node ``n`` and return the result.
188
+ Calls into placeholder, get_attr, call_function,
189
+ call_method, call_module, or output depending
190
+ on ``node.op``
191
+
192
+ Args:
193
+ n (Node): The Node to execute
194
+
195
+ Returns:
196
+ Any: The result of executing ``n``
197
+ """
198
+ with self._set_current_node(n):
199
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
200
+ assert isinstance(args, tuple)
201
+ assert isinstance(kwargs, dict)
202
+ return getattr(self, n.op)(n.target, args, kwargs)
203
+
204
+ # Main Node running APIs
205
+ @compatibility(is_backward_compatible=True)
206
+ def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
207
+ """
208
+ Execute a ``placeholder`` node. Note that this is stateful:
209
+ ``Interpreter`` maintains an internal iterator over
210
+ arguments passed to ``run`` and this method returns
211
+ next() on that iterator.
212
+
213
+ Args:
214
+ target (Target): The call target for this node. See
215
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
216
+ details on semantics
217
+ args (Tuple): Tuple of positional args for this invocation
218
+ kwargs (Dict): Dict of keyword arguments for this invocation
219
+
220
+ Returns:
221
+ Any: The argument value that was retrieved.
222
+ """
223
+ assert isinstance(target, str)
224
+ if target.startswith('*'):
225
+ # For a starred parameter e.g. `*args`, retrieve all
226
+ # remaining values from the args list.
227
+ return list(self.args_iter)
228
+ else:
229
+ try:
230
+ return next(self.args_iter)
231
+ except StopIteration as si:
232
+ if len(args) > 0:
233
+ return args[0]
234
+ else:
235
+ raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si
236
+
237
+ @compatibility(is_backward_compatible=True)
238
+ def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
239
+ """
240
+ Execute a ``get_attr`` node. Will retrieve an attribute
241
+ value from the ``Module`` hierarchy of ``self.module``.
242
+
243
+ Args:
244
+ target (Target): The call target for this node. See
245
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
246
+ details on semantics
247
+ args (Tuple): Tuple of positional args for this invocation
248
+ kwargs (Dict): Dict of keyword arguments for this invocation
249
+
250
+ Return:
251
+ Any: The value of the attribute that was retrieved
252
+ """
253
+ assert isinstance(target, str)
254
+ return self.fetch_attr(target)
255
+
256
+ @compatibility(is_backward_compatible=True)
257
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
258
+ """
259
+ Execute a ``call_function`` node and return the result.
260
+
261
+ Args:
262
+ target (Target): The call target for this node. See
263
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
264
+ details on semantics
265
+ args (Tuple): Tuple of positional args for this invocation
266
+ kwargs (Dict): Dict of keyword arguments for this invocation
267
+
268
+ Return:
269
+ Any: The value returned by the function invocation
270
+ """
271
+ assert not isinstance(target, str)
272
+
273
+ # Execute the function and return the result
274
+ return target(*args, **kwargs)
275
+
276
+ @compatibility(is_backward_compatible=True)
277
+ def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
278
+ """
279
+ Execute a ``call_method`` node and return the result.
280
+
281
+ Args:
282
+ target (Target): The call target for this node. See
283
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
284
+ details on semantics
285
+ args (Tuple): Tuple of positional args for this invocation
286
+ kwargs (Dict): Dict of keyword arguments for this invocation
287
+
288
+ Return:
289
+ Any: The value returned by the method invocation
290
+ """
291
+ # args[0] is the `self` object for this method call
292
+ self_obj, *args_tail = args
293
+
294
+ # Execute the method and return the result
295
+ assert isinstance(target, str)
296
+ return getattr(self_obj, target)(*args_tail, **kwargs)
297
+
298
+ @compatibility(is_backward_compatible=True)
299
+ def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
300
+ """
301
+ Execute a ``call_module`` node and return the result.
302
+
303
+ Args:
304
+ target (Target): The call target for this node. See
305
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
306
+ details on semantics
307
+ args (Tuple): Tuple of positional args for this invocation
308
+ kwargs (Dict): Dict of keyword arguments for this invocation
309
+
310
+ Return:
311
+ Any: The value returned by the module invocation
312
+ """
313
+ # Retrieve executed args and kwargs values from the environment
314
+
315
+ # Execute the method and return the result
316
+ assert isinstance(target, str)
317
+ submod = self.fetch_attr(target)
318
+
319
+ return submod(*args, **kwargs)
320
+
321
+ @compatibility(is_backward_compatible=True)
322
+ def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
323
+ """
324
+ Execute an ``output`` node. This really just retrieves
325
+ the value referenced by the ``output`` node and returns it.
326
+
327
+ Args:
328
+ target (Target): The call target for this node. See
329
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
330
+ details on semantics
331
+ args (Tuple): Tuple of positional args for this invocation
332
+ kwargs (Dict): Dict of keyword arguments for this invocation
333
+
334
+ Return:
335
+ Any: The return value referenced by the output node
336
+ """
337
+ return args[0]
338
+
339
+ # Helper methods
340
+ @compatibility(is_backward_compatible=True)
341
+ def fetch_attr(self, target : str):
342
+ """
343
+ Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
344
+
345
+ Args:
346
+ target (str): The fully-qualified name of the attribute to fetch
347
+
348
+ Return:
349
+ Any: The value of the attribute.
350
+ """
351
+ target_atoms = target.split('.')
352
+ attr_itr = self.module
353
+ for i, atom in enumerate(target_atoms):
354
+ if not hasattr(attr_itr, atom):
355
+ raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
356
+ attr_itr = getattr(attr_itr, atom)
357
+ return attr_itr
358
+
359
+ @compatibility(is_backward_compatible=True)
360
+ def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
361
+ """
362
+ Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
363
+ from the current execution environment.
364
+
365
+ Args:
366
+ n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
367
+
368
+ Return:
369
+ Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
370
+ """
371
+ args = self.map_nodes_to_values(n.args, n)
372
+ assert isinstance(args, tuple)
373
+ kwargs = self.map_nodes_to_values(n.kwargs, n)
374
+ assert isinstance(kwargs, dict)
375
+ return args, kwargs
376
+
377
+ @compatibility(is_backward_compatible=True)
378
+ def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
379
+ """
380
+ Recursively descend through ``args`` and look up the concrete value
381
+ for each ``Node`` in the current execution environment.
382
+
383
+ Args:
384
+ args (Argument): Data structure within which to look up concrete values
385
+
386
+ n (Node): Node to which ``args`` belongs. This is only used for error reporting.
387
+ """
388
+ def load_arg(n_arg : Node) -> Any:
389
+ if n_arg not in self.env:
390
+ raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
391
+ f'to diagnose such issues')
392
+ return self.env[n_arg]
393
+ return map_arg(args, load_arg)
394
+
395
+ @compatibility(is_backward_compatible=True)
396
+ class Transformer(Interpreter):
397
+ """
398
+ ``Transformer`` is a special type of interpreter that produces a
399
+ new ``Module``. It exposes a ``transform()`` method that returns
400
+ the transformed ``Module``. ``Transformer`` does not require
401
+ arguments to run, as ``Interpreter`` does. ``Transformer`` works
402
+ entirely symbolically.
403
+
404
+ Example:
405
+
406
+ Suppose we want to swap all instances of ``torch.neg`` with
407
+ ``torch.sigmoid`` and vice versa (including their ``Tensor``
408
+ method equivalents). We could subclass ``Transformer`` like so::
409
+
410
+ class NegSigmSwapXformer(Transformer):
411
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
412
+ if target == torch.sigmoid:
413
+ return torch.neg(*args, **kwargs)
414
+ return super().call_function(n)
415
+
416
+ def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
417
+ if target == 'neg':
418
+ call_self, *args_tail = args
419
+ return call_self.sigmoid(*args_tail, **kwargs)
420
+ return super().call_method(n)
421
+
422
+ def fn(x):
423
+ return torch.sigmoid(x).neg()
424
+
425
+ gm = torch.fx.symbolic_trace(fn)
426
+
427
+ transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
428
+ input = torch.randn(3, 4)
429
+ torch.testing.assert_close(transformed(input), torch.neg(input).sigmoid())
430
+
431
+ Args:
432
+ module (GraphModule): The ``Module`` to be transformed.
433
+ """
434
+
435
+ @compatibility(is_backward_compatible=True)
436
+ def __init__(self, module):
437
+ super().__init__(module)
438
+ self.new_graph = Graph()
439
+ self.new_graph.set_codegen(module.graph._codegen)
440
+
441
+ class TransformerTracer(Tracer):
442
+ def __init__(self, graph: Graph):
443
+ super().__init__()
444
+ self.graph = graph
445
+ self.tensor_attrs: Dict[torch.Tensor, str] = {} # type: ignore[assignment]
446
+
447
+ def is_leaf_module(self, _, __) -> bool:
448
+ return True
449
+
450
+ self.tracer = TransformerTracer(self.new_graph)
451
+ self.tracer.root = module
452
+
453
+ @compatibility(is_backward_compatible=True)
454
+ def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
455
+ """
456
+ Execute a ``placeholder`` node. In ``Transformer``, this is
457
+ overridden to insert a new ``placeholder`` into the output
458
+ graph.
459
+
460
+ Args:
461
+ target (Target): The call target for this node. See
462
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
463
+ details on semantics
464
+ args (Tuple): Tuple of positional args for this invocation
465
+ kwargs (Dict): Dict of keyword arguments for this invocation
466
+ """
467
+ assert isinstance(target, str)
468
+ default_value = next(iter(args)) if args else inspect.Signature.empty
469
+ return Proxy(self.new_graph.placeholder(target, default_value=default_value), self.tracer)
470
+
471
+ @compatibility(is_backward_compatible=True)
472
+ def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
473
+ """
474
+ Execute a ``get_attr`` node. In ``Transformer``, this is
475
+ overridden to insert a new ``get_attr`` node into the output
476
+ graph.
477
+
478
+ Args:
479
+ target (Target): The call target for this node. See
480
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
481
+ details on semantics
482
+ args (Tuple): Tuple of positional args for this invocation
483
+ kwargs (Dict): Dict of keyword arguments for this invocation
484
+ """
485
+ assert isinstance(target, str)
486
+ return self.tracer.create_proxy("get_attr", target, args, kwargs)
487
+
488
+ @compatibility(is_backward_compatible=True)
489
+ def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
490
+ # Override so that the leaf module policy from `self.tracer` is respected.
491
+ assert isinstance(target, str)
492
+ submod = self.fetch_attr(target)
493
+ return self.tracer.call_module(submod, submod.forward, args, kwargs)
494
+
495
+ @compatibility(is_backward_compatible=True)
496
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
497
+ # Override so that functions that were wrapped are still wrapped.
498
+ return self.tracer.create_proxy('call_function', target, args, kwargs)
499
+
500
+ @compatibility(is_backward_compatible=True)
501
+ def transform(self) -> GraphModule:
502
+ """
503
+ Transform ``self.module`` and return the transformed
504
+ ``GraphModule``.
505
+ """
506
+ with fx_traceback.preserve_node_meta():
507
+ result = super().run(enable_io_processing=False)
508
+ if result is not None:
509
+ def strip_proxy(a : Union[Argument, Proxy]) -> Any:
510
+ return a.node if isinstance(a, Proxy) else a
511
+ self.new_graph.output(map_aggregate(result, strip_proxy))
512
+ return _make_graph_module(self.module, self.new_graph)
venv/lib/python3.10/site-packages/torch/fx/node.py ADDED
@@ -0,0 +1,726 @@
1
+ # mypy: ignore-errors
2
+
3
+ # Nodes represent a definition of a value in our graph of operators.
4
+ from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set
5
+ from ._compatibility import compatibility
6
+ from .immutable_collections import immutable_dict, immutable_list
7
+ import torch
8
+ import builtins
9
+ import types
10
+ import inspect
11
+ import warnings
12
+ from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
13
+ from .._ops import ops as _ops
14
+
15
+ if TYPE_CHECKING:
16
+ from .graph import Graph
17
+
18
+ __all__ = ['Node', 'map_arg', 'map_aggregate', "has_side_effect"]
19
+
20
+ BaseArgumentTypes = Union[str, int, float, bool, complex, torch.dtype,
21
+ torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload]
22
+ base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
23
+
24
+ Target = Union[Callable[..., Any], str]
25
+
26
+ Argument = Optional[Union[
27
+ Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types
28
+ List[Any], # actually Argument
29
+ Dict[str, Any], # actually Argument
30
+ slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing
31
+ range,
32
+ 'Node',
33
+ BaseArgumentTypes
34
+ ]]
35
+
36
+ _side_effectful_need_to_be_preserved_pre_dispatch: Set[Callable] = {
37
+ torch._C._set_grad_enabled,
38
+ torch.amp._enter_autocast,
39
+ torch.amp._exit_autocast,
40
+ }
41
+
42
+ # TODO: Either refactor this into 2 functions 1 dce for functional graphs and 1 dce for all graphs,
43
+ # or add logic to correctly mark all inplace ops as side effectful.
44
+ _side_effectful_functions: Set[Callable] = {
45
+ torch._assert,
46
+ torch._assert_async,
47
+ _ops.aten._assert_async.msg,
48
+ _ops.aten._assert_scalar.default,
49
+ _ops.aten.copy_.default,
50
+ _ops.aten.index_put_.default,
51
+ _ops.aten.sym_constrain_range.default,
52
+ _ops.aten.sym_constrain_range_for_size.default,
53
+ _ops.profiler._record_function_enter,
54
+ _ops.profiler._record_function_enter_new,
55
+ _ops.profiler._record_function_exit,
56
+ _ops.inductor.accumulate_grad_.default,
57
+ _ops.inductor.resize_storage_bytes_.default,
58
+ } | _side_effectful_need_to_be_preserved_pre_dispatch
59
+
60
+
61
+ @compatibility(is_backward_compatible=False)
62
+ def has_side_effect(fn: Callable) -> None:
63
+ _side_effectful_functions.add(fn)
64
+ return fn
65
+
66
+
67
+ # this is fixed on master, WAR for 1.5
68
+ def _find_module_of_method(orig_method: Callable[..., Any]) -> str:
69
+ name = orig_method.__name__
70
+ module = orig_method.__module__
71
+ if module is not None:
72
+ return module
73
+ for guess in [torch, torch.nn.functional]:
74
+ if getattr(guess, name, None) is orig_method:
75
+ return guess.__name__
76
+ raise RuntimeError(f'cannot find module for {orig_method}')
77
+
78
+ # Borrowed from CPython typing module
79
+ # https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156
80
+ def _type_repr(obj):
81
+ """Return the repr() of an object, special-casing types (internal helper).
82
+ If obj is a type, we return a shorter version than the default
83
+ type.__repr__, based on the module and qualified name, which is
84
+ typically enough to uniquely identify a type. For everything
85
+ else, we fall back on repr(obj).
86
+ """
87
+ if isinstance(obj, type):
88
+ if obj.__module__ == 'builtins':
89
+ return obj.__qualname__
90
+ return f'{obj.__module__}.{obj.__qualname__}'
91
+ if obj is ...:
92
+ return '...'
93
+ if isinstance(obj, types.FunctionType):
94
+ return obj.__name__
95
+ return repr(obj)
96
+
97
+ def _get_qualified_name(func: Callable[..., Any]) -> str:
98
+ # things like getattr just appear in builtins
99
+ if getattr(builtins, func.__name__, None) is func:
100
+ return func.__name__
101
+ # torch.Tensor.{fn}
102
+ if (isinstance(func, (types.MethodDescriptorType, types.WrapperDescriptorType))
103
+ and func is getattr(torch.Tensor, func.__name__, None)):
104
+ return f"torch.Tensor.{func.__name__}"
105
+ name = func.__name__
106
+ if name == "<lambda>":
107
+ # For lambdas, try to get their defining name in the module
108
+ try:
109
+ name = inspect.getsource(func).split("=")[0].strip()
110
+ except Exception as e:
111
+ raise RuntimeError("Unable to represent lambda") from e
112
+ module = _find_module_of_method(func)
113
+ module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module
114
+ # Fixup segment_reduce mismatch
115
+ if module == "torch" and name == "segment_reduce":
116
+ name = "_" + name
117
+ return f'{module}.{name}'
118
+
119
+ def _format_arg(arg, max_list_len=float('inf')) -> str:
120
+ if hasattr(arg, '_custom_fx_repr_fn'):
121
+ return arg._custom_fx_repr_fn()
122
+ elif isinstance(arg, list):
123
+ items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
124
+ maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
125
+ return f'[{items}{maybe_len}]'
126
+ elif isinstance(arg, tuple):
127
+ items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
128
+ maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
129
+ maybe_comma = ',' if len(arg) == 1 else ''
130
+ return f'({items}{maybe_comma}{maybe_len})'
131
+ elif isinstance(arg, dict):
132
+ items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items())
133
+ return f'{{{items_str}}}'
134
+
135
+ if isinstance(arg, Node):
136
+ return '%' + str(arg)
137
+ else:
138
+ return str(arg)
139
+
140
+ @compatibility(is_backward_compatible=True)
141
+ class Node:
142
+ """
143
+ ``Node`` is the data structure that represents individual operations within
144
+ a ``Graph``. For the most part, Nodes represent callsites to various entities,
145
+ such as operators, methods, and Modules (some exceptions include nodes that
146
+ specify function inputs and outputs). Each ``Node`` has a function specified
147
+ by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows:
148
+
149
+ - ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on.
150
+ ``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument
151
+ denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to
152
+ the function parameters (e.g. ``x``) in the graph printout.
153
+ - ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the
154
+ fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy.
155
+ ``args`` and ``kwargs`` are don't-care
156
+ - ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign
157
+ to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function,
158
+ following the Python calling convention
159
+ - ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is
160
+ as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call.
161
+ ``args`` and ``kwargs`` represent the arguments to invoke the module on, *excluding the self argument*.
162
+ - ``call_method`` calls a method on a value. ``name`` is as similar. ``target`` is the string name of the method
163
+ to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the module on,
164
+ *including the self argument*
165
+ - ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement
166
+ in the Graph printout.
167
+ """
168
+
169
+ @compatibility(is_backward_compatible=True)
170
+ def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',
171
+ args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],
172
+ return_type : Optional[Any] = None) -> None:
173
+ """
174
+ Instantiate an instance of ``Node``. Note: most often, you want to use the
175
+ Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather
176
+ than instantiating a ``Node`` directly.
177
+
178
+ Args:
179
+ graph (Graph): The ``Graph`` to which this ``Node`` should belong.
180
+
181
+ name (str): The name to which the output of this ``Node`` should be assigned
182
+
183
+ op (str): The opcode for this ``Node``. Can be one of 'placeholder',
184
+ 'call_method', 'call_module', 'call_function', 'get_attr',
185
+ 'output'
186
+
187
+ target ('Target'): The target this op should call. See the broader
188
+ ``Node`` docstring for more details.
189
+
190
+ args (Tuple['Argument']): The args to be passed to ``target``
191
+
192
+ kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``
193
+
194
+ return_type (Optional[Any]): The python type expression representing the
195
+ type of the output of this node. This field can be used for
196
+ annotation of values in the generated code or for other types
197
+ of analyses.
198
+ """
199
+ self.graph = graph
200
+ self.name = name # unique name of value being created
201
+ assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']
202
+ self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr
203
+ if op == 'call_function':
204
+ if not callable(target):
205
+ raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
206
+ 'but a Callable is expected')
207
+ else:
208
+ if not isinstance(target, str):
209
+ raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
210
+ 'but a str is expected')
211
+ self.target = target # for method/module/function, the name of the method/module/function/attr
212
+ # being invoked, e.g add, layer1, or torch.add
213
+
214
+ # All `Node`-valued inputs. Key is the Node, value is don't-care.
215
+ # The public API for this is `all_input_nodes`, this private attribute
216
+ # should not be accessed directly.
217
+ self._input_nodes : Dict[Node, None] = {}
218
+ self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type]
219
+
220
+ # All of the nodes that use the value produced by this Node
221
+ # Note one user may correspond to several uses, e.g. the node fo ``x + x``
222
+ # would appear once here, but represents two uses.
223
+ #
224
+ # Is a dict to act as an "ordered set". Keys are significant, value dont-care
225
+ self.users : Dict[Node, None] = {}
226
+ # Type expression representing the output value of this node.
227
+ # This should contain the same class of Type objects that would appear
228
+ # as type annotations for function inputs/outputs.
229
+ #
230
+ # For placeholder nodes, this value will be used to type-annotate the
231
+ # generated function parameters.
232
+ # For the return node, this value will be used to type-annotate the
233
+ # generated function return type. (Note this is a special case. ``return``
234
+ # does not produce a value, it's more of a notation. Thus, this value
235
+ # describes the type of args[0] in the ``return`` node.
236
+ self.type : Optional[Any] = return_type
237
+ self._prev = self
238
+ self._next = self
239
+ self._erased = False
240
+
241
+ # If set, use this fn to print this node
242
+ self._repr_fn : Optional[Callable[[Node], str]] = None
243
+
244
+ # Dictionary to store metadata passes need to do their
245
+ # transformations. This metadata is preserved across node copies
246
+ self.meta : Dict[str, Any] = {}
247
+
248
+ @property
249
+ def next(self) -> 'Node':
250
+ """
251
+ Returns the next ``Node`` in the linked list of Nodes.
252
+
253
+ Returns:
254
+
255
+ The next ``Node`` in the linked list of Nodes.
256
+ """
257
+ return self._next
258
+
259
+ @property
260
+ def prev(self) -> 'Node':
261
+ """
262
+ Returns the previous ``Node`` in the linked list of Nodes.
263
+
264
+ Returns:
265
+
266
+ The previous ``Node`` in the linked list of Nodes.
267
+ """
268
+ return self._prev
269
+
270
+ @compatibility(is_backward_compatible=True)
271
+ def prepend(self, x: 'Node') -> None:
272
+ """
273
+ Insert x before this node in the list of nodes in the graph. Example::
274
+
275
+ Before: p -> self
276
+ bx -> x -> ax
277
+ After: p -> x -> self
278
+ bx -> ax
279
+
280
+ Args:
281
+ x (Node): The node to put before this node. Must be a member of the same graph.
282
+ """
283
+ assert self.graph == x.graph, "Attempting to move a Node into a different Graph"
284
+ if self == x:
285
+ warnings.warn("Trying to prepend a node to itself. This behavior has no effect on the graph.")
286
+ return
287
+ x._remove_from_list()
288
+ p = self._prev
289
+ p._next, x._prev = x, p
290
+ x._next, self._prev = self, x
291
+
292
+ @compatibility(is_backward_compatible=True)
293
+ def append(self, x: 'Node') -> None:
294
+ """
295
+ Insert ``x`` after this node in the list of nodes in the graph.
296
+ Equivalent to ``self.next.prepend(x)``
297
+
298
+ Args:
299
+ x (Node): The node to put after this node. Must be a member of the same graph.
300
+ """
301
+ self._next.prepend(x)
302
+
303
+ def _remove_from_list(self):
304
+ p, n = self._prev, self._next
305
+ p._next, n._prev = n, p
306
+
307
+ @property
308
+ def args(self) -> Tuple[Argument, ...]:
309
+ """
310
+ The tuple of arguments to this ``Node``. The interpretation of arguments
311
+ depends on the node's opcode. See the :class:`Node` docstring for more
312
+ information.
313
+
314
+ Assignment to this property is allowed. All accounting of uses and users
315
+ is updated automatically on assignment.
316
+ """
317
+ return self._args
318
+
319
+ @args.setter
320
+ def args(self, a : Tuple[Argument, ...]):
321
+ """
322
+ Set the tuple of arguments to this Node. The interpretation of arguments
323
+ depends on the node's opcode. See the ``fx.Graph`` docstring for more
324
+ information.
325
+ """
326
+ # DO NOT CALL `__update_args_kwargs` directly. The correct way to
327
+ # set `args` is via direct assignment, i.e. `node.args = new_args`
328
+ self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type]
329
+
330
+ @property
331
+ def kwargs(self) -> Dict[str, Argument]:
332
+ """
333
+ The dict of keyword arguments to this ``Node``. The interpretation of arguments
334
+ depends on the node's opcode. See the :class:`Node` docstring for more
335
+ information.
336
+
337
+ Assignment to this property is allowed. All accounting of uses and users
338
+ is updated automatically on assignment.
339
+ """
340
+ return self._kwargs
341
+
342
+ @kwargs.setter
343
+ def kwargs(self, k : Dict[str, Argument]):
344
+ """
345
+ Set the dict of kwargs to this Node. The interpretation of arguments
346
+ depends on the node's opcode. See the ``fx.Graph`` docstring for more
347
+ information.
348
+ """
349
+ # DO NOT CALL `__update_args_kwargs` directly. The correct way to
350
+ # set `args` is via direct assignment, i.e. `node.kwargs = new_kwargs`
351
+ self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type]
352
+
353
+ @property
354
+ def all_input_nodes(self) -> List['Node']:
355
+ """
356
+ Return all Nodes that are inputs to this Node. This is equivalent to
357
+ iterating over ``args`` and ``kwargs`` and only collecting the values that
358
+ are Nodes.
359
+
360
+ Returns:
361
+
362
+ List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this
363
+ ``Node``, in that order.
364
+ """
365
+ return list(self._input_nodes.keys())
366
+
367
+ @compatibility(is_backward_compatible=True)
368
+ def update_arg(self, idx : int, arg : Argument) -> None:
369
+ """
370
+ Update an existing positional argument to contain the new value
371
+ ``arg``. After calling, ``self.args[idx] == arg``.
372
+
373
+ Args:
374
+
375
+ idx (int): The index into ``self.args`` of the element to update
376
+ arg (Argument): The new argument value to write into ``args``
377
+ """
378
+ args = list(self.args)
379
+ args[idx] = arg
380
+ self.args = tuple(args)
381
+
382
+ @compatibility(is_backward_compatible=True)
383
+ def insert_arg(self, idx : int, arg : Argument) -> None:
384
+ """
385
+ Insert a positional argument into the argument list at the given index.
386
+
387
+ Args:
388
+
389
+ idx (int): The index of the element in ``self.args`` to be inserted before.
390
+ arg (Argument): The new argument value to insert into ``args``
391
+ """
392
+ assert 0 <= idx <= len(self.args), "insert_args index must be between 0 and len(self.args)"
393
+ args_left = self.args[:idx]
394
+ args_right = self.args[idx:]
395
+
396
+ self._args = args_left + (arg,) + args_right
397
+
398
+ _new_input_nodes = {}
399
+ map_arg(arg, _new_input_nodes.setdefault)
400
+
401
+ for new_use in _new_input_nodes.keys():
402
+ if new_use not in self._input_nodes:
403
+ self._input_nodes.setdefault(new_use)
404
+ new_use.users.setdefault(self)
405
+
406
+ @compatibility(is_backward_compatible=True)
407
+ def update_kwarg(self, key : str, arg : Argument) -> None:
408
+ """
409
+ Update an existing keyword argument to contain the new value
410
+ ``arg``. After calling, ``self.kwargs[key] == arg``.
411
+
412
+ Args:
413
+
414
+ key (str): The key in ``self.kwargs`` of the element to update
415
+ arg (Argument): The new argument value to write into ``kwargs``
416
+ """
417
+ kwargs = dict(self.kwargs)
418
+ kwargs[key] = arg
419
+ self.kwargs = kwargs
420
+
421
+ @property
422
+ def stack_trace(self) -> Optional[str]:
423
+ """
424
+ Return the Python stack trace that was recorded during tracing, if any.
425
+ When traced with fx.Tracer, this property is usually populated by
426
+ `Tracer.create_proxy`. To record stack traces during tracing for debug purposes,
427
+ set `record_stack_traces = True` on the `Tracer` instance.
428
+ When traced with dynamo, this property will be populated by default by
429
+ `OutputGraph.create_proxy`.
430
+
431
+ stack_trace would have the innermost frame at the end of the string.
432
+ """
433
+ return self.meta.get("stack_trace", None)
434
+
435
+ @stack_trace.setter
436
+ def stack_trace(self, trace : Optional[str]):
437
+ self.meta["stack_trace"] = trace
438
+
439
+ def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']):
440
+ """
441
+ This API is internal. Do *not* call it directly.
442
+ """
443
+ self._args = new_args
444
+ self._kwargs = new_kwargs
445
+
446
+ for old_use in self._input_nodes.keys():
447
+ old_use.users.pop(self)
448
+
449
+ self._input_nodes = {}
450
+ map_arg(self._args, self._input_nodes.setdefault)
451
+ map_arg(self._kwargs, self._input_nodes.setdefault)
452
+
453
+ for new_use in self._input_nodes.keys():
454
+ new_use.users.setdefault(self)
455
+
456
+ def __repr__(self) -> str:
457
+ if self._repr_fn:
458
+ return self._repr_fn(self)
459
+ return self.name
460
+
461
+ def _pretty_print_target(self, target):
462
+ """
463
+ Make target printouts more user-friendly.
464
+ 1) builtins will be printed as `builtins.xyz`
465
+ 2) operators will be printed as `operator.xyz`
466
+ 3) other callables will be printed with qualified name, e.g. torch.add
467
+ """
468
+ if isinstance(target, str):
469
+ return target
470
+ if hasattr(target, '__module__'):
471
+ if not hasattr(target, '__name__'):
472
+ # Just to be defensive, if we don't have `__name__`, get the
473
+ # qualname. Not sure if this happens for any members of `operator`
474
+ # or `builtins`. This fallback path is not as good, since e.g.
475
+ # things in `operator` have `_operator` as their __module__.
476
+ return _get_qualified_name(target)
477
+ if target.__module__ == 'builtins':
478
+ return f'builtins.{target.__name__}'
479
+ elif target.__module__ == '_operator':
480
+ return f'operator.{target.__name__}'
481
+ return _get_qualified_name(target)
482
+
483
+ @compatibility(is_backward_compatible=True)
484
+ def format_node(self,
485
+ placeholder_names: Optional[List[str]] = None,
486
+ maybe_return_typename: Optional[List[str]] = None) -> Optional[str]:
487
+ """
488
+ Return a descriptive string representation of ``self``.
489
+
490
+ This method can be used with no arguments as a debugging
491
+ utility.
492
+
493
+ This function is also used internally in the ``__str__`` method
494
+ of ``Graph``. Together, the strings in ``placeholder_names``
495
+ and ``maybe_return_typename`` make up the signature of the
496
+ autogenerated ``forward`` function in this Graph's surrounding
497
+ GraphModule. ``placeholder_names`` and ``maybe_return_typename``
498
+ should not be used otherwise.
499
+
500
+ Args:
501
+ placeholder_names: A list that will store formatted strings
502
+ representing the placeholders in the generated
503
+ ``forward`` function. Internal use only.
504
+ maybe_return_typename: A single-element list that will store
505
+ a formatted string representing the output of the
506
+ generated ``forward`` function. Internal use only.
507
+
508
+ Returns:
509
+ str: If 1) we're using ``format_node`` as an internal helper
510
+ in the ``__str__`` method of ``Graph``, and 2) ``self``
511
+ is a placeholder Node, return ``None``. Otherwise,
512
+ return a descriptive string representation of the
513
+ current Node.
514
+ """
515
+ if self.op == 'placeholder':
516
+ assert isinstance(self.target, str)
517
+ arg_str = self.target
518
+ arg_str += f': {_type_repr(self.type)}' if self.type else ''
519
+ if placeholder_names:
520
+ placeholder_names.append(arg_str)
521
+ return None
522
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''
523
+ default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''
524
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
525
+ elif self.op == 'get_attr':
526
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
527
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
528
+ f'{self.op}[target={self._pretty_print_target(self.target)}]'
529
+ elif self.op == 'output':
530
+ if self.type and maybe_return_typename:
531
+ maybe_return_typename[0] = f' -> {_type_repr(self.type)}'
532
+ return f'return {self.args[0]}'
533
+ else:
534
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
535
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
536
+ f'{self.op}[target={self._pretty_print_target(self.target)}](' \
537
+ f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
538
+
539
+ @compatibility(is_backward_compatible=True)
540
+ def replace_all_uses_with(self,
541
+ replace_with : 'Node',
542
+ delete_user_cb: Callable[['Node'], bool] = lambda user: True,
543
+ *,
544
+ propagate_meta=False
545
+ ) -> List['Node']:
546
+ """
547
+ Replace all uses of ``self`` in the Graph with the Node ``replace_with``.
548
+
549
+ Args:
550
+
551
+ replace_with (Node): The node to replace all uses of ``self`` with.
552
+ delete_user_cb (Callable): Callback that is called to determine
553
+ whether a given user of the self node should be removed.
554
+ propagate_meta (bool): Whether or not to copy all properties
555
+ on the .meta field of the original node onto the replacement node.
556
+ For safety, this is only valid to do if the replacement node
557
+ doesn't already have an existing .meta field.
558
+
559
+ Returns:
560
+
561
+ The list of Nodes on which this change was made.
562
+ """
563
+ if propagate_meta:
564
+ assert len(replace_with.meta) == 0, \
565
+ 'Called node.replace_all_uses_with(replace_with, propagate_meta=True), ' \
566
+ 'but replace_with already has .meta keys'
567
+ for k, v in self.meta.items():
568
+ replace_with.meta[k] = v
569
+ to_process = list(self.users)
570
+ skipped = []
571
+ m = self.graph.owning_module
572
+ for use_node in to_process:
573
+ if not delete_user_cb(use_node):
574
+ skipped.append(use_node)
575
+ continue
576
+
577
+ def maybe_replace_node(n : Node) -> Node:
578
+ if n == self:
579
+ return replace_with
580
+ else:
581
+ return n
582
+
583
+ if getattr(m, "_replace_hook", None):
584
+ m._replace_hook(old=self, new=replace_with.name, user=use_node)
585
+
586
+ new_args = map_arg(use_node.args, maybe_replace_node)
587
+ new_kwargs = map_arg(use_node.kwargs, maybe_replace_node)
588
+ assert isinstance(new_args, tuple)
589
+ assert isinstance(new_kwargs, dict)
590
+ use_node.__update_args_kwargs(new_args, new_kwargs)
591
+
592
+ assert len(self.users) - len(skipped) == 0
593
+ return [n for n in to_process if n not in skipped]
594
+
595
+ @compatibility(is_backward_compatible=False)
596
+ def is_impure(self):
597
+ """
598
+ Returns whether this op is impure, i.e. if its op is a placeholder or
599
+ output, or a call_function or call_module which is impure.
600
+
601
+ Returns:
602
+
603
+ bool: If the op is impure or not.
604
+ """
605
+ if self.op in {"placeholder", "output"}:
606
+ return True
607
+
608
+ # Check if an impure function.
609
+ if self.op == "call_function":
610
+ return self.target in _side_effectful_functions
611
+
612
+ # Check if an impure module.
613
+ if self.op == "call_module":
614
+ assert (
615
+ self.graph.owning_module is not None
616
+ ), "self.graph.owning_module not set for purity check"
617
+ target_mod = self.graph.owning_module.get_submodule(self.target)
618
+ assert (
619
+ target_mod is not None
620
+ ), f"Did not find expected submodule target {self.target}"
621
+ return getattr(target_mod, "_is_impure", False)
622
+
623
+ return False
624
+
625
+ @compatibility(is_backward_compatible=False)
626
+ def normalized_arguments(
627
+ self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None,
628
+ kwarg_types : Optional[Dict[str, Any]] = None,
629
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
630
+ """
631
+ Returns normalized arguments to Python targets. This means that
632
+ `args/kwargs` will be matched up to the module/functional's
633
+ signature and return exclusively kwargs in positional order
634
+ if `normalize_to_only_use_kwargs` is true.
635
+ Also populates default values. Does not support positional-only
636
+ parameters or varargs parameters.
637
+
638
+ Supports module calls.
639
+
640
+ May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
641
+
642
+ Args:
643
+ root (torch.nn.Module): Module upon which to resolve module targets.
644
+ arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
645
+ kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
646
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
647
+
648
+ Returns:
649
+
650
+ Returns NamedTuple ArgsKwargsPair, or `None` if not successful.
651
+ """
652
+ if self.op == 'call_function':
653
+ assert callable(self.target)
654
+ return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type]
655
+ elif self.op == 'call_module':
656
+ assert isinstance(self.target, str)
657
+ return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type]
658
+
659
+ return None
660
+
661
+ @compatibility(is_backward_compatible=True)
662
+ def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
663
+ """
664
+ Loop through input nodes of ``self``, and replace all instances of
665
+ ``old_input`` with ``new_input``.
666
+
667
+ Args:
668
+
669
+ old_input (Node): The old input node to be replaced.
670
+ new_input (Node): The new input node to replace ``old_input``.
671
+ """
672
+ def maybe_replace_node(n : Node) -> Node:
673
+ return new_input if n == old_input else n
674
+
675
+ m = self.graph.owning_module
676
+ if getattr(m, "_replace_hook", None):
677
+ m._replace_hook(old=old_input, new=new_input.name, user=self)
678
+
679
+ new_args = map_arg(self.args, maybe_replace_node)
680
+ new_kwargs = map_arg(self.kwargs, maybe_replace_node)
681
+ assert isinstance(new_args, tuple)
682
+ assert isinstance(new_kwargs, dict)
683
+ self.__update_args_kwargs(new_args, new_kwargs)
684
+
685
+ def _rename(self, candidate: str):
686
+ if candidate == self.name:
687
+ return
688
+ name = self.graph._graph_namespace.create_name(candidate, None)
689
+ self.name = name
690
+ self.graph._graph_namespace._rename_object(self, name)
691
+
692
+ def __setattr__(self, name: str, value: Any) -> None:
693
+ if name == 'name' and hasattr(self, "name"):
694
+ m = self.graph.owning_module
695
+ if getattr(m, "_replace_hook", None):
696
+ assert isinstance(value, str)
697
+ for user in self.users:
698
+ m._replace_hook(old=self, new=value, user=user)
699
+ object.__setattr__(self, name, value)
700
+
701
+
702
+ @compatibility(is_backward_compatible=True)
703
+ def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument:
704
+ """
705
+ Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
706
+ """
707
+ assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable"
708
+ return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x)
709
+
710
+ @compatibility(is_backward_compatible=True)
711
+ def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument:
712
+ """
713
+ Apply fn to each leaf value appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
714
+ """
715
+ if isinstance(a, tuple):
716
+ t = tuple(map_aggregate(elem, fn) for elem in a)
717
+ # Support NamedTuple (if it has `_fields`) by repacking into original type.
718
+ return t if not hasattr(a, '_fields') else type(a)(*t)
719
+ elif isinstance(a, list):
720
+ return immutable_list(map_aggregate(elem, fn) for elem in a)
721
+ elif isinstance(a, dict):
722
+ return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items())
723
+ elif isinstance(a, slice):
724
+ return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn))
725
+ else:
726
+ return fn(a)
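Editor's note: a minimal usage sketch of the Node helpers defined in the file above (not part of the uploaded file; the tiny module `M` and the printed values are illustrative assumptions):

import operator
import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return (x + 1) * 2

gm = torch.fx.symbolic_trace(M())
# Find the `add` node and exercise the args/users bookkeeping described above.
add_node = next(n for n in gm.graph.nodes if n.target is operator.add)
print(add_node.all_input_nodes)      # [x]  (the placeholder feeding the add)
add_node.update_arg(1, 3)            # now computes x + 3; users/uses stay consistent
print(torch.fx.node.map_arg(add_node.args, lambda n: n.name))  # ('x', 3)
gm.recompile()
print(gm(torch.tensor(1.0)))         # tensor(8.), i.e. (1 + 3) * 2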
venv/lib/python3.10/site-packages/torch/fx/operator_schemas.py ADDED
@@ -0,0 +1,441 @@
1
+ import torch
2
+ import inspect
3
+ import numbers
4
+ import types
5
+ import typing
6
+ import enum
7
+ import warnings
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
9
+ from torch._jit_internal import boolean_dispatched
10
+ from ._compatibility import compatibility
11
+ from torch._ops import OpOverloadPacket, OpOverload
12
+
13
+ if TYPE_CHECKING:
14
+ from .node import Argument
15
+
16
+ __all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
17
+ "type_matches", "normalize_function", "normalize_module"]
18
+
19
+ @compatibility(is_backward_compatible=False)
20
+ class ArgsKwargsPair(NamedTuple):
21
+ """
22
+ Simple named tuple for wrapping args/kwargs pairs.
23
+ """
24
+ args: Tuple[Any, ...]
25
+ kwargs: Dict[str, Any]
26
+
27
+ _manual_overrides : Dict[Callable, List[inspect.Signature]] = {}
28
+
29
+ def _nonzero_schemas():
30
+ signatures = []
31
+
32
+ def nonzero(self):
33
+ pass
34
+ signatures.append(inspect.signature(nonzero))
35
+
36
+ def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef]
37
+ pass
38
+ signatures.append(inspect.signature(nonzero))
39
+
40
+ return signatures
41
+
42
+ _manual_overrides[torch.nonzero] = _nonzero_schemas()
43
+
44
+ class _FakeGlobalNamespace:
45
+ def __getattr__(self, name):
46
+ if name == 'torch':
47
+ return torch
48
+ raise RuntimeError('Expected a torch namespace lookup')
49
+
50
+ _type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
51
+ 'number' : numbers.Number, 'Future' : torch.jit.Future,
52
+ 'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
53
+ '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
54
+ 'Storage': torch.UntypedStorage,
55
+ 't': typing.TypeVar('t')}
56
+ for k in dir(typing):
57
+ _type_eval_globals[k] = getattr(typing, k)
58
+
59
+ def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
60
+ """
61
+ Convert a TorchScript type to a Python type (including subtypes) via
62
+ eval'ing the annotation_str. _type_eval_globals sets up expressions
63
+ like "List" and "Future" to map to actual types (typing.List and jit.Future)
64
+ """
65
+ return eval(ts_type.annotation_str, _type_eval_globals)
66
+
67
+ def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
68
+ from inspect import Parameter
69
+ parameters : List[Parameter] = []
70
+ for arg in ts_schema.arguments:
71
+ arg_type = _torchscript_type_to_python_type(arg.type)
72
+ default = arg.default_value if arg.has_default_value() else Parameter.empty
73
+ # TODO: Figure out if this is safe. It seems like when generating the type signatures for
74
+ # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
75
+ # argument name. Downstream, if someone converts that positional argument to a keyword
76
+ # argument, the name mismatch will break things, so here we're going to normalize the
77
+ # name to "input"
78
+ name = arg.name if arg.name != 'self' else 'input'
79
+ kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD
80
+ # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument
81
+ if name == "from":
82
+ assert kind == Parameter.POSITIONAL_OR_KEYWORD
83
+ # The ParameterKind type is an internal implementation detail of the inspect package,
84
+ # which makes this assignment hard to type-annotate
85
+ kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment]
86
+ # This renders all previous arguments to positional only
87
+ for idx, p in enumerate(parameters):
88
+ assert p.kind == Parameter.POSITIONAL_OR_KEYWORD
89
+ parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation)
90
+ parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type))
91
+ return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
92
+ if len(return_types) == 0:
93
+ return_type = None
94
+ elif len(return_types) == 1:
95
+ return_type = return_types[0]
96
+ else:
97
+ return_type = tuple(return_types)
98
+
99
+ return inspect.Signature(parameters, return_annotation=return_type)
100
+
101
+ _SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {}
102
+
103
+ def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
104
+ # Cached as it's called in the hot path of FakeTensor dispatch
105
+ cache_key = ts_schema.name, ts_schema.overload_name
106
+ cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key)
107
+ if cache_val is not None:
108
+ return cache_val
109
+
110
+ res = _torchscript_schema_to_signature_impl(ts_schema)
111
+ _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res
112
+ return res
113
+
114
+ @compatibility(is_backward_compatible=False)
115
+ def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
116
+ signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)
117
+
118
+ if signatures and schemas:
119
+ matched_schemas = []
120
+
121
+ # Iterate through all of the schema until we find one that matches
122
+ # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
123
+ # values. If none matches, `new_args_and_kwargs` will be None
124
+ for candidate_signature, schema in zip(signatures, schemas):
125
+ try:
126
+ candidate_signature.bind(*args, **kwargs)
127
+ matched_schemas.append((candidate_signature, schema))
128
+ except TypeError as e:
129
+ continue
130
+
131
+ def throw_if_mutable(schema):
132
+ if schema.is_mutable:
133
+ raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional '
134
+ f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
135
+ f'are not supported')
136
+
137
+ if len(matched_schemas) == 0:
138
+ # Did not match any schema. Cannot check for mutation
139
+ pass
140
+ elif len(matched_schemas) == 1:
141
+ # Matched exactly one schema, unambiguous
142
+ _, schema_to_check = matched_schemas[0]
143
+ throw_if_mutable(schema_to_check)
144
+ pass
145
+ else:
146
+ # Ambiguous schema match. Since mutability checking is best effort,
147
+ # do nothing.
148
+ pass
149
+
150
+ @compatibility(is_backward_compatible=False)
151
+ def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
152
+ """
153
+ Given an operator on the `torch` namespace, return a list of `inspect.Signature`
154
+ objects corresponding to the overloads of that op. May return `None` if a signature
155
+ could not be retrieved.
156
+
157
+ Args:
158
+ op (Callable): An operator on the `torch` namespace to look up a signature for
159
+
160
+ Returns:
161
+ Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
162
+ operator, or None if the operator signatures could not be retrieved. If
163
+ return_schemas=True, returns a tuple containing the optional Python signatures
164
+ and the optional TorchScript Function signature
165
+ """
166
+ if isinstance(op, OpOverload):
167
+ schemas = [op._schema]
168
+ elif isinstance(op, OpOverloadPacket):
169
+ schemas = [getattr(op, overload)._schema for overload in op.overloads()]
170
+ else:
171
+ override = _manual_overrides.get(op)
172
+ if override:
173
+ return (override, None) if return_schemas else None
174
+
175
+ aten_fn = torch.jit._builtins._find_builtin(op)
176
+
177
+ if aten_fn is None:
178
+ return (None, None) if return_schemas else None
179
+ schemas = torch._C._jit_get_schemas_for_operator(aten_fn)
180
+
181
+ signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]
182
+ return (signatures, schemas) if return_schemas else signatures
183
+
184
+ @compatibility(is_backward_compatible=False)
185
+ def create_type_hint(x):
186
+ try:
187
+ if isinstance(x, (list, tuple)):
188
+ # todo(chilli): Figure out the right way for mypy to handle this
189
+ if isinstance(x, list):
190
+ def ret_type(x):
191
+ return List[x] # type: ignore[valid-type]
192
+ else:
193
+ def ret_type(x):
194
+ return Tuple[x, ...]
195
+ if len(x) == 0:
196
+ return ret_type(Any)
197
+ base_type = x[0]
198
+ for t in x:
199
+ if issubclass(t, base_type):
200
+ continue
201
+ elif issubclass(base_type, t):
202
+ base_type = t
203
+ else:
204
+ return ret_type(Any)
205
+ return ret_type(base_type)
206
+ except Exception as e:
207
+ # We tried to create a type hint for list but failed.
208
+ warnings.warn(f"We were not able to successfully create a type hint from the type {x}")
209
+ pass
210
+ return x
211
+
212
+ @compatibility(is_backward_compatible=False)
213
+ def type_matches(signature_type : Any, argument_type : Any):
214
+ sig_origin_type = getattr(signature_type, '__origin__', signature_type)
215
+
216
+ if signature_type is argument_type:
217
+ return True
218
+
219
+ # Union types in signature. Given type needs to match one of the
220
+ # contained types in the Union
221
+ if sig_origin_type is typing.Union and signature_type != argument_type:
222
+ sig_contained = signature_type.__args__
223
+ return any(type_matches(c, argument_type) for c in sig_contained)
224
+
225
+ if signature_type is List[int] and argument_type is int:
226
+ # int can be promoted to List[int]
227
+ return True
228
+
229
+ if getattr(signature_type, '__origin__', None) in {list, List}:
230
+ sig_el_type = signature_type.__args__[0]
231
+ if not inspect.isclass(sig_el_type):
232
+ warnings.warn(
233
+ f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
234
+ return False
235
+ if getattr(argument_type, '__origin__', None) in {list, List}:
236
+ return issubclass(argument_type.__args__[0], sig_el_type)
237
+
238
+ def is_homogeneous_tuple(t):
239
+ if getattr(t, "__origin__", None) not in {tuple, Tuple}:
240
+ return False
241
+ contained = t.__args__
242
+ if t.__args__ == ((),): # Tuple[()].__args__ == ((),) for some reason
243
+ return True
244
+ return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)
245
+
246
+ # Tuple[T] is accepted for List[T] parameters
247
+ return is_homogeneous_tuple(argument_type)
248
+
249
+ # Dtype is an int in schemas
250
+ if signature_type is int and argument_type is torch.dtype:
251
+ return True
252
+
253
+ if signature_type is numbers.Number and argument_type in {int, float}:
254
+ return True
255
+ if inspect.isclass(argument_type) and inspect.isclass(signature_type):
256
+ return issubclass(argument_type, signature_type)
257
+
258
+ return False
259
+
260
+ @compatibility(is_backward_compatible=False)
261
+ def normalize_function(
262
+ target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
263
+ kwarg_types : Optional[Dict[str, Any]] = None,
264
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
265
+ """
266
+ Returns normalized arguments to PyTorch functions. This means that
267
+ `args/kwargs` will be matched up to the functional's
268
+ signature and return exclusively kwargs in positional order if
269
+ `normalize_to_only_use_kwargs` is True.
270
+ Also populates default values. Does not support positional-only
271
+ parameters or varargs parameters (*args, **kwargs). Does not support modules.
272
+
273
+ May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
274
+
275
+ Args:
276
+ target (Callable): Function that we are normalizing
277
+ args (Tuple[Any]): Tuple of args to the function
278
+ kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
279
+ arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
280
+ kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
281
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
282
+
283
+ Returns:
284
+
285
+ Returns normalized_args_and_kwargs, or `None` if not successful.
286
+ """
287
+ if kwargs is None:
288
+ kwargs = {}
289
+ new_args_and_kwargs = None
290
+ if not isinstance(target, types.BuiltinFunctionType) and not (
291
+ isinstance(target, (OpOverloadPacket, OpOverload))
292
+ ):
293
+ target_for_analysis = target
294
+ if target in boolean_dispatched:
295
+ # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
296
+ # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
297
+ # branches of the dispatch have exactly the same signature. If they do, use the `true`
298
+ # branch signature for analysis. Otherwise, leave this un-normalized
299
+ assert not isinstance(target, str)
300
+ dispatched = boolean_dispatched[target]
301
+ if_true, if_false = dispatched['if_true'], dispatched['if_false']
302
+ if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
303
+ return None
304
+ target_for_analysis = if_true
305
+
306
+ assert callable(target_for_analysis)
307
+ sig = inspect.signature(inspect.unwrap(target_for_analysis))
308
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
309
+ else:
310
+ assert callable(target)
311
+ torch_op_schemas = get_signature_for_torch_op(target)
312
+ matched_schemas = []
313
+ if torch_op_schemas:
314
+ # Iterate through all of the schema until we find one that matches
315
+ # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
316
+ # values. If none matches, `new_args_and_kwargs` will be None
317
+ for candidate_signature in torch_op_schemas:
318
+ try:
319
+ candidate_signature.bind(*args, **kwargs)
320
+ matched_schemas.append(candidate_signature)
321
+ except TypeError as e:
322
+ continue
323
+
324
+ if len(matched_schemas) == 0:
325
+ # Did not match any schema. Cannot normalize
326
+ pass
327
+ elif len(matched_schemas) == 1:
328
+ # Matched exactly one schema, unambiguous
329
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs,
330
+ normalize_to_only_use_kwargs)
331
+ else:
332
+ if arg_types is not None or kwarg_types is not None:
333
+ arg_types = arg_types if arg_types else cast(Tuple[Any], ())
334
+ kwarg_types = kwarg_types if kwarg_types else {}
335
+ for candidate_signature in torch_op_schemas:
336
+ sig_matches = True
337
+ try:
338
+ bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
339
+ for arg_name, arg_type in bound_types.arguments.items():
340
+ param = candidate_signature.parameters[arg_name]
341
+ sig_matches = sig_matches and type_matches(param.annotation, arg_type)
342
+ except TypeError as e:
343
+ sig_matches = False
344
+ if sig_matches:
345
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs,
346
+ normalize_to_only_use_kwargs)
347
+ break
348
+ else:
349
+ # Matched more than one schema. In this situation, the caller must provide the types of
350
+ # the arguments of the overload they expect.
351
+ schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
352
+ raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
353
+ f'the schema match was ambiguous! Please provide argument types to '
354
+ f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')
355
+
356
+ return new_args_and_kwargs
357
+
358
+ @compatibility(is_backward_compatible=False)
359
+ def normalize_module(
360
+ root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
361
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
362
+ """
363
+ Returns normalized arguments to PyTorch modules. This means that
364
+ `args/kwargs` will be matched up to the functional's
365
+ signature and return exclusively kwargs in positional order if
366
+ `normalize_to_only_use_kwargs` is True.
367
+ Also populates default values. Does not support positional-only
368
+ parameters or varargs parameters (*args, **kwargs).
369
+
370
+ Args:
371
+ root (nn.Module): root module upon which we query modules
372
+ target (Callable): Function that we are normalizing
373
+ args (Tuple[Any]): Tuple of args to the function
374
+ kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
375
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
376
+
377
+ Returns:
378
+
379
+ Returns normalized_args_and_kwargs, or `None` if not successful.
380
+ """
381
+ try:
382
+ submod = root.get_submodule(target)
383
+ except AttributeError as e:
384
+ raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
385
+ f"have that target!") from e
386
+ if hasattr(submod.__class__, '__name__'):
387
+ classname = submod.__class__.__name__
388
+ if getattr(torch.nn, classname, None) == submod.__class__:
389
+ sig = inspect.signature(inspect.unwrap(submod.forward))
390
+ if kwargs is None:
391
+ kwargs = {}
392
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs,
393
+ normalize_to_only_use_kwargs)
394
+ return new_args_and_kwargs
395
+ return None
396
+
397
+ def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
398
+ kwargs : Dict[str, Any],
399
+ normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
400
+ """
401
+ Given a call target, args, and kwargs, return the arguments normalized into
402
+ an ArgsKwargsPair, or None if the type signature is not supported by
403
+ this normalization.
404
+
405
+ Args:
406
+
407
+ sig (inspect.Signature): Signature object for the target
408
+ args (Tuple): Arguments that appear at the callsite for `target`
409
+ kwargs (Dict): Keyword arguments that appear at the callsite for `target`
410
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
411
+
412
+ Returns:
413
+
414
+ Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
415
+ this target is not supported.
416
+ """
417
+
418
+ # Don't currently support positional-only
419
+ # or varargs (*args, **kwargs) signatures
420
+ supported_parameter_types = {
421
+ inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
422
+ if any(p.kind not in supported_parameter_types for p in sig.parameters.values()):
423
+ # Add an exception for one signature, which is common for random/uniform, i.e.:
424
+ # Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
425
+ # `from` is a Python keyword and as such functions with that signature should have
426
+ # positional-only args, but at the same time they could be dispatched as kwargs
427
+ if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:
428
+ return None
429
+
430
+ bound_args = sig.bind(*args, **kwargs)
431
+ bound_args.apply_defaults()
432
+
433
+ new_kwargs : Dict[str, Any] = {}
434
+ new_args : List[Any] = []
435
+ for i, param in enumerate(sig.parameters):
436
+ if not normalize_to_only_use_kwargs and i < len(args):
437
+ new_args.append(bound_args.arguments[param])
438
+ else:
439
+ new_kwargs[param] = bound_args.arguments[param]
440
+
441
+ return ArgsKwargsPair(tuple(new_args), new_kwargs)
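Editor's note: a small sketch of how `normalize_function` from the file above can be used (not part of the uploaded file; exact overload resolution depends on the installed torch build, so argument types are supplied to disambiguate, and the printed keys are the expected result, not a guarantee):

import torch
from torch.fx.operator_schemas import normalize_function

a, b = torch.randn(2), torch.randn(2)
# torch.add has several overloads, so arg_types are passed to pick one.
pair = normalize_function(
    torch.add, (a, b),
    arg_types=(torch.Tensor, torch.Tensor), kwarg_types={},
    normalize_to_only_use_kwargs=True,
)
if pair is not None:            # None means no unambiguous schema match was found
    print(list(pair.kwargs))    # expected: ['input', 'other', 'alpha']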
venv/lib/python3.10/site-packages/torch/fx/proxy.py ADDED
@@ -0,0 +1,565 @@
1
+ # mypy: ignore-errors
2
+
3
+ import enum
4
+ import dis
5
+ import copy
6
+ import sys
7
+ import torch
8
+ import inspect
9
+ import operator
10
+ import traceback
11
+ import collections
12
+
13
+ from dataclasses import is_dataclass, fields
14
+
15
+
16
+ from .graph import magic_methods, reflectable_magic_methods, Graph
17
+ from typing import Tuple, Dict, OrderedDict, Optional, Any, Iterator, Callable
18
+ from .node import Target, Node, Argument, base_types, map_aggregate
19
+ from ._compatibility import compatibility
20
+ from .operator_schemas import check_for_mutable_operation
21
+ import torch.fx.traceback as fx_traceback
22
+
23
+ __all__ = ['TracerBase', 'GraphAppendingTracer', 'TraceError',
24
+ 'Proxy', 'Attribute', 'ParameterProxy', 'Scope',
25
+ 'ScopeContextManager']
26
+
27
+
28
+ @compatibility(is_backward_compatible=False)
29
+ class Scope:
30
+ """ Scope object that records the module path and the module type
31
+ of a module. Scope is used to track the information of the module
32
+ that contains a Node in a Graph of GraphModule. For example::
33
+
34
+ class Sub(torch.nn.Module):
35
+ def forward(self, x):
36
+ # This will be a call_method Node in GraphModule,
37
+ # scope for this would be (module_path="sub", module_type=Sub)
38
+ return x.transpose(1, 2)
39
+
40
+ class M(torch.nn.Module):
41
+ def __init__(self):
42
+ self.sub = Sub()
43
+
44
+ def forward(self, x):
45
+ # This will be a call_method Node as well,
46
+ # scope for this would be (module_path="", None)
47
+ x = x.transpose(1, 2)
48
+ x = self.sub(x)
49
+ return x
50
+
51
+ """
52
+
53
+ def __init__(self, module_path: str, module_type: Any):
54
+ super().__init__()
55
+ self.module_path = module_path
56
+ self.module_type = module_type
57
+
58
+
59
+ @compatibility(is_backward_compatible=False)
60
+ class ScopeContextManager:
61
+ """ A context manager to track the Scope of Node during symbolic tracing.
62
+ When entering a forward function of a Module, we'll update the scope information of
63
+ the current module, and when we exit, we'll restore the previous scope information.
64
+ """
65
+
66
+ def __init__(
67
+ self,
68
+ scope: Scope,
69
+ current_scope: Scope,
70
+ ):
71
+ super().__init__()
72
+ # Keep a copy of prev scope to restore on exit
73
+ self._prev_scope = copy.copy(scope)
74
+ # Update scope to current scope
75
+ scope.module_path = current_scope.module_path
76
+ scope.module_type = current_scope.module_type
77
+ # Save a reference so we can restore it
78
+ self._scope = scope
79
+
80
+ def __enter__(self):
81
+ return self._scope
82
+
83
+ def __exit__(self, *args):
84
+ self._scope.module_path = self._prev_scope.module_path
85
+ self._scope.module_type = self._prev_scope.module_type
86
+ return
87
+
88
+
89
+ _COPY_META_FIELDS = ["nn_module_stack", "source_fn_stack", "original_aten", "recompute", "from_node", "quantization_tag"]
90
+
91
+
92
+ @compatibility(is_backward_compatible=True)
93
+ class TracerBase:
94
+ graph: Graph
95
+ record_stack_traces : bool = False
96
+ # Feature flag for mutable schema checking
97
+ # Enabled by default in 1.12
98
+ check_mutable_operations : bool = False
99
+ # Feature flag for assert tracing
100
+ trace_asserts : bool = False
101
+ # Feature flag for proxying accesses to buffer values
102
+ proxy_buffer_attributes : bool = False
103
+
104
+ # Name of the function to be traced. It will only be used when
105
+ # ``root`` is an instance of ``nn.Module``
106
+ traced_func_name: str = "forward"
107
+
108
+ # Maps the containing module's name to the operator name
109
+ scope : Scope
110
+
111
+ # Records the module call stack
112
+ module_stack: OrderedDict[str, Tuple[str, Any]]
113
+
114
+ # Mapping of node name to module scope
115
+ node_name_to_scope: Dict[str, Tuple[str, type]]
116
+
117
+ @compatibility(is_backward_compatible=True)
118
+ def create_node(self, kind : str, target : Target,
119
+ args : Tuple[Argument, ...], kwargs : Dict[str, Argument], name : Optional[str] = None,
120
+ type_expr : Optional[Any] = None) -> Node:
121
+ """
122
+ Inserts a graph node given target, args, kwargs, and name.
123
+
124
+ This method can be overridden to do extra checking, validation, or
125
+ modification of values used in node creation. For example, one might
126
+ want to disallow in-place operations from being recorded.
127
+ """
128
+ if kind == 'call_function' and self.check_mutable_operations:
129
+ check_for_mutable_operation(target, args, kwargs)
130
+
131
+ node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)
132
+ # TODO node_name_to_scope will be deprecated in favor of
133
+ # node.meta['nn_module_stack']
134
+ self.node_name_to_scope[node.name] = (
135
+ self.scope.module_path,
136
+ self.scope.module_type,
137
+ )
138
+ # Optionally set stack trace on the created Node for debugging purposes
139
+ if fx_traceback.has_preserved_node_meta():
140
+ current_meta: Dict[str, Any] = fx_traceback.get_current_meta()
141
+
142
+ stack_trace = current_meta.get("stack_trace")
143
+ if stack_trace:
144
+ node.stack_trace = stack_trace
145
+ # Explicitly set the stack_trace, nn_module_stack and source_fn on the node.meta
146
+ # If other meta fields are needed, they can be added here
147
+ for field in _COPY_META_FIELDS:
148
+ if field in current_meta:
149
+ node.meta[field] = copy.copy(current_meta[field])
150
+
151
+ # Here we decrement to account for the sequence_nr having
152
+ # just been incremented while tracing this lowered aten op.
153
+ new_seq_nr = torch.autograd._get_sequence_nr() - 1
154
+ # The sequence_nr increments every time a new autograd Node
155
+ # is created. During the FWD pass we store the sequence_nr
156
+ # corresponding to the last autograd Node created on this fx
157
+ # node's meta. A single aten op can create multiple autograd
158
+ # nodes as is the case with in-place foreach ops. During the
159
+ # BWD pass we retrieve the sequence_nr stored on the current
160
+ # executing autograd Node. See NOTE [ Sequence Number ].
161
+ if current_meta.get("in_grad_fn", 0) > 0:
162
+ new_seq_nr = current_meta["grad_fn_seq_nr"][-1]
163
+ node.meta["seq_nr"] = new_seq_nr
164
+
165
+ elif self.module_stack:
166
+ node.meta['nn_module_stack'] = copy.copy(self.module_stack)
167
+ return node
168
+
169
+ @compatibility(is_backward_compatible=True)
170
+ def proxy(self, node: Node) -> 'Proxy':
171
+ return Proxy(node, self)
172
+
173
+ @compatibility(is_backward_compatible=True)
174
+ def create_proxy(self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any],
175
+ name: Optional[str] = None, type_expr : Optional[Any] = None,
176
+ proxy_factory_fn: Callable[[Node], 'Proxy'] = None):
177
+ '''
178
+ Create a Node from the given arguments, then return the Node
179
+ wrapped in a Proxy object.
180
+
181
+ If kind = 'placeholder', then we're creating a Node that
182
+ represents the parameter of a function. If we need to encode
183
+ a default parameter, we use the ``args`` tuple. ``args`` is
184
+ otherwise empty for ``placeholder`` Nodes.
185
+ '''
186
+
187
+ args_ = self.create_arg(args)
188
+ kwargs_ = self.create_arg(kwargs)
189
+ assert isinstance(args_, tuple)
190
+ assert isinstance(kwargs_, dict)
191
+
192
+ node = self.create_node(kind, target, args_, kwargs_, name, type_expr)
193
+
194
+ if not proxy_factory_fn:
195
+ proxy = self.proxy(node)
196
+ else:
197
+ proxy = proxy_factory_fn(node)
198
+
199
+ if self.record_stack_traces and not proxy.node.stack_trace:
200
+ user_frame = self._find_user_frame()
201
+ if user_frame:
202
+ summary = traceback.extract_stack(user_frame)
203
+ tb_lines = summary.format()
204
+ # stack_trace would have innermost frame at the bottom
205
+ proxy.node.stack_trace = ''.join(tb_lines)
206
+
207
+ return proxy
208
+
209
+ def _find_user_frame(self):
210
+ """
211
+ Find the Python stack frame executing the user code during
212
+ symbolic tracing.
213
+ """
214
+ # We have to do a little dance here. Basically, walk up the callstack and
215
+ # record the first frame not in the pytorch source. This is the frame executing
216
+ # the user code during tracing.
217
+ frame = inspect.currentframe()
218
+
219
+ pt_files = ['torch/fx/proxy.py',
220
+ 'torch/fx/_symbolic_trace.py',
221
+ 'torch/fx/experimental/proxy_tensor.py',
222
+ 'torch/_ops.py',
223
+ 'torch/_tensor.py',
224
+ 'torch/utils/_python_dispatch.py',
225
+ 'torch/_prims_common/wrappers.py',
226
+ 'torch/_refs/__init__.py',
227
+ 'torch/_refs/nn/functional/__init__.py',
228
+ 'torch/utils/_stats.py',
229
+ ]
230
+ while frame:
231
+ frame = frame.f_back
232
+ if frame and all(not frame.f_code.co_filename.endswith(file) for file in pt_files):
233
+ break
234
+
235
+ if not frame:
236
+ return None
237
+
238
+ return frame
239
+
240
+ @compatibility(is_backward_compatible=True)
241
+ def create_arg(self, a: Any) -> Argument:
242
+ """
243
+ A method that lowers the objects seen as arguments during symbolic evaluation
244
+ into Argument types that can be stored in IR.
245
+
246
+ Can be overridden to support more trace-specific types.
247
+ """
248
+ if not isinstance(a, Proxy) and hasattr(a, '__fx_create_arg__'):
249
+ return a.__fx_create_arg__(self)
250
+ # aggregates
251
+ elif isinstance(a, tuple) and hasattr(a, '_fields'):
252
+ # NamedTuple constructors don't seem to like getting a generator
253
+ # expression as an argument to their constructor, so build this
254
+ # intermediate tuple and unpack it into the NamedTuple constructor
255
+ args = tuple(self.create_arg(elem) for elem in a)
256
+ return type(a)(*args) # type: ignore[arg-type]
257
+ elif isinstance(a, (tuple, list)):
258
+ return type(a)(self.create_arg(elem) for elem in a)
259
+ elif isinstance(a, dict):
260
+ r = {}
261
+ for k, v in a.items():
262
+ # Check for invalid dict keys. We do not want a Proxy to appear
263
+ # anywhere within the key. Since keys can be collection types,
264
+ # we iterate through the key with map_aggregate
265
+ k = self.create_arg(k)
266
+
267
+ def no_node(arg):
268
+ if isinstance(arg, Node):
269
+ raise RuntimeError("Keys for dictionaries used as an argument cannot contain a "
270
+ f"Node. Got key: {k}")
271
+ map_aggregate(k, no_node)
272
+
273
+ r[k] = self.create_arg(v)
274
+ return r
275
+ elif isinstance(a, slice):
276
+ return slice(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
277
+
278
+ elif isinstance(a, range):
279
+ return range(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
280
+
281
+ elif isinstance(a, torch._ops.OpOverload):
282
+ return a
283
+
284
+ if isinstance(a, Proxy):
285
+ # base case: we unwrap the Proxy object
286
+ return a.node
287
+
288
+ if is_dataclass(a):
289
+ kwargs = {field.name: self.create_arg(getattr(a, field.name)) for field in fields(a)}
290
+ return self.create_node("call_function", a.__class__, (), kwargs)
291
+
292
+ elif isinstance(a, (*base_types, enum.Enum)) or a is None or a is ...:
293
+ return a
294
+ raise NotImplementedError(f"argument of type: {type(a)}")
295
+
296
+ @compatibility(is_backward_compatible=True)
297
+ def to_bool(self, obj: 'Proxy') -> bool:
298
+ """Called when a proxy object is being converted to a boolean, such as
299
+ when used in control flow. Normally we don't know what to do because
300
+ we don't know the value of the proxy, but a custom tracer can attach more
301
+ information to the graph node using create_node and can choose to return a value.
302
+ """
303
+ raise TraceError('symbolically traced variables cannot be used as inputs to control flow')
304
+
305
+ @compatibility(is_backward_compatible=True)
306
+ def iter(self, obj: 'Proxy') -> Iterator:
307
+ """Called when a proxy object is being iterated over, such as
308
+ when used in control flow. Normally we don't know what to do because
309
+ we don't know the value of the proxy, but a custom tracer can attach more
310
+ information to the graph node using create_node and can choose to return an iterator.
311
+ """
312
+ raise TraceError('Proxy object cannot be iterated. This can be '
313
+ 'attempted when the Proxy is used in a loop or'
314
+ ' as a *args or **kwargs function argument. '
315
+ 'See the torch.fx docs on pytorch.org for a '
316
+ 'more detailed explanation of what types of '
317
+ 'control flow can be traced, and check out the'
318
+ ' Proxy docstring for help troubleshooting '
319
+ 'Proxy iteration errors')
320
+
321
+ @compatibility(is_backward_compatible=True)
322
+ """Called when a proxy object has the keys() method called.
323
+ """Called when a proxy object is has the keys() method called.
324
+ iterator if ** is supposed to work in your custom tracer.
325
+ iterator it ** is suppose to work in your custom tracer.
326
+ """
327
+ return Attribute(obj, 'keys')()
328
+
329
+
330
+ # used in Proxy object when just appending to the graph while not tracing.
331
+ @compatibility(is_backward_compatible=True)
332
+ class GraphAppendingTracer(TracerBase):
333
+ def __init__(self, graph: Graph):
334
+ super().__init__()
335
+ self.graph = graph
336
+ self.scope = Scope("", None)
337
+ self.module_stack = collections.OrderedDict()
338
+ self.node_name_to_scope = {}
339
+
340
+ @compatibility(is_backward_compatible=False)
341
+ def assert_fn(x):
342
+ assert x
343
+
344
+ @compatibility(is_backward_compatible=True)
345
+ class TraceError(ValueError):
346
+ pass
347
+
348
+ @compatibility(is_backward_compatible=True)
349
+ class Proxy:
350
+ """
351
+ ``Proxy`` objects are ``Node`` wrappers that flow through the
352
+ program during symbolic tracing and record all the operations
353
+ (``torch`` function calls, method calls, operators) that they touch
354
+ into the growing FX Graph.
355
+
356
+ If you're doing graph transforms, you can wrap your own ``Proxy``
357
+ method around a raw ``Node`` so that you can use the overloaded
358
+ operators to add additional things to a ``Graph``.
359
+
360
+ ``Proxy`` objects cannot be iterated. In other words, the symbolic
361
+ tracer will throw an error if a ``Proxy`` is used in a loop or as
362
+ an ``*args``/``**kwargs`` function argument.
363
+
364
+ There are two main ways around this:
365
+ 1. Factor out the untraceable logic into a top-level function and
366
+ use ``fx.wrap`` on it.
367
+ 2. If the control flow is static (i.e. the loop trip count is
368
+ based on some hyperparameter), the code can be kept in its original
369
+ position and refactored into something like::
370
+
371
+ for i in range(self.some_hyperparameter):
372
+ indexed_item = proxied_value[i]
373
+
374
+ For a more detailed description into the Proxy internals, check out
375
+ the "Proxy" section in `torch/fx/OVERVIEW.md`
376
+ """
377
+
378
+ @compatibility(is_backward_compatible=True)
379
+ def __init__(self, node: Node, tracer: 'Optional[TracerBase]' = None):
380
+ if tracer is None:
381
+ # This allows you to create a Proxy object around a raw Node
382
+ tracer = GraphAppendingTracer(node.graph)
383
+ self.tracer = tracer
384
+ self.node = node
385
+
386
+ def __repr__(self) -> str:
387
+ return f'Proxy({self.node.name})'
388
+
389
+ def __getattr__(self, k) -> 'Attribute':
390
+ # note: not added to the graph yet, if this is a method call
391
+ # we peephole optimize to the method invocation
392
+ return Attribute(self, k)
393
+
394
+ def __call__(self, *args, **kwargs) -> 'Proxy':
395
+ return self.tracer.create_proxy('call_method', '__call__', (self,) + args, kwargs)
396
+
397
+ def __iter__(self) -> Iterator['Proxy']:
398
+ frame = inspect.currentframe()
399
+ assert frame is not None
400
+ calling_frame = frame.f_back
401
+ assert calling_frame is not None
402
+ inst_list = list(dis.get_instructions(calling_frame.f_code))
403
+ if sys.version_info >= (3, 11):
404
+ from bisect import bisect_left
405
+ inst_idx = bisect_left(inst_list, calling_frame.f_lasti, key=lambda x: x.offset)
406
+ else:
407
+ inst_idx = calling_frame.f_lasti // 2
408
+ inst = inst_list[inst_idx]
409
+ if inst.opname == 'UNPACK_SEQUENCE':
410
+ return (self[i] for i in range(inst.argval)) # type: ignore[index]
411
+
412
+ return self.tracer.iter(self)
413
+
414
+ def __abs__(self):
415
+ return self.tracer.create_proxy('call_function', operator.abs, (self,), {})
416
+
417
+ def __bool__(self) -> bool:
418
+ if self.tracer.trace_asserts:
419
+ # check if this boolean is used in an assertion, bytecode pattern for assertions
420
+ # is pretty stable for Python 3.7--3.9
421
+ frame = inspect.currentframe()
422
+ assert frame is not None
423
+ calling_frame = frame.f_back
424
+ assert calling_frame is not None
425
+ insts = list(dis.get_instructions(calling_frame.f_code))
426
+ if sys.version_info >= (3, 11):
427
+ from bisect import bisect_left
428
+ cur = bisect_left(insts, calling_frame.f_lasti, key=lambda x: x.offset)
429
+ else:
430
+ cur = calling_frame.f_lasti // 2
431
+ inst = insts[cur]
432
+
433
+ if inst.opname == 'POP_JUMP_IF_TRUE':
434
+ first = insts[cur + 1]
435
+ assert inst.arg is not None
436
+ last = insts[inst.arg // 2 - 1]
437
+ starts_with_assert = (first.opname == 'LOAD_GLOBAL' and first.argval == 'AssertionError'
438
+ or first.opname == 'LOAD_ASSERTION_ERROR')
439
+ if starts_with_assert and last.opname == 'RAISE_VARARGS':
440
+ self.tracer.create_proxy('call_function', assert_fn, (self,), {})
441
+ return True
442
+
443
+ return self.tracer.to_bool(self)
444
+
445
+ @compatibility(is_backward_compatible=True)
446
+ def keys(self):
447
+ return self.tracer.keys(self)
448
+
449
+ def __len__(self):
450
+ raise RuntimeError("'len' is not supported in symbolic tracing by default. If you want "
451
+ "this call to be recorded, please call torch.fx.wrap('len') at "
452
+ "module scope")
453
+
454
+ @classmethod
455
+ def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
456
+ args = args if args else ()
457
+ kwargs = kwargs if kwargs else {}
458
+
459
+ tracers : Dict[Any, None] = {}
460
+
461
+ def find_tracer(a):
462
+             if isinstance(a, cls):
+                 tracers[a.tracer] = None
+         torch.fx.node.map_aggregate(args, find_tracer)
+         torch.fx.node.map_aggregate(kwargs, find_tracer)
+
+         if len(tracers) > 1:
+             raise RuntimeError(f'Found multiple different tracers {list(tracers.keys())} while '
+                                f'trying to trace operations {orig_method}')
+         tracer = next(iter(tracers.keys()))
+
+         if isinstance(orig_method, torch._C.ScriptMethod):
+             args = (orig_method.owner,) + args
+             return tracer.create_proxy('call_method', orig_method.name, args, kwargs)
+         if torch.overrides.is_tensor_method_or_property(orig_method):
+             return tracer.create_proxy('call_method', orig_method.__name__, args, kwargs)
+         else:
+             if isinstance(orig_method, torch._ops.HigherOrderOperator):
+                 # TODO: Define how to symbolically trace HigherOrderOperators
+                 raise RuntimeError("Unable to symbolically trace HigherOrderOperators")
+             return tracer.create_proxy('call_function', orig_method, args, kwargs,
+                                        name=tracer.graph._target_to_str(orig_method.__name__))
+
+
+ @compatibility(is_backward_compatible=True)
+ class Attribute(Proxy):
+     @compatibility(is_backward_compatible=True)
+     def __init__(self, root: Proxy, attr: str):
+         self.root = root
+         self.attr = attr
+         self.tracer = root.tracer
+         self._node: Optional[Node] = None
+
+     @property
+     def node(self):
+         # the node for attributes is added lazily, since most will just be method calls
+         # which do not rely on the getitem call
+         if self._node is None:
+             self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
+         return self._node
+
+     def __call__(self, *args, **kwargs):
+         return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
+
+
+ @compatibility(is_backward_compatible=False)
+ class ParameterProxy(Proxy):
+     """
+     A special proxy which lets "shape", "size", "dim", and a few other
+     attribute accesses pass through to the underlying module parameter object,
+     so that conditional tests on these attributes will not throw exception during tracing
+     """
+     def __init__(self, tracer: TracerBase, node: Node, name, param):
+         super().__init__(node, tracer)
+         assert isinstance(param, torch.nn.Parameter)
+         self.param = param
+         self.name = name
+
+     def __repr__(self) -> str:
+         return f'ParameterProxy({self.name})'
+
+     @property
+     def shape(self):
+         return self.param.shape
+
+     def size(self):
+         return self.param.size()
+
+     def dim(self):
+         return self.param.dim()
+
+     @property
+     def ndim(self):
+         return self.param.ndim
+
+     def numel(self):
+         return self.param.numel()
+
+     def nelement(self):
+         return self.param.nelement()
+
+
+ for method in magic_methods:
+     def _scope(method):
+         def impl(*args, **kwargs):
+             tracer = args[0].tracer
+             target = getattr(operator, method)
+             return tracer.create_proxy('call_function', target, args, kwargs)
+         impl.__name__ = method
+         as_magic = f'__{method.strip("_")}__'
+         setattr(Proxy, as_magic, impl)
+     _scope(method)
+
+ def _define_reflectable(orig_method_name):
+     method_name = f'__r{orig_method_name.strip("_")}__'
+
+     def impl(self, rhs):
+         target = getattr(operator, orig_method_name)
+         return self.tracer.create_proxy('call_function', target, (rhs, self), {})
+     impl.__name__ = method_name
+     impl.__qualname__ = method_name
+     setattr(Proxy, method_name, impl)
+
+ for orig_method_name in reflectable_magic_methods:
+     _define_reflectable(orig_method_name)
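The two loops above are what make ordinary Python operators traceable: every magic method (and its reflected variant) on Proxy is rewritten to emit a call_function node targeting the matching function in the operator module. A minimal, hedged sketch of the effect follows; it is illustrative only and not part of the added file.

import torch
import torch.fx

class AddMul(torch.nn.Module):
    def forward(self, x, y):
        # During tracing, x and y are Proxy objects, so * and + dispatch through
        # the generated __mul__/__add__ wrappers and record call_function nodes
        # targeting operator.mul and operator.add.
        return x * y + 1

gm = torch.fx.symbolic_trace(AddMul())
print(gm.graph)  # expect call_function nodes for operator.mul and operator.add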
venv/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py ADDED
@@ -0,0 +1,349 @@
+ from .graph_module import GraphModule
+ from .graph import Graph
+ from .node import Node
+ from ._symbolic_trace import symbolic_trace
+ from ._compatibility import compatibility
+
+ import copy
+ from dataclasses import dataclass
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union, TYPE_CHECKING
+ import torch
+
+ if TYPE_CHECKING:
+     from .passes.utils.matcher_with_name_node_map_utils import InternalMatch
+
+ __all__ = ['Match', 'replace_pattern', 'replace_pattern_with_filters', "ReplacedPatterns"]
+
+ @compatibility(is_backward_compatible=True)
+ class Match(NamedTuple):
+     # Node from which the match was found
+     anchor: Node
+     # Maps nodes in the pattern subgraph to nodes in the larger graph
+     nodes_map: Dict[Node, Node]
+
+ @compatibility(is_backward_compatible=False)
+ @dataclass
+ class ReplacedPatterns:
+     # Node from which the match was found
+     anchor: Node
+     # Maps nodes in the pattern subgraph to nodes in the larger graph
+     nodes_map: Dict[Node, Node]
+     # List of nodes that were added into the graph
+     replacements: List[Node]
+
+ def _replace_attributes(gm: GraphModule, replacement: torch.nn.Module) -> None:
+     gm.delete_all_unused_submodules()
+
+     if isinstance(replacement, GraphModule):
+         replacement.graph.lint()
+
+     def try_get_attr(gm: torch.nn.Module, target: str) -> Optional[Any]:
+         module_path, _, attr_name = target.rpartition(".")
+         try:
+             mod: torch.nn.Module = gm.get_submodule(module_path)
+         except AttributeError:
+             return None
+         attr = getattr(mod, attr_name, None)
+         return attr
+
+     for node in gm.graph.nodes:
+         if node.op == "call_module" or node.op == "get_attr":
+
+             gm_attr = try_get_attr(gm, node.target)
+             replacement_attr = try_get_attr(replacement, node.target)
+
+             # CASE 1: This target already exists as an attribute in our
+             # result GraphModule. Whether or not it exists in
+             # `replacement`, the existing submodule takes precedence.
+             if gm_attr is not None:
+                 continue
+
+             # CASE 2: The target exists as an attribute in `replacement`
+             # only, so we need to copy it over.
+             elif replacement_attr is not None:
+                 new_attr = copy.deepcopy(replacement_attr)
+                 if isinstance(replacement_attr, torch.nn.Module):
+                     gm.add_submodule(node.target, new_attr)
+                 else:
+                     setattr(gm, node.target, new_attr)
+
+             # CASE 3: The target doesn't exist as an attribute in `gm`
+             # or `replacement`
+             else:
+                 raise RuntimeError("Attempted to create a \"", node.op,
+                                    "\" node during subgraph rewriting "
+                                    f"with target {node.target}, but "
+                                    "the referenced attribute does not "
+                                    "exist in the replacement GraphModule")
+
+     gm.graph.lint()
+
+
+ @compatibility(is_backward_compatible=True)
+ def replace_pattern(
+     gm: GraphModule,
+     pattern: Union[Callable, GraphModule],
+     replacement: Union[Callable, GraphModule]
+ ) -> List[Match]:
+     """
+     Matches all possible non-overlapping sets of operators and their
+     data dependencies (``pattern``) in the Graph of a GraphModule
+     (``gm``), then replaces each of these matched subgraphs with another
+     subgraph (``replacement``).
+
+     Args:
+         ``gm``: The GraphModule that wraps the Graph to operate on
+         ``pattern``: The subgraph to match in ``gm`` for replacement
+         ``replacement``: The subgraph to replace ``pattern`` with
+
+     Returns:
+         List[Match]: A list of ``Match`` objects representing the places
+         in the original graph that ``pattern`` was matched to. The list
+         is empty if there are no matches. ``Match`` is defined as:
+
+         .. code-block:: python
+
+             class Match(NamedTuple):
+                 # Node from which the match was found
+                 anchor: Node
+                 # Maps nodes in the pattern subgraph to nodes in the larger graph
+                 nodes_map: Dict[Node, Node]
+
+     Examples:
+
+     .. code-block:: python
+
+         import torch
+         from torch.fx import symbolic_trace, subgraph_rewriter
+
+         class M(torch.nn.Module):
+             def __init__(self):
+                 super().__init__()
+
+             def forward(self, x, w1, w2):
+                 m1 = torch.cat([w1, w2]).sum()
+                 m2 = torch.cat([w1, w2]).sum()
+                 return x + torch.max(m1) + torch.max(m2)
+
+         def pattern(w1, w2):
+             return torch.cat([w1, w2]).sum()
+
+         def replacement(w1, w2):
+             return torch.stack([w1, w2])
+
+         traced_module = symbolic_trace(M())
+
+         subgraph_rewriter.replace_pattern(traced_module, pattern, replacement)
+
+     The above code will first match ``pattern`` in the ``forward``
+     method of ``traced_module``. Pattern-matching is done based on
+     use-def relationships, not node names. For example, if you had
+     ``p = torch.cat([a, b])`` in ``pattern``, you could match
+     ``m = torch.cat([a, b])`` in the original ``forward`` function,
+     despite the variable names being different (``p`` vs ``m``).
+
+     The ``return`` statement in ``pattern`` is matched based on its
+     value only; it may or may not match to the ``return`` statement in
+     the larger graph. In other words, the pattern doesn't have to extend
+     to the end of the larger graph.
+
+     When the pattern is matched, it will be removed from the larger
+     function and replaced by ``replacement``. If there are multiple
+     matches for ``pattern`` in the larger function, each non-overlapping
+     match will be replaced. In the case of a match overlap, the first
+     found match in the set of overlapping matches will be replaced.
+     ("First" here being defined as the first in a topological ordering
+     of the Nodes' use-def relationships. In most cases, the first Node
+     is the parameter that appears directly after ``self``, while the
+     last Node is whatever the function returns.)
+
+     One important thing to note is that the parameters of the
+     ``pattern`` Callable must be used in the Callable itself,
+     and the parameters of the ``replacement`` Callable must match
+     the pattern. The first rule is why, in the above code block, the
+     ``forward`` function has parameters ``x, w1, w2``, but the
+     ``pattern`` function only has parameters ``w1, w2``. ``pattern``
+     doesn't use ``x``, so it shouldn't specify ``x`` as a parameter.
+     As an example of the second rule, consider replacing
+
+     .. code-block:: python
+
+         def pattern(x, y):
+             return torch.neg(x) + torch.relu(y)
+
+     with
+
+     .. code-block:: python
+
+         def replacement(x, y):
+             return torch.relu(x)
+
+     In this case, ``replacement`` needs the same number of parameters
+     as ``pattern`` (both ``x`` and ``y``), even though the parameter
+     ``y`` isn't used in ``replacement``.
+
+     After calling ``subgraph_rewriter.replace_pattern``, the generated
+     Python code looks like this:
+
+     .. code-block:: python
+
+         def forward(self, x, w1, w2):
+             stack_1 = torch.stack([w1, w2])
+             sum_1 = stack_1.sum()
+             stack_2 = torch.stack([w1, w2])
+             sum_2 = stack_2.sum()
+             max_1 = torch.max(sum_1)
+             add_1 = x + max_1
+             max_2 = torch.max(sum_2)
+             add_2 = add_1 + max_2
+             return add_2
+     """
+     match_and_replacements = _replace_pattern(gm, pattern, replacement)
+     return [Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements]
+
+
+ # Experimental API, not backward compatible
+ @compatibility(is_backward_compatible=False)
+ def replace_pattern_with_filters(
+     gm: GraphModule,
+     pattern: Union[Callable, Graph, GraphModule],
+     replacement: Union[Callable, Graph, GraphModule],
+     match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None,
+     ignore_literals: bool = False,
+ ) -> List[ReplacedPatterns]:
+     """
+     See replace_pattern for documentation. This function is an overload with an additional match_filter argument.
+
+     Args:
+         ``match_filters``: A list of functions that take in
+             (match: InternalMatch, original_graph: Graph, pattern_graph: Graph) and return a boolean indicating
+             whether the match satisfies the condition.
+             See matcher_utils.py for definition of InternalMatch.
+     """
+
+     return _replace_pattern(gm, pattern, replacement, match_filters, ignore_literals)
+
+
+ def _replace_pattern(
+     gm: GraphModule,
+     pattern: Union[Callable, Graph, GraphModule],
+     replacement: Union[Callable, Graph, GraphModule],
+     match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None,
+     ignore_literals: bool = False,
+ ) -> List[ReplacedPatterns]:
+
+     from torch.fx.passes.utils.matcher_utils import SubgraphMatcher, InternalMatch
+
+     if match_filters is None:
+         match_filters = []
+
+     # Get the graphs for `gm`, `pattern`, `replacement`
+     original_graph: Graph = gm.graph
+
+     if isinstance(pattern, GraphModule):
+         pattern_graph = pattern.graph
+     elif isinstance(pattern, Graph):
+         pattern_graph = pattern
+     else:
+         pattern_graph = symbolic_trace(pattern).graph
+
+     if isinstance(replacement, GraphModule):
+         replacement_graph = replacement.graph
+     elif isinstance(replacement, Graph):
+         replacement_graph = replacement
+     else:
+         replacement_graph = symbolic_trace(replacement).graph
+
+     matcher = SubgraphMatcher(pattern_graph, match_output=False, match_placeholder=False,
+                               remove_overlapping_matches=True, ignore_literals=ignore_literals)
+     _matches: List[InternalMatch] = matcher.match(original_graph)
+
+     # Filter out matches that don't match the filter
+     _matches = [
+         m for m in _matches
+         if all(match_filter(m, original_graph, pattern_graph)
+                for match_filter in match_filters)
+     ]
+
+     replacement_placeholders = [n for n in replacement_graph.nodes if n.op == "placeholder"]
+
+     # As we progressively replace nodes, we'll need to keep track of how the match results should change
+     match_changed_node: Dict[Node, Node] = {}
+
+     match_and_replacements = []
+     for match in _matches:
+
+         # Build connecting between replacement graph's input and original graph input producer node
+
+         # Initialize `val_map` with mappings from placeholder nodes in
+         # `replacement` to their corresponding node in `original_graph`
+         assert len(match.placeholder_nodes) == len(replacement_placeholders)
+         val_map: Dict[Node, Node] = {}
+         for rn, gn in zip(replacement_placeholders, match.placeholder_nodes):
+             if isinstance(gn, Node):
+                 val_map[rn] = match_changed_node.get(gn, gn)
+                 if gn != val_map[rn]:
+                     # Update match.placeholder_nodes and match.nodes_map with the node that replaced gn
+                     gn_ind = match.placeholder_nodes.index(gn)
+                     match.placeholder_nodes[gn_ind] = match_changed_node[gn]
+                     map_key = list(match.nodes_map.keys())[list(match.nodes_map.values()).index(gn)]
+                     match.nodes_map[map_key] = match_changed_node[gn]
+             else:
+                 val_map[rn] = gn
+
+         # Copy the replacement graph over
+         user_nodes: Set[Node] = set()
+         for n in match.returning_nodes:
+             for user in n.users:
+                 user_nodes.add(user)
+         assert user_nodes, "The returning_nodes should have at least one user node"
+
+         if len(user_nodes) == 1:
+             first_user_node = next(iter(user_nodes))
+         else:
+             # If there are multiple user nodes, we need to find the first user node
+             # in the current execution order of the `original_graph`
+             for n in original_graph.nodes:
+                 if n in user_nodes:
+                     first_user_node = n
+                     break
+
+         with original_graph.inserting_before(first_user_node):  # type: ignore[possibly-undefined]
+             copied_returning_nodes = original_graph.graph_copy(replacement_graph, val_map)
+
+         if isinstance(copied_returning_nodes, Node):
+             copied_returning_nodes = (copied_returning_nodes, )
+
+         # Get a list of nodes that have been replaced into the graph
+         replacement_nodes: List[Node] = [v for v in val_map.values() if v not in match.placeholder_nodes]
+
+         # Hook the output Node of the replacement subgraph in to the
+         # original Graph at the correct location
+         assert len(match.returning_nodes) == len(copied_returning_nodes)
+         for gn, copied_node in zip(match.returning_nodes, copied_returning_nodes):
+             gn.replace_all_uses_with(copied_node)
+             match_changed_node[gn] = copied_node
+         # Remove the original nodes
+         for node in reversed(pattern_graph.nodes):
+             if node.op != "placeholder" and node.op != "output":
+                 gn = match.nodes_map[node]
+                 gm.graph.erase_node(gn)
+
+         match_and_replacements.append(
+             ReplacedPatterns(
+                 anchor=match.anchors[0],
+                 nodes_map=match.nodes_map,
+                 replacements=replacement_nodes
+             )
+         )
+
+     # Update the passed-in GraphModule to reflect the new state of
+     # `original_graph`
+     gm.recompile()
+
+     # If `replacement` was an nn.Module, we'll need to make sure that
+     # all the submodules have been copied over correctly
+     if isinstance(replacement, torch.nn.Module):
+         _replace_attributes(gm, replacement)
+
+     return match_and_replacements
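A brief usage sketch of the APIs defined in this file (illustrative only, not part of the added source). The `accept_all` filter below is a made-up name; it simply keeps every match and only exists to show the (match, original_graph, pattern_graph) signature that replace_pattern_with_filters expects.

import torch
from torch.fx import symbolic_trace, subgraph_rewriter

class M(torch.nn.Module):
    def forward(self, x, w1, w2):
        return x + torch.cat([w1, w2]).sum()

def pattern(w1, w2):
    return torch.cat([w1, w2]).sum()

def replacement(w1, w2):
    return torch.stack([w1, w2]).sum()

def accept_all(match, original_graph, pattern_graph):
    # Trivial filter: keep every InternalMatch found by the matcher.
    return True

traced = symbolic_trace(M())
replaced = subgraph_rewriter.replace_pattern_with_filters(
    traced, pattern, replacement, match_filters=[accept_all])
print(len(replaced))  # number of non-overlapping matches that were rewritten
print(traced.code)    # forward now builds torch.stack(...).sum() instead of torch.cat(...).sum()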
venv/lib/python3.10/site-packages/torch/fx/tensor_type.py ADDED
@@ -0,0 +1,104 @@
+ from torch.fx.experimental.unification import Var  # type: ignore[attr-defined]
+
+ from ._compatibility import compatibility
+
+
+ @compatibility(is_backward_compatible=False)
+ class TensorType:
+     """
+     TensorType defines a type for tensors, which consists of a list of dimensions.
+     Example:
+         class M(torch.nn.Module):
+             def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))):
+                 return torch.add(x, y)
+     """
+
+     def __init__(self, dim):
+         self.__origin__ = TensorType
+         self.__args__ = dim
+
+     def __repr__(self):
+         return f'TensorType[{self.__args__}]'
+
+     def __eq__(self, other):
+         if isinstance(other, self.__class__):
+             return list(self.__args__) == list(other.__args__)
+         else:
+             return False
+
+     @staticmethod
+     def __class_getitem__(*args):
+         if len(args) == 1 and isinstance(args[0], tuple):
+             args = args[0]
+         return TensorType(tuple(args))
+
+
+ class _DynType:
+     """
+     _DynType defines a type which stands for the absence of type information.
+     """
+     def __init__(self):
+         self.__name__ = '_DynType'
+
+     def __eq__(self, other):
+         return isinstance(other, self.__class__)
+
+     def __str__(self):
+         return "Dyn"
+
+     def __repr__(self):
+         return "Dyn"
+
+
+ Dyn = _DynType()
+
+ @compatibility(is_backward_compatible=False)
+ def is_consistent(t1, t2):
+     """
+     A binary relation denoted by ~ that determines if t1 is consistent with t2.
+     The relation is reflexive, symmetric but not transitive.
+     returns True if t1 and t2 are consistent and False otherwise.
+     Example:
+         Dyn ~ TensorType((1,2,3))
+         int ~ Dyn
+         int ~ int
+         TensorType((1,Dyn,3)) ~ TensorType((1,2,3))
+     """
+
+     if t1 == t2:
+         return True
+
+     if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
+         return True
+
+     if isinstance(t1, TensorType) and isinstance(t2, TensorType):
+         return len(t1.__args__) == len(t2.__args__) and \
+             all(is_consistent(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__))
+     else:
+         return False
+
+
+ @compatibility(is_backward_compatible=False)
+ def is_more_precise(t1, t2):
+     """
+     A binary relation denoted by <= that determines if t1 is more precise than t2.
+     The relation is reflexive and transitive.
+     returns True if t1 is more precise than t2 and False otherwise.
+     Example:
+         Dyn >= TensorType((1,2,3))
+         int >= Dyn
+         int >= int
+         TensorType((1,Dyn,3)) <= TensorType((1,2,3))
+     """
+     if t1 == t2:
+         return True
+
+     if isinstance(t2, _DynType):
+         return True
+
+     if isinstance(t1, TensorType) and isinstance(t2, TensorType):
+         return len(t1.__args__) == len(t2.__args__) and \
+             all(is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__))
+
+     else:
+         return False
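A small hedged sketch of how these type helpers compose (illustrative only; the variable names are made up and the behavior shown follows directly from the definitions above).

from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise

partial = TensorType((1, Dyn, 3))   # one dimension left unknown
full = TensorType((1, 2, 3))        # fully specified shape

print(is_consistent(partial, full))              # True: Dyn is consistent with 2
print(is_more_precise(full, partial))            # True: concrete dims refine Dyn
print(is_consistent(full, TensorType((1, 2))))   # False: ranks differ
print(TensorType[1, 2, 3] == full)               # True: subscription builds the same type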
venv/lib/python3.10/site-packages/torch/fx/traceback.py ADDED
@@ -0,0 +1,99 @@
+ import traceback
+ from contextlib import contextmanager
+ from typing import List, Any, Dict
+ from ._compatibility import compatibility
+
+ __all__ = ['preserve_node_meta', 'has_preserved_node_meta',
+            'set_stack_trace', 'set_grad_fn_seq_nr', 'reset_grad_fn_seq_nr',
+            'format_stack', 'set_current_meta', 'get_current_meta']
+
+ current_meta: Dict[str, Any] = {}
+ should_preserve_node_meta = False
+
+
+ @compatibility(is_backward_compatible=False)
+ @contextmanager
+ def preserve_node_meta():
+     global should_preserve_node_meta
+
+     saved_should_preserve_node_meta = should_preserve_node_meta
+     try:
+         should_preserve_node_meta = True
+         yield
+     finally:
+         should_preserve_node_meta = saved_should_preserve_node_meta
+
+
+ @compatibility(is_backward_compatible=False)
+ def set_stack_trace(stack : List[str]):
+     global current_meta
+
+     if should_preserve_node_meta and stack:
+         current_meta["stack_trace"] = "".join(stack)
+
+
+ @compatibility(is_backward_compatible=False)
+ def set_grad_fn_seq_nr(seq_nr):
+     global current_meta
+
+     if should_preserve_node_meta:
+         # The seq_nr is captured by eager mode in the grad_fn during forward
+         current_meta["grad_fn_seq_nr"] = current_meta.get("grad_fn_seq_nr", []) + [seq_nr]
+         current_meta["in_grad_fn"] = current_meta.get("in_grad_fn", 0) + 1
+
+
+ @compatibility(is_backward_compatible=False)
+ def reset_grad_fn_seq_nr():
+     # NB: reset state properly, this would be helpful towards supporting
+     # reentrant autograd if we actually wanted to do that.
+     global current_meta
+     if should_preserve_node_meta:
+         current_level = current_meta.get("in_grad_fn", 0)
+         assert current_level > 0
+         if current_level == 1:
+             del current_meta["in_grad_fn"]
+             del current_meta["grad_fn_seq_nr"]
+         else:
+             current_meta["in_grad_fn"] = current_level - 1
+             current_meta["grad_fn_seq_nr"].pop()
+
+
+ @compatibility(is_backward_compatible=False)
+ def format_stack() -> List[str]:
+     if should_preserve_node_meta:
+         return [current_meta.get("stack_trace", "")]
+     else:
+         # fallback to traceback.format_stack()
+         return traceback.format_list(traceback.extract_stack()[:-1])
+
+
+ @compatibility(is_backward_compatible=False)
+ def has_preserved_node_meta() -> bool:
+     return should_preserve_node_meta
+
+
+ @compatibility(is_backward_compatible=False)
+ @contextmanager
+ def set_current_meta(node):
+     global current_meta
+     if should_preserve_node_meta and node.meta:
+         saved_meta = current_meta
+         try:
+             current_meta = node.meta.copy()
+
+             # Append (node.name, node.target) onto "from_node" for provenance tracking
+             if "from_node" not in current_meta:
+                 current_meta["from_node"] = [(node.name, node.target)]
+             elif current_meta["from_node"][-1][0] != node.name:
+                 current_meta["from_node"].append((node.name, node.target))
+
+             yield
+         finally:
+             current_meta = saved_meta
+     else:
+         yield
+
+
+ @compatibility(is_backward_compatible=False)
+ def get_current_meta() -> Dict[str, Any]:
+     return current_meta
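A hedged sketch of how these helpers are typically combined by a pass that wants to read node metadata while it runs (illustrative only; `double` and `gm` are made-up names for any traced callable).

import torch
import torch.fx
import torch.fx.traceback as fx_traceback

def double(x):
    return x * 2

gm = torch.fx.symbolic_trace(double)

with fx_traceback.preserve_node_meta():
    assert fx_traceback.has_preserved_node_meta()
    for node in gm.graph.nodes:
        with fx_traceback.set_current_meta(node):
            # When node.meta is non-empty, get_current_meta() returns a copy of it
            # augmented with "from_node" provenance; otherwise the surrounding
            # meta dict is returned unchanged.
            meta = fx_traceback.get_current_meta()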