applied-ai-018 committed on
Commit a1827f6 · verified · 1 parent: 25d27a1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/fp32.pt +3 -0
  8. venv/lib/python3.10/site-packages/torch/_decomp/__init__.py +463 -0
  9. venv/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/fx/passes/__init__.py +11 -0
  20. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py +44 -0
  36. venv/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py +0 -0
  37. venv/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py +56 -0
  40. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py +0 -0
  43. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py +112 -0
  46. venv/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py +73 -0
  47. venv/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py +421 -0
  48. venv/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py +110 -0
  49. venv/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py +2 -0
  50. venv/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21e00655ee9261547d811e81807f457d8dd35a306e24ba795f2132dd9b819b4f
+ size 50332828
ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eadcc67bd6e8fcd78af6d15d15e437bb0ace877a8e4f105b9be926c4c5a50044
+ size 50332843
ckpts/universal/global_step120/zero/5.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:665e95e98e3918b7023879279788eb371dee7d2daa42326e7eba9757c760513e
+ size 9372
ckpts/universal/global_step120/zero/8.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c97189d6b8c5bac619107032a0da1bad449f6f82c6a2cd0c8b207bc2451845a1
+ size 50332843
ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:620a6b6e3f2a06a908e80853ecd459ce28ac83a670e1a51e317ac4baf4491bd7
+ size 9372
ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f02e0fc5a3c274b59f8104a13281df26cf2831c870bd379bdeb80ec580b00f45
+ size 9387
ckpts/universal/global_step120/zero/9.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38ae83439e0cc6affd45e03350cec69e2df5b8111f299a7d5c16dfb26a0e4a46
+ size 9293
venv/lib/python3.10/site-packages/torch/_decomp/__init__.py ADDED
@@ -0,0 +1,463 @@
1
+ import inspect
2
+ from collections import defaultdict
3
+ from functools import wraps
4
+ from itertools import chain
5
+ from typing import Callable, Dict, List, Sequence, Union
6
+
7
+ import torch
8
+ import torch.library
9
+ from torch._ops import HigherOrderOperator, OpOverload, OpOverloadPacket
10
+ from torch._prims_common import CustomOutParamAnnotation
11
+ from torch.utils import _pytree as pytree
12
+
13
+ __all__ = [
14
+ "decomposition_table",
15
+ "pre_autograd_decomposition_table",
16
+ "meta_table",
17
+ "register_decomposition",
18
+ "get_decompositions",
19
+ "core_aten_decompositions",
20
+ ]
21
+
22
+
23
+ # TODO: relax key type here; torch registrations should be possible to; but
24
+ # right now this type is accurate
25
+ global_decomposition_table: Dict[
26
+ str, Dict[torch._ops.OperatorBase, Callable]
27
+ ] = defaultdict(dict)
28
+
29
+ decomposition_table = global_decomposition_table["post_autograd"]
30
+ pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"]
31
+ meta_table = global_decomposition_table["meta"]
32
+
33
+
34
+ def _add_op_to_registry(registry, op, fn):
35
+ """
36
+ This is an internal API for adding an op to the decomposition table.
37
+
38
+ If op is OpOverload, it will be added to the registry directly.
39
+ If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.
40
+ """
41
+ overloads: List[Union[torch._ops.OperatorBase]] = []
42
+ if isinstance(op, HigherOrderOperator):
43
+ # There's no concept of overloads for HigherOrderOperator
44
+ registry[op] = fn
45
+ return
46
+ elif isinstance(op, OpOverload):
47
+ overloads.append(op)
48
+ else:
49
+ assert isinstance(op, OpOverloadPacket)
50
+ for ol in op.overloads():
51
+ overloads.append(getattr(op, ol))
52
+
53
+ for op_overload in overloads:
54
+ if op_overload in registry:
55
+ raise RuntimeError(f"duplicate registrations for {op_overload}")
56
+ # TorchScript dumps a bunch of extra nonsense overloads
57
+ # which don't have corresponding dispatcher entries, we need
58
+ # to filter those out, e.g aten.add.float_int
59
+ if torch._C._dispatch_has_kernel(op_overload.name()):
60
+ registry[op_overload] = fn
61
+
62
+
63
+ def _convert_out_params(f):
64
+ out_annotation = f.__annotations__.get("out")
65
+
66
+ # If there are no out params, do not wrap the function.
67
+ if not out_annotation:
68
+ return f
69
+
70
+ # Hack to detect when out is a Tuple. There seems to be no pretty way of doing this
71
+ if getattr(out_annotation, "__origin__", None) is tuple:
72
+ sig = inspect.signature(f)
73
+ out_names = sig.return_annotation._fields
74
+ # If out is a tuple, we need to register a function that unpacks all the out
75
+ # elements as this is what native_functions.yaml expects
76
+
77
+ @wraps(f)
78
+ def _fn(*args, **kwargs):
79
+ out_kwargs = tuple(kwargs.pop(o, None) for o in out_names)
80
+ # Either all of the out kwargs are set or none of them
81
+ is_none = out_kwargs[0] is None
82
+ assert all((o is None) == is_none for o in out_kwargs)
83
+ return f(*args, **kwargs, out=None if is_none else out_kwargs)
84
+
85
+ out_params = [
86
+ inspect.Parameter(
87
+ o,
88
+ kind=inspect.Parameter.KEYWORD_ONLY,
89
+ default=None,
90
+ annotation=t,
91
+ )
92
+ for o, t in zip(out_names, out_annotation.__args__)
93
+ ]
94
+ # Drop the out parameter and concatenate the new kwargs in the signature
95
+ params = chain((v for k, v in sig.parameters.items() if k != "out"), out_params)
96
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
97
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
98
+ )
99
+ # Drop the out parameter and concatenate the new kwargs in the annotations
100
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
101
+ for o in out_params:
102
+ _fn.__annotations__[o.name] = o.annotation
103
+
104
+ # Propagate that this function is wrapped by `out_wrapper`
105
+ _fn._torch_decompositions_out_wrapper = f._torch_decompositions_out_wrapper # type: ignore[attr-defined]
106
+
107
+ return _fn
108
+
109
+ # Alternatively, there may be a single tensor out parameter with a name
110
+ # other than "out". This will need special treatment and is indicated by an
111
+ # annotation, which we will remove here so it is not exposed after wrapping.
112
+ custom_out_param_name = f.__annotations__.pop(CustomOutParamAnnotation, None)
113
+ if custom_out_param_name:
114
+
115
+ @wraps(f)
116
+ def _fn(*args, **kwargs):
117
+ out_kwarg = kwargs.pop(custom_out_param_name, None)
118
+ return f(*args, **kwargs, out=out_kwarg)
119
+
120
+ out_param = inspect.Parameter(
121
+ custom_out_param_name,
122
+ kind=inspect.Parameter.KEYWORD_ONLY,
123
+ default=None,
124
+ annotation=out_annotation,
125
+ )
126
+
127
+ # Drop the out parameter and concatenate the new kwarg in the signature
128
+ sig = inspect.signature(f)
129
+ params = chain(
130
+ (v for k, v in sig.parameters.items() if k != "out"), (out_param,)
131
+ )
132
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
133
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
134
+ )
135
+
136
+ # Drop the out parameter and concatenate the new kwargs in the annotations
137
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
138
+ _fn.__annotations__[out_param.name] = out_param.annotation
139
+
140
+ return _fn
141
+
142
+ return f
143
+
144
+
145
+ def register_decomposition(
146
+ aten_op, registry=None, *, type="post_autograd", unsafe=False
147
+ ):
148
+ """
149
+ A decorator to register a function as a decomposition to the Python
150
+ decomposition table. Use it like this::
151
+
152
+ @register_decomposition(torch.ops.aten.clamp_min)
153
+ def clamp_min(x):
154
+ return torch.clamp(self, min=min)
155
+
156
+ If you are writing a new decomposition, consider contributing it
157
+ directly to PyTorch in torch._decomp.decompositions.
158
+
159
+ This API is experimental; we are almost certainly going to extend
160
+ the API when we make decompositions eligible for use in transforms (e.g.,
161
+ autograd) and not just backend tracing, where we then need to know if a
162
+ decomposition can be used to simulate a transform.
163
+
164
+ By default, we also will register it to the Meta key of dispatcher,
165
+ and replace the c++ Meta implementation if there is already one.
166
+
167
+ unsafe kwarg is for reuse of this function for registering non-function
168
+ things
169
+ """
170
+
171
+ assert type in {"post_autograd", "pre_autograd", "meta"}
172
+
173
+ def decomposition_decorator(fn: Callable) -> Callable:
174
+ orig_fn = fn
175
+ if not unsafe:
176
+ fn = _convert_out_params(fn)
177
+
178
+ nonlocal registry
179
+ if registry is None:
180
+ registry = global_decomposition_table[type]
181
+
182
+ def register(op):
183
+ _add_op_to_registry(registry, op, fn)
184
+
185
+ # To handle allowing multiple aten_ops at once
186
+ pytree.tree_map_(register, aten_op)
187
+ return orig_fn
188
+
189
+ return decomposition_decorator
190
+
191
+
192
+ def get_decompositions(
193
+ aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]],
194
+ type: str = "post_autograd",
195
+ ) -> Dict[torch._ops.OperatorBase, Callable]:
196
+ """
197
+ Retrieve a dictionary of decompositions corresponding to the list of
198
+ operator overloads and overload packets passed as input. Overload
199
+ packets will include all decomposed overloads in the packet. If there is
200
+ no decomposition for a requested operator, it is silently ignored.
201
+
202
+ This API is experimental; we are almost certainly going to give an alternate,
203
+ more recommended formulation, where a user provides the set of operators
204
+ they know how to implement, and we provide decompositions for everything
205
+ not in this set.
206
+ """
207
+ assert type in {"post_autograd", "pre_autograd", "meta"}
208
+
209
+ registry = global_decomposition_table[type]
210
+ packets_to_overloads = defaultdict(list)
211
+ for opo in registry:
212
+ if isinstance(opo, (OpOverload, OpOverloadPacket)):
213
+ packets_to_overloads[opo.overloadpacket].append(opo)
214
+ decompositions: Dict[torch._ops.OperatorBase, Callable] = {}
215
+ for op in aten_ops:
216
+ if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:
217
+ for op_overload in packets_to_overloads[op]:
218
+ decompositions[op_overload] = registry[op_overload]
219
+ elif isinstance(op, (torch._ops.OperatorBase)) and op in registry:
220
+ decompositions[op] = registry[op]
221
+ return decompositions
222
+
223
+
224
+ def remove_decompositions(
225
+ decompositions: Dict[torch._ops.OperatorBase, Callable],
226
+ aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]],
227
+ ) -> None:
228
+ """
229
+ Given a dictionary of decompositions obtained from get_decompositions(), removes
230
+ operators associated with a list of operator overloads and overload packets passed
231
+ as input. If the decomposition dictionary does not contain a decomposition that is
232
+ specified to be removed, it is silently ignored.
233
+ """
234
+ for op in aten_ops:
235
+ if isinstance(op, OpOverloadPacket):
236
+ for overload_name in op.overloads():
237
+ opo = getattr(op, overload_name)
238
+ decompositions.pop(opo, None)
239
+ elif isinstance(op, OpOverload):
240
+ decompositions.pop(op, None)
241
+
242
+
243
+ # populate the table
244
+ import torch._decomp.decompositions
245
+ import torch._refs
246
+
247
+
248
+ # See NOTE [Core ATen Ops]
249
+ #
250
+ # list was copied from torch/_inductor/decomposition.py
251
+ # excluding decompositions that results in prim ops
252
+ # Resulting opset of decomposition is core aten ops
253
+ def core_aten_decompositions() -> Dict[torch._ops.OperatorBase, Callable]:
254
+ aten = torch.ops.aten
255
+ return get_decompositions(
256
+ [
257
+ aten.addcdiv,
258
+ aten.addcdiv_,
259
+ aten.addcmul,
260
+ aten.addcmul_,
261
+ aten.addr,
262
+ aten.affine_grid_generator,
263
+ aten.all,
264
+ aten.aminmax,
265
+ aten.arange.default,
266
+ aten.arange.start,
267
+ aten.avg_pool2d_backward,
268
+ aten.baddbmm,
269
+ aten.binary_cross_entropy,
270
+ aten.binary_cross_entropy_backward,
271
+ aten.binary_cross_entropy_with_logits,
272
+ aten.block_diag,
273
+ aten.celu,
274
+ aten.celu_,
275
+ aten.clamp_max,
276
+ aten.clamp_min,
277
+ aten.col2im,
278
+ aten.count_nonzero,
279
+ aten.linalg_cross,
280
+ aten.cudnn_batch_norm,
281
+ aten.cudnn_batch_norm_backward,
282
+ aten.deg2rad,
283
+ aten.deg2rad_,
284
+ aten.detach,
285
+ aten.diag_embed,
286
+ aten.diagonal_backward,
287
+ aten.dot,
288
+ aten.vdot,
289
+ aten.elu,
290
+ aten.elu_,
291
+ aten.elu_backward,
292
+ aten._embedding_bag,
293
+ aten.embedding_dense_backward,
294
+ aten.empty_like,
295
+ aten._euclidean_dist.default,
296
+ aten.expand_as,
297
+ aten.eye,
298
+ aten.fill,
299
+ aten.fill_,
300
+ aten.floor_divide,
301
+ aten.frac,
302
+ aten.frac_,
303
+ aten._fused_moving_avg_obs_fq_helper,
304
+ aten.gelu_,
305
+ aten.gelu_backward,
306
+ aten.glu,
307
+ aten.glu_backward,
308
+ aten.hardshrink,
309
+ aten.hardsigmoid,
310
+ aten.hardsigmoid_,
311
+ aten.hardsigmoid_backward,
312
+ aten.hardswish,
313
+ aten.hardswish_,
314
+ aten.hardswish_backward,
315
+ aten.hardtanh_,
316
+ aten.hardtanh_backward,
317
+ aten.heaviside,
318
+ aten.heaviside_,
319
+ aten.huber_loss,
320
+ aten.huber_loss_backward,
321
+ aten.im2col,
322
+ aten.index_add,
323
+ aten.index_add_,
324
+ aten.index_copy,
325
+ aten.index_copy_,
326
+ aten.index_fill,
327
+ aten.index_fill_,
328
+ aten.isin,
329
+ aten.isneginf,
330
+ aten.isposinf,
331
+ aten.l1_loss,
332
+ aten._lazy_clone,
333
+ aten._test_parallel_materialize,
334
+ aten.leaky_relu_,
335
+ aten.leaky_relu_backward,
336
+ aten.lerp,
337
+ aten.lerp_,
338
+ aten.linspace,
339
+ aten.logaddexp,
340
+ aten.logaddexp2,
341
+ aten.logit,
342
+ aten.logit_,
343
+ aten.logit_backward,
344
+ aten.log_sigmoid_backward,
345
+ aten.log_sigmoid_forward,
346
+ aten._log_softmax_backward_data,
347
+ aten.logspace,
348
+ aten.logsumexp.default,
349
+ aten.masked_fill,
350
+ aten.masked_fill_,
351
+ aten.mish,
352
+ aten.mish_,
353
+ aten.mse_loss,
354
+ aten.mse_loss_backward,
355
+ aten.multi_margin_loss,
356
+ aten.multilabel_margin_loss_forward,
357
+ aten.mv,
358
+ aten.mvlgamma,
359
+ aten.mvlgamma_,
360
+ aten.nansum,
361
+ aten.nan_to_num,
362
+ aten.nan_to_num_,
363
+ aten.narrow,
364
+ aten.native_batch_norm_backward,
365
+ aten.native_dropout_backward,
366
+ aten.native_group_norm_backward,
367
+ aten.native_layer_norm_backward,
368
+ aten.new_empty,
369
+ aten.new_full,
370
+ aten.new_ones,
371
+ aten.new_zeros,
372
+ aten.nll_loss_backward,
373
+ aten.nll_loss_forward,
374
+ aten.norm,
375
+ aten.ones,
376
+ aten.ones_like,
377
+ aten.pixel_shuffle,
378
+ aten.pixel_unshuffle,
379
+ aten._prelu_kernel,
380
+ aten._prelu_kernel_backward,
381
+ aten._reshape_alias,
382
+ aten.rad2deg,
383
+ aten.rad2deg_,
384
+ aten.reflection_pad1d,
385
+ aten.reflection_pad2d,
386
+ aten.reflection_pad3d,
387
+ aten.replication_pad1d,
388
+ aten.replication_pad2d,
389
+ aten.replication_pad3d,
390
+ aten.renorm,
391
+ aten.renorm_,
392
+ aten.replication_pad2d,
393
+ aten.roll,
394
+ aten.rot90,
395
+ aten.rrelu_with_noise,
396
+ aten.rrelu_with_noise_,
397
+ aten.rsub,
398
+ aten._scaled_dot_product_flash_attention_for_cpu.default,
399
+ aten.select_backward,
400
+ aten.select_scatter,
401
+ aten.sgn,
402
+ aten.sgn_,
403
+ aten.sigmoid_backward,
404
+ aten.silu,
405
+ aten.silu_,
406
+ aten.silu_backward,
407
+ aten.sinc,
408
+ aten.sinc_,
409
+ aten.slice_backward,
410
+ aten.smooth_l1_loss,
411
+ aten.smooth_l1_loss_backward,
412
+ aten.soft_margin_loss,
413
+ aten.soft_margin_loss_backward,
414
+ aten._softmax_backward_data,
415
+ aten.softplus,
416
+ aten.softplus_backward,
417
+ aten.softshrink,
418
+ aten.special_entr,
419
+ aten.special_log_ndtr,
420
+ aten.special_xlog1py,
421
+ aten.split.Tensor,
422
+ aten.split_with_sizes_copy,
423
+ aten.squeeze.default,
424
+ aten.squeeze.dim,
425
+ aten.std,
426
+ aten.std_mean,
427
+ aten.stack,
428
+ aten.sum.default,
429
+ aten.sum.out,
430
+ aten.t,
431
+ aten.take,
432
+ aten.tanh_backward,
433
+ aten.threshold,
434
+ aten.threshold_,
435
+ aten.threshold_backward,
436
+ aten.trace,
437
+ aten.transpose.int,
438
+ aten.tril,
439
+ aten.tril_,
440
+ aten.triu,
441
+ aten.triu_,
442
+ aten.unbind,
443
+ aten.unfold_backward,
444
+ aten.unfold_copy,
445
+ aten._unsafe_index,
446
+ aten.unsafe_split.Tensor,
447
+ aten.unsafe_split_with_sizes,
448
+ aten._unsafe_view,
449
+ aten.upsample_linear1d,
450
+ aten.upsample_bilinear2d,
451
+ aten.upsample_trilinear3d,
452
+ aten.upsample_nearest2d_backward,
453
+ aten.view_as_complex,
454
+ aten.xlogy,
455
+ aten.xlogy_,
456
+ aten.zero,
457
+ aten.zero_,
458
+ aten.zeros,
459
+ aten.zeros_like,
460
+ aten._chunk_cat,
461
+ aten._weight_norm_interface,
462
+ ]
463
+ )
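The module above defines the Python decomposition registry. A minimal usage sketch (not part of this commit; the private registry dict, the function name clamp_min_decomp, and the op choices are illustrative): registering a decomposition into a private table and pulling tables for a backend.

    import torch
    from torch._decomp import register_decomposition, get_decompositions, core_aten_decompositions

    aten = torch.ops.aten
    my_registry = {}  # private table, so we don't collide with the global registrations

    @register_decomposition(aten.clamp_min, registry=my_registry)
    def clamp_min_decomp(x, min):
        # decompose clamp_min into a plain clamp
        return torch.clamp(x, min=min)

    # Overload packets expand to every overload registered in the global table.
    table = get_decompositions([aten.addcdiv, aten.elu_backward])

    # Curated decomposition table whose output opset is core ATen ops.
    core_table = core_aten_decompositions()
    print(len(my_registry), len(table), len(core_table))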
venv/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.11 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc ADDED
Binary file (826 Bytes).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc ADDED
Binary file (55.3 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc ADDED
Binary file (24.9 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc ADDED
Binary file (2.98 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc ADDED
Binary file (20.3 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc ADDED
Binary file (26.2 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc ADDED
Binary file (10.5 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc ADDED
Binary file (3.78 kB).
venv/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc ADDED
Binary file (2.34 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__init__.py ADDED
@@ -0,0 +1,11 @@
+ from . import graph_drawer
+ from . import graph_manipulation
+ from . import net_min_base
+ from . import operator_support
+ from . import param_fetch
+ from . import reinplace
+ from . import shape_prop
+ from . import split_module
+ from . import split_utils
+ from . import splitter_base
+ from . import tools_common
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (574 Bytes).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/annotate_getitem_nodes.cpython-310.pyc ADDED
Binary file (1.44 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/fake_tensor_prop.cpython-310.pyc ADDED
Binary file (3.17 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_drawer.cpython-310.pyc ADDED
Binary file (11.9 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/graph_manipulation.cpython-310.pyc ADDED
Binary file (3.58 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/net_min_base.cpython-310.pyc ADDED
Binary file (20.3 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/operator_support.cpython-310.pyc ADDED
Binary file (7.53 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/param_fetch.cpython-310.pyc ADDED
Binary file (2.72 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/pass_manager.cpython-310.pyc ADDED
Binary file (7.56 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/reinplace.cpython-310.pyc ADDED
Binary file (18.7 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/shape_prop.cpython-310.pyc ADDED
Binary file (5.79 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_module.cpython-310.pyc ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/split_utils.cpython-310.pyc ADDED
Binary file (6.96 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/splitter_base.cpython-310.pyc ADDED
Binary file (25.2 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/__pycache__/tools_common.cpython-310.pyc ADDED
Binary file (7.19 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/annotate_getitem_nodes.py ADDED
@@ -0,0 +1,44 @@
+ import operator
+
+ import torch
+
+
+ def annotate_getitem_nodes(graph: torch.fx.Graph) -> None:
+     """
+     Annotate the type of getitem nodes, inferred from the type of sequence node.
+     If sequence node is not annotated with a type, do nothing.
+     Currently support getitem nodes from Tuple, List, and NamedTuple sequence node.
+
+     This is helpful since annotations on local names within function are lost during FX transforms.
+     Adding back known type annotation for getitem nodes to improve jit scriptability.
+
+     Args:
+         graph (Graph): The graph to be annotated
+     """
+     for node in graph.nodes:
+         if node.target == operator.getitem:
+             sequence_node, index_node = node.args
+             if not sequence_node.type:
+                 continue
+             # container types
+             if hasattr(sequence_node.type, "_name"):
+                 parameterized_types = sequence_node.type.__args__
+                 if sequence_node.type._name == "Tuple":
+                     if len(parameterized_types) == 2 and isinstance(
+                         parameterized_types[1], type(...)
+                     ):
+                         node.type = parameterized_types[0]
+                     else:
+                         assert len(parameterized_types) > index_node
+                         node_type = parameterized_types[index_node]
+                         node.type = node_type
+                 elif sequence_node.type._name == "List":
+                     assert len(parameterized_types) == 1
+                     node.type = parameterized_types[0]
+             # NamedTuple type
+             elif hasattr(sequence_node.type, "__annotations__"):
+                 if sequence_node.type == torch.Tensor:
+                     continue
+                 sequence_node_field_types = sequence_node.type.__annotations__
+                 field_name = sequence_node.type._fields[index_node]
+                 node.type = sequence_node_field_types[field_name]
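A minimal sketch of running the pass above (not part of this commit; the toy module PairSum is hypothetical). getitem nodes only receive a type when the sequence node itself carries an annotation, which symbolic_trace typically preserves for annotated placeholders:

    from typing import Tuple

    import torch
    import torch.fx
    from torch.fx.passes.annotate_getitem_nodes import annotate_getitem_nodes

    class PairSum(torch.nn.Module):
        def forward(self, pair: Tuple[torch.Tensor, torch.Tensor]):
            return pair[0] + pair[1]  # indexing a Proxy creates operator.getitem nodes

    gm = torch.fx.symbolic_trace(PairSum())
    annotate_getitem_nodes(gm.graph)
    for node in gm.graph.nodes:
        print(node.name, node.type)  # the two getitem nodes should now be typed as torch.Tensor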
venv/lib/python3.10/site-packages/torch/fx/passes/backends/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
venv/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc ADDED
Binary file (2.2 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/backends/cudagraphs.py ADDED
@@ -0,0 +1,56 @@
+ import torch
+ from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
+ from torch.fx.passes.operator_support import OperatorSupport
+ from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
+ from torch.fx.passes.fake_tensor_prop import FakeTensorProp
+ from torch.utils import _pytree as pytree
+
+ import operator
+
+ class CudaGraphsSupport(OperatorSupport):
+     # TODO: why is submodules passed here
+     def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
+         if node.op not in CALLABLE_NODE_OPS:
+             return False
+
+         if node.target in [torch.ops.aten.embedding_dense_backward.default]:
+             return False
+
+         if node.target in [operator.getitem]:
+             return True
+
+         found_not_cuda = False
+
+         def meta_fk(meta):
+             return meta["val"] if "val" in meta else meta["fake_result"]
+
+         def find_not_cuda(t):
+             nonlocal found_not_cuda
+             if isinstance(t, torch.Tensor) and t.device.type != 'cuda':
+                 found_not_cuda = True
+
+         for n in node.all_input_nodes:
+             pytree.tree_map_(find_not_cuda, meta_fk(n.meta))
+
+         pytree.tree_map_(find_not_cuda, meta_fk(node.meta))
+
+         # NB: factory function is accounted for because the result would be
+         # cpu or cuda
+
+         return not found_not_cuda
+
+ def partition_cudagraphs(gm, inputs):
+     """
+     Partition an FX graph into sub-GraphModules that can be validly run under
+     CUDA graphs. For a subgraph to be runnable under CUDA, all of the operations
+     must involve CUDA tensors only/
+     """
+
+     FakeTensorProp(gm).propagate(*inputs)
+     supported_ops = CudaGraphsSupport()
+     # TODO: single node partition may be wrong due to the pessimization
+     # from copying in and out the data. Check in benchmarks, perhaps
+     partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)
+     partitions = partitioner.propose_partitions()
+     fused_graph = partitioner.fuse_partitions(partitions)
+     return fused_graph
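A minimal sketch of the partitioning entry point above (not part of this commit; the function f and inputs are hypothetical, a CUDA device is assumed, and make_fx is used to get an ATen-level graph):

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx
    from torch.fx.passes.backends.cudagraphs import partition_cudagraphs

    def f(x, y):
        return (x + y).relu() * 2

    example_inputs = [torch.randn(8, device="cuda"), torch.randn(8, device="cuda")]
    gm = make_fx(f)(*example_inputs)                  # trace to an fx.GraphModule of aten ops
    fused = partition_cudagraphs(gm, example_inputs)  # FakeTensorProp + capability-based partitioning
    print(fused.graph)                                # CUDA-only regions fused into submodules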
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes).
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/__pycache__/cse_pass.cpython-310.pyc ADDED
Binary file (3.82 kB).
venv/lib/python3.10/site-packages/torch/fx/passes/dialect/common/cse_pass.py ADDED
@@ -0,0 +1,112 @@
1
+ from typing import Dict, Tuple, Any
2
+
3
+ import torch
4
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
5
+ from torch.utils._pytree import tree_flatten
6
+
7
+ from torch.fx import GraphModule, Graph
8
+ from torch.fx import Node
9
+
10
+ aten = torch.ops.aten
11
+
12
+
13
+ # stateful ops are banned from CSE
14
+ rand_ops = {aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm} # noqa: E501,B950
15
+
16
+ inplace_ops = {aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_} # noqa: E501
17
+
18
+
19
+ @torch.fx._compatibility.compatibility(is_backward_compatible=False)
20
+ def get_CSE_banned_ops():
21
+ return rand_ops.union(inplace_ops)
22
+
23
+
24
+ @torch.fx._compatibility.compatibility(is_backward_compatible=False)
25
+ class CSEPass(PassBase):
26
+
27
+ def __init__(self, banned_ops=None):
28
+ """
29
+ This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node.
30
+
31
+ For functional dialects, user would only need to specify the random ops in ban list.
32
+
33
+ Warning: CSE Pass cannot be safely applied on a FX graph in non-functional dialects.
34
+ If your dialect contains stateful operators, please customized the banned_ops.
35
+
36
+ """
37
+ if banned_ops is None:
38
+ banned_ops = set()
39
+ self.banned_ops = banned_ops
40
+ super().__init__()
41
+
42
+ def call(self, graph_module: GraphModule) -> PassResult:
43
+ """
44
+ Return a new copy of torch.fx.GraphModule with CSE applied to the input graph
45
+
46
+ Example usage:
47
+
48
+ from torch.fx.experimental.proxy_tensor import make_fx
49
+ def f(a):
50
+ b = a * a
51
+ c = a * a
52
+ return b+c
53
+
54
+ p = CSEPass()
55
+ traced_graph = make_fx(f)(torch.tensor(1))
56
+ print(traced_graph)
57
+ result = p(traced_graph)
58
+ print(result.graph_module)
59
+ """
60
+ def get_aten_target(node):
61
+ if hasattr(node.target, 'overloadpacket'):
62
+ return node.target.overloadpacket
63
+ return node.target
64
+
65
+ modified = False
66
+ new_graph = Graph()
67
+ env: Dict[Node, Node] = {} # map from node in the old graph to node in the new graph
68
+ hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {} # map from hash to a node in the new graph
69
+ token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {} # map from hash to token
70
+ for n in graph_module.graph.nodes:
71
+ # The placeholder, output, and get_attr nodes are copied to the new graph without change
72
+ # do not CSE away random operations
73
+ if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops:
74
+ new_node = new_graph.node_copy(n, lambda x: env[x])
75
+ env[n] = new_node
76
+ else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
77
+ # substitute args and kwargs members to their mapping in env if exists
78
+ # specs can be used to reconstruct nested list/dictionaries
79
+ def substitute(arg_list):
80
+ arg_list, spec = tree_flatten(arg_list)
81
+ for i in range(len(arg_list)):
82
+ v = arg_list[i]
83
+ if isinstance(v, Node) and v in env:
84
+ arg_list[i] = env[v]
85
+ return tuple(arg_list), spec
86
+ args, args_spec = substitute(n.args)
87
+ kwargs, kwargs_spec = substitute(n.kwargs)
88
+
89
+ # each token corresponds to a unique node
90
+ # nodes with the same token can be substituted
91
+ token = {"target": n.target, "args": args, "args_spec": args_spec,
92
+ "kwargs": kwargs, "kwargs_spec": kwargs_spec}
93
+
94
+ # hash substituted args to a number, do not hash specs because specs are not hashable
95
+ hash_arg = hash((args, kwargs))
96
+ hash_val = (n.target, hash_arg)
97
+
98
+ # check if a node has a substitute and can be eliminated
99
+ hash_val_in_hash_env = hash_val in hash_env
100
+ if hash_val_in_hash_env and token_map[hash_val] == token:
101
+ modified = True # substitution happens and the graph is modified
102
+ env[n] = hash_env[hash_val]
103
+ continue
104
+
105
+ new_node = new_graph.node_copy(n, lambda x: env[x])
106
+ env[n] = new_node
107
+ if not hash_val_in_hash_env:
108
+ hash_env[hash_val] = new_node
109
+ token_map[hash_val] = token
110
+
111
+ csed_gm = GraphModule(graph_module, new_graph)
112
+ return PassResult(csed_gm, modified)
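A runnable variant of the docstring example above (not part of this commit): the two identical a * a computations hash to the same token, so one is eliminated.

    import torch
    from torch.fx.experimental.proxy_tensor import make_fx
    from torch.fx.passes.dialect.common.cse_pass import CSEPass

    def f(a):
        b = a * a
        c = a * a
        return b + c

    traced = make_fx(f)(torch.tensor(1.0))
    result = CSEPass()(traced)        # PassBase.__call__ runs call() and returns a PassResult
    print(result.modified)            # True: a duplicate node was substituted
    print(result.graph_module.graph)  # only one mul node remains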
venv/lib/python3.10/site-packages/torch/fx/passes/fake_tensor_prop.py ADDED
@@ -0,0 +1,73 @@
1
+ from typing import Optional
2
+
3
+ import torch.fx
4
+ from torch.fx import Node
5
+ from torch.fx._compatibility import compatibility
6
+ from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
7
+ from torch.fx.experimental.proxy_tensor import py_sym_types, snapshot_fake
8
+ from torch.fx.node import map_aggregate
9
+
10
+ __all__ = ['FakeTensorProp']
11
+
12
+ @compatibility(is_backward_compatible=False)
13
+ class FakeTensorProp(torch.fx.Interpreter):
14
+ """
15
+ Execute an FX graph Node-by-Node and record a fake tensor representing
16
+ the metadata for the node. Unlike ShapeProp, (1) this propagation
17
+ is cheap--it does the propagation with meta tensors which do not actually
18
+ store data, and (2) the fake tensors have much more fine grained information,
19
+ e.g., they have accurate alias information that can be consulted by looking
20
+ at the storages.
21
+
22
+ Args:
23
+ module (GraphModule): The module to be executed
24
+ mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
25
+ """
26
+ def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
27
+ super().__init__(module)
28
+ if mode is None:
29
+ mode = FakeTensorMode()
30
+ self._mode = mode
31
+
32
+ def run_node(self, n: Node):
33
+ import sympy
34
+ from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
35
+
36
+ result = super().run_node(n)
37
+ sym = None
38
+ if (
39
+ 'val' in n.meta and
40
+ isinstance(v := n.meta['val'], torch.SymInt) and
41
+ isinstance(v.node.expr, sympy.Symbol) and free_unbacked_symbols(v)
42
+ ):
43
+ sym = v
44
+
45
+ def extract_val(obj):
46
+ if isinstance(obj, FakeTensor):
47
+ return snapshot_fake(obj)
48
+ elif isinstance(obj, torch.Tensor):
49
+ # TODO: How is it possible that we get a non fake tensor? We
50
+ # should be running under the mode...
51
+ return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
52
+ elif isinstance(obj, py_sym_types):
53
+ return obj
54
+ else:
55
+ return None
56
+
57
+ meta = map_aggregate(result, extract_val)
58
+ if meta is not None:
59
+ n.meta['val'] = meta
60
+ if sym is not None:
61
+ torch._check(meta == v)
62
+ return result
63
+
64
+ def propagate(self, *args):
65
+ fake_args = [
66
+ self._mode.from_tensor(a) if isinstance(a, torch.Tensor) else a
67
+ for a in args
68
+ ]
69
+ return self.propagate_dont_convert_inputs(*fake_args)
70
+
71
+ def propagate_dont_convert_inputs(self, *args):
72
+ with self._mode:
73
+ return super().run(*args)
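A minimal sketch of using the propagator above (not part of this commit; the toy module Net is hypothetical). After propagate(), nodes carry a fake tensor in node.meta["val"] describing shape and dtype without allocating real data:

    import torch
    import torch.fx
    from torch.fx.passes.fake_tensor_prop import FakeTensorProp

    class Net(torch.nn.Module):
        def forward(self, x):
            return x.relu().sum(dim=-1)

    gm = torch.fx.symbolic_trace(Net())
    FakeTensorProp(gm).propagate(torch.randn(4, 8))

    for node in gm.graph.nodes:
        val = node.meta.get("val")
        if isinstance(val, torch.Tensor):
            print(node.name, tuple(val.shape), val.dtype)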
venv/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py ADDED
@@ -0,0 +1,421 @@
1
+
2
+ import hashlib
3
+ import torch
4
+ import torch.fx
5
+ from typing import Any, Dict, Optional, TYPE_CHECKING
6
+ from torch.fx.node import _get_qualified_name, _format_arg
7
+ from torch.fx.graph import _parse_stack_trace
8
+ from torch.fx.passes.shape_prop import TensorMetadata
9
+ from torch.fx._compatibility import compatibility
10
+ from itertools import chain
11
+
12
+ __all__ = ['FxGraphDrawer']
13
+ try:
14
+ import pydot
15
+ HAS_PYDOT = True
16
+ except ImportError:
17
+ HAS_PYDOT = False
18
+
19
+ _COLOR_MAP = {
20
+ "placeholder": '"AliceBlue"',
21
+ "call_module": "LemonChiffon1",
22
+ "get_param": "Yellow2",
23
+ "get_attr": "LightGrey",
24
+ "output": "PowderBlue",
25
+ }
26
+
27
+ _HASH_COLOR_MAP = [
28
+ "CadetBlue1",
29
+ "Coral",
30
+ "DarkOliveGreen1",
31
+ "DarkSeaGreen1",
32
+ "GhostWhite",
33
+ "Khaki1",
34
+ "LavenderBlush1",
35
+ "LightSkyBlue",
36
+ "MistyRose1",
37
+ "MistyRose2",
38
+ "PaleTurquoise2",
39
+ "PeachPuff1",
40
+ "Salmon",
41
+ "Thistle1",
42
+ "Thistle3",
43
+ "Wheat1",
44
+ ]
45
+
46
+ _WEIGHT_TEMPLATE = {
47
+ "fillcolor": "Salmon",
48
+ "style": '"filled,rounded"',
49
+ "fontcolor": "#000000",
50
+ }
51
+
52
+ if HAS_PYDOT:
53
+ @compatibility(is_backward_compatible=False)
54
+ class FxGraphDrawer:
55
+ """
56
+ Visualize a torch.fx.Graph with graphviz
57
+ Basic usage:
58
+ g = FxGraphDrawer(symbolic_traced, "resnet18")
59
+ g.get_dot_graph().write_svg("a.svg")
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ graph_module: torch.fx.GraphModule,
65
+ name: str,
66
+ ignore_getattr: bool = False,
67
+ ignore_parameters_and_buffers: bool = False,
68
+ skip_node_names_in_args: bool = True,
69
+ parse_stack_trace: bool = False,
70
+ dot_graph_shape: Optional[str] = None,
71
+ ):
72
+ self._name = name
73
+ self.dot_graph_shape = (
74
+ dot_graph_shape if dot_graph_shape is not None else "record"
75
+ )
76
+ _WEIGHT_TEMPLATE["shape"] = self.dot_graph_shape
77
+
78
+ self._dot_graphs = {
79
+ name: self._to_dot(
80
+ graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args, parse_stack_trace
81
+ )
82
+ }
83
+
84
+ for node in graph_module.graph.nodes:
85
+ if node.op != "call_module":
86
+ continue
87
+
88
+ leaf_node = self._get_leaf_node(graph_module, node)
89
+
90
+ if not isinstance(leaf_node, torch.fx.GraphModule):
91
+ continue
92
+
93
+
94
+ self._dot_graphs[f"{name}_{node.target}"] = self._to_dot(
95
+ leaf_node,
96
+ f"{name}_{node.target}",
97
+ ignore_getattr,
98
+ ignore_parameters_and_buffers,
99
+ skip_node_names_in_args,
100
+ parse_stack_trace,
101
+ )
102
+
103
+ def get_dot_graph(self, submod_name=None) -> pydot.Dot:
104
+ """
105
+ Visualize a torch.fx.Graph with graphviz
106
+ Example:
107
+ >>> # xdoctest: +REQUIRES(module:pydot)
108
+ >>> # define module
109
+ >>> class MyModule(torch.nn.Module):
110
+ >>> def __init__(self):
111
+ >>> super().__init__()
112
+ >>> self.linear = torch.nn.Linear(4, 5)
113
+ >>> def forward(self, x):
114
+ >>> return self.linear(x).clamp(min=0.0, max=1.0)
115
+ >>> module = MyModule()
116
+ >>> # trace the module
117
+ >>> symbolic_traced = torch.fx.symbolic_trace(module)
118
+ >>> # setup output file
119
+ >>> import ubelt as ub
120
+ >>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir()
121
+ >>> fpath = dpath / 'linear.svg'
122
+ >>> # draw the graph
123
+ >>> g = FxGraphDrawer(symbolic_traced, "linear")
124
+ >>> g.get_dot_graph().write_svg(fpath)
125
+ """
126
+ if submod_name is None:
127
+ return self.get_main_dot_graph()
128
+ else:
129
+ return self.get_submod_dot_graph(submod_name)
130
+
131
+ def get_main_dot_graph(self) -> pydot.Dot:
132
+ return self._dot_graphs[self._name]
133
+
134
+ def get_submod_dot_graph(self, submod_name) -> pydot.Dot:
135
+ return self._dot_graphs[f"{self._name}_{submod_name}"]
136
+
137
+ def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]:
138
+ return self._dot_graphs
139
+
140
+ def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]:
141
+
142
+ template = {
143
+ "shape": self.dot_graph_shape,
144
+ "fillcolor": "#CAFFE3",
145
+ "style": '"filled,rounded"',
146
+ "fontcolor": "#000000",
147
+ }
148
+ if node.op in _COLOR_MAP:
149
+ template["fillcolor"] = _COLOR_MAP[node.op]
150
+ else:
151
+ # Use a random color for each node; based on its name so it's stable.
152
+ target_name = node._pretty_print_target(node.target)
153
+ target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16)
154
+ template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)]
155
+ return template
156
+
157
+ def _get_leaf_node(
158
+ self, module: torch.nn.Module, node: torch.fx.Node
159
+ ) -> torch.nn.Module:
160
+ py_obj = module
161
+ assert isinstance(node.target, str)
162
+ atoms = node.target.split(".")
163
+ for atom in atoms:
164
+ if not hasattr(py_obj, atom):
165
+ raise RuntimeError(
166
+ str(py_obj) + " does not have attribute " + atom + "!"
167
+ )
168
+ py_obj = getattr(py_obj, atom)
169
+ return py_obj
170
+
171
+ def _typename(self, target: Any) -> str:
172
+ if isinstance(target, torch.nn.Module):
173
+ ret = torch.typename(target)
174
+ elif isinstance(target, str):
175
+ ret = target
176
+ else:
177
+ ret = _get_qualified_name(target)
178
+
179
+ # Escape "{" and "}" to prevent dot files like:
180
+ # https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc
181
+ # which triggers `Error: bad label format (...)` from dot
182
+ return ret.replace("{", r"\{").replace("}", r"\}")
183
+
184
+ # shorten path to avoid drawing long boxes
185
+ # for full path = '/home/weif/pytorch/test.py'
186
+ # return short path = 'pytorch/test.py'
187
+ def _shorten_file_name(
188
+ self,
189
+ full_file_name: str,
190
+ truncate_to_last_n: int = 2,
191
+ ):
192
+ splits = full_file_name.split('/')
193
+ if len(splits) >= truncate_to_last_n:
194
+ return '/'.join(splits[-truncate_to_last_n:])
195
+ return full_file_name
196
+
197
+
198
+ def _get_node_label(
199
+ self,
200
+ module: torch.fx.GraphModule,
201
+ node: torch.fx.Node,
202
+ skip_node_names_in_args: bool,
203
+ parse_stack_trace: bool,
204
+ ) -> str:
205
+ def _get_str_for_args_kwargs(arg):
206
+ if isinstance(arg, tuple):
207
+ prefix, suffix = r"|args=(\l", r",\n)\l"
208
+ arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg]
209
+ elif isinstance(arg, dict):
210
+ prefix, suffix = r"|kwargs={\l", r",\n}\l"
211
+ arg_strs_list = [
212
+ f"{k}: {_format_arg(v, max_list_len=8)}"
213
+ for k, v in arg.items()
214
+ ]
215
+ else: # Fall back to nothing in unexpected case.
216
+ return ""
217
+
218
+ # Strip out node names if requested.
219
+ if skip_node_names_in_args:
220
+ arg_strs_list = [a for a in arg_strs_list if "%" not in a]
221
+ if len(arg_strs_list) == 0:
222
+ return ""
223
+ arg_strs = prefix + r",\n".join(arg_strs_list) + suffix
224
+ if len(arg_strs_list) == 1:
225
+ arg_strs = arg_strs.replace(r"\l", "").replace(r"\n", "")
226
+ return arg_strs.replace("{", r"\{").replace("}", r"\}")
227
+
228
+
229
+ label = "{" + f"name=%{node.name}|op_code={node.op}\n"
230
+
231
+ if node.op == "call_module":
232
+ leaf_module = self._get_leaf_node(module, node)
233
+ label += r"\n" + self._typename(leaf_module) + r"\n|"
234
+ extra = ""
235
+ if hasattr(leaf_module, "__constants__"):
236
+ extra = r"\n".join(
237
+ [f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr]
238
+ )
239
+ label += extra + r"\n"
240
+ else:
241
+ label += f"|target={self._typename(node.target)}" + r"\n"
242
+ if len(node.args) > 0:
243
+ label += _get_str_for_args_kwargs(node.args)
244
+ if len(node.kwargs) > 0:
245
+ label += _get_str_for_args_kwargs(node.kwargs)
246
+ label += f"|num_users={len(node.users)}" + r"\n"
247
+
248
+ tensor_meta = node.meta.get('tensor_meta')
249
+ label += self._tensor_meta_to_label(tensor_meta)
250
+
251
+ # for original fx graph
252
+ # print buf=buf0, n_origin=6
253
+ buf_meta = node.meta.get('buf_meta', None)
254
+ if buf_meta is not None:
255
+ label += f"|buf={buf_meta.name}" + r"\n"
256
+ label += f"|n_origin={buf_meta.n_origin}" + r"\n"
257
+
258
+ # for original fx graph
259
+ # print file:lineno code
260
+ if parse_stack_trace and node.stack_trace is not None:
261
+ parsed_stack_trace = _parse_stack_trace(node.stack_trace)
262
+ fname = self._shorten_file_name(parsed_stack_trace.file)
263
+ label += f"|file={fname}:{parsed_stack_trace.lineno} {parsed_stack_trace.code}" + r"\n"
264
+
265
+
266
+ return label + "}"
267
+
268
+ def _tensor_meta_to_label(self, tm) -> str:
269
+ if tm is None:
270
+ return ""
271
+ elif isinstance(tm, TensorMetadata):
272
+ return self._stringify_tensor_meta(tm)
273
+ elif isinstance(tm, list):
274
+ result = ""
275
+ for item in tm:
276
+ result += self._tensor_meta_to_label(item)
277
+ return result
278
+ elif isinstance(tm, dict):
279
+ result = ""
280
+ for v in tm.values():
281
+ result += self._tensor_meta_to_label(v)
282
+ return result
283
+ elif isinstance(tm, tuple):
284
+ result = ""
285
+ for item in tm:
286
+ result += self._tensor_meta_to_label(item)
287
+ return result
288
+ else:
289
+ raise RuntimeError(f"Unsupported tensor meta type {type(tm)}")
290
+
291
+ def _stringify_tensor_meta(self, tm: TensorMetadata) -> str:
292
+ result = ""
293
+ if not hasattr(tm, "dtype"):
294
+ print("tm", tm)
295
+ result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n"
296
+ result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n"
297
+ result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n"
298
+ result += "|" + "stride" + "=" + str(tm.stride) + r"\n"
299
+ if tm.is_quantized:
300
+ assert tm.qparams is not None
301
+ assert "qscheme" in tm.qparams
302
+ qscheme = tm.qparams["qscheme"]
303
+ if qscheme in {
304
+ torch.per_tensor_affine,
305
+ torch.per_tensor_symmetric,
306
+ }:
307
+ result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
308
+ result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
309
+ elif qscheme in {
310
+ torch.per_channel_affine,
311
+ torch.per_channel_symmetric,
312
+ torch.per_channel_affine_float_qparams,
313
+ }:
314
+ result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
315
+ result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
316
+ result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n"
317
+ else:
318
+ raise RuntimeError(f"Unsupported qscheme: {qscheme}")
319
+ result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n"
320
+ return result
321
+
322
+ def _get_tensor_label(self, t: torch.Tensor) -> str:
323
+ return str(t.dtype) + str(list(t.shape)) + r"\n"
324
+
325
+ # when parse_stack_trace=True
326
+ # print file:lineno code
327
+ def _to_dot(
328
+ self,
329
+ graph_module: torch.fx.GraphModule,
330
+ name: str,
331
+ ignore_getattr: bool,
332
+ ignore_parameters_and_buffers: bool,
333
+ skip_node_names_in_args: bool,
334
+ parse_stack_trace: bool,
335
+ ) -> pydot.Dot:
336
+ """
337
+ Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph.
338
+ If ignore_parameters_and_buffers is True, the parameters and buffers
339
+ created with the module will not be added as nodes and edges.
340
+ """
341
+
342
+ # "TB" means top-to-bottom rank direction in layout
343
+ dot_graph = pydot.Dot(name, rankdir="TB")
344
+
345
+
346
+ buf_name_to_subgraph = {}
347
+
348
+ for node in graph_module.graph.nodes:
349
+ if ignore_getattr and node.op == "get_attr":
350
+ continue
351
+
352
+ style = self._get_node_style(node)
353
+ dot_node = pydot.Node(
354
+ node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args, parse_stack_trace), **style
355
+ )
356
+
357
+ current_graph = dot_graph
358
+
359
+ buf_meta = node.meta.get('buf_meta', None)
360
+ if buf_meta is not None and buf_meta.n_origin > 1:
361
+ buf_name = buf_meta.name
362
+ if buf_name not in buf_name_to_subgraph:
363
+ buf_name_to_subgraph[buf_name] = pydot.Cluster(buf_name, label=buf_name)
364
+ current_graph = buf_name_to_subgraph.get(buf_name)
365
+
366
+ current_graph.add_node(dot_node)
367
+
368
+ def get_module_params_or_buffers():
369
+ for pname, ptensor in chain(
370
+ leaf_module.named_parameters(), leaf_module.named_buffers()
371
+ ):
372
+ pname1 = node.name + "." + pname
373
+ label1 = (
374
+ pname1 + "|op_code=get_" + "parameter"
375
+ if isinstance(ptensor, torch.nn.Parameter)
376
+ else "buffer" + r"\l"
377
+ )
378
+ dot_w_node = pydot.Node(
379
+ pname1,
380
+ label="{" + label1 + self._get_tensor_label(ptensor) + "}",
381
+ **_WEIGHT_TEMPLATE,
382
+ )
383
+ dot_graph.add_node(dot_w_node)
384
+ dot_graph.add_edge(pydot.Edge(pname1, node.name))
385
+
386
+ if node.op == "call_module":
387
+ leaf_module = self._get_leaf_node(graph_module, node)
388
+
389
+ if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule):
390
+ get_module_params_or_buffers()
391
+
392
+ for subgraph in buf_name_to_subgraph.values():
393
+ subgraph.set('color', 'royalblue')
394
+ subgraph.set('penwidth', '2')
395
+ dot_graph.add_subgraph(subgraph)
396
+
397
+ for node in graph_module.graph.nodes:
398
+ if ignore_getattr and node.op == "get_attr":
399
+ continue
400
+
401
+ for user in node.users:
402
+ dot_graph.add_edge(pydot.Edge(node.name, user.name))
403
+
404
+ return dot_graph
405
+
406
+ else:
407
+ if not TYPE_CHECKING:
408
+ @compatibility(is_backward_compatible=False)
409
+ class FxGraphDrawer:
410
+ def __init__(
411
+ self,
412
+ graph_module: torch.fx.GraphModule,
413
+ name: str,
414
+ ignore_getattr: bool = False,
415
+ ignore_parameters_and_buffers: bool = False,
416
+ skip_node_names_in_args: bool = True,
417
+ parse_stack_trace: bool = False,
418
+ dot_graph_shape: Optional[str] = None,
419
+ ):
420
+ raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install '
421
+ 'pydot through your favorite Python package manager.')
venv/lib/python3.10/site-packages/torch/fx/passes/graph_manipulation.py ADDED
@@ -0,0 +1,110 @@
1
+ from typing import Any, Dict, List, NamedTuple, Optional
2
+
3
+ import torch
4
+ from torch.fx._compatibility import compatibility
5
+ from torch.fx.graph import Graph
6
+ from torch.fx.graph_module import GraphModule
7
+ from torch.fx.node import (
8
+ map_arg,
9
+ Node,
10
+ Target,
11
+ )
12
+ from torch.fx.passes.shape_prop import ShapeProp
13
+
14
+ __all__ = ['replace_target_nodes_with', 'size_bytes', 'get_size_of_all_nodes', 'get_tensor_meta',
15
+ 'get_size_of_node']
16
+
17
+ @compatibility(is_backward_compatible=False)
18
+ def replace_target_nodes_with(
19
+ fx_module: GraphModule,
20
+ old_op: str,
21
+ old_target: Target,
22
+ new_op: str,
23
+ new_target: Target,
24
+ ):
25
+ """Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
26
+ and updates them to match the new op code and target"""
27
+ new_graph = Graph()
28
+ val_map: Dict[Node, Node] = {}
29
+ for node in fx_module.graph.nodes:
30
+ if node.op == old_op and node.target == old_target:
31
+ args = map_arg(node.args, lambda n: val_map[n])
32
+ kwargs = map_arg(node.kwargs, lambda n: val_map[n])
33
+ assert isinstance(args, tuple)
34
+ assert isinstance(kwargs, dict)
35
+ val_map[node] = new_graph.create_node(
36
+ new_op, new_target, args, kwargs, node.name
37
+ )
38
+ else:
39
+ val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
40
+ fx_module.graph = new_graph
41
+
42
+
43
+ @compatibility(is_backward_compatible=False)
44
+ class size_bytes(NamedTuple):
45
+ output_size: int
46
+ total_size: int
47
+
48
+
49
+ @compatibility(is_backward_compatible=False)
50
+ def get_size_of_all_nodes(
51
+ fx_module: GraphModule, args: Optional[List[torch.Tensor]] = None
52
+ ) -> None:
53
+ """Given a fx graph module, update each node with its total size (weights + bias + output)
54
+ and its output_size(output). For a non-module node, the total size is the output size.
55
+ return total size"""
56
+ if args is not None:
57
+ # Mark shape and dtype for each node (node.shape and node.dtype)
58
+ ShapeProp(fx_module).propagate(*args)
59
+ # Calculate the total size of the whole fx graph
60
+ total_size_of_graph = 0.0
61
+ for node in fx_module.graph.nodes:
62
+ if node.op == "output":
63
+ break
64
+ node.size_bytes = get_size_of_node(fx_module, node)
65
+ return
66
+
67
+
68
+ @compatibility(is_backward_compatible=False)
69
+ def get_tensor_meta(node: Node) -> Any:
70
+ tensor_meta = node.meta.get("tensor_meta")
71
+
72
+ if not tensor_meta:
73
+ raise RuntimeError(
74
+ f"Node {node} has no tensor metadata associated with it! "
75
+ f"Check that shape propagation has run."
76
+ )
77
+
78
+ return tensor_meta
79
+
80
+
81
+ @compatibility(is_backward_compatible=False)
82
+ def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
83
+ """Given a node with node.dtype and node.shape, return its total size and its output size.
84
+ total_size = weights + bias + output_size
85
+ """
86
+ # Total num of elements
87
+ total_num_of_elems = 0
88
+ # For a module, conside all parameters
89
+ if node.op == "call_module":
90
+ submodule_dict = dict(fx_module.named_modules())
91
+ submodule = submodule_dict[node.target]
92
+ parameters = submodule.named_parameters()
93
+ # Parameters are named tuples
94
+ for name, p in parameters:
95
+ total_num_of_elems += p.numel()
96
+ # Don't forget the output size
97
+ # node.shape is the shape of this node's output
98
+ tensor_meta = get_tensor_meta(node)
99
+ output_elem = tensor_meta.shape.numel()
100
+ total_num_of_elems += output_elem
101
+ # Assume for now if it's quantized then it's qint8 or quint8
102
+ if tensor_meta.is_quantized:
103
+ size_per_elem_bytes = torch._empty_affine_quantized(
104
+ [], dtype=tensor_meta.dtype
105
+ ).element_size()
106
+ else:
107
+ size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
108
+ total_size = size_per_elem_bytes * total_num_of_elems
109
+ output_size = size_per_elem_bytes * output_elem
110
+ return size_bytes(output_size, total_size)
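A minimal sketch of the size accounting above (not part of this commit; the toy module Net is hypothetical). Passing sample args runs ShapeProp first, then every node before the output gets a size_bytes record:

    import torch
    import torch.fx
    from torch.fx.passes.graph_manipulation import get_size_of_all_nodes

    class Net(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(16, 4)

        def forward(self, x):
            return self.linear(x).relu()

    gm = torch.fx.symbolic_trace(Net())
    get_size_of_all_nodes(gm, [torch.randn(2, 16)])

    for node in gm.graph.nodes:
        if hasattr(node, "size_bytes"):
            print(node.name, node.size_bytes.output_size, node.size_bytes.total_size)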
venv/lib/python3.10/site-packages/torch/fx/passes/infra/__init__.py ADDED
@@ -0,0 +1,2 @@
+
+ from . import pass_manager
venv/lib/python3.10/site-packages/torch/fx/passes/infra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (226 Bytes).