applied-ai-018 committed on
Commit 85a7cec · verified · 1 Parent(s): bffd848

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/multinomial.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/normal.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/poisson.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/futures/__init__.py +318 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.py +89 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.pyi +11 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/fx/_compatibility.py +34 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/fx/_pytree.py +69 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py +1163 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/fx/annotate.py +21 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/fx/config.py +6 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/fx/graph.py +1630 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/fx/graph_module.py +867 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/fx/immutable_collections.py +54 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/fx/interpreter.py +505 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/fx/node.py +696 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/fx/operator_schemas.py +440 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py +418 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/fx/proxy.py +563 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py +343 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/fx/tensor_type.py +104 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/fx/traceback.py +100 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/quantization/__init__.py +87 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py +28 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py +26 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py +132 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/quantization/fake_quantize.py +32 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/quantization/fuse_modules.py +22 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py +15 -0
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/chi2.cpython-310.pyc ADDED
Binary file (1.53 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/fishersnedecor.cpython-310.pyc ADDED
Binary file (3.52 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/independent.cpython-310.pyc ADDED
Binary file (4.88 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/lkj_cholesky.cpython-310.pyc ADDED
Binary file (4.77 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/multinomial.cpython-310.pyc ADDED
Binary file (5.65 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/normal.cpython-310.pyc ADDED
Binary file (4.38 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/distributions/__pycache__/poisson.cpython-310.pyc ADDED
Binary file (2.98 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/futures/__init__.py ADDED
@@ -0,0 +1,318 @@
from __future__ import annotations

from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union

import torch

__all__ = ['Future', 'collect_all', 'wait_all']

T = TypeVar("T")
S = TypeVar("S")


class _PyFutureMeta(type(torch._C.Future), type(Generic)):  # type: ignore[misc, no-redef]
    pass


class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
    r"""
    Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
    execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
    also exposes a set of APIs to add callback functions and set results.

    .. warning:: GPU support is a beta feature, subject to changes.
    """

    def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
        r"""
        Create an empty unset ``Future``. If the future is intended to hold
        values containing CUDA tensors, (a superset of) their CUDA devices must
        be specified at construction. (This is only supported if
        ``torch.cuda.is_available()`` returns ``True``). This is needed to
        ensure proper CUDA stream synchronization. The child futures, returned
        by the ``then`` method, will inherit these devices.

        Args:
            devices(``List[Union[int, str, torch.device]]``, optional): the set
                of devices on which tensors contained in this future's value are
                allowed to reside and on which callbacks are allowed to operate.
        """
        if devices is None:
            devices = []
        super().__init__([torch.device(d) for d in devices])

    def done(self) -> bool:
        r"""
        Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
        has a result or an exception.

        If the value contains tensors that reside on GPUs, ``Future.done()``
        will return ``True`` even if the asynchronous kernels that are
        populating those tensors haven't yet completed running on the device,
        because at such stage the result is already usable, provided one
        performs the appropriate synchronizations (see :meth:`wait`).
        """
        return super().done()

    def wait(self) -> T:
        r"""
        Block until the value of this ``Future`` is ready.

        If the value contains tensors that reside on GPUs, then an additional
        synchronization is performed with the kernels (executing on the device)
        which may be asynchronously populating those tensors. Such sync is
        non-blocking, which means that ``wait()`` will insert the necessary
        instructions in the current streams to ensure that further operations
        enqueued on those streams will be properly scheduled after the async
        kernels but, once that is done, ``wait()`` will return, even if those
        kernels are still running. No further synchronization is required when
        accessing and using the values, as long as one doesn't change streams.

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``wait`` method will
            also throw an error.
        """
        return super().wait()

    def value(self) -> T:
        r"""
        Obtain the value of an already-completed future.

        This method should only be called after a call to :meth:`wait` has
        completed, or inside a callback function passed to :meth:`then`. In
        other cases this ``Future`` may not yet hold a value and calling
        ``value()`` could fail.

        If the value contains tensors that reside on GPUs, then this method will
        *not* perform any additional synchronization. This should be done
        beforehand, separately, through a call to :meth:`wait` (except within
        callbacks, for which it's already being taken care of by :meth:`then`).

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``value()`` method will
            also throw an error.
        """
        return super().value()

    def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed. Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed (to enforce a certain order consider chaining:
        ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
        is the reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run immediately inline.

        If the ``Future``'s value contains tensors that reside on GPUs, the
        callback might be invoked while the async kernels that are populating
        those tensors haven't yet finished executing on the device. However, the
        callback will be invoked with some dedicated streams set as current
        (fetched from a global pool) which will be synchronized with those
        kernels. Hence any operation performed by the callback on these tensors
        will be scheduled on the device after the kernels complete. In other
        words, as long as the callback doesn't switch streams, it can safely
        manipulate the result without any additional synchronization. This is
        similar to the non-blocking behavior of :meth:`wait`.

        Similarly, if the callback returns a value that contains tensors that
        reside on a GPU, it can do so even if the kernels that are producing
        these tensors are still running on the device, as long as the callback
        didn't change streams during its execution. If one wants to change
        streams, one must be careful to re-synchronize them with the original
        streams, that is, those that were current when the callback was invoked.

        Args:
            callback(``Callable``): a ``Callable`` that takes this ``Future`` as
                the only argument.

        Returns:
            A new ``Future`` object that holds the return value of the
            ``callback`` and will be marked as completed when the given
            ``callback`` finishes.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback, the
            future returned by ``then`` will be marked appropriately with the
            encountered error. However, if this callback later completes
            additional futures, those futures are not marked as completed with
            an error and the user is responsible for handling completion/waiting
            on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print(f"RPC return value is {fut.wait()}.")
            >>> fut = torch.futures.Future()
            >>> # The inserted callback will print the return value when
            >>> # receiving the response from "worker1"
            >>> cb_fut = fut.then(callback)
            >>> chain_cb_fut = cb_fut.then(
            ...     lambda x : print(f"Chained cb done. {x.wait()}")
            ... )
            >>> fut.set_result(5)
            RPC return value is 5.
            Chained cb done. None
        """
        return cast(Future[S], super().then(callback))

    def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed. Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed. The callback must take one argument, which is the
        reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run inline.

        We recommend that you use the :meth:`then` method as it provides a way
        to synchronize after your callback has completed. ``add_done_callback``
        can be cheaper if your callback does not return anything. But both
        :meth:`then` and ``add_done_callback`` use the same callback
        registration API under the hood.

        With respect to GPU tensors, this method behaves in the same way as
        :meth:`then`.

        Args:
            callback(``Future``): a ``Callable`` that takes in one argument,
                which is the reference to this ``Future``.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback,
            error handling must be carefully taken care of. For example, if
            this callback later completes additional futures, those futures are
            not marked as completed with an error and the user is responsible
            for handling completion/waiting on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print("This will run after the future has finished.")
            ...     print(fut.wait())
            >>> fut = torch.futures.Future()
            >>> fut.add_done_callback(callback)
            >>> fut.set_result(5)
            This will run after the future has finished.
            5
        """
        super().add_done_callback(callback)

    def set_result(self, result: T) -> None:
        r"""
        Set the result for this ``Future``, which will mark this ``Future`` as
        completed and trigger all attached callbacks. Note that a ``Future``
        cannot be marked completed twice.

        If the result contains tensors that reside on GPUs, this method can be
        called even if the asynchronous kernels that are populating those
        tensors haven't yet completed running on the device, provided that the
        streams on which those kernels were enqueued are set as the current ones
        when this method is called. Put simply, it's safe to call this method
        immediately after launching those kernels, without any additional
        synchronization, as long as one doesn't change streams in between. This
        method will record events on all the relevant current streams and will
        use them to ensure proper scheduling for all the consumers of this
        ``Future``.

        Args:
            result (object): the result object of this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> import threading
            >>> import time
            >>> def slow_set_future(fut, value):
            ...     time.sleep(0.5)
            ...     fut.set_result(value)
            >>> fut = torch.futures.Future()
            >>> t = threading.Thread(
            ...     target=slow_set_future,
            ...     args=(fut, torch.ones(2) * 3)
            ... )
            >>> t.start()
            >>> print(fut.wait())
            tensor([3., 3.])
            >>> t.join()
        """
        super().set_result(result)

    def set_exception(self, result: T) -> None:
        r"""
        Set an exception for this ``Future``, which will mark this ``Future`` as
        completed with an error and trigger all attached callbacks. Note that
        when calling wait()/value() on this ``Future``, the exception set here
        will be raised inline.

        Args:
            result (BaseException): the exception for this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> fut = torch.futures.Future()
            >>> fut.set_exception(ValueError("foo"))
            >>> fut.wait()
            Traceback (most recent call last):
            ...
            ValueError: foo
        """
        assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."

        def raise_error(fut_result):
            raise fut_result

        super()._set_unwrap_func(raise_error)
        self.set_result(result)  # type: ignore[arg-type]


def collect_all(futures: List[Future]) -> Future[List[Future]]:
    r"""
    Collects the provided :class:`~torch.futures.Future` objects into a single
    combined :class:`~torch.futures.Future` that is completed when all of the
    sub-futures are completed.

    Args:
        futures (list): a list of :class:`~torch.futures.Future` objects.

    Returns:
        Returns a :class:`~torch.futures.Future` object to a list of the passed
        in Futures.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
        >>> fut0 = torch.futures.Future()
        >>> fut1 = torch.futures.Future()
        >>> fut = torch.futures.collect_all([fut0, fut1])
        >>> fut0.set_result(0)
        >>> fut1.set_result(1)
        >>> fut_list = fut.wait()
        >>> print(f"fut0 result = {fut_list[0].wait()}")
        fut0 result = 0
        >>> print(f"fut1 result = {fut_list[1].wait()}")
        fut1 result = 1
    """
    return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))


def wait_all(futures: List[Future]) -> List:
    r"""
    Waits for all provided futures to be complete, and returns
    the list of completed values. If any of the futures encounters an error,
    the method will exit early and report the error, not waiting for other
    futures to complete.

    Args:
        futures (list): a list of :class:`~torch.futures.Future` objects.

    Returns:
        A list of the completed :class:`~torch.futures.Future` results. This
        method will throw an error if ``wait`` on any
        :class:`~torch.futures.Future` throws.
    """
    return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
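
Illustrative usage sketch (not taken from the file above; values are arbitrary): chaining a callback with ``then`` and aggregating results with ``wait_all``, using only the APIs defined in torch/futures/__init__.py.

import torch

# Chain a callback; it receives the completed Future itself.
fut = torch.futures.Future()
chained = fut.then(lambda f: f.wait() * 2)
fut.set_result(torch.ones(2))
print(chained.wait())  # tensor([2., 2.])

# Aggregate several futures and wait for all of them.
fut_a, fut_b = torch.futures.Future(), torch.futures.Future()
fut_a.set_result(1)
fut_b.set_result(2)
print(torch.futures.wait_all([fut_a, fut_b]))  # [1, 2]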
env-llmeval/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (15.9 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.py ADDED
@@ -0,0 +1,89 @@
r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:

::

    import torch
    # Simple module for demonstration
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    module = MyModule()

    from torch.fx import symbolic_trace
    # Symbolic tracing frontend - captures the semantics of the module
    symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)

    # High-level intermediate representation (IR) - Graph representation
    print(symbolic_traced.graph)
    """
    graph():
        %x : [num_users=1] = placeholder[target=x]
        %param : [num_users=1] = get_attr[target=param]
        %add : [num_users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
        %linear : [num_users=1] = call_module[target=linear](args = (%add,), kwargs = {})
        %clamp : [num_users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
        return clamp
    """

    # Code generation - valid Python code
    print(symbolic_traced.code)
    """
    def forward(self, x):
        param = self.param
        add = x + param; x = param = None
        linear = self.linear(add); add = None
        clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
        return clamp
    """

The **symbolic tracer** performs "symbolic execution" of the Python
code. It feeds fake values, called Proxies, through the code. Operations
on these Proxies are recorded. More information about symbolic tracing
can be found in the :func:`symbolic_trace` and :class:`Tracer`
documentation.

The **intermediate representation** is the container for the operations
that were recorded during symbolic tracing. It consists of a list of
Nodes that represent function inputs, callsites (to functions, methods,
or :class:`torch.nn.Module` instances), and return values. More information
about the IR can be found in the documentation for :class:`Graph`. The
IR is the format on which transformations are applied.

**Python code generation** is what makes FX a Python-to-Python (or
Module-to-Module) transformation toolkit. For each Graph IR, we can
create valid Python code matching the Graph's semantics. This
functionality is wrapped up in :class:`GraphModule`, which is a
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
``forward`` method generated from the Graph.

Taken together, this pipeline of components (symbolic tracing ->
intermediate representation -> transforms -> Python code generation)
constitutes the Python-to-Python transformation pipeline of FX. In
addition, these components can be used separately. For example,
symbolic tracing can be used in isolation to capture a form of
the code for analysis (and not transformation) purposes. Code
generation can be used for programmatically generating models, for
example from a config file. There are many uses for FX!

Several example transformations can be found at the
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
repository.
'''

from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph, CodeGen
from .node import Node, map_arg, has_side_effect
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
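
The docstring above demonstrates tracing and code generation; the sketch below illustrates the "transforms" step it refers to. It is a hypothetical pass (not part of this package) that rewrites every ``operator.add`` call in a traced module into ``operator.mul`` using the public FX APIs exported here.

import operator
import torch
from torch.fx import GraphModule, symbolic_trace

def add_to_mul(m: torch.nn.Module) -> GraphModule:
    gm = symbolic_trace(m)
    for node in gm.graph.nodes:
        # call_function nodes store the target callable in node.target
        if node.op == "call_function" and node.target is operator.add:
            node.target = operator.mul
    gm.graph.lint()   # sanity-check the modified graph
    gm.recompile()    # regenerate forward() from the modified graph
    return gm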
env-llmeval/lib/python3.10/site-packages/torch/fx/__init__.pyi ADDED
@@ -0,0 +1,11 @@
from ._symbolic_trace import (
    symbolic_trace as symbolic_trace,
    Tracer as Tracer,
    wrap as wrap,
)
from .graph import Graph as Graph
from .graph_module import GraphModule as GraphModule
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .node import has_side_effect as has_side_effect, map_arg as map_arg, Node as Node
from .proxy import Proxy as Proxy
from .subgraph_rewriter import replace_pattern as replace_pattern
env-llmeval/lib/python3.10/site-packages/torch/fx/_compatibility.py ADDED
@@ -0,0 +1,34 @@
from typing import Any, Dict
import textwrap

_BACK_COMPAT_OBJECTS : Dict[Any, None] = {}
_MARKED_WITH_COMPATIBILITY : Dict[Any, None] = {}

def compatibility(is_backward_compatible : bool):
    if is_backward_compatible:

        def mark_back_compat(fn):
            docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
            docstring += """
.. note::
    Backwards-compatibility for this API is guaranteed.
"""
            fn.__doc__ = docstring
            _BACK_COMPAT_OBJECTS.setdefault(fn)
            _MARKED_WITH_COMPATIBILITY.setdefault(fn)
            return fn

        return mark_back_compat
    else:

        def mark_not_back_compat(fn):
            docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
            docstring += """
.. warning::
    This API is experimental and is *NOT* backward-compatible.
"""
            fn.__doc__ = docstring
            _MARKED_WITH_COMPATIBILITY.setdefault(fn)
            return fn

        return mark_not_back_compat
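
Illustrative sketch of how this decorator is used elsewhere in torch.fx (the decorated function here is hypothetical): it registers the callable and appends a compatibility note or warning to its docstring.

from torch.fx._compatibility import compatibility

@compatibility(is_backward_compatible=False)
def my_experimental_pass(graph):
    """Rewrite the given FX graph in place."""
    return graph

# The decorator extended __doc__ with a ".. warning::" block marking the
# API as experimental and not backward-compatible.
print(my_experimental_pass.__doc__)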
env-llmeval/lib/python3.10/site-packages/torch/fx/_pytree.py ADDED
@@ -0,0 +1,69 @@
from collections import namedtuple
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Type, Optional

from torch.utils._pytree import LeafSpec, PyTree, TreeSpec

FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]

FlattenFuncExactMatchSpec = Callable[[PyTree, TreeSpec], bool]

SUPPORTED_NODES: Dict[Type[Any], FlattenFuncSpec] = {}

SUPPORTED_NODES_EXACT_MATCH: Dict[Type[Any], Optional[FlattenFuncExactMatchSpec]] = {}

def register_pytree_flatten_spec(
    cls: Type[Any],
    flatten_fn_spec: FlattenFuncSpec,
    flatten_fn_exact_match_spec: Optional[FlattenFuncExactMatchSpec] = None
) -> None:
    SUPPORTED_NODES[cls] = flatten_fn_spec
    SUPPORTED_NODES_EXACT_MATCH[cls] = flatten_fn_exact_match_spec

def tree_flatten_spec(pytree: PyTree, spec: TreeSpec, exact_structural_match=False) -> List[Any]:
    if isinstance(spec, LeafSpec):
        return [pytree]
    if spec.type not in SUPPORTED_NODES:
        raise RuntimeError(
            f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with "
            "torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make "
            "sure that any custom pytrees have been registered before loading it.")
    flatten_fn_spec = SUPPORTED_NODES[spec.type]
    child_pytrees = flatten_fn_spec(pytree, spec)
    if exact_structural_match:
        flatten_fn_exact_match_spec = SUPPORTED_NODES_EXACT_MATCH[spec.type]
        if flatten_fn_exact_match_spec and not flatten_fn_exact_match_spec(pytree, spec):
            raise RuntimeError(f"Cannot flatten pytree {pytree}, given spec: {spec}")
    result = []
    for child, child_spec in zip(child_pytrees, spec.children_specs):
        flat = tree_flatten_spec(child, child_spec, exact_structural_match)
        result += flat
    return result

def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
    return [d[k] for k in spec.context]

def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]:
    return [d[i] for i in range(len(spec.children_specs))]

def _dict_flatten_spec_exact_match(d: Dict[Any, Any], spec: TreeSpec) -> bool:
    return len(d) == len(spec.context)

def _list_flatten_spec_exact_match(d: List[Any], spec: TreeSpec) -> bool:
    return len(d) == len(spec.children_specs)

def _tuple_flatten_spec_exact_match(d: Tuple[Any], spec: TreeSpec) -> bool:
    return len(d) == len(spec.children_specs)

def _namedtuple_flatten_spec_exact_match(d: NamedTuple, spec: TreeSpec) -> bool:
    return len(d) == len(spec.children_specs)

register_pytree_flatten_spec(dict, _dict_flatten_spec, _dict_flatten_spec_exact_match)
register_pytree_flatten_spec(list, _list_flatten_spec, _list_flatten_spec_exact_match)
register_pytree_flatten_spec(tuple, _tuple_flatten_spec, _tuple_flatten_spec_exact_match)
register_pytree_flatten_spec(namedtuple, _namedtuple_flatten_spec, _tuple_flatten_spec_exact_match)  # type: ignore[arg-type]
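
A minimal sketch of how these registrations are exercised (assuming the torch.utils._pytree API where ``tree_flatten`` returns a ``(leaves, spec)`` pair): a ``TreeSpec`` built from one pytree is used to flatten another value of the same structure via ``tree_flatten_spec``, which dispatches to the dict/list/tuple handlers registered above.

import torch.utils._pytree as pytree
from torch.fx._pytree import tree_flatten_spec

# Build a spec from one pytree, then flatten another value with the same structure.
_, spec = pytree.tree_flatten({"a": 1, "b": (2, 3)})
flat = tree_flatten_spec({"a": 10, "b": (20, 30)}, spec)
print(flat)  # [10, 20, 30]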
env-llmeval/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py ADDED
@@ -0,0 +1,1163 @@
1
+ import builtins
2
+ import copy
3
+ import functools
4
+ import inspect
5
+ import math
6
+ import os
7
+ import warnings
8
+ import collections
9
+ from itertools import chain
10
+ from types import CodeType, FunctionType, ModuleType
11
+ from typing import (
12
+ Any,
13
+ Callable,
14
+ Dict,
15
+ List,
16
+ NamedTuple,
17
+ Optional,
18
+ Set,
19
+ Tuple,
20
+ Type,
21
+ Union,
22
+ )
23
+
24
+ import torch
25
+ import torch.utils._pytree as pytree
26
+ from torch._C import ScriptObject # type: ignore[attr-defined]
27
+
28
+ from ._compatibility import compatibility
29
+ from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
30
+ from .graph_module import GraphModule
31
+ from .node import Argument, base_types, map_aggregate
32
+ from .proxy import ParameterProxy, Proxy, TracerBase, Scope, ScopeContextManager
33
+
34
+ HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
35
+
36
+ # These need to run in global scope to handle nested calls correctly
37
+ _orig_module_call: Callable = torch.nn.Module.__call__
38
+ _orig_module_getattr: Callable = torch.nn.Module.__getattr__
39
+
40
+ _proxyable_classes: Dict[Type, None] = {}
41
+
42
+ _is_fx_tracing_flag = False
43
+
44
+
45
+ def is_fx_tracing():
46
+ return _is_fx_tracing_flag
47
+
48
+ @compatibility(is_backward_compatible=True)
49
+ class ProxyableClassMeta(type):
50
+ """
51
+ ProxyableClassMeta allows you to make construction of a given Python class
52
+ symbolically traceable. For example::
53
+
54
+ import torch
55
+ import torch.fx
56
+
57
+ class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
58
+ def __init__(self, left, right):
59
+ self.left, self.right = left, right
60
+
61
+ def add(self, other):
62
+ l = self.left + other.left
63
+ r = self.right + other.right
64
+ return TensorPair(l, r)
65
+
66
+ def mul(self, other):
67
+ l = self.left * other.left
68
+ r = self.right * other.right
69
+ return TensorPair(l, r)
70
+
71
+ def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
72
+ s = x.add(TensorPair(y, y))
73
+ return s.mul(x)
74
+
75
+ x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
76
+ y = torch.randn(5, 3)
77
+ ref_out = use_tensor_pair_ctor(x, y)
78
+
79
+ traced = torch.fx.symbolic_trace(use_tensor_pair_ctor)
80
+ print(traced.code)
81
+ '''
82
+ def forward(self, x : __main___TensorPair, y : torch.Tensor):
83
+ tensor_pair = __main___TensorPair(y, y); y = None
84
+ add = x.add(tensor_pair); tensor_pair = None
85
+ mul = add.mul(x); add = x = None
86
+ return mul
87
+ '''
88
+
89
+ From this example, we can see that construction of a class (``TensorPair``)
90
+ defined with ``ProxyableClassMeta`` as metaclass can be recorded in symbolic
91
+ tracing.
92
+ """
93
+
94
+ def __init__(cls, name, bases, attrs):
95
+ _proxyable_classes.setdefault(cls)
96
+ super().__init__(name, bases, attrs)
97
+
98
+ def __call__(cls, *args, **kwargs):
99
+ instance = cls.__new__(cls) # type: ignore[call-overload]
100
+
101
+ if not is_fx_tracing():
102
+ cls.__init__(instance, *args, **kwargs) # type: ignore[misc]
103
+ return instance
104
+
105
+ found_proxies = []
106
+
107
+ def check_proxy(a):
108
+ if isinstance(a, Proxy):
109
+ found_proxies.append(a)
110
+
111
+ map_aggregate(args, check_proxy)
112
+ map_aggregate(kwargs, check_proxy)
113
+
114
+ if len(found_proxies) != 0:
115
+ tracer = found_proxies[0].tracer
116
+ return tracer.create_proxy("call_function", cls, args, kwargs)
117
+ else:
118
+ cls.__init__(instance, *args, **kwargs) # type: ignore[misc]
119
+ return instance
120
+
121
+
122
+ def _patch_function(fn: FunctionType, nargs: int) -> FunctionType:
123
+ co = fn.__code__
124
+ co_flags = co.co_flags & ~HAS_VARSTUFF
125
+ co_args: tuple
126
+ if hasattr(co, "co_qualname"):
127
+ # Python-3.11+ code signature
128
+ co_args = (
129
+ nargs,
130
+ 0,
131
+ 0,
132
+ co.co_nlocals,
133
+ co.co_stacksize,
134
+ co_flags,
135
+ co.co_code,
136
+ co.co_consts,
137
+ co.co_names,
138
+ co.co_varnames,
139
+ co.co_filename,
140
+ co.co_name,
141
+ co.co_qualname, # type: ignore[attr-defined]
142
+ co.co_firstlineno,
143
+ co.co_lnotab,
144
+ co.co_exceptiontable, # type: ignore[attr-defined]
145
+ co.co_freevars,
146
+ co.co_cellvars,
147
+ )
148
+ elif hasattr(co, "co_posonlyargcount"):
149
+ co_args = (
150
+ nargs,
151
+ 0,
152
+ 0,
153
+ co.co_nlocals,
154
+ co.co_stacksize,
155
+ co_flags,
156
+ co.co_code,
157
+ co.co_consts,
158
+ co.co_names,
159
+ co.co_varnames,
160
+ co.co_filename,
161
+ co.co_name,
162
+ co.co_firstlineno,
163
+ co.co_lnotab,
164
+ co.co_freevars,
165
+ co.co_cellvars,
166
+ )
167
+ else:
168
+ co_args = (
169
+ nargs,
170
+ 0,
171
+ co.co_nlocals,
172
+ co.co_stacksize,
173
+ co_flags,
174
+ co.co_code,
175
+ co.co_consts,
176
+ co.co_names,
177
+ co.co_varnames,
178
+ co.co_filename,
179
+ co.co_name,
180
+ co.co_firstlineno,
181
+ co.co_lnotab,
182
+ co.co_freevars,
183
+ co.co_cellvars,
184
+ )
185
+ new_code = CodeType(*co_args) # type: ignore[arg-type]
186
+ return FunctionType(
187
+ new_code, fn.__globals__, fn.__name__, fn.__defaults__, fn.__closure__
188
+ )
189
+
190
+ # we need to insert placeholder nodes for *args and **kwargs
191
+ # we can't call this function normally, otherwise it would try to unpack them
192
+ # instead, let's make python think that args and kwargs are normal variables
193
+
194
+
195
+ @compatibility(is_backward_compatible=False)
196
+ class PHBase:
197
+ """
198
+ Object representing an input placeholder to `concrete_args`
199
+ """
200
+
201
+ def __repr__(self):
202
+ return "PH"
203
+
204
+
205
+ PH = PHBase()
206
+
207
+
208
+ @compatibility(is_backward_compatible=False)
209
+ class PHWithMeta(PHBase):
210
+ """
211
+ Object representing an input placeholder to `concrete_args`
212
+ """
213
+ def __init__(self, ph_key: Optional[str] = None):
214
+ super().__init__()
215
+
216
+ # Provide a key for the user to identify the placeholder node during analysis
217
+ self.ph_key = ph_key
218
+
219
+
220
+ @compatibility(is_backward_compatible=True)
221
+ class Tracer(TracerBase):
222
+ # Reference: https://github.com/pytorch/pytorch/issues/54354
223
+ # The first line of this docstring overrides the one Sphinx generates for the
224
+ # documentation. We need it so that Sphinx doesn't leak `math`s path from the
225
+ # build environment (e.g. `<module 'math' from '/leaked/path').
226
+
227
+ """Tracer(autowrap_modules=(math,), autowrap_functions=())
228
+
229
+ ``Tracer`` is the class that implements the symbolic tracing functionality
230
+ of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
231
+ to ``Tracer().trace(m)``.
232
+
233
+ Tracer can be subclassed to override various behaviors of the tracing
234
+ process. The different behaviors that can be overridden are described
235
+ in the docstrings of the methods on this class.
236
+ """
237
+
238
+ # Not checking BC on this API because the default value for `autowrap_modules`
239
+ # includes the local filepath to the `math` module, which would jitter
240
+ # across machines.
241
+ @compatibility(is_backward_compatible=True)
242
+ def __init__(
243
+ self,
244
+ autowrap_modules: Tuple[ModuleType] = (math,),
245
+ autowrap_functions: Tuple[Callable, ...] = (),
246
+ param_shapes_constant: bool = False,
247
+ ) -> None:
248
+ # This method's signature is overridden by the first line of this class'
249
+ # docstring. If this method's signature is modified, the signature that
250
+ # overrides it also should be modified accordingly.
251
+
252
+ """
253
+ Construct a Tracer object.
254
+
255
+ Args:
256
+
257
+ autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`,
258
+ Python modules whose functions should be wrapped automatically
259
+ without needing to use fx.wrap(). Backward-compatibility for
260
+ this parameter is guaranteed.
261
+
262
+ autowrap_functions (Tuple[Callable, ...]): defaults to `()`,
263
+ Python functions that should be wrapped automatically without
264
+ needing to use fx.wrap(). Backward compatibility for this
265
+ parameter is guaranteed.
266
+
267
+ param_shapes_constant (bool): When this flag is set, calls to shape,
268
+ size and a few other shape like attributes of a module's parameter
269
+ will be evaluated directly, rather than returning a new Proxy value
270
+ for an attribute access. Backward compatibility for this parameter
271
+ is guaranteed.
272
+ """
273
+
274
+ super().__init__()
275
+
276
+ # Functions we will eagerly wrap when we see them while tracing
277
+ # this captures both `math.sqrt()` and `from math import sqrt` automatically
278
+ self._autowrap_function_ids: Set[int] = {
279
+ id(value)
280
+ for name, value in chain(*[m.__dict__.items() for m in autowrap_modules])
281
+ if not name.startswith("_") and callable(value)
282
+ }
283
+ self._autowrap_function_ids.update({id(f) for f in autowrap_functions})
284
+
285
+ # Python modules to apply autowrap to at the start, in addition to
286
+ # modules we see while tracing
287
+ self._autowrap_search: List[ModuleType] = list(autowrap_modules)
288
+ self.param_shapes_constant = param_shapes_constant
289
+
290
+ self.submodule_paths: Optional[Dict[torch.nn.Module, str]] = None
291
+ self.root_module_name: str = ""
292
+ # Maps the containing module's name to the operator name
293
+ self.scope = Scope("", None)
294
+ # Records the module call stack
295
+ self.module_stack = collections.OrderedDict()
296
+ # Mapping of node name to module scope
297
+ self.node_name_to_scope: Dict[str, Tuple[str, type]] = {}
298
+
299
+ @compatibility(is_backward_compatible=True)
300
+ def create_arg(self, a: Any) -> "Argument":
301
+ """
302
+ A method to specify the behavior of tracing when preparing values to
303
+ be used as arguments to nodes in the ``Graph``.
304
+
305
+ By default, the behavior includes:
306
+
307
+ #. Iterate through collection types (e.g. tuple, list, dict) and recursively
308
+ call ``create_args`` on the elements.
309
+ #. Given a Proxy object, return a reference to the underlying IR ``Node``
310
+ #. Given a non-Proxy Tensor object, emit IR for various cases:
311
+
312
+ * For a Parameter, emit a ``get_attr`` node referring to that Parameter
313
+ * For a non-Parameter Tensor, store the Tensor away in a special
314
+ attribute referring to that attribute.
315
+
316
+ This method can be overridden to support more types.
317
+
318
+ Args:
319
+
320
+ a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
321
+
322
+
323
+ Returns:
324
+
325
+ The value ``a`` converted into the appropriate ``Argument``
326
+ """
327
+ # The base tracer is used to construct Graphs when there is no associated
328
+ # module hierarchy, so it can never create parameter references.
329
+ # The default tracer adds the ability to refer to parameters when
330
+ # tracing modules.
331
+ if isinstance(a, torch.nn.Parameter):
332
+ for n, p in self.root.named_parameters():
333
+ if a is p:
334
+ return self.create_node("get_attr", n, (), {})
335
+ raise NameError("parameter is not a member of this module")
336
+ elif isinstance(a, torch.Tensor):
337
+ for n_, p_ in self.root.named_buffers():
338
+ if a is p_:
339
+ return self.create_node("get_attr", n_, (), {})
340
+ elif isinstance(a, torch.nn.Module):
341
+ for n_, p_ in self.root.named_modules():
342
+ if a is p_:
343
+ return self.create_node("get_attr", n_, (), {})
344
+ # For NamedTuple instances that appear literally as args, we emit
345
+ # a node to construct the NamedTuple and use that Node as the argument.
346
+ if isinstance(a, tuple) and hasattr(a, "_fields"):
347
+ args = tuple(self.create_arg(elem) for elem in a)
348
+ return self.create_node("call_function", a.__class__, args, {})
349
+
350
+ # Tensors do not have a reliable string repr() from which they can be
351
+ # constructed (and we probably don't want to rely on that, either), so
352
+ # for any constant Tensor values we encounter, first search for if they
353
+ # are an attribute of some module in the module hierarchy. If so, emit
354
+ # a get_attr to retrieve that tensor. Otherwise, we'll store away the
355
+ # tensor value into a special attribute on the Module s.t. we can
356
+ # retrieve it with a get_attr.
357
+ if isinstance(a, (torch.Tensor, ScriptObject)):
358
+ qualname: Optional[str] = self.tensor_attrs.get(a)
359
+
360
+ # Tensor was not found in the Module hierarchy, stow it away in a
361
+ # special attribute and set the qualname to refer to that
362
+ if not qualname:
363
+ i = 0
364
+ while True:
365
+ qualname = f"_tensor_constant{i}"
366
+ if not hasattr(self.root, qualname):
367
+ break
368
+ i += 1
369
+ self.tensor_attrs[a] = qualname
370
+ setattr(self.root, qualname, a)
371
+
372
+ return self.create_node("get_attr", qualname, (), {})
373
+
374
+ if type(a) in _proxyable_classes:
375
+ # This is an instance of a proxyable class for which we did not
376
+ # witness its construction. Intern this as a constant attribute
377
+
378
+ # TODO: binary search
379
+ i = 0
380
+ while True:
381
+ qualname = f"_{a.__class__.__name__}_constant_{i}"
382
+ if not hasattr(self.root, qualname):
383
+ break
384
+ i += 1
385
+ setattr(self.root, qualname, a)
386
+
387
+ return self.create_node("get_attr", qualname, (), {})
388
+
389
+ return super().create_arg(a)
390
+
391
+ @compatibility(is_backward_compatible=True)
392
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
393
+ """
394
+ A method to specify whether a given ``nn.Module`` is a "leaf" module.
395
+
396
+ Leaf modules are the atomic units that appear in
397
+ the IR, referenced by ``call_module`` calls. By default,
398
+ Modules in the PyTorch standard library namespace (torch.nn)
399
+ are leaf modules. All other modules are traced through and
400
+ their constituent ops are recorded, unless specified otherwise
401
+ via this parameter.
402
+
403
+ Args:
404
+
405
+ m (Module): The module being queried about
406
+ module_qualified_name (str): The path to root of this module. For example,
407
+ if you have a module hierarchy where submodule ``foo`` contains
408
+ submodule ``bar``, which contains submodule ``baz``, that module will
409
+ appear with the qualified name ``foo.bar.baz`` here.
410
+ """
411
+ return (
412
+ (m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn"))
413
+ and not isinstance(m, torch.nn.Sequential)
414
+ )
415
+
416
+ @compatibility(is_backward_compatible=True)
417
+ def path_of_module(self, mod: torch.nn.Module) -> str:
418
+ """
419
+ Helper method to find the qualified name of ``mod`` in the Module hierarchy
420
+ of ``root``. For example, if ``root`` has a submodule named ``foo``, which has
421
+ a submodule named ``bar``, passing ``bar`` into this function will return
422
+ the string "foo.bar".
423
+
424
+ Args:
425
+
426
+ mod (str): The ``Module`` to retrieve the qualified name for.
427
+ """
428
+ # Prefer the O(1) algorithm
429
+ if self.submodule_paths:
430
+ path = self.submodule_paths.get(mod)
431
+ if path is None:
432
+ raise NameError("module is not installed as a submodule")
433
+ assert isinstance(path, str)
434
+ return path
435
+ # O(N^2) fallback in the case that we didn't store the submodule
436
+ # paths.
437
+ else:
438
+ for n, p in self.root.named_modules():
439
+ if mod is p:
440
+ return n
441
+ raise NameError("module is not installed as a submodule")
442
+
443
+ @compatibility(is_backward_compatible=True)
444
+ def call_module(
445
+ self,
446
+ m: torch.nn.Module,
447
+ forward: Callable[..., Any],
448
+ args: Tuple[Any, ...],
449
+ kwargs: Dict[str, Any],
450
+ ) -> Any:
451
+ """
452
+ Method that specifies the behavior of this ``Tracer`` when it encounters
453
+ a call to an ``nn.Module`` instance.
454
+
455
+ By default, the behavior is to check if the called module is a leaf module
456
+ via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to
457
+ ``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through
458
+ the operations in its ``forward`` function.
459
+
460
+ This method can be overridden to--for example--create nested traced
461
+ GraphModules, or any other behavior you would want while tracing across
462
+ ``Module`` boundaries.
463
+
464
+ Args:
465
+
466
+ m (Module): The module for which a call is being emitted
467
+ forward (Callable): The forward() method of the ``Module`` to be invoked
468
+ args (Tuple): args of the module callsite
469
+ kwargs (Dict): kwargs of the module callsite
470
+
471
+ Return:
472
+
473
+ The return value from the Module call. In the case that a ``call_module``
474
+ node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever
475
+ value was returned from the ``Module`` invocation.
476
+ """
477
+ module_qualified_name = self.path_of_module(m)
478
+ with ScopeContextManager(self.scope, Scope(module_qualified_name, type(m))) as _scope:
479
+ # module_stack is an ordered dict so writing then deleting the
480
+ # entry is equivalent to push/pop on a list
481
+ self.module_stack[_scope.module_path] = (module_qualified_name, _scope.module_type)
482
+ if not self.is_leaf_module(m, module_qualified_name):
483
+ ret_val = forward(*args, **kwargs)
484
+ else:
485
+ ret_val = self.create_proxy("call_module", module_qualified_name, args, kwargs)
486
+ key, _ = self.module_stack.popitem(last=True)
487
+ assert key == _scope.module_path, f" Unexpected key {key}"
488
+
489
+ return ret_val
490
+
491
+ @compatibility(is_backward_compatible=False)
492
+ def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
493
+ """
494
+ Method that specifies the behavior of this ``Tracer`` when we call getattr
495
+ on a call to an ``nn.Module`` instance.
496
+
497
+ By default, the behavior is to return a proxy value for the attribute. It
498
+ also stores the proxy value in the ``parameter_proxy_cache``, so that future
499
+ calls will reuse the proxy rather than creating a new one.
500
+
501
+ This method can be overridden to --for example-- not return proxies when
502
+ querying parameters.
503
+
504
+ Args:
505
+
506
+ attr (str): The name of the attribute being queried
507
+ attr_val (Any): The value of the attribute
508
+ parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies
509
+
510
+ Return:
511
+
512
+ The return value from the getattr call.
513
+ """
514
+ def maybe_get_proxy_for_attr(
515
+ attr_val, collection_to_search, parameter_proxy_cache
516
+ ):
517
+ for n, p in collection_to_search:
518
+ if attr_val is p:
519
+ if n not in parameter_proxy_cache:
520
+ kwargs = {}
521
+ if (
522
+ "proxy_factory_fn"
523
+ in inspect.signature(self.create_proxy).parameters
524
+ ):
525
+ kwargs["proxy_factory_fn"] = (
526
+ None
527
+ if not self.param_shapes_constant
528
+ else lambda node: ParameterProxy(
529
+ self, node, n, attr_val
530
+ )
531
+ )
532
+ val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
533
+ parameter_proxy_cache[n] = val_proxy
534
+ return parameter_proxy_cache[n]
535
+ return None
536
+
537
+ if isinstance(attr_val, torch.nn.Parameter):
538
+ maybe_parameter_proxy = maybe_get_proxy_for_attr(
539
+ attr_val, self.root.named_parameters(), parameter_proxy_cache
540
+ )
541
+ if maybe_parameter_proxy is not None:
542
+ return maybe_parameter_proxy
543
+
544
+ if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
545
+ maybe_buffer_proxy = maybe_get_proxy_for_attr(
546
+ attr_val, self.root.named_buffers(), parameter_proxy_cache
547
+ )
548
+ if maybe_buffer_proxy is not None:
549
+ return maybe_buffer_proxy
550
+
551
+ return attr_val
552
+
553
+ # This method will be refactored
554
+ @compatibility(is_backward_compatible=False)
555
+ def create_args_for_root(self, root_fn, is_module, concrete_args=None):
556
+ """
557
+ Create ``placeholder`` nodes corresponding to the signature of the ``root``
558
+ Module. This method introspects root's signature and emits those
559
+ nodes accordingly, also supporting ``*args`` and ``**kwargs``.
560
+ """
561
+ # In some cases, a function or method has been decorated with a wrapper
562
+ # defined via ``functools.wraps``. In this case, the outer code object
563
+ # will likely not contain the actual parameters we care about, so unwrap
564
+ # the function to get to the innermost callable.
565
+ fn_for_analysis = inspect.unwrap(root_fn)
566
+ co = fn_for_analysis.__code__
567
+ total_args = co.co_argcount + co.co_kwonlyargcount
568
+ orig_args = list(co.co_varnames)
569
+ names_iter = iter(co.co_varnames)
570
+ args: List[Any] = []
571
+ skip_arg_idx = 0
572
+ if is_module:
573
+ if total_args == 0:
574
+ raise RuntimeError(
575
+ "``self`` argument cannot be part of *args expansion!"
576
+ )
577
+ skip_arg_idx = 1
578
+ next(names_iter) # skip self
579
+ args.append(self.root)
580
+
581
+ sig = inspect.signature(fn_for_analysis)
582
+
583
+ def proxy_placeholder(name: str):
584
+ if concrete_args is not None and name in concrete_args:
585
+ cnt = 0
586
+
587
+ def replace_ph(x):
588
+ nonlocal cnt
589
+ cnt += 1
590
+ param = sig.parameters[name]
591
+ default = (
592
+ ()
593
+ if param.default is inspect.Parameter.empty
594
+ else (param.default,)
595
+ )
596
+ out = self.create_proxy(
597
+ "placeholder", f"{name}_{str(cnt)}", default, {}
598
+ )
599
+ if isinstance(x, PHBase):
600
+ def transfer_attrs(fr, to):
601
+ for attr_name in dir(fr):
602
+ attr_val = getattr(fr, attr_name)
603
+ if (
604
+ not callable(attr_val)
605
+ and not attr_name.startswith("__")
606
+ and not hasattr(to, attr_name)
607
+ ):
608
+ setattr(to, attr_name, attr_val)
609
+
610
+ if x != PH:
611
+ # Transfer attrs in the case where you're using a placeholder other
612
+ # than the singleton PH (PH has no attributes to transfer).
613
+ # Proxies were created out of the placeholders.
614
+ # Transfer any metadata (put on the placeholders in the form of
615
+ # attributes set by the user) from the placeholder to the
616
+ # underlying nodes (the proxy is unwrapped by the user, but
617
+ # the metadata should hold).
618
+ transfer_attrs(fr=x, to=out.node)
619
+
620
+ return out
621
+ # Union[int, bool] == bool in Python <= 3.6
622
+ if (
623
+ type(x) == bool
624
+ or type(x) in base_types
625
+ and type(x) != torch.Tensor
626
+ ):
627
+ torch._assert(
628
+ out == x,
629
+ f"{name} has been specialized to have value {x} but got another value",
630
+ )
631
+ elif type(x) == type(None):
632
+ args = (
633
+ out,
634
+ f"{name} has been specialized to have value None but got another value",
635
+ )
636
+ self.create_proxy("call_function", _assert_is_none, args, {})
637
+ else:
638
+ warnings.warn(
639
+ f"Was not able to add assertion to guarantee correct input {name} to "
640
+ f"specialized function. It is up to the user to make sure that your inputs match the "
641
+ f"inputs you specialized the function with."
642
+ )
643
+
644
+ return x
645
+
646
+ return pytree.tree_map(replace_ph, concrete_args[name])
647
+ if name[0] == "*":
648
+ default = ()
649
+ else:
650
+ param = sig.parameters[name]
651
+ default = () if param.default is inspect.Parameter.empty else (param.default,) # type: ignore[assignment]
652
+ return self.create_proxy(
653
+ "placeholder",
654
+ name,
655
+ default,
656
+ {},
657
+ type_expr=fn_for_analysis.__annotations__.get(name, None)
658
+ )
659
+
660
+ arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)]
661
+ if isinstance(concrete_args, tuple):
662
+ if len(arg_names) != len(concrete_args):
663
+ raise RuntimeError(
664
+ f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments"
665
+ )
666
+ concrete_args = dict(zip(arg_names, concrete_args))
667
+ args.extend(proxy_placeholder(names) for names in arg_names)
668
+
669
+ if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF:
670
+ # TODO: type annotations for *args and **kwargs
671
+ if co.co_flags & inspect.CO_VARARGS:
672
+ args.append(proxy_placeholder("*" + next(names_iter)))
673
+ if co.co_flags & inspect.CO_VARKEYWORDS:
674
+ args.append(proxy_placeholder("**" + next(names_iter)))
675
+ root_fn = _patch_function(root_fn, len(args))
676
+
677
+ flat_args, in_spec = pytree.tree_flatten(tuple(args))
678
+ if any(not isinstance(i, pytree.LeafSpec) for i in in_spec.children_specs):
679
+ # In the case that we have pytree-flattened inputs in
680
+ # `concrete_args`, generate a flattening wrapper around the
681
+ # original root function and return that.
682
+ self.graph._codegen = _PyTreeCodeGen(
683
+ _PyTreeInfo(orig_args[:total_args], in_spec, None)
684
+ )
685
+
686
+ def flatten_fn(*args):
687
+ tree_args = pytree.tree_unflatten(list(args), in_spec)
688
+ tree_out = root_fn(*tree_args)
689
+ out_args, out_spec = pytree.tree_flatten(tree_out)
690
+ assert isinstance(self.graph._codegen, _PyTreeCodeGen)
691
+ self.graph._codegen.pytree_info = (
692
+ self.graph._codegen.pytree_info._replace(out_spec=out_spec)
693
+ )
694
+ return out_args
695
+
696
+ return flatten_fn, flat_args
697
+ return root_fn, args
698
+
699
+ @compatibility(is_backward_compatible=True)
700
+ def trace(
701
+ self,
702
+ root: Union[torch.nn.Module, Callable[..., Any]],
703
+ concrete_args: Optional[Dict[str, Any]] = None,
704
+ ) -> Graph:
705
+ """
706
+ Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root``
707
+ can either be an ``nn.Module`` instance or a Python callable.
708
+
709
+ Note that after this call, ``self.root`` may be different from the ``root`` passed
710
+ in here. For example, when a free function is passed to ``trace()``, we will
711
+ create an ``nn.Module`` instance to use as the root and add embedded constants
712
+ to.
713
+
714
+
715
+ Args:
716
+
717
+ root (Union[Module, Callable]): Either a ``Module`` or a function to be
718
+ traced through. Backwards-compatibility for this parameter is
719
+ guaranteed.
720
+ concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
721
+ not be treated as Proxies. This parameter is experimental and
722
+ its backwards-compatibility is *NOT* guaranteed.
723
+
724
+ Returns:
725
+
726
+ A ``Graph`` representing the semantics of the passed-in ``root``.
727
+ """
728
+ global _is_fx_tracing_flag
729
+ old_is_fx_tracing_flag = _is_fx_tracing_flag
730
+ _is_fx_tracing_flag = True
731
+ try:
732
+ if isinstance(root, torch.nn.Module):
733
+ self.root = root
734
+
735
+ assert hasattr(
736
+ type(root), self.traced_func_name
737
+ ), f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}"
738
+
739
+ fn = getattr(type(root), self.traced_func_name)
740
+ self.root_module_name = root._get_name()
741
+ self.submodule_paths = {mod: name for name, mod in root.named_modules()}
742
+ else:
743
+ self.root = torch.nn.Module()
744
+ fn = root
745
+
746
+ tracer_cls: Optional[Type[Tracer]] = getattr(self, "__class__", None)
747
+ self.graph = Graph(tracer_cls=tracer_cls)
748
+ if hasattr(fn, '__code__'):
749
+ code = fn.__code__
750
+ self.graph._co_fields = {
751
+ 'co_name': code.co_name,
752
+ 'co_filename': code.co_filename,
753
+ 'co_firstlineno': code.co_firstlineno,
754
+ }
755
+
756
+ # When we encounter a Tensor value that's not a parameter, we check whether it
757
+ # is some other attribute on the model. Construct a dict mapping Tensor
758
+ # values to the qualified name here for efficiency. This is used downstream
759
+ # in create_arg
760
+ self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
761
+
762
+ def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
763
+ for k, v in m.__dict__.items():
764
+ if isinstance(v, (torch.Tensor, ScriptObject)):
765
+ self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
766
+ for k, v in m.named_children():
767
+ collect_tensor_attrs(v, prefix_atoms + [k])
768
+
769
+ collect_tensor_attrs(self.root, [])
770
+
771
+ assert isinstance(fn, FunctionType)
772
+
773
+ fn_globals = fn.__globals__ # run before it gets patched
774
+ fn, args = self.create_args_for_root(
775
+ fn, isinstance(root, torch.nn.Module), concrete_args
776
+ )
777
+
778
+ parameter_proxy_cache: Dict[
779
+ str, Proxy
780
+ ] = {} # Reduce number of get_attr calls
781
+
782
+ # Method dispatch on parameters is not recorded unless it's directly used.
783
+ # Thus, we need to insert a proxy when __getattr__ requests a parameter.
784
+ @functools.wraps(_orig_module_getattr)
785
+ def module_getattr_wrapper(mod, attr):
786
+ attr_val = _orig_module_getattr(mod, attr)
787
+ return self.getattr(attr, attr_val, parameter_proxy_cache)
788
+
789
+ @functools.wraps(_orig_module_call)
790
+ def module_call_wrapper(mod, *args, **kwargs):
791
+ def forward(*args, **kwargs):
792
+ return _orig_module_call(mod, *args, **kwargs)
793
+
794
+ _autowrap_check(
795
+ patcher,
796
+ getattr(getattr(mod, "forward", mod), "__globals__", {}),
797
+ self._autowrap_function_ids,
798
+ )
799
+ return self.call_module(mod, forward, args, kwargs)
800
+
801
+ with _Patcher() as patcher:
802
+ # allow duplicate patches to support the case of nested calls
803
+ patcher.patch_method(
804
+ torch.nn.Module,
805
+ "__getattr__",
806
+ module_getattr_wrapper,
807
+ deduplicate=False,
808
+ )
809
+ patcher.patch_method(
810
+ torch.nn.Module, "__call__", module_call_wrapper, deduplicate=False
811
+ )
812
+ _patch_wrapped_functions(patcher)
813
+ _autowrap_check(patcher, fn_globals, self._autowrap_function_ids)
814
+ for module in self._autowrap_search:
815
+ _autowrap_check(
816
+ patcher, module.__dict__, self._autowrap_function_ids
817
+ )
818
+ self.create_node(
819
+ "output",
820
+ "output",
821
+ (self.create_arg(fn(*args)),),
822
+ {},
823
+ type_expr=fn.__annotations__.get("return", None),
824
+ )
825
+
826
+ self.submodule_paths = None
827
+ finally:
828
+ _is_fx_tracing_flag = old_is_fx_tracing_flag
829
+ return self.graph
830
+
831
+ def __deepcopy__(self, memo):
832
+ # _autowrap_search contains modules, which cannot be deepcopied.
833
+ new_tracer = Tracer.__new__(Tracer)
834
+
835
+ for k, v in self.__dict__.items():
836
+ if k in {'_autowrap_search'}:
837
+ new_obj = copy.copy(v)
838
+ else:
839
+ new_obj = copy.deepcopy(v, memo)
840
+
841
+ new_tracer.__dict__[k] = new_obj
842
+
843
+ return new_tracer
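For reference, the class above can be used directly in place of ``symbolic_trace``; this minimal sketch (``MyModule`` is a hypothetical module, used only for illustration) mirrors what ``symbolic_trace`` does internally:

import torch
import torch.fx

class MyModule(torch.nn.Module):          # hypothetical module for illustration
    def forward(self, x):
        return x.relu() + 1

tracer = torch.fx.Tracer()
graph = tracer.trace(MyModule())          # returns a torch.fx.Graph
gm = torch.fx.GraphModule(tracer.root, graph)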
844
+
845
+
846
+ # Dictionary of (id(globals dict), function name) => globals_dict to patch for
847
+ # the purposes of the wrap() API.
848
+ # We key by the globals dict id and function name to ensure we're wrapping a given
849
+ # function only once.
850
+ _wrapped_fns_to_patch: Dict[Tuple[int, str], dict] = {}
851
+
852
+ # List of methods on classes to wrap (class type, function name)
853
+ # this currently only works for Tensor.* methods that aren't traced properly
854
+ _wrapped_methods_to_patch: List[Tuple[type, str]] = []
855
+
856
+ if os.environ.get("FX_PATCH_GETITEM") == "1":
857
+ # This change is needed to trace models like PositionalEmbedding from BERT:
858
+ # https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py
859
+ # but causes issues in quantization documented here:
860
+ # https://github.com/pytorch/pytorch/issues/50710
861
+ # once that is fixed we can make this the default behavior.
862
+ _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
863
+
864
+
865
+ def _find_proxy(*objects_to_search):
866
+ """
867
+ Recursively search a data structure for a Proxy() and return it,
868
+ returning None if not found.
869
+ """
870
+ proxy = None
871
+
872
+ def find_proxy(x):
873
+ nonlocal proxy
874
+ if isinstance(x, Proxy):
875
+ proxy = x
876
+
877
+ map_aggregate(objects_to_search, find_proxy)
878
+ return proxy
879
+
880
+
881
+ def _create_wrapped_func(orig_fn):
882
+ @functools.wraps(orig_fn)
883
+ def wrapped(*args, **kwargs):
884
+ """
885
+ Given a closed-over ``orig_fn`` to invoke, search the args and kwargs for
886
+ a Proxy object. If there is one, emit a ``call_function`` node to preserve the
887
+ call to this leaf function directly. Otherwise, just return the results of
888
+ this function call, as this function is not being traced.
889
+ """
890
+ proxy = _find_proxy(args, kwargs)
891
+ if proxy is not None:
892
+ return_proxy = proxy.tracer.create_proxy(
893
+ "call_function", orig_fn, args, kwargs
894
+ )
895
+ return_proxy.node.meta["is_wrapped"] = True
896
+ return return_proxy
897
+ return orig_fn(*args, **kwargs)
898
+
899
+ return wrapped
900
+
901
+
902
+ def _create_wrapped_method(cls, name):
903
+ orig_fn = getattr(cls, name)
904
+
905
+ @functools.wraps(orig_fn)
906
+ def wrapped(*args, **kwargs):
907
+ """
908
+ Search the args and kwargs for a Proxy object. If there is one,
909
+ emit a ``call_method`` node to preserve the call to this method
910
+ directly. Otherwise, just return the results of this function
911
+ call, as this function is not being traced.
912
+ """
913
+ proxy = _find_proxy(args, kwargs)
914
+ if proxy is not None:
915
+ return proxy.tracer.create_proxy("call_method", name, args, kwargs)
916
+ return orig_fn(*args, **kwargs)
917
+
918
+ return wrapped
919
+
920
+
921
+ class _PatchedFn(NamedTuple):
922
+ frame_dict: Any
923
+ fn_name: str
924
+ orig_fn: Any
925
+
926
+ def revert(self):
927
+ raise NotImplementedError()
928
+
929
+
930
+ class _PatchedFnSetItem(_PatchedFn):
931
+ def revert(self):
932
+ self.frame_dict[self.fn_name] = self.orig_fn
933
+
934
+
935
+ class _PatchedFnDel(_PatchedFn):
936
+ def revert(self):
937
+ del self.frame_dict[self.fn_name]
938
+
939
+
940
+ class _PatchedFnSetAttr(_PatchedFn):
941
+ def revert(self):
942
+ setattr(self.frame_dict, self.fn_name, self.orig_fn)
943
+
944
+
945
+ class _Patcher:
946
+ def __init__(self):
947
+ super().__init__()
948
+ self.patches_made: List[_PatchedFn] = []
949
+ self.visited: Set[int] = set()
950
+
951
+ def patch(
952
+ self,
953
+ frame_dict: Dict[str, Any],
954
+ name: str,
955
+ new_fn: Callable,
956
+ deduplicate: bool = True,
957
+ ):
958
+ """
959
+ Replace frame_dict[name] with new_fn until we exit the context manager.
960
+ """
961
+ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
962
+ if name not in frame_dict and hasattr(builtins, name):
963
+ self.patches_made.append(_PatchedFnDel(frame_dict, name, None))
964
+ elif getattr(frame_dict[name], "__fx_already_patched", False):
965
+ return # already patched, no need to do it again
966
+ else:
967
+ self.patches_made.append(
968
+ _PatchedFnSetItem(frame_dict, name, frame_dict[name])
969
+ )
970
+ frame_dict[name] = new_fn
971
+
972
+ def patch_method(
973
+ self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True
974
+ ):
975
+ """
976
+ Replace object_or_dict.name with new_fn until we exit the context manager.
977
+ """
978
+ new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
979
+ orig_fn = getattr(cls, name)
980
+ if getattr(orig_fn, "__fx_already_patched", False):
981
+ return # already patched, no need to do it again
982
+ self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn))
983
+ setattr(cls, name, new_fn)
984
+
985
+ def visit_once(self, thing: Any):
986
+ """Return True on the first call to with thing, otherwise false"""
987
+ idx = id(thing)
988
+ if idx in self.visited:
989
+ return False
990
+ self.visited.add(idx)
991
+ return True
992
+
993
+ def __enter__(self):
994
+ return self
995
+
996
+ def __exit__(self, exc_type, exc_val, exc_tb):
997
+ """
998
+ Undo all the changes made via self.patch() and self.patch_method()
999
+ """
1000
+ while self.patches_made:
1001
+ # unpatch in reverse order to handle duplicates correctly
1002
+ self.patches_made.pop().revert()
1003
+ self.visited.clear()
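An illustrative sketch (not part of the file) of the ``_Patcher`` contract: patches installed inside the ``with`` block are reverted, in reverse order, on exit. The ``math.sqrt`` replacement below is purely for illustration.

import math

def plain_sqrt(x):                        # illustrative replacement function
    return x ** 0.5

with _Patcher() as patcher:
    patcher.patch(math.__dict__, "sqrt", plain_sqrt)
    assert math.sqrt(4.0) == 2.0          # the patched function is in effect here
assert math.sqrt(9.0) == 3.0              # original math.sqrt restored on __exit__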
1004
+
1005
+
1006
+ def _patch_wrapped_functions(patcher: _Patcher):
1007
+ """
1008
+ Go through ``_wrapped_fns_to_patch`` and, for each frame object, wrap
1009
+ the listed global functions in the `_create_wrapped_func` wrapper.
1010
+ """
1011
+ for (_, name), frame_dict in _wrapped_fns_to_patch.copy().items():
1012
+ if name not in frame_dict and hasattr(builtins, name):
1013
+ orig_fn = getattr(builtins, name)
1014
+ else:
1015
+ orig_fn = frame_dict[name]
1016
+ patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn))
1017
+
1018
+ for cls, name in _wrapped_methods_to_patch:
1019
+ patcher.patch_method(cls, name, _create_wrapped_method(cls, name))
1020
+
1021
+
1022
+ def _autowrap_check(
1023
+ patcher: _Patcher, frame_dict: Dict[str, Any], function_ids: Set[int]
1024
+ ):
1025
+ """
1026
+ Some functions, like `math.sqrt`, are common enough that we want to automatically wrap them as we see them.
1027
+ This method searches a scope for them and patches them if found.
1028
+ """
1029
+ if patcher.visit_once(frame_dict):
1030
+ for name, value in frame_dict.items():
1031
+ if (
1032
+ not name.startswith("_")
1033
+ and callable(value)
1034
+ and id(value) in function_ids
1035
+ ):
1036
+ patcher.patch(frame_dict, name, _create_wrapped_func(value))
1037
+
1038
+
1039
+ @compatibility(is_backward_compatible=True)
1040
+ def wrap(fn_or_name: Union[str, Callable]):
1041
+ """
1042
+ This function can be called at module-level scope to register fn_or_name as a "leaf function".
1043
+ A "leaf function" will be preserved as a CallFunction node in the FX trace instead of being
1044
+ traced through::
1045
+
1046
+ # foo/bar/baz.py
1047
+ def my_custom_function(x, y):
1048
+ return x * x + y * y
1049
+
1050
+ torch.fx.wrap('my_custom_function')
1051
+
1052
+ def fn_to_be_traced(x, y):
1053
+ # When symbolic tracing, the below call to my_custom_function will be inserted into
1054
+ # the graph rather than tracing it.
1055
+ return my_custom_function(x, y)
1056
+
1057
+ This function can also equivalently be used as a decorator::
1058
+
1059
+ # foo/bar/baz.py
1060
+ @torch.fx.wrap
1061
+ def my_custom_function(x, y):
1062
+ return x * x + y * y
1063
+
1064
+ A wrapped function can be thought of as a "leaf function", analogous to the concept of
1065
+ "leaf modules", that is, they are functions that are left as calls in the FX trace
1066
+ rather than traced through.
1067
+
1068
+ Args:
1069
+
1070
+ fn_or_name (Union[str, Callable]): The function or name of the global function to insert into the
1071
+ graph when it's called
1072
+ """
1073
+ if not callable(fn_or_name) and not isinstance(fn_or_name, str):
1074
+ raise RuntimeError(
1075
+ "Unsupported type for global function! Must be either a callable or "
1076
+ "string name"
1077
+ )
1078
+
1079
+ if callable(fn_or_name):
1080
+ assert not isinstance(fn_or_name, str) # to make mypy happy
1081
+ fn_name = fn_or_name.__name__
1082
+ else:
1083
+ assert isinstance(
1084
+ fn_or_name, str
1085
+ ), "fn_or_name must be a global function or string name"
1086
+ fn_name = fn_or_name
1087
+
1088
+ currentframe = inspect.currentframe()
1089
+ assert currentframe is not None
1090
+ f = currentframe.f_back
1091
+ assert f is not None
1092
+ if f.f_code.co_name != "<module>":
1093
+ raise NotImplementedError("wrap must be called at the top level of a module")
1094
+
1095
+ # consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search
1096
+ # semantics would be slightly different, but would add support `from x import wrapped_function`
1097
+ _wrapped_fns_to_patch[(id(f.f_globals), fn_name)] = f.f_globals
1098
+ return fn_or_name
1099
+
1100
+
1101
+ @compatibility(is_backward_compatible=True)
1102
+ def symbolic_trace(
1103
+ root: Union[torch.nn.Module, Callable[..., Any]],
1104
+ concrete_args: Optional[Dict[str, Any]] = None,
1105
+ ) -> GraphModule:
1106
+ """
1107
+ Symbolic tracing API
1108
+
1109
+ Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
1110
+ constructed by recording operations seen while tracing through ``root``.
1111
+
1112
+ ``concrete_args`` allows you to partially specialize your function, for example to remove control flow or data structures.
1113
+
1114
+ For example::
1115
+
1116
+ def f(a, b):
1117
+ if b == True:
1118
+ return a
1119
+ else:
1120
+ return a*2
1121
+
1122
+ FX typically cannot trace through this due to the presence of control
1123
+ flow. However, we can use `concrete_args` to specialize on the value of
1124
+ `b` to trace through this::
1125
+
1126
+ f = fx.symbolic_trace(f, concrete_args={'b': False})
1127
+ assert f(3, False) == 6
1128
+
1129
+ Note that although you can still pass in different values of `b`, they will be ignored.
1130
+
1131
+ We can also use `concrete_args` to eliminate data-structure handling from
1132
+ our function. This will use pytrees to flatten your input. To avoid
1133
+ overspecializing, pass in `fx.PH` for values that shouldn't be
1134
+ specialized. For example::
1135
+
1136
+ def f(x):
1137
+ out = 0
1138
+ for v in x.values():
1139
+ out += v
1140
+ return out
1141
+ f = fx.symbolic_trace(f, concrete_args={'x': {'a': fx.PH, 'b': fx.PH, 'c': fx.PH}})
1142
+ assert f({'a': 1, 'b': 2, 'c': 4}) == 7
1143
+
1144
+
1145
+ Args:
1146
+ root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
1147
+ into a Graph representation.
1148
+ concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized
1149
+
1150
+ Returns:
1151
+ GraphModule: a Module created from the recorded operations from ``root``.
1152
+ """
1153
+ tracer = Tracer()
1154
+ graph = tracer.trace(root, concrete_args)
1155
+ name = (
1156
+ root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
1157
+ )
1158
+ return GraphModule(tracer.root, graph, name)
1159
+
1160
+
1161
+ @wrap
1162
+ def _assert_is_none(value, msg):
1163
+ assert value is None, msg
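Tying the APIs above together, a minimal end-to-end sketch (the function names are illustrative) of ``wrap`` plus ``symbolic_trace`` with ``concrete_args``, mirroring the examples in the docstrings:

import torch
import torch.fx
from torch.fx import symbolic_trace

@torch.fx.wrap
def clamp_positive(x):                    # kept as a leaf call_function node
    return x if x.sum() > 0 else -x

def f(x, flag):
    y = clamp_positive(x)
    if flag:                              # control flow removed by specializing on `flag`
        return y * 2
    return y

gm = symbolic_trace(f, concrete_args={"flag": True})
print(gm.graph)                           # `flag` is specialized; `clamp_positive` stays a leaf call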
env-llmeval/lib/python3.10/site-packages/torch/fx/annotate.py ADDED
@@ -0,0 +1,21 @@
1
+ from torch.fx.proxy import Proxy
2
+ from ._compatibility import compatibility
3
+
4
+ @compatibility(is_backward_compatible=False)
5
+ def annotate(val, type):
6
+ # val could be either a regular value (not tracing)
7
+ # or fx.Proxy (tracing)
8
+ if isinstance(val, Proxy):
9
+ if val.node.type:
10
+ raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
11
+ f" Existing type is {val.node.type} "
12
+ f"and new type is {type}. "
13
+ f"This could happen if you tried to annotate a function parameter "
14
+ f"value (in which case you should use the type slot "
15
+ f"on the function signature) or you called "
16
+ f"annotate on the same value twice")
17
+ else:
18
+ val.node.type = type
19
+ return val
20
+ else:
21
+ return val
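A minimal sketch of ``annotate`` in use: during tracing the value is a ``Proxy`` and the type is recorded on its node; outside of tracing the call is a no-op.

import torch
from torch.fx import symbolic_trace
from torch.fx.annotate import annotate

class M(torch.nn.Module):                 # illustrative module
    def forward(self, x):
        x = annotate(x, torch.Tensor)     # records torch.Tensor on the placeholder node
        return x.relu()

gm = symbolic_trace(M())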
env-llmeval/lib/python3.10/site-packages/torch/fx/config.py ADDED
@@ -0,0 +1,6 @@
1
+ # Whether to disable showing progress on compilation passes
2
+ # A separate config is needed here; importing the dynamo config would create a circular import.
3
+ disable_progress = True
4
+
5
+ # If True, also show the node names in each pass; useful for small models but quite noisy for larger ones.
6
+ verbose_progress = False
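These flags are plain module-level attributes, so callers can flip them at runtime; a minimal sketch:

import torch.fx.config as fx_config

fx_config.disable_progress = False        # show progress for compilation passes
fx_config.verbose_progress = True         # also print per-node names (noisy for large models)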
env-llmeval/lib/python3.10/site-packages/torch/fx/graph.py ADDED
@@ -0,0 +1,1630 @@
1
+ import collections
2
+ from collections import defaultdict
3
+ from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
4
+ import torch.utils._pytree as pytree
5
+ from . import _pytree as fx_pytree
6
+ from ._compatibility import compatibility
7
+
8
+ import contextlib
9
+ from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
10
+ from dataclasses import dataclass
11
+ from contextlib import contextmanager
12
+ import copy
13
+ import enum
14
+ import torch
15
+ import keyword
16
+ import re
17
+ import builtins
18
+ import math
19
+ import warnings
20
+ import inspect
21
+
22
+ __all__ = ["PythonCode", "CodeGen", "Graph"]
23
+
24
+ if TYPE_CHECKING:
25
+ from .graph_module import GraphModule # noqa: F401
26
+ from ._symbolic_trace import Tracer # noqa: F401
27
+
28
+
29
+ # Mapping of builtins to their `typing` equivalent.
30
+ _origin_type_map = {
31
+ list: List,
32
+ dict: Dict,
33
+ set: Set,
34
+ frozenset: FrozenSet,
35
+ tuple: Tuple,
36
+ }
37
+
38
+
39
+ # Signature for functions that transform the body (`list[str]`) of the
40
+ # generated code
41
+ TransformCodeFunc = Callable[[List[str]], List[str]]
42
+
43
+
44
+ class _CustomBuiltin(NamedTuple):
45
+ """Additional objs that we add to every graph's globals.
46
+
47
+ The repr() for some standard library objects is not valid Python code without
48
+ an import. For common objects of this sort, we bundle them in the globals of
49
+ every FX graph.
50
+ """
51
+ # How to import this object from the standard library.
52
+ import_str: str
53
+ # The actual object, produced from that import string.
54
+ obj: Any
55
+
56
+ _custom_builtins: Dict[str, _CustomBuiltin] = {}
57
+
58
+
59
+ def _register_custom_builtin(name: str, import_str: str, obj: Any):
60
+ _custom_builtins[name] = _CustomBuiltin(import_str, obj)
61
+
62
+
63
+ _register_custom_builtin('inf', 'from math import inf', math.inf)
64
+ _register_custom_builtin('nan', 'from math import nan', math.nan)
65
+ _register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
66
+ _register_custom_builtin('torch', 'import torch', torch)
67
+ _register_custom_builtin('device', 'from torch import device', torch.device)
68
+ _register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
69
+ _register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)
70
+
71
+
72
+ def _is_magic(x: str) -> bool:
73
+ return x.startswith('__') and x.endswith('__')
74
+
75
+
76
+ def _snake_case(s: str) -> str:
77
+ """
78
+ Transforms the given string ``s`` to a Python-style variable name
79
+
80
+ Examples:
81
+ ``mod.snake_case`` -> ``mod.snake_case``
82
+ ``mod.pascalCase`` -> ``mod.pascal_case``
83
+ ``mod.ALL_CAPS`` -> ``mod.all_caps``
84
+ """
85
+ chars = []
86
+ prev_lower = False
87
+ for c in s:
88
+ if prev_lower and c.isupper():
89
+ chars.append('_')
90
+ chars.append(c.lower())
91
+ prev_lower = c.islower()
92
+ return ''.join(chars)
93
+
94
+
95
+ def _is_from_torch(obj: Any) -> bool:
96
+ module_name = getattr(obj, '__module__', None)
97
+ if module_name is not None:
98
+ base_module = module_name.partition('.')[0]
99
+ return (
100
+ base_module == 'torch' and
101
+ not module_name.startswith("torch._dynamo.") and
102
+ not module_name.startswith("torch._inductor.")
103
+ )
104
+
105
+ name = getattr(obj, '__name__', None)
106
+ # exclude torch because torch.torch.torch.torch works. idk mang
107
+ if name is not None and name != 'torch':
108
+ for guess in [torch, torch.nn.functional]:
109
+ if getattr(guess, name, None) is obj:
110
+ return True
111
+
112
+ return False
113
+
114
+
115
+ class _Namespace:
116
+ """A context for associating names uniquely with objects.
117
+
118
+ The following invariants are enforced:
119
+ - Each object gets a single name.
120
+ - Each name is unique within a given namespace.
121
+ - Names generated do not shadow builtins, unless the object is indeed that builtin.
122
+ """
123
+ def __init__(self):
124
+ self._obj_to_name: Dict[Any, str] = {}
125
+ self._unassociated_names = set()
126
+ self._used_names: Set[str] = set()
127
+ self._base_count: Dict[str, int] = defaultdict(int)
128
+
129
+ self._illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
130
+ self._name_suffix_regex = re.compile(r"(.*)_(\d+)$")
131
+
132
+ def create_name(self, candidate: str, obj: Optional[Any]) -> str:
133
+ """Create a unique name.
134
+
135
+ Arguments:
136
+ candidate: used as the basis for the unique name, relevant to the user.
137
+ obj: If not None, an object that will be associated with the unique name.
138
+ """
139
+ if obj is not None and obj in self._obj_to_name:
140
+ return self._obj_to_name[obj]
141
+
142
+ # delete all characters that are illegal in a Python identifier
143
+ candidate = self._illegal_char_regex.sub('_', candidate)
144
+
145
+ if not candidate:
146
+ candidate = '_unnamed'
147
+
148
+ if candidate[0].isdigit():
149
+ candidate = f'_{candidate}'
150
+
151
+ match = self._name_suffix_regex.match(candidate)
152
+ if match is None:
153
+ base = candidate
154
+ num = None
155
+ else:
156
+ base, num_str = match.group(1, 2)
157
+ num = int(num_str)
158
+
159
+ candidate = base if num is None else f'{base}_{num}'
160
+ if not num:
161
+ num = self._base_count[base]
162
+
163
+ while candidate in self._used_names or self._is_illegal_name(candidate, obj):
164
+ num += 1
165
+ candidate = f'{base}_{num}'
166
+
167
+ self._used_names.add(candidate)
168
+ self._base_count[base] = num
169
+ if obj is None:
170
+ self._unassociated_names.add(candidate)
171
+ else:
172
+ self._obj_to_name[obj] = candidate
173
+ return candidate
174
+
175
+ def associate_name_with_obj(self, name: str, obj: Any):
176
+ """Associate a unique name with an object.
177
+
178
+ Neither `name` nor `obj` should be associated already.
179
+ """
180
+ assert obj not in self._obj_to_name
181
+ assert name in self._unassociated_names
182
+ self._obj_to_name[obj] = name
183
+ self._unassociated_names.remove(name)
184
+
185
+ def _is_illegal_name(self, name: str, obj: Any) -> bool:
186
+ # 1. keywords are never allowed as names.
187
+ if name in keyword.kwlist:
188
+ return True
189
+
190
+ # 2. Can't shadow a builtin name, unless you *are* that builtin.
191
+ if name in builtins.__dict__:
192
+ return obj is not builtins.__dict__[name]
193
+
194
+ # 3. Can't shadow our custom builtins either
195
+ if name in _custom_builtins:
196
+ return obj is not _custom_builtins[name].obj
197
+
198
+ return False
199
+
200
+ def _rename_object(self, obj: Any, name: str):
201
+ assert obj in self._obj_to_name
202
+ self._obj_to_name[obj] = name
203
+ self._used_names.add(name)
204
+
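An illustrative sketch (not part of the file) of how ``_Namespace.create_name`` deduplicates candidates and avoids illegal identifiers:

ns = _Namespace()
assert ns.create_name("add", None) == "add"
assert ns.create_name("add", None) == "add_1"     # duplicates get a numeric suffix
assert ns.create_name("for", None) == "for_1"     # keywords are never used as-is
assert ns.create_name("1x", None) == "_1x"        # identifiers cannot start with a digit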
205
+ dtype_abbrs = {
206
+ torch.bfloat16: 'bf16',
207
+ torch.float64: 'f64',
208
+ torch.float32: 'f32',
209
+ torch.float16: 'f16',
210
+ torch.float8_e4m3fn: 'f8e4m3fn',
211
+ torch.float8_e5m2: 'f8e5m2',
212
+ torch.complex32: 'c32',
213
+ torch.complex64: 'c64',
214
+ torch.complex128: 'c128',
215
+ torch.int8: 'i8',
216
+ torch.int16: 'i16',
217
+ torch.int32: 'i32',
218
+ torch.int64: 'i64',
219
+ torch.bool: 'b8',
220
+ torch.uint8: 'u8',
221
+ }
222
+
223
+ @compatibility(is_backward_compatible=True)
224
+ @dataclass
225
+ class PythonCode:
226
+ """
227
+ Represents all the information necessary to exec or save a graph as Python code.
228
+ """
229
+ # Python source code for the forward function definition.
230
+ src: str
231
+ # Values in global scope during execution of `src_def`.
232
+ globals: Dict[str, Any]
233
+ # Optional mapping from the forward function's line number to
234
+ # node index.
235
+ _lineno_map: Optional[Dict[int, Optional[int]]]
236
+
237
+
238
+ def _format_target(base: str, target: str) -> str:
239
+ elems = target.split('.')
240
+ r = base
241
+ for e in elems:
242
+ if not e.isidentifier():
243
+ r = f'getattr({r}, "{e}")'
244
+ else:
245
+ r = f'{r}.{e}'
246
+ return r
247
+
248
+ class _InsertPoint:
249
+ def __init__(self, graph, new_insert):
250
+ self.graph = graph
251
+ self.orig_insert, graph._insert = graph._insert, new_insert
252
+
253
+ def __enter__(self):
254
+ pass
255
+
256
+ def __exit__(self, type, value, tb):
257
+ self.graph._insert = self.orig_insert
258
+
259
+ class _node_list:
260
+ def __init__(self, graph: 'Graph', direction: str = '_next'):
261
+ assert direction in ['_next', '_prev']
262
+ self.graph = graph
263
+ self.direction = direction
264
+
265
+ def __len__(self):
266
+ return self.graph._len
267
+
268
+ def __iter__(self):
269
+ root, direction = self.graph._root, self.direction
270
+ cur = getattr(root, direction)
271
+ while cur is not root:
272
+ if not cur._erased:
273
+ yield cur
274
+ cur = getattr(cur, direction)
275
+
276
+ def __reversed__(self):
277
+ return _node_list(self.graph, '_next' if self.direction == '_prev' else '_prev')
278
+
279
+ class _PyTreeInfo(NamedTuple):
280
+ """
281
+ Contains extra info stored when we're using Pytrees
282
+ """
283
+ orig_args: List[str]
284
+ in_spec: pytree.TreeSpec
285
+ out_spec: Optional[pytree.TreeSpec]
286
+
287
+ # get File:lineno code from stack_trace
288
+ def _parse_stack_trace(stack_trace: str):
289
+ if stack_trace is None:
290
+ return None
291
+ ParsedStackTrace = collections.namedtuple("ParsedStackTrace", ["file", "lineno", "code"])
292
+ pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")
293
+ lines = stack_trace.strip().split('\n')
294
+ # stacktrace should have innermost frame last, so we
295
+ # iterate backwards to find the first line that starts
296
+ # with 'File '
297
+ summary_str = ""
298
+ for idx in range(len(lines) - 2, -1, -1):
299
+ line = lines[idx].strip()
300
+ matches = pattern.match(line)
301
+ if matches:
302
+ file = matches.group(1)
303
+ lineno = matches.group(2)
304
+ # next line should be the code
305
+ code = lines[idx + 1].strip()
306
+ return ParsedStackTrace(file, lineno, code)
307
+ return None
308
+
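For illustration, the shape of input ``_parse_stack_trace`` expects and what it extracts (the file name below is made up):

st = 'File "model.py", line 10, in forward\n    return x + 1'
frame = _parse_stack_trace(st)
assert frame.file == "model.py"
assert frame.lineno == "10"               # note: kept as a string, not an int
assert frame.code == "return x + 1"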
309
+
310
+ @compatibility(is_backward_compatible=False)
311
+ class CodeGen:
312
+ def __init__(self):
313
+ self._body_transformer: Optional[TransformCodeFunc] = None
314
+ self._func_name: str = "forward"
315
+
316
+ def gen_fn_def(self, free_vars: List[str], maybe_return_annotation: str) -> str:
317
+ """
318
+ Given the free variables and a return annotation, generates the beginning of the FX function.
319
+ By default, `gen_fn_def(['a', 'b'], '') == 'def {self._func_name}(a, b):'`
320
+ """
321
+ # If the original function didn't have self as its first argument, we
322
+ # would have added it.
323
+ if len(free_vars) == 0 or free_vars[0] != 'self':
324
+ free_vars.insert(0, 'self')
325
+ return f"def {self._func_name}({', '.join(free_vars)}){maybe_return_annotation}:"
326
+
327
+ def generate_output(self, output_args: Argument) -> str:
328
+ """
329
+ Given the output arguments, generates the return statement of the FX function.
330
+ Note: The returned statement should not be indented.
331
+ """
332
+ return f'return {repr(output_args)}'
333
+
334
+ def process_inputs(self, *args: Any) -> Any:
335
+ """
336
+ Transforms the inputs so that the graph can take them as arguments, as
337
+ non-default codegen may result in the inputs to the function being
338
+ different from the inputs to the graph.
339
+
340
+ If the graph was directly runnable, this invariant should hold true
341
+ `f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)`
342
+ """
343
+ return args
344
+
345
+ def process_outputs(self, outputs: Any) -> Any:
346
+ """
347
+ Transforms the outputs of the graph to be identical to the codegen.
348
+
349
+ See ``process_inputs`` for more details.
350
+ """
351
+ return outputs
352
+
353
+ def additional_globals(self) -> List[Tuple[str, Any]]:
354
+ """
355
+ If your codegen uses extra global values, add tuples of (identifier, reference to the value) here.
356
+ For example, return [('List', typing.List)] if you need ``List`` in the global context.
357
+ """
358
+ return []
359
+
360
+ def _gen_python_code(
361
+ self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False,
362
+ ) -> PythonCode:
363
+ free_vars: List[str] = []
364
+ body: List[str] = []
365
+ globals_: Dict[str, Any] = {}
366
+ wrapped_fns: Dict[str, None] = {}
367
+
368
+ # Wrap string in list to pass by reference
369
+ maybe_return_annotation : List[str] = ['']
370
+
371
+ def add_global(name_hint: str, obj: Any):
372
+ """Add an obj to be tracked as a global.
373
+
374
+ We call this for names that reference objects external to the
375
+ Graph, like functions or types.
376
+
377
+ Returns: the global name that should be used to reference 'obj' in generated source.
378
+ """
379
+ if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device
380
+ # HACK: workaround for how torch custom ops are registered. We
381
+ # can't import them like normal modules so they must retain their
382
+ # fully qualified name.
383
+ return _get_qualified_name(obj)
384
+
385
+ # normalize the name hint to get a proper identifier
386
+ global_name = namespace.create_name(name_hint, obj)
387
+
388
+ if global_name in globals_:
389
+ assert globals_[global_name] is obj
390
+ return global_name
391
+ globals_[global_name] = obj
392
+ return global_name
393
+
394
+ # Pre-fill the globals table with registered builtins.
395
+ for name, (_, obj) in _custom_builtins.items():
396
+ add_global(name, obj)
397
+
398
+ def type_repr(o : Any):
399
+ if o == ():
400
+ # Empty tuple is used for empty tuple type annotation Tuple[()]
401
+ return '()'
402
+
403
+ typename = _type_repr(o)
404
+
405
+ if hasattr(o, '__origin__'):
406
+ # This is a generic type, e.g. typing.List[torch.Tensor]
407
+ origin_type = _origin_type_map.get(o.__origin__, o.__origin__)
408
+ origin_typename = add_global(_type_repr(origin_type), origin_type)
409
+
410
+ if hasattr(o, '__args__'):
411
+ # Assign global names for each of the inner type variables.
412
+ args = [type_repr(arg) for arg in o.__args__]
413
+
414
+ if len(args) == 0:
415
+ # Bare type, such as `typing.Tuple` with no subscript
416
+ # This code-path used in Python < 3.9
417
+ return origin_typename
418
+
419
+ return f'{origin_typename}[{",".join(args)}]'
420
+ else:
421
+ # Bare type, such as `typing.Tuple` with no subscript
422
+ # This code-path used in Python 3.9+
423
+ return origin_typename
424
+
425
+ # Common case: this is a regular module name like 'foo.bar.baz'
426
+ return add_global(typename, o)
427
+
428
+ def _get_repr(arg: Any) -> str:
429
+ # Handle NamedTuples (if it has `_fields`) via add_global.
430
+ if isinstance(arg, tuple) and hasattr(arg, '_fields'):
431
+ qualified_name = _get_qualified_name(type(arg))
432
+ global_name = add_global(qualified_name, type(arg))
433
+ return f"{global_name}{repr(tuple(arg))}"
434
+ elif isinstance(arg, torch._ops.OpOverload):
435
+ qualified_name = _get_qualified_name(arg)
436
+ global_name = add_global(qualified_name, arg)
437
+ return f"{global_name}"
438
+ elif isinstance(arg, enum.Enum):
439
+ cls = arg.__class__
440
+ clsname = add_global(cls.__name__, cls)
441
+ return f"{clsname}.{arg.name}"
442
+ return repr(arg)
443
+
444
+ def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str:
445
+ args_s = ', '.join(_get_repr(a) for a in args)
446
+ kwargs_s = ', '.join(f'{k} = {_get_repr(v)}' for k, v in kwargs.items())
447
+ if args_s and kwargs_s:
448
+ return f'{args_s}, {kwargs_s}'
449
+ return args_s or kwargs_s
450
+
451
+ # Run through reverse nodes and record the first instance of a use
452
+ # of a given node. This represents the *last* use of the node in the
453
+ # execution order of the program, which we will use to free unused
454
+ # values
455
+ node_to_last_use : Dict[Node, Node] = {}
456
+ user_to_last_uses : Dict[Node, List[Node]] = {}
457
+
458
+ def register_last_uses(n : Node, user : Node):
459
+ if n not in node_to_last_use:
460
+ node_to_last_use[n] = user
461
+ user_to_last_uses.setdefault(user, []).append(n)
462
+
463
+ for node in reversed(nodes):
464
+ map_arg(node.args, lambda n: register_last_uses(n, node))
465
+ map_arg(node.kwargs, lambda n: register_last_uses(n, node))
466
+
467
+ def delete_unused_values(user : Node):
468
+ """
469
+ Delete values after their last use. This ensures that values that are
470
+ not used in the remainder of the code are freed and the memory usage
471
+ of the code is optimal.
472
+ """
473
+ if user.op == 'placeholder':
474
+ return
475
+ if user.op == 'output':
476
+ body.append('\n')
477
+ return
478
+ nodes_to_delete = user_to_last_uses.get(user, [])
479
+ if len(nodes_to_delete):
480
+ to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])
481
+ body.append(f'; {to_delete_str}\n')
482
+ else:
483
+ body.append('\n')
484
+
485
+ prev_stacktrace = None
486
+
487
+ def append_stacktrace_summary(node : Node):
488
+ """
489
+ Append a summary of the stacktrace to the generated code. This is
490
+ useful for debugging.
491
+ """
492
+ nonlocal prev_stacktrace
493
+
494
+ if node.op not in {'placeholder', 'output'}:
495
+ if node.stack_trace:
496
+ if node.stack_trace != prev_stacktrace:
497
+ prev_stacktrace = node.stack_trace
498
+ summary_str = ""
499
+
500
+ parsed_stack_trace = _parse_stack_trace(node.stack_trace)
501
+
502
+ if parsed_stack_trace is not None:
503
+ lineno = parsed_stack_trace.lineno
504
+ code = parsed_stack_trace.code
505
+ summary_str = f'File: {parsed_stack_trace.file}:{lineno}, code: {code}'
506
+
507
+ body.append(f'\n# {summary_str}\n')
508
+ elif prev_stacktrace != "":
509
+ prev_stacktrace = ""
510
+ body.append('\n# No stacktrace found for following nodes\n')
511
+
512
+ def stringify_shape(shape : torch.Size) -> str:
513
+ return f"[{', '.join(str(x) for x in shape)}]"
514
+
515
+ def emit_node(node : Node):
516
+ maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}'
517
+
518
+ if verbose:
519
+ # override annotation with more detailed information
520
+ from torch._subclasses.fake_tensor import FakeTensor
521
+ from torch.fx.experimental.proxy_tensor import py_sym_types
522
+ from torch.fx.passes.shape_prop import TensorMetadata
523
+
524
+ meta_val = node.meta.get('val', node.meta.get('tensor_meta', None))
525
+
526
+ # use string as annotation, to make it valid python code
527
+ if isinstance(meta_val, FakeTensor):
528
+ maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
529
+ elif isinstance(meta_val, py_sym_types):
530
+ maybe_type_annotation = f': "Sym({meta_val})"'
531
+ elif isinstance(meta_val, TensorMetadata):
532
+ maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
533
+
534
+ if node.op == 'placeholder':
535
+ assert isinstance(node.target, str)
536
+ maybe_default_arg = '' if not node.args else f' = {_get_repr(node.args[0])}'
537
+ free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}')
538
+ raw_name = node.target.replace('*', '')
539
+ if raw_name != repr(node):
540
+ body.append(f'{repr(node)} = {raw_name}\n')
541
+ return
542
+ elif node.op == 'call_method':
543
+ assert isinstance(node.target, str)
544
+ body.append(
545
+ f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.target)}'
546
+ f'({_format_args(node.args[1:], node.kwargs)})')
547
+ return
548
+ elif node.op == 'call_function':
549
+ assert callable(node.target)
550
+ # pretty print operators
551
+ if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in magic_methods:
552
+ assert isinstance(node.args, tuple)
553
+ body.append(f'{repr(node)}{maybe_type_annotation} = '
554
+ f'{magic_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}')
555
+ return
556
+
557
+ # pretty print inplace operators; required for jit.script to work properly
558
+ # not currently supported in normal FX graphs, but generated by torchdynamo
559
+ if getattr(node.target, "__module__", "") == '_operator' and node.target.__name__ in inplace_methods:
560
+ body.append(f'{inplace_methods[node.target.__name__].format(*(_get_repr(a) for a in node.args))}; '
561
+ f'{repr(node)}{maybe_type_annotation} = {_get_repr(node.args[0])}')
562
+ return
563
+
564
+ qualified_name = _get_qualified_name(node.target)
565
+ global_name = add_global(qualified_name, node.target)
566
+ # special case for getattr: node.args could be 2-argument or 3-argument
567
+ # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value
568
+ if global_name == 'getattr' and \
569
+ isinstance(node.args, tuple) and \
570
+ isinstance(node.args[1], str) and \
571
+ node.args[1].isidentifier() and \
572
+ len(node.args) == 2:
573
+ body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(_get_repr(node.args[0]), node.args[1])}')
574
+ return
575
+ body.append(f'{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})')
576
+ if node.meta.get('is_wrapped', False):
577
+ wrapped_fns.setdefault(global_name)
578
+ return
579
+ elif node.op == 'call_module':
580
+ assert isinstance(node.target, str)
581
+ body.append(f'{repr(node)}{maybe_type_annotation} = '
582
+ f'{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})')
583
+ return
584
+ elif node.op == 'get_attr':
585
+ assert isinstance(node.target, str)
586
+ body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}')
587
+ return
588
+ elif node.op == 'output':
589
+ if node.type is not None:
590
+ maybe_return_annotation[0] = f" -> {type_repr(node.type)}"
591
+ body.append(self.generate_output(node.args[0]))
592
+ return
593
+ raise NotImplementedError(f'node: {node.op} {node.target}')
594
+
595
+ for i, node in enumerate(nodes):
596
+ # NOTE: emit_node does not emit a string with newline. It depends
597
+ # on delete_unused_values to append one
598
+ if verbose:
599
+ append_stacktrace_summary(node)
600
+ # emit a counter comment to keep track of
601
+ # node index, which will be deleted later
602
+ # after going through _body_transformer
603
+ body.append(f"# COUNTER: {i}\n")
604
+ emit_node(node)
605
+ delete_unused_values(node)
606
+
607
+ if len(body) == 0:
608
+ # If the Graph has no non-placeholder nodes, no lines for the body
609
+ # have been emitted. To continue to have valid Python code, emit a
610
+ # single pass statement
611
+ body.append('pass\n')
612
+
613
+
614
+
615
+ if len(wrapped_fns) > 0:
616
+ wrap_name = add_global('wrap', torch.fx.wrap)
617
+ wrap_stmts = '\n'.join([f'{wrap_name}("{name}")' for name in wrapped_fns])
618
+ else:
619
+ wrap_stmts = ''
620
+
621
+ if self._body_transformer:
622
+ body = self._body_transformer(body)
623
+
624
+ for name, value in self.additional_globals():
625
+ add_global(name, value)
626
+
627
+ prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0])
628
+
629
+ # remove counter and generate lineno to node index mapping
630
+ lineno_map: Dict[int, Optional[int]] = {}
631
+ prologue_len = prologue.count('\n') + 1
632
+ new_lines: List[str] = []
633
+ cur_idx = None
634
+ for line in ''.join(body).split('\n'):
635
+ counter = re.search(r"# COUNTER: (\d+)", line)
636
+ if counter and counter.group(1) is not None:
637
+ cur_idx = int(counter.group(1))
638
+ else:
639
+ lineno_map[len(new_lines) + prologue_len] = cur_idx
640
+ new_lines.append(line)
641
+
642
+ code = "\n".join(new_lines).lstrip('\n')
643
+ code = '\n'.join(' ' + line for line in code.split('\n'))
644
+
645
+ fn_code = f"""
646
+ {wrap_stmts}
647
+
648
+ {prologue}
649
+ {code}"""
650
+ return PythonCode(fn_code, globals_, _lineno_map=lineno_map)
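As a small illustration of the ``additional_globals`` hook above, a codegen subclass that needs ``List`` in the generated source might look like this (sketch only):

import typing

class ListCodeGen(CodeGen):
    def additional_globals(self):
        # (identifier, value) pairs are merged into the generated code's globals
        return [('List', typing.List)]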
651
+
652
+
653
+ # Ideally, we'd like to refactor all of the pytree logic into this codegen
654
+ # class. Unfortunately, there are 3 areas we currently need extra logic in FX.
655
+ # 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`.
656
+ # 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec.
657
+ # Since we can't access .graph within the FX forward, we need to copy the attribute to the module.
658
+ # 3. We currently can't register the pytree imports with `add_global` - not sure why.
659
+ class _PyTreeCodeGen(CodeGen):
660
+ def __init__(self, pytree_info: _PyTreeInfo):
661
+ super().__init__()
662
+ self.pytree_info: _PyTreeInfo = pytree_info
663
+
664
+ def process_inputs(self, *inputs: Any) -> Any:
665
+ flat_args = pytree.arg_tree_leaves(*inputs)
666
+ return flat_args
667
+
668
+ def process_outputs(self, out: Any) -> Any:
669
+ if self.pytree_info is None or self.pytree_info.out_spec is None:
670
+ return out
671
+ if not isinstance(out, (list, tuple)):
672
+ out = [out]
673
+ assert(self.pytree_info.out_spec is not None)
674
+ return pytree.tree_unflatten(out, self.pytree_info.out_spec)
675
+
676
+ def gen_fn_def(self, free_vars, maybe_return_annotation):
677
+ # Given a user function/model:
678
+ # myargs = [myargs0, myargs1]
679
+ # mykwargs = {'mykwargs0': ..., 'mykwargs1': ...}
680
+ # def forward(self, mypos, *myargs, mykey=None, **mykwargs):
681
+ #
682
+ # The generated code flattens all keywords into positional arguments for `forward()`
683
+ # e.g forward(self, mypos, myargs0, myargs1, mykey, mykwargs0, mykwargs1):
684
+ #
685
+ # Within `forward`, `tree_flatten_spec` still parses args and kwargs separately
686
+ # e.g. tree_flatten_spec(([mypos, myargs0, myargs1],
687
+ # {'mykey':mykey, 'mykwargs0':mykwargs0, 'mykwargs1':mykwargs1}),
688
+ # self._in_spec)
689
+ #
690
+ # If the user function/model does not have keywords, the dict is suppressed from tree_flatten_spec
691
+ # e.g. tree_flatten_spec([mypos, myargs0, myargs1]), self._in_spec)
692
+ if self.pytree_info is None:
693
+ return super().gen_fn_def(free_vars, maybe_return_annotation)
694
+
695
+ fn_args = self.pytree_info.orig_args
696
+ has_orig_self = (fn_args[0] == 'self') if len(fn_args) > 0 else False
697
+ if has_orig_self:
698
+ free_vars.insert(0, 'self')
699
+ fn_definition = super().gen_fn_def(fn_args[:], maybe_return_annotation)
700
+
701
+ if len(free_vars) > 0: # pytree has placeholders in it
702
+ # when kwargs is present, in_spec is tuple(args, kwargs)
703
+ has_args_kwargs_tuple = self.pytree_info.in_spec.type == tuple and \
704
+ len(self.pytree_info.in_spec.children_specs) == 2 and \
705
+ self.pytree_info.in_spec.children_specs[0].type == tuple and \
706
+ self.pytree_info.in_spec.children_specs[1].type == dict
707
+ fn_kwargs = '{}'
708
+ fn_signature = f"[{', '.join(fn_args)}], self._in_spec"
709
+ if has_args_kwargs_tuple:
710
+ count_args = len(self.pytree_info.in_spec.children_specs[0].children_specs)
711
+ fn_args = self.pytree_info.orig_args[:count_args]
712
+ fn_kwargs = '{' + ', '.join(f"'{k}':{v}" for k, v in zip(
713
+ self.pytree_info.in_spec.children_specs[1].context,
714
+ self.pytree_info.orig_args[count_args:])) + '}'
715
+ fn_signature = f"([{', '.join(fn_args)}], {fn_kwargs}), self._in_spec"
716
+
717
+ # in Python, `var1: annotation1, var2: annotation2 = function_call()` is invalid.
718
+ # we need to split it to two lines:
719
+ # one for annotation: `var1: annotation1; var2: annotation2;` (note the semicolon)
720
+ # one for code: `var1, var2, = function_call()`
721
+ without_annotation = [x.split(":")[0] for x in free_vars]
722
+ has_annotation = [x + "; " for x in free_vars if ":" in x]
723
+ if len(has_annotation) > 0:
724
+ fn_definition += "\n " + "".join(has_annotation) + "\n"
725
+ fn_definition += f"""
726
+ {', '.join(without_annotation)}, = fx_pytree.tree_flatten_spec({fn_signature})"""
727
+ return fn_definition
728
+
729
+ def generate_output(self, output_args):
730
+ if self.pytree_info and self.pytree_info.out_spec:
731
+ return f'return pytree.tree_unflatten({repr(output_args)}, self._out_spec)'
732
+ else:
733
+ return super().generate_output(output_args)
734
+
735
+ @compatibility(is_backward_compatible=True)
736
+ class Graph:
737
+ """
738
+ ``Graph`` is the main data structure used in the FX Intermediate Representation.
739
+ It consists of a series of ``Node`` s, each representing callsites (or other
740
+ syntactic constructs). The list of ``Node`` s, taken together, constitute a
741
+ valid Python function.
742
+
743
+ For example, the following code
744
+
745
+ .. code-block:: python
746
+
747
+ import torch
748
+ import torch.fx
749
+
750
+ class MyModule(torch.nn.Module):
751
+ def __init__(self):
752
+ super().__init__()
753
+ self.param = torch.nn.Parameter(torch.rand(3, 4))
754
+ self.linear = torch.nn.Linear(4, 5)
755
+
756
+ def forward(self, x):
757
+ return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)
758
+
759
+ m = MyModule()
760
+ gm = torch.fx.symbolic_trace(m)
761
+
762
+ Will produce the following Graph::
763
+
764
+ print(gm.graph)
765
+
766
+ .. code-block:: text
767
+
768
+ graph(x):
769
+ %linear_weight : [num_users=1] = self.linear.weight
770
+ %add_1 : [num_users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
771
+ %linear_1 : [num_users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
772
+ %relu_1 : [num_users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
773
+ %sum_1 : [num_users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
774
+ %topk_1 : [num_users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
775
+ return topk_1
776
+
777
+ For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
778
+ """
779
+
780
+ @compatibility(is_backward_compatible=True)
781
+ def __init__(self, owning_module: Optional["GraphModule"] = None, tracer_cls: Optional[Type["Tracer"]] = None,
782
+ tracer_extras: Optional[Dict[str, Any]] = None):
783
+ """
784
+ Construct an empty Graph.
785
+ """
786
+ self._root : Node = Node(self, '', 'root', '', (), {})
787
+ self._used_names : Dict[str, int] = {} # base name -> number
788
+ self._insert = self._root.prepend
789
+ self._len = 0
790
+ self._graph_namespace = _Namespace()
791
+ self._owning_module = owning_module
792
+ self._tracer_cls = tracer_cls
793
+ self._tracer_extras = tracer_extras
794
+ self._codegen = CodeGen()
795
+ self._co_fields : Dict[str, Any] = {}
796
+
797
+ @property
798
+ def owning_module(self):
799
+ return self._owning_module
800
+
801
+ @owning_module.setter
802
+ def owning_module(self, mod: Optional["GraphModule"]):
803
+ self._owning_module = mod
804
+
805
+ @property
806
+ def nodes(self) -> _node_list:
807
+ """
808
+ Get the list of Nodes that constitute this Graph.
809
+
810
+ Note that this ``Node`` list representation is a doubly-linked list. Mutations
811
+ during iteration (e.g. delete a Node, add a Node) are safe.
812
+
813
+ Returns:
814
+
815
+ A doubly-linked list of Nodes. Note that ``reversed`` can be called on
816
+ this list to switch iteration order.
817
+ """
818
+ return _node_list(self)
819
+
820
+ @compatibility(is_backward_compatible=True)
821
+ def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':
822
+ """
823
+ Copy all nodes from a given graph into ``self``.
824
+
825
+ Args:
826
+
827
+ g (Graph): The source graph from which to copy Nodes.
828
+
829
+ val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping
830
+ from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed
831
+ in with values in it already to override copying of certain values.
832
+
833
+ Returns:
834
+
835
+ The value in ``self`` that is now equivalent to the output value in ``g``,
836
+ if ``g`` had an ``output`` node. ``None`` otherwise.
837
+ """
838
+ for node in g.nodes:
839
+ if node in val_map:
840
+ continue
841
+ if node.op == 'output':
842
+ rv = map_arg(node.args[0], lambda n: val_map[n])
843
+ return rv if not return_output_node else (rv, node)
844
+ val_map[node] = self.node_copy(node, lambda n : val_map[n])
845
+ return None
846
+
847
+ def __deepcopy__(self, memo=None) -> 'Graph':
848
+ """
849
+ Explicitly implement __deepcopy__ to prevent excessive recursion depth
850
+ from the default implementation. This uses graph_copy to copy the nodes
851
+ in an iterative way, rather than recursive. It also populates the
852
+ memoization table to prevent unnecessary copies (e.g. references to
853
+ nodes or other parts of the Graph from a custom GraphModule implementation).
854
+ """
855
+ memo = memo if memo else {}
856
+ g = Graph(tracer_cls=self._tracer_cls)
857
+ output_vals = g.graph_copy(self, val_map=memo, return_output_node=True)
858
+ g._codegen = copy.deepcopy(self._codegen)
859
+ assert isinstance(output_vals, tuple)
860
+ output_val, old_output_node = output_vals
861
+ new_output_node = g.output(output_val, type_expr=getattr(old_output_node, 'type', None))
862
+ new_output_node.meta = copy.copy(old_output_node.meta)
863
+ return g
864
+
865
+ @compatibility(is_backward_compatible=True)
866
+ def create_node(self, op: str, target: 'Target',
867
+ args: Optional[Tuple['Argument', ...]] = None,
868
+ kwargs: Optional[Dict[str, 'Argument']] = None,
869
+ name: Optional[str] = None,
870
+ type_expr: Optional[Any] = None) -> Node:
871
+ """
872
+ Create a ``Node`` and add it to the ``Graph`` at the current insert-point.
873
+ Note that the current insert-point can be set via :meth:`Graph.inserting_before`
874
+ and :meth:`Graph.inserting_after`.
875
+
876
+ Args:
877
+ op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr',
878
+ 'call_module', 'placeholder', or 'output'. The semantics of these opcodes are
879
+ described in the ``Graph`` docstring.
880
+
881
+ args (Optional[Tuple[Argument, ...]]): is a tuple of arguments to this node.
882
+
883
+ kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node
884
+
885
+ name (Optional[str]): an optional string name for the ``Node``.
886
+ This will influence the name of the value assigned to in the
887
+ Python generated code.
888
+
889
+ type_expr (Optional[Any]): an optional type annotation representing the
890
+ Python type the output of this node will have.
891
+
892
+ Returns:
893
+
894
+ The newly-created and inserted node.
895
+ """
896
+ assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output')
897
+ args = () if args is None else args
898
+ kwargs = {} if kwargs is None else kwargs
899
+ assert isinstance(args, tuple), "args must be a tuple"
900
+ assert isinstance(kwargs, dict), "kwargs must be a dict"
901
+
902
+ candidate = name if name is not None else self._target_to_str(target)
903
+ name = self._graph_namespace.create_name(candidate, None)
904
+ n = Node(self, name, op, target, args, kwargs, type_expr)
905
+
906
+ self._graph_namespace.associate_name_with_obj(name, n)
907
+
908
+ self._insert(n)
909
+ self._len += 1
910
+ return n
911
+
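As a sketch of how ``create_node`` is typically driven, the snippet below assembles a tiny graph by hand and wraps it in a ``GraphModule``; the opcodes mirror the ones listed in the docstring above, and the graph itself is purely illustrative::

    import operator
    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.create_node('placeholder', 'x')
    # ``operator.add`` is a plain Python callable, so 'call_function' is the right opcode.
    y = g.create_node('call_function', operator.add, args=(x, x))
    g.create_node('output', 'output', args=(y,))

    gm = fx.GraphModule(torch.nn.Module(), g)
    print(gm(torch.ones(2)))  # tensor([2., 2.])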
912
+ @compatibility(is_backward_compatible=False)
913
+ def process_inputs(self, *args):
914
+ """
915
+ Processes args so that they can be passed to the FX graph.
916
+ """
917
+ return self._codegen.process_inputs(*args)
918
+
919
+ @compatibility(is_backward_compatible=False)
920
+ def process_outputs(self, out):
921
+ return self._codegen.process_outputs(out)
922
+
923
+
924
+ @compatibility(is_backward_compatible=True)
925
+ def erase_node(self, to_erase : Node) -> None:
926
+ """
927
+ Erases a ``Node`` from the ``Graph``. Throws an exception if
928
+ there are still users of that node in the ``Graph``.
929
+
930
+ Args:
931
+
932
+ to_erase (Node): The ``Node`` to erase from the ``Graph``.
933
+ """
934
+ if len(to_erase.users) > 0:
935
+ raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
936
+ f'users in the graph: {to_erase.users}!')
937
+ if to_erase._erased:
938
+ warnings.warn(f"erase_node({to_erase}) on an already erased node")
939
+ return
940
+
941
+ to_erase._remove_from_list()
942
+ to_erase._erased = True # iterators may retain handles to erased nodes
943
+ self._len -= 1
944
+
945
+ # Null out this Node's argument nodes so that the Nodes referred to
946
+ # can update their ``users`` accordingly
947
+ new_args = map_arg(to_erase.args, lambda n: None)
948
+ assert isinstance(new_args, tuple)
949
+ to_erase.args = new_args
950
+ new_kwargs = map_arg(to_erase.kwargs, lambda n: None)
951
+ assert isinstance(new_kwargs, dict)
952
+ to_erase.kwargs = new_kwargs
953
+
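A minimal sketch of ``erase_node`` on a node with no remaining users; the graph is constructed only for illustration::

    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.placeholder('x')
    unused = g.call_function(torch.relu, (x,))  # never consumed by any other node
    g.output(x)

    g.erase_node(unused)   # succeeds: ``unused`` has zero users
    # g.erase_node(x)      # would raise: ``x`` is still used by the output node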
954
+ @compatibility(is_backward_compatible=True)
955
+ def inserting_before(self, n: Optional[Node] = None):
956
+ """Set the point at which create_node and companion methods will insert into the graph.
957
+ When used within a 'with' statement, this will temporarily set the insert point and
958
+ then restore it when the with statement exits::
959
+
960
+ with g.inserting_before(n):
961
+ ... # inserting before node n
962
+ ... # insert point restored to what it was previously
963
+ g.inserting_before(n) # set the insert point permanently
964
+
965
+ Args:
966
+
967
+ n (Optional[Node]): The node before which to insert. If None, this will insert at
968
+ the beginning of the entire graph.
969
+
970
+ Returns:
971
+ A resource manager that will restore the insert point on ``__exit__``.
972
+ """
973
+ if n is None:
974
+ return self.inserting_after(self._root)
975
+ assert n.graph == self, "Node to insert before is not in graph."
976
+ return _InsertPoint(self, n.prepend)
977
+
978
+ @compatibility(is_backward_compatible=True)
979
+ def inserting_after(self, n: Optional[Node] = None):
980
+ """Set the point at which create_node and companion methods will insert into the graph.
981
+ When used within a 'with' statement, this will temporarily set the insert point and
982
+ then restore it when the with statement exits::
983
+
984
+ with g.inserting_after(n):
985
+ ... # inserting after node n
986
+ ... # insert point restored to what it was previously
987
+ g.inserting_after(n) # set the insert point permanently
988
+
989
+ Args:
990
+
991
+ n (Optional[Node]): The node after which to insert. If None, this will insert at
992
+ the end of the entire graph.
993
+
994
+ Returns:
995
+ A resource manager that will restore the insert point on ``__exit__``.
996
+ """
997
+ if n is None:
998
+ return self.inserting_before(self._root)
999
+ assert n.graph == self, "Node to insert after is not in graph."
1000
+ return _InsertPoint(self, n.append)
1001
+
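A sketch of the usual splice pattern built on ``inserting_after``: new nodes created inside the ``with`` block land right after the chosen node. ``M`` is a hypothetical example module, and swapping ``torch.relu`` for ``torch.nn.functional.gelu`` is only an illustration of a rewrite::

    import torch
    import torch.fx as fx

    class M(torch.nn.Module):  # hypothetical example module
        def forward(self, x):
            return torch.relu(x) + 1

    gm = fx.symbolic_trace(M())
    for node in gm.graph.nodes:
        if node.op == 'call_function' and node.target is torch.relu:
            # Nodes created inside this block are inserted right after ``node``.
            with gm.graph.inserting_after(node):
                new_node = gm.graph.call_function(
                    torch.nn.functional.gelu, node.args, node.kwargs)
            node.replace_all_uses_with(new_node)
            gm.graph.erase_node(node)
    gm.recompile()
    print(gm.code)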
1002
+ @compatibility(is_backward_compatible=True)
1003
+ def placeholder(self, name: str, type_expr: Optional[Any] = None,
1004
+ default_value : Any = inspect.Signature.empty) -> Node:
1005
+ """
1006
+ Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents
1007
+ a function input.
1008
+
1009
+ Args:
1010
+
1011
+ name (str): A name for the input value. This corresponds to the name
1012
+ of the positional argument to the function this ``Graph`` represents.
1013
+
1014
+ type_expr (Optional[Any]): an optional type annotation representing the
1015
+ Python type the output of this node will have. This is needed in some
1016
+ cases for proper code generation (e.g. when the function is used
1017
+ subsequently in TorchScript compilation).
1018
+
1019
+ default_value (Any): The default value this function argument should take
1020
+ on. NOTE: to allow for `None` as a default value, `inspect.Signature.empty`
1021
+ should be passed as this argument to specify that the parameter does _not_
1022
+ have a default value.
1023
+
1024
+ .. note::
1025
+ The same insertion point and type expression rules apply for this method
1026
+ as ``Graph.create_node``.
1027
+ """
1028
+ args = () if default_value is inspect.Signature.empty else (default_value,)
1029
+ return self.create_node('placeholder', name, args=args, type_expr=type_expr)
1030
+
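A short sketch showing ``placeholder`` with and without a default value; the graph below is built by hand purely for illustration::

    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.placeholder('x')
    scale = g.placeholder('scale', default_value=2)  # becomes ``scale = 2`` in forward()
    g.output(g.call_function(torch.mul, (x, scale)))

    gm = fx.GraphModule(torch.nn.Module(), g)
    print(gm(torch.ones(3)))      # uses the default scale of 2
    print(gm(torch.ones(3), 3))   # overrides the default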
1031
+ @compatibility(is_backward_compatible=True)
1032
+ def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node:
1033
+ """
1034
+ Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the
1035
+ fetch of an attribute from the ``Module`` hierarchy.
1036
+
1037
+ Args:
1038
+
1039
+ qualified_name (str): the fully-qualified name of the attribute to be retrieved.
1040
+ For example, if the traced Module has a submodule named ``foo``, which has a
1041
+ submodule named ``bar``, which has an attribute named ``baz``, the qualified
1042
+ name ``foo.bar.baz`` should be passed as ``qualified_name``.
1043
+
1044
+ type_expr (Optional[Any]): an optional type annotation representing the
1045
+ Python type the output of this node will have.
1046
+
1047
+
1048
+ Returns:
1049
+
1050
+ The newly-created and inserted ``get_attr`` node.
1051
+
1052
+ .. note::
1053
+ The same insertion point and type expression rules apply for this method
1054
+ as ``Graph.create_node``.
1055
+ """
1056
+ def _get_attr_reference_exists(mod: torch.nn.Module, qualified_name: str) -> bool:
1057
+ module_path, _, name = qualified_name.rpartition(".")
1058
+
1059
+ try:
1060
+ submod: torch.nn.Module = mod.get_submodule(module_path)
1061
+ except AttributeError:
1062
+ warnings.warn(f"Failed to fetch module {module_path}!")
1063
+ return False
1064
+
1065
+ if not hasattr(submod, name):
1066
+ return False
1067
+
1068
+ res = getattr(submod, name)
1069
+
1070
+ if (not isinstance(res, torch.nn.Module)
1071
+ and not isinstance(res, torch.nn.Parameter)
1072
+ and name not in submod._buffers):
1073
+ return False
1074
+
1075
+ return True
1076
+
1077
+ if (self.owning_module and
1078
+ not _get_attr_reference_exists(self.owning_module, qualified_name)):
1079
+ warnings.warn("Attempted to insert a get_attr Node with no "
1080
+ "underlying reference in the owning "
1081
+ "GraphModule! Call "
1082
+ "GraphModule.add_submodule to add the "
1083
+ "necessary submodule, "
1084
+ "GraphModule.add_parameter to add the "
1085
+ "necessary Parameter, or "
1086
+ "nn.Module.register_buffer to add the "
1087
+ "necessary buffer", stacklevel=2)
1088
+ return self.create_node('get_attr', qualified_name, type_expr=type_expr)
1089
+
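A sketch of ``get_attr`` against a small root module; the ``weight`` parameter name is an assumption made only for this example::

    import torch
    import torch.fx as fx

    root = torch.nn.Module()
    root.weight = torch.nn.Parameter(torch.randn(4))

    g = fx.Graph()
    x = g.placeholder('x')
    w = g.get_attr('weight')          # fetches ``root.weight`` at runtime
    g.output(g.call_function(torch.add, (x, w)))

    gm = fx.GraphModule(root, g)      # copies ``weight`` into the GraphModule
    print(gm(torch.zeros(4)))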
1090
+ @compatibility(is_backward_compatible=True)
1091
+ def call_module(self,
1092
+ module_name: str,
1093
+ args: Optional[Tuple['Argument', ...]] = None,
1094
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1095
+ type_expr: Optional[Any] = None) -> Node:
1096
+ """
1097
+ Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node
1098
+ represents a call to the forward() function of a ``Module`` in the ``Module``
1099
+ hierarchy.
1100
+
1101
+ Args:
1102
+
1103
+ module_name (str): The qualified name of the ``Module`` in the ``Module``
1104
+ hierarchy to be called. For example, if the traced ``Module`` has a
1105
+ submodule named ``foo``, which has a submodule named ``bar``, the
1106
+ qualified name ``foo.bar`` should be passed as ``module_name`` to
1107
+ call that module.
1108
+
1109
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1110
+ to the called method. Note that this should *not* include a ``self`` argument.
1111
+
1112
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1113
+ to the called method
1114
+
1115
+ type_expr (Optional[Any]): an optional type annotation representing the
1116
+ Python type the output of this node will have.
1117
+
1118
+ Returns:
1119
+
1120
+ The newly-created and inserted ``call_module`` node.
1121
+
1122
+ .. note::
1123
+ The same insertion point and type expression rules apply for this method
1124
+ as :meth:`Graph.create_node`.
1125
+ """
1126
+ if (self.owning_module and
1127
+ self.owning_module.get_submodule(module_name) is None):
1128
+ warnings.warn("Attempted to insert a call_module Node with "
1129
+ "no underlying reference in the owning "
1130
+ "GraphModule! Call "
1131
+ "GraphModule.add_submodule to add the "
1132
+ "necessary submodule")
1133
+ return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)
1134
+
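A sketch of ``call_module`` targeting a submodule by qualified name; the ``linear`` attribute is assumed only for the example::

    import torch
    import torch.fx as fx

    root = torch.nn.Module()
    root.linear = torch.nn.Linear(4, 2)

    g = fx.Graph()
    x = g.placeholder('x')
    y = g.call_module('linear', (x,))   # calls ``root.linear`` on ``x``
    g.output(y)

    gm = fx.GraphModule(root, g)
    print(gm(torch.randn(3, 4)).shape)  # torch.Size([3, 2])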
1135
+ @compatibility(is_backward_compatible=True)
1136
+ def call_method(self,
1137
+ method_name: str,
1138
+ args: Optional[Tuple['Argument', ...]] = None,
1139
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1140
+ type_expr: Optional[Any] = None) -> Node:
1141
+ """
1142
+ Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node
1143
+ represents a call to a given method on the 0th element of ``args``.
1144
+
1145
+ Args:
1146
+
1147
+ method_name (str): The name of the method to apply to the self argument.
1148
+ For example, if args[0] is a ``Node`` representing a ``Tensor``,
1149
+ then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``.
1150
+
1151
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1152
+ to the called method. Note that this *should* include a ``self`` argument.
1153
+
1154
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1155
+ to the called method
1156
+
1157
+ type_expr (Optional[Any]): an optional type annotation representing the
1158
+ Python type the output of this node will have.
1159
+
1160
+ Returns:
1161
+
1162
+ The newly created and inserted ``call_method`` node.
1163
+
1164
+ .. note::
1165
+ The same insertion point and type expression rules apply for this method
1166
+ as :meth:`Graph.create_node`.
1167
+ """
1168
+ return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)
1169
+
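A sketch of ``call_method``, where ``args[0]`` plays the role of ``self``; the clamp example is illustrative only::

    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.placeholder('x')
    # Equivalent to ``x.clamp(min=0)``; the Tensor is the 0th positional argument.
    y = g.call_method('clamp', (x,), {'min': 0})
    g.output(y)

    gm = fx.GraphModule(torch.nn.Module(), g)
    print(gm(torch.tensor([-1.0, 2.0])))  # tensor([0., 2.])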
1170
+ @compatibility(is_backward_compatible=True)
1171
+ def call_function(self,
1172
+ the_function: Callable[..., Any],
1173
+ args: Optional[Tuple['Argument', ...]] = None,
1174
+ kwargs: Optional[Dict[str, 'Argument']] = None,
1175
+ type_expr: Optional[Any] = None) -> Node:
1176
+ """
1177
+ Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node
1178
+ represents a call to a Python callable, specified by ``the_function``.
1179
+
1180
+ Args:
1181
+
1182
+ the_function (Callable[..., Any]): The function to be called. Can be any PyTorch
1183
+ operator, Python function, or member of the ``builtins`` or ``operator``
1184
+ namespaces.
1185
+
1186
+ args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
1187
+ to the called function.
1188
+
1189
+ kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
1190
+ to the called function
1191
+
1192
+ type_expr (Optional[Any]): an optional type annotation representing the
1193
+ Python type the output of this node will have.
1194
+
1195
+ Returns:
1196
+
1197
+ The newly created and inserted ``call_function`` node.
1198
+
1199
+ .. note::
1200
+ The same insertion point and type expression rules apply for this method
1201
+ as :meth:`Graph.create_node`.
1202
+ """
1203
+ return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr)
1204
+
1205
+ @compatibility(is_backward_compatible=True)
1206
+ def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node:
1207
+ """
1208
+ Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from
1209
+ the graph of node to the graph of self. Example::
1210
+
1211
+ # Copying all the nodes in `g` into `new_graph`
1212
+ g : torch.fx.Graph = ...
1213
+ new_graph = torch.fx.Graph()
1214
+ value_remap = {}
1215
+ for node in g.nodes:
1216
+ value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n])
1217
+
1218
+ Args:
1219
+
1220
+ node (Node): The node to copy into ``self``.
1221
+
1222
+ arg_transform (Callable[[Node], Argument]): A function that transforms
1223
+ ``Node`` arguments in node's ``args`` and ``kwargs`` into the
1224
+ equivalent argument in ``self``. In the simplest case, this should
1225
+ retrieve a value out of a table mapping Nodes in the original
1226
+ graph to ``self``.
1227
+ """
1228
+ args = map_arg(node.args, arg_transform)
1229
+ kwargs = map_arg(node.kwargs, arg_transform)
1230
+ assert isinstance(args, tuple)
1231
+ assert isinstance(kwargs, dict)
1232
+ result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type)
1233
+ result_node.meta = copy.copy(node.meta)
1234
+ return result_node
1235
+
1236
+ @compatibility(is_backward_compatible=True)
1237
+ def output(self, result: 'Argument', type_expr: Optional[Any] = None):
1238
+ """
1239
+ Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents
1240
+ a ``return`` statement in Python code. ``result`` is the value that should
1241
+ be returned.
1242
+
1243
+ Args:
1244
+
1245
+ result (Argument): The value to be returned.
1246
+
1247
+ type_expr (Optional[Any]): an optional type annotation representing the
1248
+ Python type the output of this node will have.
1249
+
1250
+ .. note::
1251
+
1252
+ The same insertion point and type expression rules apply for this method
1253
+ as ``Graph.create_node``.
1254
+ """
1255
+ return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr)
1256
+
1257
+ def _target_to_str(self, target : Target) -> str:
1258
+ if callable(target):
1259
+ op = target.__name__
1260
+ else:
1261
+ assert isinstance(target, str)
1262
+ op = target
1263
+ if _is_magic(op):
1264
+ op = op[2:-2]
1265
+ op = _snake_case(op)
1266
+ return op
1267
+
1268
+ @compatibility(is_backward_compatible=True)
1269
+ def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode:
1270
+ """
1271
+ Turn this ``Graph`` into valid Python code.
1272
+
1273
+ Args:
1274
+
1275
+ root_module (str): The name of the root module on which to look-up
1276
+ qualified name targets. This is usually 'self'.
1277
+
1278
+ Returns:
1279
+
1280
+ A PythonCode object, consisting of two fields:
1281
+ src: the Python source code representing the object
1282
+ globals: a dictionary of global names in `src` -> the objects that they reference.
1283
+ """
1284
+ # NOTE: [Graph Namespaces]
1285
+ #
1286
+ # There are two types of symbols in generated Python source code:
1287
+ # locals and globals.
1288
+ # Locals are locally defined by the output of a node in the Graph.
1289
+ # Globals are references to external objects, like functions or types.
1290
+ #
1291
+ # When generating Python code, we need to make sure to name things
1292
+ # appropriately. In particular:
1293
+ # - All names should be unique, to avoid weird shadowing bugs.
1294
+ # - These names need to be consistent, e.g. an object should always be
1295
+ # referenced by the same name.
1296
+ #
1297
+ # To do this, we create a new namespace just for this source. All names
1298
+ # that get printed must come from this namespace.
1299
+ #
1300
+ # Why can't we re-use node.name? Because it was generated within the
1301
+ # namespace `self._graph_namespace`. In order to provide uniqueness
1302
+ # over both locals (node.name) *and* globals, we create a completely
1303
+ # new namespace to put all identifiers in.
1304
+ namespace = _Namespace()
1305
+
1306
+ # Override Node's repr to generate a valid name within our namespace.
1307
+ # Since repr() is designed to produce a valid Python expression, it
1308
+ # makes sense to re-use it. This way, it's easy to print something like
1309
+ # Tuple[Node, Node] by simply calling repr() on it. Node's __repr__ is
1310
+ # implemented cooperatively to allow this.
1311
+ def node_repr(n: Node):
1312
+ return namespace.create_name(n.name, n)
1313
+
1314
+ @contextmanager
1315
+ def override_node_repr(graph: Graph):
1316
+ orig_repr_fns = {}
1317
+ for node in graph.nodes:
1318
+ orig_repr_fns[node] = node._repr_fn
1319
+ node._repr_fn = node_repr
1320
+ try:
1321
+ yield None
1322
+ finally:
1323
+ # restore the original repr functions
1324
+ for node in graph.nodes:
1325
+ node._repr_fn = orig_repr_fns[node]
1326
+
1327
+ with override_node_repr(self):
1328
+ return self._python_code(root_module, namespace, verbose=verbose)
1329
+
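A sketch of inspecting the generated source through ``python_code``; ``M`` is again a hypothetical example module::

    import torch
    import torch.fx as fx

    class M(torch.nn.Module):  # hypothetical example module
        def forward(self, x):
            return torch.sigmoid(x)

    gm = fx.symbolic_trace(M())
    code = gm.graph.python_code(root_module='self')
    print(code.src)              # the generated ``forward`` source
    print(sorted(code.globals))  # external names the source refers to, e.g. 'torch'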
1330
+ def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
1331
+ return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose)
1332
+
1333
+
1334
+ def __str__(self) -> str:
1335
+ """
1336
+ Return a human-readable (not machine-readable) string representation
1337
+ of this Graph
1338
+ """
1339
+ placeholder_names : List[str] = []
1340
+ # This is a one-element array just so ``format_node`` can modify the closed
1341
+ # over value
1342
+ maybe_return_typename : List[str] = ['']
1343
+
1344
+ node_strs = [node.format_node(placeholder_names) for node in self.nodes]
1345
+ param_str = ', '.join(placeholder_names)
1346
+ s = f'graph({param_str}){maybe_return_typename[0]}:'
1347
+ for node_str in node_strs:
1348
+ if node_str:
1349
+ s += '\n ' + node_str
1350
+ return s
1351
+
1352
+ @compatibility(is_backward_compatible=True)
1353
+ def print_tabular(self):
1354
+ """
1355
+ Prints the intermediate representation of the graph in tabular
1356
+ format. Note that this API requires the ``tabulate`` module to be
1357
+ installed.
1358
+ """
1359
+ try:
1360
+ from tabulate import tabulate
1361
+ except ImportError:
1362
+ print("`print_tabular` relies on the library `tabulate`, "
1363
+ "which could not be found on this machine. Run `pip "
1364
+ "install tabulate` to install the library.")
1365
+ raise
1366
+
1367
+ node_specs = [[n.op, n.name, n.target, n.args, n.kwargs]
1368
+ for n in self.nodes]
1369
+ print(tabulate(node_specs,
1370
+ headers=['opcode', 'name', 'target', 'args', 'kwargs']))
1371
+
1372
+ @compatibility(is_backward_compatible=True)
1373
+ def lint(self):
1374
+ """
1375
+ Runs various checks on this Graph to make sure it is well-formed. In
1376
+ particular:
1377
+ - Checks Nodes have correct ownership (owned by this graph)
1378
+ - Checks Nodes appear in topological order
1379
+ - If this Graph has an owning GraphModule, checks that targets
1380
+ exist in that GraphModule
1381
+ """
1382
+
1383
+ # Check topo order
1384
+ def check_arg(arg : Node, n : Optional[Node] = None) -> None:
1385
+ context_str = f' of Node \'{n}\' ' if n else ' '
1386
+ if arg.graph is not self:
1387
+ raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, '
1388
+ f'but was used as an argument! If you are copying nodes from another graph, make '
1389
+ f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}')
1390
+ if arg not in seen_values:
1391
+ raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been '
1392
+ f'defined! Please check that Nodes in the graph are topologically ordered\n{self}')
1393
+
1394
+ seen_names : Set[str] = set()
1395
+ seen_values : Set[Node] = set()
1396
+ for node in self.nodes:
1397
+ if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']:
1398
+ raise RuntimeError(f'Node {node} had unknown opcode {node.op}!')
1399
+ if node.graph is not self:
1400
+ raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!')
1401
+ map_arg(node.args, lambda arg: check_arg(arg, node))
1402
+ map_arg(node.kwargs, lambda arg: check_arg(arg, node))
1403
+ seen_values.add(node)
1404
+
1405
+ if node.name in seen_names:
1406
+ raise RuntimeError(f'Node redefined name {node.name}!')
1407
+ seen_names.add(node.name)
1408
+
1409
+ # Check targets are legit
1410
+ if self.owning_module:
1411
+ for node in self.nodes:
1412
+ if node.op == 'call_function':
1413
+ if not callable(node.target):
1414
+ raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
1415
+ 'a Callable is expected')
1416
+ else:
1417
+ if not isinstance(node.target, str):
1418
+ raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
1419
+ 'a str is expected')
1420
+ if node.op in ['get_attr', 'call_module']:
1421
+ target_atoms = node.target.split('.')
1422
+ m_itr = self.owning_module
1423
+ for i, atom in enumerate(target_atoms):
1424
+ new_m_itr = getattr(m_itr, atom, None)
1425
+ seen_qualname = '.'.join(target_atoms[:i])
1426
+ if new_m_itr is None:
1427
+ raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute '
1428
+ f'{atom} of {seen_qualname}')
1429
+ if (node.op == "call_module"
1430
+ and not isinstance(new_m_itr, torch.nn.Module)):
1431
+ raise RuntimeError(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
1432
+ 'not reference an nn.Module')
1433
+ elif (node.op == "get_attr"
1434
+ and not isinstance(new_m_itr, torch.nn.Module)
1435
+ and not isinstance(new_m_itr, torch.nn.Parameter)
1436
+ and atom not in m_itr._buffers):
1437
+ warnings.warn(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
1438
+ 'not reference an nn.Module, nn.Parameter, or buffer, which is '
1439
+ 'what \'get_attr\' Nodes typically target')
1440
+ else:
1441
+ m_itr = new_m_itr
1442
+
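A short sketch of running ``lint`` as a sanity check after constructing a graph by hand::

    import torch
    import torch.fx as fx

    g = fx.Graph()
    x = g.placeholder('x')
    g.output(g.call_function(torch.relu, (x,)))
    g.lint()   # raises if the graph is ill-formed; passes silently here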
1443
+ @compatibility(is_backward_compatible=True)
1444
+ def eliminate_dead_code(self):
1445
+ """
1446
+ Remove all dead code from the graph, based on each node's number of
1447
+ users, and whether the nodes have any side effects. The graph must be
1448
+ topologically sorted before calling.
1449
+
1450
+ Returns:
1451
+ bool: Whether the graph was changed as a result of the pass.
1452
+
1453
+ Example:
1454
+
1455
+ Before dead code is eliminated, `a` from `a = x + 1` below has no users
1456
+ and thus can be eliminated from the graph without having an effect.
1457
+
1458
+ .. code-block:: python
1459
+
1460
+ def forward(self, x):
1461
+ a = x + 1
1462
+ return x + self.attr_1
1463
+
1464
+ After dead code is eliminated, `a = x + 1` has been removed, and the rest
1465
+ of `forward` remains.
1466
+
1467
+ .. code-block:: python
1468
+
1469
+ def forward(self, x):
1470
+ return x + self.attr_1
1471
+
1472
+ .. warning::
1473
+
1474
+ Dead code elimination has some heuristics to avoid removing
1475
+ side-effectful nodes (see Node.is_impure) but in general coverage
1476
+ is very bad, so you should assume that this method is not sound
1477
+ to call unless you know that your FX graph consists entirely
1478
+ of functional operations.
1479
+ """
1480
+ # Lint the graph first to make sure it is topologically sorted, otherwise
1481
+ # DCE below will not behave as expected.
1482
+ self.lint()
1483
+
1484
+ # Reverse iterate so that when we remove a node, any nodes used as an
1485
+ # input to that node have an updated user count that no longer reflects
1486
+ # the removed node.
1487
+ changed = False
1488
+ for node in reversed(self.nodes):
1489
+ if not node.is_impure() and len(node.users) == 0:
1490
+ self.erase_node(node)
1491
+ changed = True
1492
+
1493
+ return changed
1494
+
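A sketch mirroring the docstring example above: the unused ``a = x + 1`` node is removed by ``eliminate_dead_code``; ``M`` is a hypothetical example module::

    import torch
    import torch.fx as fx

    class M(torch.nn.Module):  # hypothetical example module
        def forward(self, x):
            a = x + 1          # dead: ``a`` is never used
            return x * 2

    gm = fx.symbolic_trace(M())
    changed = gm.graph.eliminate_dead_code()
    gm.recompile()
    print(changed)   # True
    print(gm.code)   # the ``add`` node no longer appears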
1495
+ @compatibility(is_backward_compatible=False)
1496
+ def set_codegen(self, codegen: CodeGen):
1497
+ self._codegen = codegen
1498
+
1499
+ @compatibility(is_backward_compatible=False)
1500
+ def on_generate_code(
1501
+ self,
1502
+ make_transformer: Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]
1503
+ ):
1504
+ """Register a transformer function when python code is generated
1505
+
1506
+ Args:
1507
+ make_transformer (Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]):
1508
+ a function that returns a code transformer to be registered.
1509
+ This function is called by `on_generate_code` to obtain the
1510
+ code transformer.
1511
+
1512
+ This function is also given as its input the currently
1513
+ registered code transformer (or None if nothing is registered),
1514
+ in case it is not desirable to overwrite it. This is useful to
1515
+ chain code transformers together.
1516
+
1517
+ Returns:
1518
+ a context manager that, when used in a `with` statement, will automatically
1519
+ restore the previously registered code transformer.
1520
+
1521
+ Example:
1522
+
1523
+ .. code-block:: python
1524
+
1525
+
1526
+ gm: fx.GraphModule = ...
1527
+
1528
+ # This is a code transformer we want to register. This code
1529
+ # transformer prepends a pdb import and trace statement at the very
1530
+ # beginning of the generated torch.fx code to allow for manual
1531
+ # debugging with the PDB library.
1532
+ def insert_pdb(body):
1533
+ return ["import pdb; pdb.set_trace()\\n", *body]
1534
+
1535
+ # Registers `insert_pdb`, and overwrites the current registered
1536
+ # code transformer (given by `_` to the lambda):
1537
+ gm.graph.on_generate_code(
1538
+ lambda _: insert_pdb
1539
+ )
1540
+
1541
+ # Or alternatively, registers a code transformer which first
1542
+ # runs `body` through existing registered transformer, then
1543
+ # through `insert_pdb`:
1544
+ gm.graph.on_generate_code(
1545
+ lambda current_trans: (
1546
+ lambda body: insert_pdb(
1547
+ current_trans(body) if current_trans
1548
+ else body
1549
+ )
1550
+ )
1551
+ )
1552
+
1553
+ gm.recompile()
1554
+ gm(*inputs) # drops into pdb
1555
+
1556
+
1557
+ This function can also be used as a context manager, with the benefit of
1558
+ automatically restoring the previously registered code transformer:
1559
+
1560
+ .. code-block:: python
1561
+
1562
+ # ... continue from previous example
1563
+
1564
+ with gm.graph.on_generate_code(lambda _: insert_pdb):
1565
+ # do more stuff with `gm`...
1566
+ gm.recompile()
1567
+ gm(*inputs) # drops into pdb
1568
+
1569
+ # now previous code transformer is restored (but `gm`'s code with pdb
1570
+ # remains - that means you can run `gm` with pdb here too, until you
1571
+ # run next `recompile()`).
1572
+ """
1573
+ on_gen_code_old = self._codegen._body_transformer
1574
+ self._codegen._body_transformer = make_transformer(on_gen_code_old)
1575
+
1576
+ @contextlib.contextmanager
1577
+ def on_generate_code_context_manager():
1578
+ try:
1579
+ yield
1580
+ finally:
1581
+ self._codegen._body_transformer = on_gen_code_old
1582
+
1583
+ return on_generate_code_context_manager()
1584
+
1585
+
1586
+ reflectable_magic_methods = {
1587
+ 'add': '{} + {}',
1588
+ 'sub': '{} - {}',
1589
+ 'mul': '{} * {}',
1590
+ 'floordiv': '{} // {}',
1591
+ 'truediv': '{} / {}',
1592
+ 'div': '{} / {}',
1593
+ 'mod': '{} % {}',
1594
+ 'pow': '{} ** {}',
1595
+ 'lshift': '{} << {}',
1596
+ 'rshift': '{} >> {}',
1597
+ 'and_': '{} & {}',
1598
+ 'or_': '{} | {}',
1599
+ 'xor': '{} ^ {}',
1600
+ 'getitem': '{}[{}]',
1601
+ 'matmul': '{} @ {}',
1602
+ }
1603
+
1604
+ magic_methods = dict({
1605
+ 'eq': '{} == {}',
1606
+ 'ne': '{} != {}',
1607
+ 'lt': '{} < {}',
1608
+ 'gt': '{} > {}',
1609
+ 'le': '{} <= {}',
1610
+ 'ge': '{} >= {}',
1611
+ 'pos': '+{}',
1612
+ 'neg': '-{}',
1613
+ 'invert': '~{}'}, **reflectable_magic_methods)
1614
+
1615
+ inplace_methods = {
1616
+ 'iadd': '{} += {}',
1617
+ 'iand': '{} &= {}',
1618
+ 'ifloordiv': '{} //= {}',
1619
+ 'ilshift': '{} <<= {}',
1620
+ 'imod': '{} %= {}',
1621
+ 'imul': '{} *= {}',
1622
+ 'imatmul': '{} @= {}',
1623
+ 'ior': '{} |= {}',
1624
+ 'ipow': '{} **= {}',
1625
+ 'irshift': '{} >>= {}',
1626
+ 'isub': '{} -= {}',
1627
+ 'itruediv': '{} /= {}',
1628
+ 'ixor': '{} ^= {}',
1629
+ 'setitem': '{}[{}] = {}',
1630
+ }
env-llmeval/lib/python3.10/site-packages/torch/fx/graph_module.py ADDED
@@ -0,0 +1,867 @@
1
+ import copy
2
+ import itertools
3
+ import linecache
4
+ import os
5
+ import sys
6
+ import traceback
7
+ import warnings
8
+ from pathlib import Path
9
+ from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.overrides
14
+ from torch.nn.modules.module import _addindent
15
+ from torch.package import Importer, PackageExporter, PackageImporter, sys_importer
16
+
17
+ from ._compatibility import compatibility
18
+ from .graph import _custom_builtins, _is_from_torch, _PyTreeCodeGen, Graph, PythonCode
19
+
20
+ __all__ = [
21
+ "reduce_graph_module",
22
+ "reduce_package_graph_module",
23
+ "reduce_deploy_graph_module",
24
+ "GraphModule",
25
+ ]
26
+
27
+ _USER_PRESERVED_ATTRIBUTES_KEY = "_user_preserved_attributes"
28
+
29
+ # Normal exec loses the source code, however we can work with
30
+ # the linecache module to recover it.
31
+ # Using _exec_with_source will add it to our local cache
32
+ # and then tools like TorchScript will be able to get source info.
33
+ class _EvalCacheLoader:
34
+ def __init__(self):
35
+ self.eval_cache = {}
36
+ self.next_id = 0
37
+
38
+ def cache(self, src: str, globals: Dict[str, Any], co_fields=None):
39
+ """Store the source in a private cache, and add a lazy entry in linecache
40
+ that allows the source to be retrieved by 'filename'.
41
+
42
+ Args:
43
+ src (str): The module source to cache
44
+ globals (dict): The module globals
45
+
46
+ Returns:
47
+ str: The cache key (and dummy filename) generated for src.
48
+ """
49
+
50
+ key = self._get_key()
51
+ if co_fields:
52
+ key += f" from {co_fields['co_filename']}:{co_fields['co_firstlineno']} in {co_fields['co_name']}"
53
+ self.eval_cache[key] = src
54
+
55
+ # Don't mutate globals so that this loader is only used
56
+ # to populate linecache, and doesn't interact with other modules
57
+ # that might check `__loader__`
58
+ globals_copy = globals.copy()
59
+ globals_copy["__file__"] = key
60
+ globals_copy["__name__"] = key
61
+ globals_copy["__loader__"] = self
62
+ linecache.lazycache(key, globals_copy)
63
+
64
+ return key
65
+
66
+ # Part of the loader protocol (PEP 302)
67
+ # linecache will use this method when trying to find source code
68
+ def get_source(self, module_name) -> Optional[str]:
69
+ if module_name in self.eval_cache:
70
+ return self.eval_cache[module_name]
71
+ return None
72
+
73
+ def _get_key(self):
74
+ key = f"<eval_with_key>.{self.next_id}"
75
+ self.next_id += 1
76
+ return key
77
+
78
+
79
+ _loader = _EvalCacheLoader()
80
+
81
+
82
+ def _exec_with_source(src: str, globals: Dict[str, Any], co_fields=None):
83
+ key = _loader.cache(src, globals, co_fields)
84
+ exec(compile(src, key, "exec"), globals)
85
+
86
+
87
+ def _forward_from_src(src: str, globals: Dict[str, Any], co_fields=None):
88
+ return _method_from_src(
89
+ method_name="forward", src=src, globals=globals, co_fields=co_fields
90
+ )
91
+
92
+
93
+ def _method_from_src(
94
+ method_name: str, src: str, globals: Dict[str, Any], co_fields=None
95
+ ) -> Callable:
96
+ # avoid mutating the passed in dict
97
+ globals_copy = globals.copy()
98
+ _exec_with_source(src, globals_copy, co_fields)
99
+ fn = globals_copy[method_name]
100
+ del globals_copy[method_name]
101
+ return fn
102
+
103
+
104
+ def _format_import_statement(name: str, obj: Any, importer: Importer) -> str:
105
+ if name in _custom_builtins:
106
+ return _custom_builtins[name].import_str
107
+ if _is_from_torch(name):
108
+ return "import torch"
109
+ module_name, attr_name = importer.get_name(obj)
110
+ return f"from {module_name} import {attr_name} as {name}"
111
+
112
+
113
+ def _format_import_block(globals: Dict[str, Any], importer: Importer):
114
+ import_strs: Set[str] = set()
115
+ for name, obj in globals.items():
116
+ import_strs.add(_format_import_statement(name, obj, importer))
117
+ # Sort the imports so we have a stable import block that allows us to
118
+ # hash the graph module and get a consistent key for use in a cache.
119
+ return "\n".join(sorted(import_strs))
120
+
121
+
122
+ @compatibility(is_backward_compatible=True)
123
+ def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:
124
+ # BC: attribute name was changed from `code` to `_code` to facilitate
125
+ # making `code` into a property and adding a docstring to it
126
+ fn_src = body.get("_code") or body["code"]
127
+ forward = _forward_from_src(import_block + fn_src, {})
128
+ return _deserialize_graph_module(forward, body)
129
+
130
+
131
+ @compatibility(is_backward_compatible=True)
132
+ def reduce_package_graph_module(
133
+ importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str
134
+ ) -> torch.nn.Module:
135
+ forward = importer.import_module(generated_module_name).forward
136
+ return _deserialize_graph_module(forward, body)
137
+
138
+
139
+ @compatibility(is_backward_compatible=True)
140
+ def reduce_deploy_graph_module(
141
+ importer: PackageImporter, body: Dict[Any, Any], import_block: str
142
+ ) -> torch.nn.Module:
143
+ ns = {}
144
+ ns["__builtins__"] = importer.patched_builtins
145
+ fn_src = body.get("_code")
146
+ assert fn_src is not None
147
+ forward = _forward_from_src(import_block + fn_src, ns)
148
+ return _deserialize_graph_module(forward, body)
149
+
150
+
151
+ # We create a dummy class here because symbolic_trace pulls the forward()
152
+ # function off of the class, rather than the instance. This class is used
153
+ # in _deserialize_graph_module() below.
154
+ class _CodeOnlyModule(torch.nn.Module):
155
+ def __init__(self, body):
156
+ super().__init__()
157
+ self.__dict__ = body
158
+
159
+
160
+ def _deserialize_graph_module(forward, body: Dict[Any, Any], graph_module_cls=None) -> torch.nn.Module:
161
+ """
162
+ Deserialize a GraphModule given the dictionary of the original module,
163
+ using the code to reconstruct the graph. We delete the actual graph before
164
+ saving the dictionary so that changes to the in-memory graph format do not
165
+ get serialized.
166
+ """
167
+
168
+ # Try to retrieve the forward source in a backward-compatible way
169
+ _CodeOnlyModule.forward = forward
170
+
171
+ tracer_cls = body.get("_tracer_cls")
172
+ if tracer_cls is None:
173
+ from ._symbolic_trace import Tracer
174
+
175
+ tracer_cls = Tracer
176
+
177
+ graphmodule_cls_name = body.get("_graphmodule_cls_name", "GraphModule")
178
+
179
+ # This is a workaround for a mypy linter issue related to
180
+ # passing base class as an argument - https://github.com/python/mypy/issues/5865.
181
+ cls_tracer: Any = tracer_cls
182
+
183
+ class KeepModules(cls_tracer):
184
+ # we shouldn't trace into any of the submodules,
185
+ # because they were not traced in the original GraphModule
186
+ def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
187
+ return True
188
+
189
+ com = _CodeOnlyModule(body)
190
+
191
+ tracer_extras = body.get("_tracer_extras", {})
192
+ graph = KeepModules().trace(com, **tracer_extras)
193
+
194
+ # Manually set Tracer class on the reconstructed Graph, to avoid
195
+ # referencing the private local subclass KeepModules.
196
+ graph._tracer_cls = tracer_cls
197
+ if graph_module_cls is None:
198
+ graph_module_cls = GraphModule
199
+ gm = graph_module_cls(com, graph, class_name=graphmodule_cls_name)
200
+
201
+ # The GraphModule constructor only retains attributes referenced by the graph.
202
+ # In this case, our goal is to return a GraphModule as close to identical as possible to the one
203
+ # put into the package. If any additional attributes were present in body,
204
+ # we should keep them.
205
+ for k, v in body.items():
206
+ if not hasattr(gm, k):
207
+ setattr(gm, k, v)
208
+ return gm
209
+
210
+
211
+ # copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
212
+ # This installs empty Modules where none exist yet if they are subpaths of target
213
+ def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
214
+ *prefix, field = target.split(".")
215
+ for item in prefix:
216
+ f = getattr(from_module, item)
217
+ t = getattr(to_module, item, None)
218
+ if f is t:
219
+ # we have already installed one of its parents
220
+ # (e.g. target = root.linear.weight, but we have already installed root.linear)
221
+ # once we install a parent, we no longer need to copy the children
222
+ # since all the needed properties will already be present
223
+ return
224
+
225
+ if t is None:
226
+ t = torch.nn.Module()
227
+ setattr(to_module, item, t)
228
+ from_module, to_module = f, t
229
+
230
+ orig = getattr(from_module, field)
231
+ # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
232
+ # So, we register it as a named buffer in the target module.
233
+ if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
234
+ to_module.register_buffer(field, orig)
235
+ else:
236
+ setattr(to_module, field, orig)
237
+
238
+
239
+ # Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'
240
+ # This installs empty Modules where none exist yet if they are subpaths of target
241
+ def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
242
+ *prefix, field = target.split(".")
243
+ for item in prefix:
244
+ t = getattr(to_module, item, None)
245
+
246
+ if t is None:
247
+ t = torch.nn.Module()
248
+ setattr(to_module, item, t)
249
+ to_module = t
250
+
251
+ # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
252
+ # So, we register it as a named buffer in the target module.
253
+ if isinstance(from_obj, torch.Tensor) and not isinstance(
254
+ from_obj, torch.nn.Parameter
255
+ ):
256
+ to_module.register_buffer(field, from_obj)
257
+ else:
258
+ setattr(to_module, field, from_obj)
259
+
260
+
261
+ class _WrappedCall:
262
+ def __init__(self, cls, cls_call):
263
+ self.cls = cls
264
+ self.cls_call = cls_call
265
+
266
+ # Previously, if an error occurred when valid
267
+ # symbolically-traced code was run with an invalid input, the
268
+ # user would see the source of the error as coming from
269
+ # `File "<eval_with_key_N">`, where N is some number. We use
270
+ # this function to generate a more informative error message. We
271
+ # return the traceback itself, a message explaining that the
272
+ # error occurred in a traced Module's generated forward
273
+ # function, and five lines of context surrounding the faulty
274
+ # line
275
+ @staticmethod
276
+ def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
277
+ # auxiliary variables (for readability)
278
+ err_lineno = frame_summary.lineno
279
+ assert err_lineno is not None
280
+ line = frame_summary.line
281
+ assert line is not None
282
+ err_line_len = len(line)
283
+ all_src_lines = linecache.getlines(frame_summary.filename)
284
+
285
+ # constituent substrings of the error message
286
+ tb_repr = traceback.format_exc()
287
+ custom_msg = (
288
+ "Call using an FX-traced Module, "
289
+ f"line {err_lineno} of the traced Module's "
290
+ "generated forward function:"
291
+ )
292
+ before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
293
+ marker = "~" * err_line_len + "~~~ <--- HERE"
294
+ err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])
295
+
296
+ # joined message
297
+ return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])
298
+
299
+ def __call__(self, obj, *args, **kwargs):
300
+ try:
301
+ if self.cls_call is not None:
302
+ return self.cls_call(obj, *args, **kwargs)
303
+ else:
304
+ return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
305
+ except Exception as e:
306
+ assert e.__traceback__
307
+ topmost_framesummary: traceback.FrameSummary = (
308
+ traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1]
309
+ ) # type: ignore[arg-type]
310
+ if "eval_with_key" in topmost_framesummary.filename:
311
+ print(
312
+ _WrappedCall._generate_error_message(topmost_framesummary),
313
+ file=sys.stderr,
314
+ )
315
+ raise e.with_traceback(None) # noqa: TRY200
316
+ else:
317
+ raise e
318
+
319
+
320
+ @compatibility(is_backward_compatible=True)
321
+ class GraphModule(torch.nn.Module):
322
+ """
323
+ GraphModule is an nn.Module generated from an fx.Graph. GraphModule has a
324
+ ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated
325
+ from that ``graph``.
326
+
327
+ .. warning::
328
+
329
+ When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
330
+ regenerated. However, if you edit the contents of the ``graph`` without reassigning
331
+ the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
332
+ code.
333
+ """
334
+
335
+ def __new__(cls: "Type[GraphModule]", *args, **kwargs):
336
+ # each instance of a graph module needs its own forward method
337
+ # so create a new singleton class for each instance.
338
+ # it is a subclass of the user-defined class, the only difference
339
+ # is an extra layer to install the forward method
340
+
341
+ # address issue described at https://github.com/pytorch/pytorch/issues/63883
342
+ # in other words, traverse class hierarchy to fix the redundant class definition problem
343
+ for t in cls.__mro__:
344
+ c = t.__qualname__.split(".")[-1]
345
+ if c != "GraphModuleImpl":
346
+ cls = t
347
+ break
348
+
349
+ class GraphModuleImpl(cls): # type: ignore[misc, valid-type]
350
+ pass
351
+
352
+ return super().__new__(GraphModuleImpl)
353
+
354
+ @compatibility(is_backward_compatible=True)
355
+ def __init__(
356
+ self,
357
+ root: Union[torch.nn.Module, Dict[str, Any]],
358
+ graph: Graph,
359
+ class_name: str = "GraphModule",
360
+ ):
361
+ """
362
+ Construct a GraphModule.
363
+
364
+ Args:
365
+
366
+ root (Union[torch.nn.Module, Dict[str, Any]):
367
+ ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
368
+ In the case that ``root`` is a Module, any references to Module-based objects (via qualified
369
+ name) in the Graph's Nodes' ``target`` field will be copied over from the respective place
370
+ within ``root``'s Module hierarchy into the GraphModule's module hierarchy.
371
+ In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be
372
+ looked up directly in the dict's keys. The object mapped to by the Dict will be copied
373
+ over into the appropriate place within the GraphModule's module hierarchy.
374
+
375
+ graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation
376
+
377
+ class_name (str): ``class_name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all
378
+ error messages will report as originating from ``GraphModule``. It may be helpful to set this
379
+ to ``root``'s original name or a name that makes sense within the context of your transform.
380
+ """
381
+ super().__init__()
382
+ self.__class__.__name__ = class_name
383
+ if isinstance(root, torch.nn.Module):
384
+ if hasattr(root, "training"):
385
+ self.training = root.training
386
+
387
+ # When we pickle/unpickle graph module, we don't want to drop any module or attributes.
388
+ if isinstance(root, _CodeOnlyModule):
389
+ for k, _ in root.named_children():
390
+ _copy_attr(root, self, k)
391
+
392
+ for k, _ in root.named_buffers():
393
+ _copy_attr(root, self, k)
394
+
395
+ for k, _ in root.named_parameters():
396
+ _copy_attr(root, self, k)
397
+
398
+ for node in graph.nodes:
399
+ if node.op in ["get_attr", "call_module"]:
400
+ assert isinstance(node.target, str)
401
+ _copy_attr(root, self, node.target)
402
+ elif isinstance(root, dict):
403
+ targets_to_copy = []
404
+ for node in graph.nodes:
405
+ if node.op in ["get_attr", "call_module"]:
406
+ assert isinstance(node.target, str)
407
+ if node.target not in root:
408
+ raise RuntimeError(
409
+ "Node "
410
+ + str(node)
411
+ + " referenced target "
412
+ + node.target
413
+ + " but that target was not provided in ``root``!"
414
+ )
415
+ targets_to_copy.append(node.target)
416
+ # Sort targets in ascending order of the # of atoms.
417
+ # This will ensure that less deeply nested attributes are assigned
418
+ # before more deeply nested attributes. For example, foo.bar
419
+ # will be assigned before foo.bar.baz. Otherwise, we might assign
420
+ # the user-provided ``foo.bar`` and wipe out the previously-assigned
421
+ # ``foo.bar.baz``
422
+ targets_to_copy.sort(key=lambda t: t.count("."))
423
+ for target_to_copy in targets_to_copy:
424
+ _assign_attr(root[target_to_copy], self, target_to_copy)
425
+ else:
426
+ raise RuntimeError("Unsupported type " + str(root) + " passed for root!")
427
+
428
+ self.graph = graph
429
+
430
+ # Store the Tracer class responsible for creating a Graph separately as part of the
431
+ # GraphModule state, except when the Tracer is defined in a local namespace.
432
+ # Locally defined Tracers are not pickleable. This is needed because torch.package will
433
+ # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
434
+ # to re-create the Graph during deserialization.
435
+ self._tracer_cls = None
436
+ if (
437
+ self.graph._tracer_cls
438
+ and "<locals>" not in self.graph._tracer_cls.__qualname__
439
+ ):
440
+ self._tracer_cls = self.graph._tracer_cls
441
+
442
+ self._tracer_extras = {}
443
+ if self.graph._tracer_extras:
444
+ self._tracer_extras = self.graph._tracer_extras
445
+
446
+ # Dictionary to store metadata
447
+ self.meta: Dict[str, Any] = {}
448
+
449
+ # TorchScript breaks trying to compile the graph setter because of the
450
+ # continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
451
+ #
452
+ # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
453
+ __jit_unused_properties__ = ["graph"]
454
+
455
+ @property
456
+ def graph(self) -> Graph:
457
+ """
458
+ Return the ``Graph`` underlying this ``GraphModule``
459
+ """
460
+ return self._graph
461
+
462
+ @graph.setter
463
+ def graph(self, g: Graph) -> None:
464
+ """
465
+ Set the underlying ``Graph`` for this ``GraphModule``. This will internally
466
+ recompile the ``GraphModule`` so that the generated ``forward()`` function
467
+ corresponds to ``g``
468
+ """
469
+ assert isinstance(g, Graph), f"Expected a Graph instance, but got {type(g)}"
470
+ self._graph = g
471
+ g.owning_module = self
472
+ self.recompile()
473
+
474
+ @compatibility(is_backward_compatible=False)
475
+ def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"):
476
+ """Dumps out module to ``folder`` with ``module_name`` so that it can be
477
+ imported with ``from <folder> import <module_name>``
478
+
479
+ Args:
480
+
481
+ folder (Union[str, os.PathLike]): The folder to write the code out to
482
+
483
+ module_name (str): Top-level name to use for the ``Module`` while
484
+ writing out the code
485
+ """
486
+ folder = Path(folder)
487
+ Path(folder).mkdir(exist_ok=True)
488
+ torch.save(self.state_dict(), folder / "state_dict.pt")
489
+ tab = " " * 4
490
+ custom_builtins = "\n".join([v.import_str for v in _custom_builtins.values()])
491
+ model_str = f"""
492
+ import torch
493
+ {custom_builtins}
494
+
495
+ from torch.nn import *
496
+ class {module_name}(torch.nn.Module):
497
+ def __init__(self):
498
+ super().__init__()
499
+ """
500
+
501
+ def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
502
+ safe_reprs = [
503
+ nn.Linear,
504
+ nn.Conv1d,
505
+ nn.Conv2d,
506
+ nn.Conv3d,
507
+ nn.BatchNorm1d,
508
+ nn.BatchNorm2d,
509
+ nn.BatchNorm3d,
510
+ ]
511
+ if type(module) in safe_reprs:
512
+ return f"{module.__repr__()}"
513
+ else:
514
+ return None
515
+
516
+ blobified_modules = []
517
+ for module_name, module in self.named_children():
518
+ module_str = _gen_model_repr(module_name, module)
519
+ if module_str is None:
520
+ module_file = folder / f"{module_name}.pt"
521
+ torch.save(module, module_file)
522
+ blobified_modules.append(module_name)
523
+ module_repr = module.__repr__().replace("\r", " ").replace("\n", " ")
524
+ module_str = f"torch.load(r'{module_file}') # {module_repr}"
525
+ model_str += f"{tab*2}self.{module_name} = {module_str}\n"
526
+
527
+ for buffer_name, buffer in self._buffers.items():
528
+ if buffer is None:
529
+ continue
530
+ model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"
531
+
532
+ for param_name, param in self._parameters.items():
533
+ if param is None:
534
+ continue
535
+ model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"
536
+
537
+ model_str += (
538
+ f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
539
+ )
540
+ model_str += f"{_addindent(self.code, 4)}\n"
541
+
542
+ module_file = folder / "module.py"
543
+ module_file.write_text(model_str)
544
+
545
+ init_file = folder / "__init__.py"
546
+ init_file.write_text("from .module import *")
547
+
548
+ if len(blobified_modules) > 0:
549
+ warnings.warn(
550
+ "Was not able to save the following children modules as reprs -"
551
+ f"saved as pickled files instead: {blobified_modules}"
552
+ )
553
+
554
+ @compatibility(is_backward_compatible=True)
555
+ def add_submodule(self, target: str, m: torch.nn.Module) -> bool:
556
+ """
557
+ Adds the given submodule to ``self``.
558
+
559
+ This installs empty Modules where none exist yet if they are
560
+ subpaths of ``target``.
561
+
562
+ Args:
563
+ target: The fully-qualified string name of the new submodule
564
+ (See example in ``nn.Module.get_submodule`` for how to
565
+ specify a fully-qualified string.)
566
+ m: The submodule itself; the actual object we want to
567
+ install in the current Module
568
+
569
+ Return:
570
+ bool: Whether or not the submodule could be inserted. For
571
+ this method to return True, each object in the chain
572
+ denoted by ``target`` must either a) not exist yet,
573
+ or b) reference an ``nn.Module`` (not a parameter or
574
+ other attribute)
575
+ """
576
+ *prefix, field = target.split(".")
577
+ mod: torch.nn.Module = self
578
+
579
+ for item in prefix:
580
+
581
+ submod = getattr(mod, item, None)
582
+
583
+ if submod is None:
584
+ submod = torch.nn.Module()
585
+ setattr(mod, item, submod)
586
+
587
+ if not isinstance(submod, torch.nn.Module):
588
+ return False
589
+
590
+ mod = submod
591
+
592
+ mod.add_module(field, m)
593
+ return True
594
+
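A sketch of ``add_submodule`` installing intermediate empty Modules along the way; the ``extra.relu`` path and the traced ``Sequential`` are assumptions made only for the example::

    import torch
    import torch.fx as fx

    gm = fx.symbolic_trace(torch.nn.Sequential(torch.nn.Linear(2, 2)))
    ok = gm.add_submodule('extra.relu', torch.nn.ReLU())
    print(ok)                              # True
    print(gm.get_submodule('extra.relu'))  # ReLU()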
595
+ @compatibility(is_backward_compatible=True)
596
+ def delete_submodule(self, target: str) -> bool:
597
+ """
598
+ Deletes the given submodule from ``self``.
599
+
600
+ The module will not be deleted if ``target`` is not a valid
601
+ target.
602
+
603
+ Args:
604
+ target: The fully-qualified string name of the new submodule
605
+ (See example in ``nn.Module.get_submodule`` for how to
606
+ specify a fully-qualified string.)
607
+
608
+ Returns:
609
+ bool: Whether or not the target string referenced a
610
+ submodule we want to delete. A return value of ``False``
611
+ means that the ``target`` was not a valid reference to
612
+ a submodule.
613
+ """
614
+ atoms = target.split(".")
615
+ path, target_submod = atoms[:-1], atoms[-1]
616
+ mod: torch.nn.Module = self
617
+
618
+ # Get the parent module
619
+ for item in path:
620
+
621
+ if not hasattr(mod, item):
622
+ return False
623
+
624
+ mod = getattr(mod, item)
625
+
626
+ if not isinstance(mod, torch.nn.Module):
627
+ return False
628
+
629
+ if not hasattr(mod, target_submod):
630
+ return False
631
+
632
+ if not isinstance(getattr(mod, target_submod), torch.nn.Module):
633
+ return False
634
+
635
+ delattr(mod, target_submod)
636
+ return True
637
+
638
+ @compatibility(is_backward_compatible=True)
639
+ def delete_all_unused_submodules(self) -> None:
640
+ """
641
+ Deletes all unused submodules from ``self``.
642
+
643
+ A Module is considered "used" if any one of the following is
644
+ true:
645
+ 1. It has children that are used
646
+ 2. Its forward is called directly via a ``call_module`` node
647
+ 3. It has a non-Module attribute that is used from a
648
+ ``get_attr`` node
649
+
650
+ This method can be called to clean up an ``nn.Module`` without
651
+ manually calling ``delete_submodule`` on each unused submodule.
652
+ """
653
+ used: List[str] = []
654
+
655
+ for node in self.graph.nodes:
656
+
657
+ if node.op == "call_module" or node.op == "get_attr":
658
+
659
+ # A list of strings representing the different parts
660
+ # of the path. For example, `foo.bar.baz` gives us
661
+ # ["foo", "bar", "baz"]
662
+ fullpath = node.target.split(".")
663
+
664
+ # If we're looking at multiple parts of a path, join
665
+ # them with a dot. Otherwise, return that single
666
+ # element without doing anything to it.
667
+ def join_fn(x: str, y: str) -> str:
668
+ return ".".join([x, y] if y else [x])
669
+
670
+ # Progressively collect all the names of intermediate
671
+ # modules. For example, if we have the target
672
+ # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and
673
+ # `foo.bar.baz` to the list.
674
+ for path in itertools.accumulate(fullpath, join_fn):
675
+ used.append(path)
676
+
677
+ # For a `call_module` node, also register all recursive submodules
678
+ # as used
679
+ if node.op == "call_module":
680
+ try:
681
+ submod = self.get_submodule(node.target)
682
+
683
+ for submod_name, _ in submod.named_modules():
684
+ if submod_name != "":
685
+ used.append(".".join([node.target, submod_name]))
686
+ except AttributeError:
687
+ # Node referenced nonexistent submodule, don't need to
688
+ # worry about GCing anything
689
+ pass
690
+
691
+ to_delete = [name for name, _ in self.named_modules() if name not in used]
692
+
693
+ for name in to_delete:
694
+ self.delete_submodule(name)
695
+
696
+ @property
697
+ def code(self) -> str:
698
+ """
699
+ Return the Python code generated from the ``Graph`` underlying this
700
+ ``GraphModule``.
701
+ """
702
+ if not hasattr(self, "_code"):
703
+ raise RuntimeError(
704
+ "Code has not been generated! Please report a bug to PyTorch"
705
+ )
706
+ return self._code
707
+
708
+ @compatibility(is_backward_compatible=True)
709
+ def recompile(self) -> PythonCode:
710
+ """
711
+ Recompile this GraphModule from its ``graph`` attribute. This should be
712
+ called after editing the contained ``graph``, otherwise the generated
713
+ code of this ``GraphModule`` will be out of date.
714
+ """
715
+ if isinstance(self._graph._codegen, _PyTreeCodeGen):
716
+ self._in_spec = self._graph._codegen.pytree_info.in_spec
717
+ self._out_spec = self._graph._codegen.pytree_info.out_spec
718
+ python_code = self._graph.python_code(root_module="self")
719
+ self._code = python_code.src
720
+ self._lineno_map = python_code._lineno_map
721
+
722
+ cls = type(self)
723
+ co_fields = self._graph._co_fields if hasattr(self._graph, "_co_fields") else {}
724
+ cls.forward = _forward_from_src(self._code, python_code.globals, co_fields)
725
+
726
+ # Determine whether this class explicitly defines a __call__ implementation
727
+ # to wrap. If it does, save it in order to have wrapped_call invoke it.
728
+ # If it does not, wrapped_call can use a dynamic call to super() instead.
729
+ # In most cases, super().__call__ should be torch.nn.Module.__call__.
730
+ # We do not want to hold a reference to Module.__call__ here; doing so will
731
+ # bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
732
+ cls_call = cls.__call__ if "__call__" in vars(cls) else None
733
+
734
+ if "_wrapped_call" not in vars(cls):
735
+ cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined]
736
+
737
+ def call_wrapped(self, *args, **kwargs):
738
+ return self._wrapped_call(self, *args, **kwargs)
739
+
740
+ cls.__call__ = call_wrapped # type: ignore[method-assign]
741
+
742
+ return python_code
743
+
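# Illustrative sketch (not part of the diff): edits to `gm.graph` only take
# effect in `gm.forward` after `recompile()` regenerates the Python code.
import operator
import torch
import torch.fx

gm = torch.fx.symbolic_trace(lambda x: x + 1)
for node in gm.graph.nodes:
    if node.op == "call_function" and node.target is operator.add:
        node.target = operator.mul              # turn `x + 1` into `x * 1`
gm.recompile()                                  # refresh gm.code / gm.forward
assert torch.equal(gm(torch.tensor(3.0)), torch.tensor(3.0))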
744
+ # Passing Tracer as argument allows subclasses extending fx.GraphModule
745
+ # to define their own Tracer (extending fx.Tracer).
746
+ def __reduce_deploy__(self, importer: Importer):
747
+ dict_without_graph = self.__dict__.copy()
748
+ dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
749
+ del dict_without_graph["_graph"]
750
+
751
+ python_code = self.recompile()
752
+ import_block = _format_import_block(python_code.globals, importer)
753
+ return (reduce_deploy_graph_module, (dict_without_graph, import_block))
754
+
755
+ def __reduce_package__(self, exporter: PackageExporter):
756
+ dict_without_graph = self.__dict__.copy()
757
+ dict_without_graph["_graphmodule_cls_name"] = self.__class__.__name__
758
+ del dict_without_graph["_graph"]
759
+
760
+ generated_module_name = f"fx-generated._{exporter.get_unique_id()}"
761
+ python_code = self.recompile()
762
+ import_block = _format_import_block(python_code.globals, exporter.importer)
763
+ module_code = import_block + self.code
764
+ exporter.save_source_string(generated_module_name, module_code)
765
+ return (
766
+ reduce_package_graph_module,
767
+ (dict_without_graph, generated_module_name),
768
+ )
769
+
770
+ def __reduce__(self):
771
+ """
772
+ Serialization of GraphModule. We serialize only the generated code, not
773
+ the underlying ``Graph``. This is because ``Graph`` does not have on-disk
774
+ backward-compatibility guarantees, whereas Python source code does.
775
+ On the deserialization side, we symbolically trace through the generated
776
+ code to regenerate the underlying ``Graph``
777
+ """
778
+ dict_without_graph = self.__dict__.copy()
779
+ python_code = self.recompile()
780
+ import_block = _format_import_block(python_code.globals, sys_importer)
781
+ del dict_without_graph["_graph"]
782
+ return (reduce_graph_module, (dict_without_graph, import_block))
783
+
784
+ def _deepcopy_init(self):
785
+ return GraphModule.__init__
786
+
787
+ # because __reduce__ is defined for serialization,
788
+ # we need to define deepcopy otherwise it will call __reduce__
789
+ # and cause symbolic tracing to occur every time we try to copy the object
790
+ def __deepcopy__(self, memo):
791
+ res = type(self).__new__(type(self))
792
+ memo[id(self)] = res
793
+ fake_mod = _CodeOnlyModule(copy.deepcopy(self.__dict__, memo))
794
+ self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["_graph"])
795
+ # hooks are lost during `GraphModule.__init__`, so we need to copy over
796
+ # them explicitly, note right now we are only copying state_dict related
797
+ # hooks, to reduce bc-related issues, we can copy forward/backward related
798
+ # hooks in the future as well if needed
799
+ extra_preserved_attrs = [
800
+ "_state_dict_hooks",
801
+ "_load_state_dict_pre_hooks",
802
+ "_load_state_dict_post_hooks",
803
+ ]
804
+ for attr in extra_preserved_attrs:
805
+ if attr in self.__dict__:
806
+ setattr(res, attr, copy.deepcopy(self.__dict__[attr], memo))
807
+ res.meta = copy.deepcopy(getattr(self, "meta", {}), memo)
808
+ if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
809
+ for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
810
+ setattr(res, attr_name, attr)
811
+ return res
812
+
813
+ def __copy__(self):
814
+ res = GraphModule(self, self.graph)
815
+ res.meta = getattr(self, "meta", {})
816
+ return res
817
+
818
+ @compatibility(is_backward_compatible=False)
819
+ def print_readable(self, print_output=True):
820
+ """
821
+ Return the Python code generated for the current GraphModule and its children GraphModules.
822
+ """
823
+ verbose_python_code = self._graph.python_code(root_module="self", verbose=True)
824
+ module_code = verbose_python_code.src
825
+ module_code = module_code.lstrip("\n")
826
+ module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code
827
+ module_code = _addindent(module_code, 4)
828
+
829
+ submodule_code_list = [""]
830
+ for submodule in self.children():
831
+ if isinstance(submodule, GraphModule):
832
+ submodule_code_list.append(submodule.print_readable(print_output=False))
833
+ submodule_code = "\n".join(submodule_code_list)
834
+ submodule_code = _addindent(submodule_code, 4)
835
+
836
+ output = module_code + submodule_code
837
+ if print_output:
838
+ print(module_code + submodule_code)
839
+ return output
840
+
841
+ def __str__(self) -> str:
842
+ orig_str = super().__str__()
843
+ print_readable_reminder = (
844
+ "# To see more debug info, please use `graph_module.print_readable()`"
845
+ )
846
+ return "\n".join([orig_str, self._code, print_readable_reminder])
847
+
848
+ def _replicate_for_data_parallel(self):
849
+ new_gm = self.__copy__()
850
+ new_gm._is_replica = True
851
+ return new_gm
852
+
853
+
854
+ # workarounds for issues in __torch_function__
855
+
856
+ # WAR for __torch_function__ not handling tensor lists,
857
+ # fix is in https://github.com/pytorch/pytorch/pull/34725
858
+ # orig_cat = torch.cat
859
+ # def patched_cat(*args, **kwargs):
860
+ # tensors = args[0]
861
+ # for t in tensors:
862
+ # if isinstance(t, Proxy):
863
+ # return t.__torch_function__(patched_cat, (), args, kwargs)
864
+ # return orig_cat(*args, **kwargs)
865
+ # patched_cat.__module__ = 'torch'
866
+ # patched_cat.__name__ = 'cat'
867
+ # torch.cat = patched_cat
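# Illustrative sketch (not part of the diff): a GraphModule survives both pickling
# (via __reduce__, which serializes the generated code and re-traces it on load)
# and deepcopy (via __deepcopy__, which copies the Graph directly).
import copy
import pickle
import torch
import torch.fx

def f(x):
    return torch.relu(x) + 1

gm = torch.fx.symbolic_trace(f)
restored = pickle.loads(pickle.dumps(gm))   # rebuilt by re-tracing the generated code
cloned = copy.deepcopy(gm)                  # rebuilt from a deep copy of the Graph
x = torch.randn(3)
assert torch.equal(restored(x), gm(x)) and torch.equal(cloned(x), gm(x))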
env-llmeval/lib/python3.10/site-packages/torch/fx/immutable_collections.py ADDED
@@ -0,0 +1,54 @@
1
+ from typing import Any, Dict, Iterable, List, Tuple
2
+
3
+ from ._compatibility import compatibility
4
+ from torch.utils._pytree import Context, register_pytree_node
5
+
6
+ __all__ = ["immutable_list", "immutable_dict"]
7
+
8
+ _help_mutation = """\
9
+ If you are attempting to modify the kwargs or args of a torch.fx.Node object,
10
+ instead create a new copy of it and assign the copy to the node:
11
+ new_args = ... # copy and mutate args
12
+ node.args = new_args
13
+ """
14
+
15
+ def _no_mutation(self, *args, **kwargs):
16
+ raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation. {_help_mutation}")
17
+
18
+ def _create_immutable_container(base, mutable_functions):
19
+ container = type('immutable_' + base.__name__, (base,), {})
20
+ for attr in mutable_functions:
21
+ setattr(container, attr, _no_mutation)
22
+ return container
23
+
24
+ immutable_list = _create_immutable_container(list,
25
+ ['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append',
26
+ 'clear', 'extend', 'insert', 'pop', 'remove'])
27
+ immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),))
28
+ immutable_list.__hash__ = lambda self: hash(tuple(self))
29
+
30
+ compatibility(is_backward_compatible=True)(immutable_list)
31
+
32
+ immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update'])
33
+ immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),))
34
+ immutable_dict.__hash__ = lambda self: hash(tuple(self.items()))
35
+ compatibility(is_backward_compatible=True)(immutable_dict)
36
+
37
+
38
+ # Register immutable collections for PyTree operations
39
+
40
+ def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
41
+ return list(d.values()), list(d.keys())
42
+
43
+ def _immutable_dict_unflatten(values: Iterable[Any], context: Context) -> Dict[Any, Any]:
44
+ return immutable_dict(dict(zip(context, values)))
45
+
46
+ def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
47
+ return d, None
48
+
49
+ def _immutable_list_unflatten(values: Iterable[Any], context: Context) -> List[Any]:
50
+ return immutable_list(values)
51
+
52
+
53
+ register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten)
54
+ register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten)
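# Illustrative sketch (not part of the diff): Node.kwargs is stored as an
# immutable_dict (and list-valued arguments as immutable_list), so in-place
# mutation raises and the supported pattern is copy-and-reassign, exactly as
# the _help_mutation message above describes.
import torch
import torch.fx

gm = torch.fx.symbolic_trace(lambda x: x + 1)
add_node = next(n for n in gm.graph.nodes if n.op == "call_function")
try:
    add_node.kwargs["alpha"] = 1.0            # immutable_dict refuses mutation
except NotImplementedError:
    pass
add_node.args = (add_node.args[0], 2)         # copy-and-reassign updates use/user bookkeeping
gm.recompile()
assert gm(torch.tensor(3.0)).item() == 5.0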
env-llmeval/lib/python3.10/site-packages/torch/fx/interpreter.py ADDED
@@ -0,0 +1,505 @@
1
+ from .graph_module import GraphModule
2
+ from .graph import Graph
3
+ from .node import Argument, Node, Target, map_arg, map_aggregate
4
+ from .proxy import Proxy
5
+ from ._symbolic_trace import Tracer
6
+ from ._compatibility import compatibility
7
+ from . import config
8
+ import torch.fx.traceback as fx_traceback
9
+ import torch
10
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
11
+ import inspect
12
+ from contextlib import contextmanager
13
+ from torch.hub import tqdm
14
+
15
+ __all__ = ['Interpreter', 'Transformer']
16
+
17
+ @compatibility(is_backward_compatible=True)
18
+ class Interpreter:
19
+ """
20
+ An Interpreter executes an FX graph Node-by-Node. This pattern
21
+ can be useful for many things, including writing code
22
+ transformations as well as analysis passes.
23
+
24
+ Methods in the Interpreter class can be overridden to customize
25
+ the behavior of execution. The map of overrideable methods
26
+ in terms of call hierarchy::
27
+
28
+ run()
29
+ +-- run_node
30
+ +-- placeholder()
31
+ +-- get_attr()
32
+ +-- call_function()
33
+ +-- call_method()
34
+ +-- call_module()
35
+ +-- output()
36
+
37
+ Example:
38
+
39
+ Suppose we want to swap all instances of ``torch.neg`` with
40
+ ``torch.sigmoid`` and vice versa (including their ``Tensor``
41
+ method equivalents). We could subclass Interpreter like so::
42
+
43
+ class NegSigmSwapInterpreter(Interpreter):
44
+ def call_function(self, target : Target,
45
+ args : Tuple, kwargs : Dict) -> Any:
46
+ if target == torch.sigmoid:
47
+ return torch.neg(*args, **kwargs)
48
+ return super().call_function(target, args, kwargs)
49
+
50
+ def call_method(self, target : Target,
51
+ args : Tuple, kwargs : Dict) -> Any:
52
+ if target == 'neg':
53
+ call_self, *args_tail = args
54
+ return call_self.sigmoid(*args_tail, **kwargs)
55
+ return super().call_method(target, args, kwargs)
56
+
57
+ def fn(x):
58
+ return torch.sigmoid(x).neg()
59
+
60
+ gm = torch.fx.symbolic_trace(fn)
61
+ input = torch.randn(3, 4)
62
+ result = NegSigmSwapInterpreter(gm).run(input)
63
+ torch.testing.assert_close(result, torch.neg(input).sigmoid())
64
+
65
+ Args:
66
+ module (GraphModule): The module to be executed
67
+ garbage_collect_values (bool): Whether to delete values after their last
68
+ use within the Module's execution. This ensures optimal memory usage during
69
+ execution. This can be disabled to, for example, examine all of the intermediate
70
+ values in the execution by looking at the ``Interpreter.env`` attribute.
71
+ """
72
+ @compatibility(is_backward_compatible=True)
73
+ def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
74
+ assert isinstance(module, GraphModule)
75
+ self.module = module
76
+ self.submodules = dict(self.module.named_modules())
77
+ self.env : Dict[Node, Any] = {}
78
+ self.name = "Interpreter"
79
+ self.garbage_collect_values = garbage_collect_values
80
+ self.extra_traceback = True
81
+
82
+ if self.garbage_collect_values:
83
+ # Run through reverse nodes and record the first instance of a use
84
+ # of a given node. This represents the *last* use of the node in the
85
+ # execution order of the program, which we will use to free unused
86
+ # values
87
+ node_to_last_use : Dict[Node, Node] = {}
88
+ self.user_to_last_uses : Dict[Node, List[Node]] = {}
89
+
90
+ def register_last_uses(n : Node, user : Node):
91
+ if n not in node_to_last_use:
92
+ node_to_last_use[n] = user
93
+ self.user_to_last_uses.setdefault(user, []).append(n)
94
+
95
+ for node in reversed(self.module.graph.nodes):
96
+ map_arg(node.args, lambda n: register_last_uses(n, node))
97
+ map_arg(node.kwargs, lambda n: register_last_uses(n, node))
98
+
99
+ @compatibility(is_backward_compatible=True)
100
+ def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any:
101
+ """
102
+ Run `module` via interpretation and return the result.
103
+
104
+ Args:
105
+ *args: The arguments to the Module to run, in positional order
106
+ initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
107
+ This is a dict mapping `Node` to any value. This can be used, for example, to
108
+ pre-populate results for certain `Nodes` so as to do only partial evaluation within
109
+ the interpreter.
110
+ enable_io_processing (bool): If true, we process the inputs and outputs with graph's process_inputs and
111
+ process_outputs function first before using them.
112
+
113
+ Returns:
114
+ Any: The value returned from executing the Module
115
+ """
116
+ self.env = initial_env if initial_env is not None else {}
117
+
118
+ # Positional function args are consumed left-to-right by
119
+ # `placeholder` nodes. Use an iterator to keep track of
120
+ # position and extract those values.
121
+ if enable_io_processing:
122
+ args = self.module.graph.process_inputs(*args)
123
+ self.args_iter : Iterator[Any] = iter(args)
124
+ pbar = tqdm(total=len(self.module.graph.nodes),
125
+ desc=f"{self.name}: {str(list(self.module.graph.nodes)) if config.verbose_progress else ''}",
126
+ initial=0, position=0, leave=True, disable=config.disable_progress, delay=0)
127
+
128
+ for node in self.module.graph.nodes:
129
+ pbar.update(1)
130
+ if node in self.env:
131
+ # Short circuit if we have this value. This could
132
+ # be used, for example, for partial evaluation
133
+ # where the caller has pre-populated `env` with
134
+ # values for a subset of the program.
135
+ continue
136
+
137
+ try:
138
+ self.env[node] = self.run_node(node)
139
+ except Exception as e:
140
+ if self.extra_traceback:
141
+ msg = f"While executing {node.format_node()}"
142
+ msg = f'{e.args[0]}\n\n{msg}' if e.args else str(msg)
143
+ msg += f"\nOriginal traceback:\n{node.stack_trace}"
144
+ e.args = (msg,) + e.args[1:]
145
+ if isinstance(e, KeyError):
146
+ raise RuntimeError(*e.args) from e
147
+ raise
148
+
149
+ if self.garbage_collect_values:
150
+ for to_delete in self.user_to_last_uses.get(node, []):
151
+ del self.env[to_delete]
152
+
153
+ if node.op == 'output':
154
+ output_val = self.env[node]
155
+ return self.module.graph.process_outputs(output_val) if enable_io_processing else output_val
156
+
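# Illustrative sketch (not part of the diff): `initial_env` pre-populates results
# for chosen Nodes, so `run` performs partial evaluation and only executes the rest.
# The traced function and values here are made up.
import torch
import torch.fx

def f(x):
    return torch.relu(x) + 1

gm = torch.fx.symbolic_trace(f)
relu_node = next(n for n in gm.graph.nodes
                 if n.op == "call_function" and n.target is torch.relu)
interp = torch.fx.Interpreter(gm)
# Pretend relu was already computed elsewhere; only the add actually runs.
out = interp.run(torch.randn(3), initial_env={relu_node: torch.zeros(3)})
assert torch.equal(out, torch.ones(3))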
157
+ @compatibility(is_backward_compatible=True)
158
+ def boxed_run(self, args_list):
159
+ """
160
+ Run `module` via interpretation and return the result. This uses the "boxed"
161
+ calling convention, where you pass a list of arguments, which will be cleared
162
+ by the interpreter. This ensures that input tensors are promptly deallocated.
163
+ """
164
+ args_iter = iter(args_list)
165
+ env = {}
166
+ for n in self.module.graph.nodes:
167
+ if n.op == "placeholder":
168
+ env[n] = next(args_iter)
169
+ args_list.clear()
170
+ return self.run(initial_env=env)
171
+
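# Illustrative sketch (not part of the diff): with the boxed calling convention
# the caller hands over a list of inputs, and `boxed_run` clears that list once
# the placeholders have consumed it so the inputs can be freed early.
import torch
import torch.fx

gm = torch.fx.symbolic_trace(lambda x: x * 2)
inputs = [torch.ones(2)]
out = torch.fx.Interpreter(gm).boxed_run(inputs)
assert inputs == []                          # the argument list was emptied
assert torch.equal(out, torch.full((2,), 2.0))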
172
+ @contextmanager
173
+ def _set_current_node(self, node):
174
+ with fx_traceback.set_current_meta(node):
175
+ yield
176
+
177
+ @compatibility(is_backward_compatible=True)
178
+ def run_node(self, n : Node) -> Any:
179
+ """
180
+ Run a specific node ``n`` and return the result.
181
+ Calls into placeholder, get_attr, call_function,
182
+ call_method, call_module, or output depending
183
+ on ``node.op``
184
+
185
+ Args:
186
+ n (Node): The Node to execute
187
+
188
+ Returns:
189
+ Any: The result of executing ``n``
190
+ """
191
+ with self._set_current_node(n):
192
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
193
+ assert isinstance(args, tuple)
194
+ assert isinstance(kwargs, dict)
195
+ return getattr(self, n.op)(n.target, args, kwargs)
196
+
197
+ # Main Node running APIs
198
+ @compatibility(is_backward_compatible=True)
199
+ def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
200
+ """
201
+ Execute a ``placeholder`` node. Note that this is stateful:
202
+ ``Interpreter`` maintains an internal iterator over
203
+ arguments passed to ``run`` and this method returns
204
+ next() on that iterator.
205
+
206
+ Args:
207
+ target (Target): The call target for this node. See
208
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
209
+ details on semantics
210
+ args (Tuple): Tuple of positional args for this invocation
211
+ kwargs (Dict): Dict of keyword arguments for this invocation
212
+
213
+ Returns:
214
+ Any: The argument value that was retrieved.
215
+ """
216
+ assert isinstance(target, str)
217
+ if target.startswith('*'):
218
+ # For a starred parameter e.g. `*args`, retrieve all
219
+ # remaining values from the args list.
220
+ return list(self.args_iter)
221
+ else:
222
+ try:
223
+ return next(self.args_iter)
224
+ except StopIteration as si:
225
+ if len(args) > 0:
226
+ return args[0]
227
+ else:
228
+ raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si
229
+
230
+ @compatibility(is_backward_compatible=True)
231
+ def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
232
+ """
233
+ Execute a ``get_attr`` node. Will retrieve an attribute
234
+ value from the ``Module`` hierarchy of ``self.module``.
235
+
236
+ Args:
237
+ target (Target): The call target for this node. See
238
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
239
+ details on semantics
240
+ args (Tuple): Tuple of positional args for this invocation
241
+ kwargs (Dict): Dict of keyword arguments for this invocation
242
+
243
+ Return:
244
+ Any: The value of the attribute that was retrieved
245
+ """
246
+ assert isinstance(target, str)
247
+ return self.fetch_attr(target)
248
+
249
+ @compatibility(is_backward_compatible=True)
250
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
251
+ """
252
+ Execute a ``call_function`` node and return the result.
253
+
254
+ Args:
255
+ target (Target): The call target for this node. See
256
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
257
+ details on semantics
258
+ args (Tuple): Tuple of positional args for this invocation
259
+ kwargs (Dict): Dict of keyword arguments for this invocation
260
+
261
+ Return
262
+ Any: The value returned by the function invocation
263
+ """
264
+ assert not isinstance(target, str)
265
+
266
+ # Execute the function and return the result
267
+ return target(*args, **kwargs)
268
+
269
+ @compatibility(is_backward_compatible=True)
270
+ def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
271
+ """
272
+ Execute a ``call_method`` node and return the result.
273
+
274
+ Args:
275
+ target (Target): The call target for this node. See
276
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
277
+ details on semantics
278
+ args (Tuple): Tuple of positional args for this invocation
279
+ kwargs (Dict): Dict of keyword arguments for this invocation
280
+
281
+ Return
282
+ Any: The value returned by the method invocation
283
+ """
284
+ # args[0] is the `self` object for this method call
285
+ self_obj, *args_tail = args
286
+
287
+ # Execute the method and return the result
288
+ assert isinstance(target, str)
289
+ return getattr(self_obj, target)(*args_tail, **kwargs)
290
+
291
+ @compatibility(is_backward_compatible=True)
292
+ def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
293
+ """
294
+ Execute a ``call_module`` node and return the result.
295
+
296
+ Args:
297
+ target (Target): The call target for this node. See
298
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
299
+ details on semantics
300
+ args (Tuple): Tuple of positional args for this invocation
301
+ kwargs (Dict): Dict of keyword arguments for this invocation
302
+
303
+ Return
304
+ Any: The value returned by the module invocation
305
+ """
306
+ # Retrieve executed args and kwargs values from the environment
307
+
308
+ # Execute the method and return the result
309
+ assert isinstance(target, str)
310
+ submod = self.fetch_attr(target)
311
+
312
+ return submod(*args, **kwargs)
313
+
314
+ @compatibility(is_backward_compatible=True)
315
+ def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
316
+ """
317
+ Execute an ``output`` node. This really just retrieves
318
+ the value referenced by the ``output`` node and returns it.
319
+
320
+ Args:
321
+ target (Target): The call target for this node. See
322
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
323
+ details on semantics
324
+ args (Tuple): Tuple of positional args for this invocation
325
+ kwargs (Dict): Dict of keyword arguments for this invocation
326
+
327
+ Return:
328
+ Any: The return value referenced by the output node
329
+ """
330
+ return args[0]
331
+
332
+ # Helper methods
333
+ @compatibility(is_backward_compatible=True)
334
+ def fetch_attr(self, target : str):
335
+ """
336
+ Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
337
+
338
+ Args:
339
+ target (str): The fully-qualified name of the attribute to fetch
340
+
341
+ Return:
342
+ Any: The value of the attribute.
343
+ """
344
+ target_atoms = target.split('.')
345
+ attr_itr = self.module
346
+ for i, atom in enumerate(target_atoms):
347
+ if not hasattr(attr_itr, atom):
348
+ raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
349
+ attr_itr = getattr(attr_itr, atom)
350
+ return attr_itr
351
+
352
+ @compatibility(is_backward_compatible=True)
353
+ def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
354
+ """
355
+ Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
356
+ from the current execution environment.
357
+
358
+ Args:
359
+ n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
360
+
361
+ Return:
362
+ Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
363
+ """
364
+ args = self.map_nodes_to_values(n.args, n)
365
+ assert isinstance(args, tuple)
366
+ kwargs = self.map_nodes_to_values(n.kwargs, n)
367
+ assert isinstance(kwargs, dict)
368
+ return args, kwargs
369
+
370
+ @compatibility(is_backward_compatible=True)
371
+ def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
372
+ """
373
+ Recursively descend through ``args`` and look up the concrete value
374
+ for each ``Node`` in the current execution environment.
375
+
376
+ Args:
377
+ args (Argument): Data structure within which to look up concrete values
378
+
379
+ n (Node): Node to which ``args`` belongs. This is only used for error reporting.
380
+ """
381
+ def load_arg(n_arg : Node) -> Any:
382
+ if n_arg not in self.env:
383
+ raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
384
+ f'to diagnose such issues')
385
+ return self.env[n_arg]
386
+ return map_arg(args, load_arg)
387
+
388
+ @compatibility(is_backward_compatible=True)
389
+ class Transformer(Interpreter):
390
+ """
391
+ ``Transformer`` is a special type of interpreter that produces a
392
+ new ``Module``. It exposes a ``transform()`` method that returns
393
+ the transformed ``Module``. Unlike ``Interpreter``, ``Transformer``
394
+ does not require input arguments to run. ``Transformer`` works
395
+ entirely symbolically.
396
+
397
+ Example:
398
+
399
+ Suppose we want to swap all instances of ``torch.neg`` with
400
+ ``torch.sigmoid`` and vice versa (including their ``Tensor``
401
+ method equivalents). We could subclass ``Transformer`` like so::
402
+
403
+ class NegSigmSwapXformer(Transformer):
404
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
405
+ if target == torch.sigmoid:
406
+ return torch.neg(*args, **kwargs)
407
+ return super().call_function(target, args, kwargs)
408
+
409
+ def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
410
+ if target == 'neg':
411
+ call_self, *args_tail = args
412
+ return call_self.sigmoid(*args_tail, **kwargs)
413
+ return super().call_method(target, args, kwargs)
414
+
415
+ def fn(x):
416
+ return torch.sigmoid(x).neg()
417
+
418
+ gm = torch.fx.symbolic_trace(fn)
419
+
420
+ transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
421
+ input = torch.randn(3, 4)
422
+ torch.testing.assert_close(transformed(input), torch.neg(input).sigmoid())
423
+
424
+ Args:
425
+ module (GraphModule): The ``Module`` to be transformed.
426
+ """
427
+
428
+ @compatibility(is_backward_compatible=True)
429
+ def __init__(self, module):
430
+ super().__init__(module)
431
+ self.new_graph = Graph()
432
+ self.new_graph.set_codegen(module.graph._codegen)
433
+
434
+ class TransformerTracer(Tracer):
435
+ def __init__(self, graph: Graph):
436
+ super().__init__()
437
+ self.graph = graph
438
+ self.tensor_attrs: Dict[torch.Tensor, str] = {} # type: ignore[assignment]
439
+
440
+ def is_leaf_module(self, _, __) -> bool:
441
+ return True
442
+
443
+ self.tracer = TransformerTracer(self.new_graph)
444
+ self.tracer.root = module
445
+
446
+ @compatibility(is_backward_compatible=True)
447
+ def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
448
+ """
449
+ Execute a ``placeholder`` node. In ``Transformer``, this is
450
+ overridden to insert a new ``placeholder`` into the output
451
+ graph.
452
+
453
+ Args:
454
+ target (Target): The call target for this node. See
455
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
456
+ details on semantics
457
+ args (Tuple): Tuple of positional args for this invocation
458
+ kwargs (Dict): Dict of keyword arguments for this invocation
459
+ """
460
+ assert isinstance(target, str)
461
+ default_value = next(iter(args)) if args else inspect.Signature.empty
462
+ return Proxy(self.new_graph.placeholder(target, default_value=default_value), self.tracer)
463
+
464
+ @compatibility(is_backward_compatible=True)
465
+ def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
466
+ """
467
+ Execute a ``get_attr`` node. In ``Transformer``, this is
468
+ overridden to insert a new ``get_attr`` node into the output
469
+ graph.
470
+
471
+ Args:
472
+ target (Target): The call target for this node. See
473
+ `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
474
+ details on semantics
475
+ args (Tuple): Tuple of positional args for this invocation
476
+ kwargs (Dict): Dict of keyword arguments for this invocation
477
+ """
478
+ assert isinstance(target, str)
479
+ return self.tracer.create_proxy("get_attr", target, args, kwargs)
480
+
481
+ @compatibility(is_backward_compatible=True)
482
+ def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
483
+ # Override so that the leaf module policy from `self.tracer` is respected.
484
+ assert isinstance(target, str)
485
+ submod = self.fetch_attr(target)
486
+ return self.tracer.call_module(submod, submod.forward, args, kwargs)
487
+
488
+ @compatibility(is_backward_compatible=True)
489
+ def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
490
+ # Override so that functions that were wrapped are still wrapped.
491
+ return self.tracer.create_proxy('call_function', target, args, kwargs)
492
+
493
+ @compatibility(is_backward_compatible=True)
494
+ def transform(self) -> GraphModule:
495
+ """
496
+ Transform ``self.module`` and return the transformed
497
+ ``GraphModule``.
498
+ """
499
+ with fx_traceback.preserve_node_meta():
500
+ result = super().run(enable_io_processing=False)
501
+ if result is not None:
502
+ def strip_proxy(a : Union[Argument, Proxy]) -> Any:
503
+ return a.node if isinstance(a, Proxy) else a
504
+ self.new_graph.output(map_aggregate(result, strip_proxy))
505
+ return GraphModule(self.module, self.new_graph)
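# Illustrative sketch (not part of the diff): a minimal Interpreter subclass that
# records the shape of every intermediate tensor in `node.meta`, in the spirit of
# the ShapeProp example from the FX documentation. All names here are made up.
import torch
import torch.fx

class ShapeRecorder(torch.fx.Interpreter):
    def run_node(self, n):
        result = super().run_node(n)
        if isinstance(result, torch.Tensor):
            n.meta["recorded_shape"] = tuple(result.shape)   # hypothetical meta key
        return result

gm = torch.fx.symbolic_trace(lambda x: torch.relu(x).sum())
ShapeRecorder(gm).run(torch.randn(2, 3))
for node in gm.graph.nodes:
    print(node.name, node.meta.get("recorded_shape"))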
env-llmeval/lib/python3.10/site-packages/torch/fx/node.py ADDED
@@ -0,0 +1,696 @@
1
+ # Nodes represent a definition of a value in our graph of operators.
2
+ from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set
3
+ from ._compatibility import compatibility
4
+ from .immutable_collections import immutable_dict, immutable_list
5
+ import torch
6
+ import builtins
7
+ import types
8
+ import inspect
9
+ import warnings
10
+ from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
11
+ from .._ops import ops as _ops
12
+
13
+ if TYPE_CHECKING:
14
+ from .graph import Graph
15
+
16
+ __all__ = ['Node', 'map_arg', 'map_aggregate', "has_side_effect"]
17
+
18
+ BaseArgumentTypes = Union[str, int, float, bool, complex, torch.dtype,
19
+ torch.Tensor, torch.device, torch.memory_format, torch.layout, torch._ops.OpOverload]
20
+ base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
21
+
22
+ Target = Union[Callable[..., Any], str]
23
+
24
+ Argument = Optional[Union[
25
+ Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types
26
+ List[Any], # actually Argument
27
+ Dict[str, Any], # actually Argument
28
+ slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing
29
+ range,
30
+ 'Node',
31
+ BaseArgumentTypes
32
+ ]]
33
+
34
+ _side_effectful_functions: Set[Callable] = {
35
+ torch._assert,
36
+ torch._assert_async,
37
+ _ops.aten._assert_async.msg,
38
+ _ops.aten.copy_.default,
39
+ _ops.aten.sym_constrain_range.default,
40
+ _ops.aten.sym_constrain_range_for_size.default,
41
+ _ops.profiler._record_function_enter,
42
+ _ops.profiler._record_function_enter_new,
43
+ _ops.profiler._record_function_exit,
44
+ _ops.inductor.accumulate_grad_.default,
45
+ }
46
+
47
+
48
+ @compatibility(is_backward_compatible=False)
49
+ def has_side_effect(fn: Callable) -> Callable:
50
+ _side_effectful_functions.add(fn)
51
+ return fn
52
+
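# Illustrative sketch (not part of the diff): marking a wrapped function as
# side-effectful keeps its calls alive through dead-code elimination even though
# their results are unused. `log_value` is a made-up helper defined at module level.
import torch
import torch.fx
from torch.fx.node import has_side_effect

def log_value(x):
    print("saw", x.shape)

torch.fx.wrap("log_value")        # trace calls to log_value as leaf call_function nodes
has_side_effect(log_value)        # and protect those calls from dead-code elimination

def f(x):
    log_value(x)                  # result unused, but the call must not be eliminated
    return x + 1

gm = torch.fx.symbolic_trace(f)
gm.graph.eliminate_dead_code()
assert any(n.target is log_value for n in gm.graph.nodes)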
53
+
54
+ # this is fixed on master, WAR for 1.5
55
+ def _find_module_of_method(orig_method: Callable[..., Any]) -> str:
56
+ name = orig_method.__name__
57
+ module = orig_method.__module__
58
+ if module is not None:
59
+ return module
60
+ for guess in [torch, torch.nn.functional]:
61
+ if getattr(guess, name, None) is orig_method:
62
+ return guess.__name__
63
+ raise RuntimeError(f'cannot find module for {orig_method}')
64
+
65
+ # Borrowed from CPython typing module
66
+ # https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156
67
+ def _type_repr(obj):
68
+ """Return the repr() of an object, special-casing types (internal helper).
69
+ If obj is a type, we return a shorter version than the default
70
+ type.__repr__, based on the module and qualified name, which is
71
+ typically enough to uniquely identify a type. For everything
72
+ else, we fall back on repr(obj).
73
+ """
74
+ if isinstance(obj, type):
75
+ if obj.__module__ == 'builtins':
76
+ return obj.__qualname__
77
+ return f'{obj.__module__}.{obj.__qualname__}'
78
+ if obj is ...:
79
+ return('...')
80
+ if isinstance(obj, types.FunctionType):
81
+ return obj.__name__
82
+ return repr(obj)
83
+
84
+ def _get_qualified_name(func: Callable[..., Any]) -> str:
85
+ # things like getattr just appear in builtins
86
+ if getattr(builtins, func.__name__, None) is func:
87
+ return func.__name__
88
+ # torch.Tensor.{fn}
89
+ if (isinstance(func, (types.MethodDescriptorType, types.WrapperDescriptorType))
90
+ and func is getattr(torch.Tensor, func.__name__, None)):
91
+ return f"torch.Tensor.{func.__name__}"
92
+ name = func.__name__
93
+ if name == "<lambda>":
94
+ # For lambdas, try to get their defining name in the module
95
+ try:
96
+ name = inspect.getsource(func).split("=")[0].strip()
97
+ except Exception as e:
98
+ raise RuntimeError("Unable to represent lambda") from e
99
+ module = _find_module_of_method(func)
100
+ module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module
101
+ # Fixup segment_reduce mismatch
102
+ if module == "torch" and name == "segment_reduce":
103
+ name = "_" + name
104
+ return f'{module}.{name}'
105
+
106
+ def _format_arg(arg, max_list_len=float('inf')) -> str:
107
+ if hasattr(arg, '_custom_fx_repr_fn'):
108
+ return arg._custom_fx_repr_fn()
109
+ elif isinstance(arg, list):
110
+ items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
111
+ maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
112
+ return f'[{items}{maybe_len}]'
113
+ elif isinstance(arg, tuple):
114
+ items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
115
+ maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
116
+ maybe_comma = ',' if len(arg) == 1 else ''
117
+ return f'({items}{maybe_comma}{maybe_len})'
118
+ elif isinstance(arg, dict):
119
+ items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items())
120
+ return f'{{{items_str}}}'
121
+
122
+ if isinstance(arg, Node):
123
+ return '%' + str(arg)
124
+ else:
125
+ return str(arg)
126
+
127
+ @compatibility(is_backward_compatible=True)
128
+ class Node:
129
+ """
130
+ ``Node`` is the data structure that represents individual operations within
131
+ a ``Graph``. For the most part, Nodes represent callsites to various entities,
132
+ such as operators, methods, and Modules (some exceptions include nodes that
133
+ specify function inputs and outputs). Each ``Node`` has a function specified
134
+ by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows:
135
+
136
+ - ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on.
137
+ ``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument
138
+ denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to
139
+ the function parameters (e.g. ``x``) in the graph printout.
140
+ - ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the
141
+ fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy.
142
+ ``args`` and ``kwargs`` are don't-care
143
+ - ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign
144
+ to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function,
145
+ following the Python calling convention
146
+ - ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is
147
+ as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call.
148
+ ``args`` and ``kwargs`` represent the arguments to invoke the module on, *excluding the self argument*.
149
+ - ``call_method`` calls a method on a value. ``name`` is as above. ``target`` is the string name of the method
150
+ to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the method on,
151
+ *including the self argument*
152
+ - ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement
153
+ in the Graph printout.
154
+ """
155
+
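# Illustrative sketch (not part of the diff): tracing a small made-up module and
# printing its nodes shows the opcodes described in the docstring above.
import torch
import torch.fx

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.linear(x)).clamp(min=0.0)

gm = torch.fx.symbolic_trace(M())
for node in gm.graph.nodes:
    print(node.op, node.name, node.target)
# One line per node: a placeholder for `x`, a call_module targeting "linear",
# a call_function targeting torch.relu, a call_method targeting "clamp",
# and the final output node.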
156
+ @compatibility(is_backward_compatible=True)
157
+ def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',
158
+ args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],
159
+ return_type : Optional[Any] = None) -> None:
160
+ """
161
+ Instantiate an instance of ``Node``. Note: most often, you want to use the
162
+ Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather
163
+ than instantiating a ``Node`` directly.
164
+
165
+ Args:
166
+ graph (Graph): The ``Graph`` to which this ``Node`` should belong.
167
+
168
+ name (str): The name to which the output of this ``Node`` should be assigned
169
+
170
+ op (str): The opcode for this ``Node``. Can be one of 'placeholder',
171
+ 'call_method', 'call_module', 'call_function', 'get_attr',
172
+ 'output'
173
+
174
+ target ('Target'): The target this op should call. See the broader
175
+ ``Node`` docstring for more details.
176
+
177
+ args (Tuple['Argument']): The args to be passed to ``target``
178
+
179
+ kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``
180
+
181
+ return_type (Optional[Any]): The python type expression representing the
182
+ type of the output of this node. This field can be used for
183
+ annotation of values in the generated code or for other types
184
+ of analyses.
185
+ """
186
+ self.graph = graph
187
+ self.name = name # unique name of value being created
188
+ assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']
189
+ self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr
190
+ if op == 'call_function':
191
+ if not callable(target):
192
+ raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
193
+ 'but a Callable is expected')
194
+ else:
195
+ if not isinstance(target, str):
196
+ raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
197
+ 'but a str is expected')
198
+ self.target = target # for method/module/function, the name of the method/module/function/attr
199
+ # being invoked, e.g add, layer1, or torch.add
200
+
201
+ # All `Node`-valued inputs. Key is the Node, value is don't-care.
202
+ # The public API for this is `all_input_nodes`, this private attribute
203
+ # should not be accessed directly.
204
+ self._input_nodes : Dict[Node, None] = {}
205
+ self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type]
206
+
207
+ # All of the nodes that use the value produced by this Node
208
+ # Note one user may correspond to several uses, e.g. the node for ``x + x``
209
+ # would appear once here, but represents two uses.
210
+ #
211
+ # Is a dict to act as an "ordered set". Keys are significant, values are don't-care.
212
+ self.users : Dict[Node, None] = {}
213
+ # Type expression representing the output value of this node.
214
+ # This should contain the same class of Type objects that would appear
215
+ # as type annotations for function inputs/outputs.
216
+ #
217
+ # For placeholder nodes, this value will be used to type-annotate the
218
+ # generated function parameters.
219
+ # For the return node, this value will be used to type-annotate the
220
+ # generated function return type. (Note this is a special case. ``return``
221
+ # does not produce a value, it's more of a notation. Thus, this value
222
+ # describes the type of args[0] in the ``return`` node.)
223
+ self.type : Optional[Any] = return_type
224
+ self._prev = self
225
+ self._next = self
226
+ self._erased = False
227
+
228
+ # If set, use this fn to print this node
229
+ self._repr_fn : Optional[Callable[[Node], str]] = None
230
+
231
+ # Dictionary to store metadata passes need to do their
232
+ # transformations. This metadata is preserved across node copies
233
+ self.meta : Dict[str, Any] = {}
234
+
235
+ @property
236
+ def next(self) -> 'Node':
237
+ """
238
+ Returns the next ``Node`` in the linked list of Nodes.
239
+
240
+ Returns:
241
+
242
+ The next ``Node`` in the linked list of Nodes.
243
+ """
244
+ return self._next
245
+
246
+ @property
247
+ def prev(self) -> 'Node':
248
+ """
249
+ Returns the previous ``Node`` in the linked list of Nodes.
250
+
251
+ Returns:
252
+
253
+ The previous ``Node`` in the linked list of Nodes.
254
+ """
255
+ return self._prev
256
+
257
+ @compatibility(is_backward_compatible=True)
258
+ def prepend(self, x: 'Node') -> None:
259
+ """
260
+ Insert x before this node in the list of nodes in the graph. Example::
261
+
262
+ Before: p -> self
263
+ bx -> x -> ax
264
+ After: p -> x -> self
265
+ bx -> ax
266
+
267
+ Args:
268
+ x (Node): The node to put before this node. Must be a member of the same graph.
269
+ """
270
+ assert self.graph == x.graph, "Attempting to move a Node into a different Graph"
271
+ if self == x:
272
+ warnings.warn("Trying to prepend a node to itself. This behavior has no effect on the graph.")
273
+ return
274
+ x._remove_from_list()
275
+ p = self._prev
276
+ p._next, x._prev = x, p
277
+ x._next, self._prev = self, x
278
+
279
+ @compatibility(is_backward_compatible=True)
280
+ def append(self, x: 'Node') -> None:
281
+ """
282
+ Insert ``x`` after this node in the list of nodes in the graph.
283
+ Equivalent to ``self.next.prepend(x)``
284
+
285
+ Args:
286
+ x (Node): The node to put after this node. Must be a member of the same graph.
287
+ """
288
+ self._next.prepend(x)
289
+
290
+ def _remove_from_list(self):
291
+ p, n = self._prev, self._next
292
+ p._next, n._prev = n, p
293
+
294
+ @property
295
+ def args(self) -> Tuple[Argument, ...]:
296
+ """
297
+ The tuple of arguments to this ``Node``. The interpretation of arguments
298
+ depends on the node's opcode. See the :class:`Node` docstring for more
299
+ information.
300
+
301
+ Assignment to this property is allowed. All accounting of uses and users
302
+ is updated automatically on assignment.
303
+ """
304
+ return self._args
305
+
306
+ @args.setter
307
+ def args(self, a : Tuple[Argument, ...]):
308
+ """
309
+ Set the tuple of arguments to this Node. The interpretation of arguments
310
+ depends on the node's opcode. See the ``fx.Graph`` docstring for more
311
+ information.
312
+ """
313
+ # DO NOT CALL `__update_args_kwargs` directly. The correct way to
314
+ # set `args` is via direct assignment, i.e. `node.args = new_args`
315
+ self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type]
316
+
317
+ @property
318
+ def kwargs(self) -> Dict[str, Argument]:
319
+ """
320
+ The dict of keyword arguments to this ``Node``. The interpretation of arguments
321
+ depends on the node's opcode. See the :class:`Node` docstring for more
322
+ information.
323
+
324
+ Assignment to this property is allowed. All accounting of uses and users
325
+ is updated automatically on assignment.
326
+ """
327
+ return self._kwargs
328
+
329
+ @kwargs.setter
330
+ def kwargs(self, k : Dict[str, Argument]):
331
+ """
332
+ Set the dict of kwargs to this Node. The interpretation of arguments
333
+ depends on the node's opcode. See the ``fx.Graph`` docstring for more
334
+ information.
335
+ """
336
+ # DO NOT CALL `__update_args_kwargs` directly. The correct way to
337
+ # set `args` is via direct assignment, i.e. `node.kwargs = new_kwargs`
338
+ self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type]
339
+
340
+ @property
341
+ def all_input_nodes(self) -> List['Node']:
342
+ """
343
+ Return all Nodes that are inputs to this Node. This is equivalent to
344
+ iterating over ``args`` and ``kwargs`` and only collecting the values that
345
+ are Nodes.
346
+
347
+ Returns:
348
+
349
+ List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this
350
+ ``Node``, in that order.
351
+ """
352
+ return list(self._input_nodes.keys())
353
+
354
+ @compatibility(is_backward_compatible=True)
355
+ def update_arg(self, idx : int, arg : Argument) -> None:
356
+ """
357
+ Update an existing positional argument to contain the new value
358
+ ``arg``. After calling, ``self.args[idx] == arg``.
359
+
360
+ Args:
361
+
362
+ idx (int): The index into ``self.args`` of the element to update
363
+ arg (Argument): The new argument value to write into ``args``
364
+ """
365
+ args = list(self.args)
366
+ args[idx] = arg
367
+ self.args = tuple(args)
368
+
369
+ @compatibility(is_backward_compatible=True)
370
+ def insert_arg(self, idx : int, arg : Argument) -> None:
371
+ """
372
+ Insert a positional argument into the argument list at the given index.
373
+
374
+ Args:
375
+
376
+ idx (int): The index of the element in ``self.args`` to be inserted before.
377
+ arg (Argument): The new argument value to insert into ``args``
378
+ """
379
+ assert 0 <= idx <= len(self.args), "insert_args index must be between 0 and len(self.args)"
380
+ args_left = self.args[:idx]
381
+ args_right = self.args[idx:]
382
+
383
+ self._args = args_left + (arg,) + args_right
384
+
385
+ _new_input_nodes = {}
386
+ map_arg(arg, _new_input_nodes.setdefault)
387
+
388
+ for new_use in _new_input_nodes.keys():
389
+ if new_use not in self._input_nodes:
390
+ self._input_nodes.setdefault(new_use)
391
+ new_use.users.setdefault(self)
392
+
393
+ @compatibility(is_backward_compatible=True)
394
+ def update_kwarg(self, key : str, arg : Argument) -> None:
395
+ """
396
+ Update an existing keyword argument to contain the new value
397
+ ``arg``. After calling, ``self.kwargs[key] == arg``.
398
+
399
+ Args:
400
+
401
+ key (str): The key in ``self.kwargs`` of the element to update
402
+ arg (Argument): The new argument value to write into ``kwargs``
403
+ """
404
+ kwargs = dict(self.kwargs)
405
+ kwargs[key] = arg
406
+ self.kwargs = kwargs
407
+
408
+ @property
409
+ def stack_trace(self) -> Optional[str]:
410
+ """
411
+ Return the Python stack trace that was recorded during tracing, if any.
412
+ When traced with fx.Tracer, this property is usually populated by
413
+ `Tracer.create_proxy`. To record stack traces during tracing for debug purposes,
414
+ set `record_stack_traces = True` on the `Tracer` instance.
415
+ When traced with dynamo, this property will be populated by default by
416
+ `OutputGraph.create_proxy`.
417
+
418
+ stack_trace would have the innermost frame at the end of the string.
419
+ """
420
+ return self.meta.get("stack_trace", None)
421
+
422
+ @stack_trace.setter
423
+ def stack_trace(self, trace : Optional[str]):
424
+ self.meta["stack_trace"] = trace
425
+
426
+ def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']):
427
+ """
428
+ This API is internal. Do *not* call it directly.
429
+ """
430
+ self._args = new_args
431
+ self._kwargs = new_kwargs
432
+
433
+ for old_use in self._input_nodes.keys():
434
+ old_use.users.pop(self)
435
+
436
+ self._input_nodes = {}
437
+ map_arg(self._args, self._input_nodes.setdefault)
438
+ map_arg(self._kwargs, self._input_nodes.setdefault)
439
+
440
+ for new_use in self._input_nodes.keys():
441
+ new_use.users.setdefault(self)
442
+
443
+ def __repr__(self) -> str:
444
+ if self._repr_fn:
445
+ return self._repr_fn(self)
446
+ return self.name
447
+
448
+ def _pretty_print_target(self, target):
449
+ """
450
+ Make target printouts more user-friendly.
451
+ 1) builtins will be printed as `builtins.xyz`
452
+ 2) operators will be printed as `operator.xyz`
453
+ 3) other callables will be printed with qualified name, e.g. torch.add
454
+ """
455
+ if isinstance(target, str):
456
+ return target
457
+ if hasattr(target, '__module__'):
458
+ if not hasattr(target, '__name__'):
459
+ # Just to be defensive, if we don't have `__name__`, get the
460
+ # qualname. Not sure if this happens for any members of `operator`
461
+ # or `builtins`. This fallback path is not as good, since e.g.
462
+ # things in `operator` have `_operator` as their __module__.
463
+ return _get_qualified_name(target)
464
+ if target.__module__ == 'builtins':
465
+ return f'builtins.{target.__name__}'
466
+ elif target.__module__ == '_operator':
467
+ return f'operator.{target.__name__}'
468
+ return _get_qualified_name(target)
469
+
470
+ @compatibility(is_backward_compatible=True)
471
+ def format_node(self,
472
+ placeholder_names: Optional[List[str]] = None,
473
+ maybe_return_typename: Optional[List[str]] = None) -> Optional[str]:
474
+ """
475
+ Return a descriptive string representation of ``self``.
476
+
477
+ This method can be used with no arguments as a debugging
478
+ utility.
479
+
480
+ This function is also used internally in the ``__str__`` method
481
+ of ``Graph``. Together, the strings in ``placeholder_names``
482
+ and ``maybe_return_typename`` make up the signature of the
483
+ autogenerated ``forward`` function in this Graph's surrounding
484
+ GraphModule. ``placeholder_names`` and ``maybe_return_typename``
485
+ should not be used otherwise.
486
+
487
+ Args:
488
+ placeholder_names: A list that will store formatted strings
489
+ representing the placeholders in the generated
490
+ ``forward`` function. Internal use only.
491
+ maybe_return_typename: A single-element list that will store
492
+ a formatted string representing the output of the
493
+ generated ``forward`` function. Internal use only.
494
+
495
+ Returns:
496
+ str: If 1) we're using ``format_node`` as an internal helper
497
+ in the ``__str__`` method of ``Graph``, and 2) ``self``
498
+ is a placeholder Node, return ``None``. Otherwise,
499
+ return a descriptive string representation of the
500
+ current Node.
501
+ """
502
+ if self.op == 'placeholder':
503
+ assert isinstance(self.target, str)
504
+ arg_str = self.target
505
+ arg_str += f': {_type_repr(self.type)}' if self.type else ''
506
+ if placeholder_names:
507
+ placeholder_names.append(arg_str)
508
+ return None
509
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''
510
+ default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''
511
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
512
+ elif self.op == 'get_attr':
513
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
514
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
515
+ f'{self.op}[target={self._pretty_print_target(self.target)}]'
516
+ elif self.op == 'output':
517
+ if self.type and maybe_return_typename:
518
+ maybe_return_typename[0] = f' -> {_type_repr(self.type)}'
519
+ return f'return {self.args[0]}'
520
+ else:
521
+ maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
522
+ return f'%{self.name} : {maybe_typename}[num_users={len(self.users)}] = ' \
523
+ f'{self.op}[target={self._pretty_print_target(self.target)}](' \
524
+ f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
525
+
526
+ @compatibility(is_backward_compatible=True)
527
+ def replace_all_uses_with(self,
528
+ replace_with : 'Node',
529
+ delete_user_cb: Callable[['Node'], bool] = lambda user: True,
530
+ *,
531
+ propagate_meta=False
532
+ ) -> List['Node']:
533
+ """
534
+ Replace all uses of ``self`` in the Graph with the Node ``replace_with``.
535
+
536
+ Args:
537
+
538
+ replace_with (Node): The node to replace all uses of ``self`` with.
539
+ delete_user_cb (Callable): Callback that is called to determine
540
+ whether a given user of the self node should be removed.
541
+ propagate_meta (bool): Whether or not to copy all properties
542
+ on the .meta field of the original node onto the replacement node.
543
+ For safety, this is only valid to do if the replacement node
544
+ doesn't already have an existing .meta field.
545
+
546
+ Returns:
547
+
548
+ The list of Nodes on which this change was made.
549
+ """
550
+ if propagate_meta:
551
+ assert len(replace_with.meta) == 0, \
552
+ 'Called node.replace_all_uses_with(replace_with, propagate_meta=True), ' \
553
+ 'but replace_with already has .meta keys'
554
+ for k, v in self.meta.items():
555
+ replace_with.meta[k] = v
556
+ to_process = list(self.users)
557
+ skipped = []
558
+ for use_node in to_process:
559
+ if not delete_user_cb(use_node):
560
+ skipped.append(use_node)
561
+ continue
562
+
563
+ def maybe_replace_node(n : Node) -> Node:
564
+ if n == self:
565
+ return replace_with
566
+ else:
567
+ return n
568
+
569
+ new_args = map_arg(use_node.args, maybe_replace_node)
570
+ new_kwargs = map_arg(use_node.kwargs, maybe_replace_node)
571
+ assert isinstance(new_args, tuple)
572
+ assert isinstance(new_kwargs, dict)
573
+ use_node.__update_args_kwargs(new_args, new_kwargs)
574
+
575
+ assert len(self.users) - len(skipped) == 0
576
+ return [n for n in to_process if n not in skipped]
577
+
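# Illustrative sketch (not part of the diff): a typical graph rewrite inserts a
# replacement node, redirects all uses of the old node to it, erases the old
# node, and recompiles. Swapping relu for sigmoid here is a made-up transformation.
import torch
import torch.fx

gm = torch.fx.symbolic_trace(lambda x: torch.relu(x) + 1)
for node in list(gm.graph.nodes):
    if node.op == "call_function" and node.target is torch.relu:
        with gm.graph.inserting_after(node):
            new_node = gm.graph.call_function(torch.sigmoid, node.args, node.kwargs)
        node.replace_all_uses_with(new_node)
        gm.graph.erase_node(node)
gm.recompile()
x = torch.randn(4)
assert torch.equal(gm(x), torch.sigmoid(x) + 1)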
578
+ @compatibility(is_backward_compatible=False)
579
+ def is_impure(self):
580
+ """
581
+ Returns whether this op is impure, i.e. if its op is a placeholder or
582
+ output, or if a call_function or call_module which is impure.
583
+
584
+ Returns:
585
+
586
+ bool: If the op is impure or not.
587
+ """
588
+ if self.op in {"placeholder", "output"}:
589
+ return True
590
+
591
+ # Check if an impure function.
592
+ if self.op == "call_function":
593
+ return self.target in _side_effectful_functions
594
+
595
+ # Check if an impure module.
596
+ if self.op == "call_module":
597
+ assert (
598
+ self.graph.owning_module is not None
599
+ ), "self.graph.owning_module not set for purity check"
600
+ target_mod = self.graph.owning_module.get_submodule(self.target)
601
+ assert (
602
+ target_mod is not None
603
+ ), f"Did not find expected submodule target {self.target}"
604
+ return getattr(target_mod, "_is_impure", False)
605
+
606
+ return False
607
+
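A small sketch of how the call_module branch above behaves; the Logger module is illustrative, and its _is_impure attribute is simply read via getattr, as the code shows:

import torch
import torch.fx

class Logger(torch.nn.Module):
    # Marking a module this way makes is_impure() return True for its
    # call_module nodes, so passes such as dead code elimination keep them.
    _is_impure = True

    def forward(self, x):
        return x

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.log = Logger()

    def forward(self, x):
        return self.log(x) * 2

gm = torch.fx.symbolic_trace(M())
for node in gm.graph.nodes:
    print(node.op, node.is_impure())
# placeholder True, call_module True, call_function False, output True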
608
+ @compatibility(is_backward_compatible=False)
609
+ def normalized_arguments(
610
+ self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None,
611
+ kwarg_types : Optional[Dict[str, Any]] = None,
612
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
613
+ """
614
+ Returns normalized arguments to Python targets. This means that
+ `args/kwargs` will be matched up to the module/functional's
+ signature and returned exclusively as kwargs, in positional order,
+ if `normalize_to_only_use_kwargs` is true.
618
+ Also populates default values. Does not support positional-only
619
+ parameters or varargs parameters.
620
+
621
+ Supports module calls.
622
+
623
+ May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
624
+
625
+ Args:
626
+ root (torch.nn.Module): Module upon which to resolve module targets.
627
+ arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
628
+ kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
629
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
630
+
631
+ Returns:
632
+
633
+ Returns NamedTuple ArgsKwargsPair, or `None` if not successful.
634
+ """
635
+ if self.op == 'call_function':
636
+ assert callable(self.target)
637
+ return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type]
638
+ elif self.op == 'call_module':
639
+ assert isinstance(self.target, str)
640
+ return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type]
641
+
642
+ return None
643
+
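For illustration, a rough sketch of normalizing a traced call_function node into keyword-only form; the module is invented, and torch.relu is chosen because it has a single, unambiguous schema:

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)

root = M()
gm = torch.fx.symbolic_trace(root)
for node in gm.graph.nodes:
    if node.op == 'call_function':
        pair = node.normalized_arguments(root, normalize_to_only_use_kwargs=True)
        if pair is not None:
            print(pair.args, pair.kwargs)   # () {'input': x}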
644
+ @compatibility(is_backward_compatible=True)
645
+ def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
646
+ """
647
+ Loop through input nodes of ``self``, and replace all instances of
648
+ ``old_input`` with ``new_input``.
649
+
650
+ Args:
651
+
652
+ old_input (Node): The old input node to be replaced.
653
+ new_input (Node): The new input node to replace ``old_input``.
654
+ """
655
+ def maybe_replace_node(n : Node) -> Node:
656
+ return new_input if n == old_input else n
657
+
658
+ new_args = map_arg(self.args, maybe_replace_node)
659
+ new_kwargs = map_arg(self.kwargs, maybe_replace_node)
660
+ assert isinstance(new_args, tuple)
661
+ assert isinstance(new_kwargs, dict)
662
+ self.__update_args_kwargs(new_args, new_kwargs)
663
+
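A minimal sketch (invented two-input module) of rewiring one input of a node with replace_input_with:

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x, y):
        return x + y

gm = torch.fx.symbolic_trace(M())
graph = gm.graph
x_node, y_node = [n for n in graph.nodes if n.op == 'placeholder']
add_node = next(n for n in graph.nodes if n.op == 'call_function')

# Rewire x + y into x + x; y becomes an unused input but stays in the signature.
add_node.replace_input_with(y_node, x_node)
gm.recompile()
print(gm.code)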
664
+ def _rename(self, candidate: str):
665
+ if candidate == self.name:
666
+ return
667
+ name = self.graph._graph_namespace.create_name(candidate, None)
668
+ self.name = name
669
+ self.graph._graph_namespace._rename_object(self, name)
670
+
671
+
672
+ @compatibility(is_backward_compatible=True)
673
+ def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument:
674
+ """
675
+ Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
676
+ """
677
+ assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable"
678
+ return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x)
679
+
680
+ @compatibility(is_backward_compatible=True)
681
+ def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument:
682
+ """
683
+ Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
684
+ """
685
+ if isinstance(a, tuple):
686
+ t = tuple(map_aggregate(elem, fn) for elem in a)
687
+ # Support NamedTuple (if it has `_fields`) by repacking into original type.
688
+ return t if not hasattr(a, '_fields') else type(a)(*t)
689
+ elif isinstance(a, list):
690
+ return immutable_list(map_aggregate(elem, fn) for elem in a)
691
+ elif isinstance(a, dict):
692
+ return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items())
693
+ elif isinstance(a, slice):
694
+ return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn))
695
+ else:
696
+ return fn(a)
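A short sketch of map_aggregate on nested aggregates; map_arg works the same way but only applies fn to Node instances. The values below are arbitrary:

from torch.fx.node import map_aggregate

nested = {'a': (1, 2), 'b': [3, slice(0, 4)]}
doubled = map_aggregate(nested, lambda x: x * 2 if isinstance(x, int) else x)
print(doubled)   # {'a': (2, 4), 'b': [6, slice(0, 8, None)]}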
env-llmeval/lib/python3.10/site-packages/torch/fx/operator_schemas.py ADDED
@@ -0,0 +1,440 @@
1
+ import torch
2
+ import inspect
3
+ import numbers
4
+ import types
5
+ import typing
6
+ import enum
7
+ import warnings
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
9
+ from torch._jit_internal import boolean_dispatched
10
+ from ._compatibility import compatibility
11
+ from torch._ops import OpOverloadPacket, OpOverload
12
+
13
+ if TYPE_CHECKING:
14
+ from .node import Argument
15
+
16
+ __all__ = ["ArgsKwargsPair", "check_for_mutable_operation", "get_signature_for_torch_op", "create_type_hint",
17
+ "type_matches", "normalize_function", "normalize_module"]
18
+
19
+ @compatibility(is_backward_compatible=False)
20
+ class ArgsKwargsPair(NamedTuple):
21
+ """
22
+ Simple named tuple for wrapping args/kwargs pairs.
23
+ """
24
+ args: Tuple[Any, ...]
25
+ kwargs: Dict[str, Any]
26
+
27
+ _manual_overrides : Dict[Callable, List[inspect.Signature]] = {}
28
+
29
+ def _nonzero_schemas():
30
+ signatures = []
31
+
32
+ def nonzero(self):
33
+ pass
34
+ signatures.append(inspect.signature(nonzero))
35
+
36
+ def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef]
37
+ pass
38
+ signatures.append(inspect.signature(nonzero))
39
+
40
+ return signatures
41
+
42
+ _manual_overrides[torch.nonzero] = _nonzero_schemas()
43
+
44
+ class _FakeGlobalNamespace:
45
+ def __getattr__(self, name):
46
+ if name == 'torch':
47
+ return torch
48
+ raise RuntimeError('Expected a torch namespace lookup')
49
+
50
+ _type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
51
+ 'number' : numbers.Number, 'Future' : torch.jit.Future,
52
+ 'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
53
+ '__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
54
+ 't': typing.TypeVar('t')}
55
+ for k in dir(typing):
56
+ _type_eval_globals[k] = getattr(typing, k)
57
+
58
+ def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
59
+ """
60
+ Convert a TorchScript type to a Python type (including subtypes) via
61
+ eval'ing the annotation_str. _type_eval_globals sets up expressions
62
+ like "List" and "Future" to map to actual types (typing.List and jit.Future)
63
+ """
64
+ return eval(ts_type.annotation_str, _type_eval_globals)
65
+
66
+ def _torchscript_schema_to_signature_impl(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
67
+ from inspect import Parameter
68
+ parameters : List[Parameter] = []
69
+ for arg in ts_schema.arguments:
70
+ arg_type = _torchscript_type_to_python_type(arg.type)
71
+ default = arg.default_value if arg.has_default_value() else Parameter.empty
72
+ # TODO: Figure out if this is safe. It seems like when generating the type signatures for
73
+ # PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
74
+ # argument name. Downstream, if someone converts that positional argument to a keyword
75
+ # argument, the name mismatch will break things, so here we're going to normalize the
76
+ # name to "input"
77
+ name = arg.name if arg.name != 'self' else 'input'
78
+ kind = Parameter.KEYWORD_ONLY if arg.kwarg_only else Parameter.POSITIONAL_OR_KEYWORD
79
+ # "from" is a keyword therefore it must be a POSITIONAL_ONLY argument
80
+ if name == "from":
81
+ assert kind == Parameter.POSITIONAL_OR_KEYWORD
82
+ # The ParameterKind type is an internal implementation detail of the inspect
+ # package, which makes it hard to annotate here
84
+ kind = Parameter.POSITIONAL_ONLY # type: ignore[assignment]
85
+ # This makes all previous arguments positional-only
86
+ for idx, p in enumerate(parameters):
87
+ assert p.kind == Parameter.POSITIONAL_OR_KEYWORD
88
+ parameters[idx] = Parameter(name=p.name, kind=Parameter.POSITIONAL_ONLY, default=p.default, annotation=p.annotation)
89
+ parameters.append(Parameter(name=name, kind=kind, default=default, annotation=arg_type))
90
+ return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
91
+ if len(return_types) == 0:
92
+ return_type = None
93
+ elif len(return_types) == 1:
94
+ return_type = return_types[0]
95
+ else:
96
+ return_type = tuple(return_types)
97
+
98
+ return inspect.Signature(parameters, return_annotation=return_type)
99
+
100
+ _SCHEMA_TO_SIGNATURE_CACHE : Dict[Tuple[str, str], inspect.Signature] = {}
101
+
102
+ def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
103
+ # Cached as it's called in the hot path of FakeTensor dispatch
104
+ cache_key = ts_schema.name, ts_schema.overload_name
105
+ cache_val = _SCHEMA_TO_SIGNATURE_CACHE.get(cache_key)
106
+ if cache_val is not None:
107
+ return cache_val
108
+
109
+ res = _torchscript_schema_to_signature_impl(ts_schema)
110
+ _SCHEMA_TO_SIGNATURE_CACHE[cache_key] = res
111
+ return res
112
+
113
+ @compatibility(is_backward_compatible=False)
114
+ def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
115
+ signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)
116
+
117
+ if signatures and schemas:
118
+ matched_schemas = []
119
+
120
+ # Iterate through all of the schema until we find one that matches
121
+ # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
122
+ # values. If none matches, `new_args_and_kwargs` will be None
123
+ for candidate_signature, schema in zip(signatures, schemas):
124
+ try:
125
+ candidate_signature.bind(*args, **kwargs)
126
+ matched_schemas.append((candidate_signature, schema))
127
+ except TypeError as e:
128
+ continue
129
+
130
+ def throw_if_mutable(schema):
131
+ if schema.is_mutable:
132
+ raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional '
133
+ f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
134
+ f'are not supported')
135
+
136
+ if len(matched_schemas) == 0:
137
+ # Did not match any schema. Cannot check for mutation
138
+ pass
139
+ elif len(matched_schemas) == 1:
140
+ # Matched exactly one schema, unambiguous
141
+ _, schema_to_check = matched_schemas[0]
142
+ throw_if_mutable(schema_to_check)
143
+ pass
144
+ else:
145
+ # Ambiguous schema match. Since mutability checking is best effort,
146
+ # do nothing.
147
+ pass
148
+
149
+ @compatibility(is_backward_compatible=False)
150
+ def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
151
+ """
152
+ Given an operator on the `torch` namespace, return a list of `inspect.Signature`
153
+ objects corresponding to the overloads of that op. May return `None` if a signature
154
+ could not be retrieved.
155
+
156
+ Args:
157
+ op (Callable): An operator on the `torch` namespace to look up a signature for
158
+
159
+ Returns:
160
+ Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
161
+ operator, or None if the operator signatures could not be retrieved. If
162
+ return_schemas=True, returns a tuple containing the optional Python signatures
163
+ and the optional TorchScript Function signature
164
+ """
165
+ if isinstance(op, OpOverload):
166
+ schemas = [op._schema]
167
+ elif isinstance(op, OpOverloadPacket):
168
+ schemas = [getattr(op, overload)._schema for overload in op.overloads()]
169
+ else:
170
+ override = _manual_overrides.get(op)
171
+ if override:
172
+ return (override, None) if return_schemas else None
173
+
174
+ aten_fn = torch.jit._builtins._find_builtin(op)
175
+
176
+ if aten_fn is None:
177
+ return (None, None) if return_schemas else None
178
+ schemas = torch._C._jit_get_schemas_for_operator(aten_fn)
179
+
180
+ signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]
181
+ return (signatures, schemas) if return_schemas else signatures
182
+
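A brief sketch of looking up the overload signatures for an op; the exact parameter lists printed depend on the installed torch version:

import torch
from torch.fx.operator_schemas import get_signature_for_torch_op

signatures = get_signature_for_torch_op(torch.add)
for sig in signatures or []:
    print(sig)   # one inspect.Signature per overload of aten::add

# With return_schemas=True, the raw TorchScript schemas come back as well.
signatures, schemas = get_signature_for_torch_op(torch.add, return_schemas=True)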
183
+ @compatibility(is_backward_compatible=False)
184
+ def create_type_hint(x):
185
+ try:
186
+ if isinstance(x, (list, tuple)):
187
+ # todo(chilli): Figure out the right way for mypy to handle this
188
+ if isinstance(x, list):
189
+ def ret_type(x):
190
+ return List[x] # type: ignore[valid-type]
191
+ else:
192
+ def ret_type(x):
193
+ return Tuple[x, ...]
194
+ if len(x) == 0:
195
+ return ret_type(Any)
196
+ base_type = x[0]
197
+ for t in x:
198
+ if issubclass(t, base_type):
199
+ continue
200
+ elif issubclass(base_type, t):
201
+ base_type = t
202
+ else:
203
+ return ret_type(Any)
204
+ return ret_type(base_type)
205
+ except Exception as e:
206
+ # We tried to create a type hint for list but failed.
207
+ warnings.warn(f"We were not able to successfully create type hint from the type {x}")
208
+ pass
209
+ return x
210
+
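For illustration, create_type_hint expects a list or tuple of element types and widens them to a common container hint; anything it cannot handle is passed through unchanged:

from torch.fx.operator_schemas import create_type_hint

print(create_type_hint([int, int]))    # typing.List[int]
print(create_type_hint([int, bool]))   # typing.List[int]  (bool subclasses int)
print(create_type_hint((int, str)))    # typing.Tuple[typing.Any, ...]  (no common base)
print(create_type_hint(5))             # 5  (non-container input is returned as-is)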
211
+ @compatibility(is_backward_compatible=False)
212
+ def type_matches(signature_type : Any, argument_type : Any):
213
+ sig_origin_type = getattr(signature_type, '__origin__', signature_type)
214
+
215
+ if signature_type is argument_type:
216
+ return True
217
+
218
+ # Union types in signature. Given type needs to match one of the
219
+ # contained types in the Union
220
+ if sig_origin_type is typing.Union and signature_type != argument_type:
221
+ sig_contained = signature_type.__args__
222
+ return any(type_matches(c, argument_type) for c in sig_contained)
223
+
224
+ if signature_type is List[int] and argument_type is int:
225
+ # int can be promoted to List[int]
226
+ return True
227
+
228
+ if getattr(signature_type, '__origin__', None) in {list, List}:
229
+ sig_el_type = signature_type.__args__[0]
230
+ if not inspect.isclass(sig_el_type):
231
+ warnings.warn(
232
+ f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
233
+ return False
234
+ if getattr(argument_type, '__origin__', None) in {list, List}:
235
+ return issubclass(argument_type.__args__[0], sig_el_type)
236
+
237
+ def is_homogeneous_tuple(t):
238
+ if getattr(t, "__origin__", None) not in {tuple, Tuple}:
239
+ return False
240
+ contained = t.__args__
241
+ if t.__args__ == ((),): # Tuple[()].__args__ == ((),) for some reason
242
+ return True
243
+ return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)
244
+
245
+ # Tuple[T] is accepted for List[T] parameters
246
+ return is_homogeneous_tuple(argument_type)
247
+
248
+ # Dtype is an int in schemas
249
+ if signature_type is int and argument_type is torch.dtype:
250
+ return True
251
+
252
+ if signature_type is numbers.Number and argument_type in {int, float}:
253
+ return True
254
+ if inspect.isclass(argument_type) and inspect.isclass(signature_type):
255
+ return issubclass(argument_type, signature_type)
256
+
257
+ return False
258
+
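A few illustrative checks against the matching rules implemented above:

import torch
from typing import List, Union
from torch.fx.operator_schemas import type_matches

print(type_matches(torch.Tensor, torch.Tensor))      # True  (exact match)
print(type_matches(Union[int, torch.Tensor], int))   # True  (matches a Union member)
print(type_matches(List[int], int))                  # True  (int promotes to List[int])
print(type_matches(int, torch.dtype))                # True  (dtypes are ints in schemas)
print(type_matches(torch.Tensor, int))               # False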
259
+ @compatibility(is_backward_compatible=False)
260
+ def normalize_function(
261
+ target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
262
+ kwarg_types : Optional[Dict[str, Any]] = None,
263
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
264
+ """
265
+ Returns normalized arguments to PyTorch functions. This means that
+ `args/kwargs` will be matched up to the functional's
+ signature and returned exclusively as kwargs, in positional order, if
+ `normalize_to_only_use_kwargs` is True.
269
+ Also populates default values. Does not support positional-only
270
+ parameters or varargs parameters (*args, **kwargs). Does not support modules.
271
+
272
+ May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
273
+
274
+ Args:
275
+ target (Callable): Function that we are normalizing
276
+ args (Tuple[Any]): Tuple of args to the function
277
+ kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
278
+ arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
279
+ kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
280
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
281
+
282
+ Returns:
283
+
284
+ Returns normalized_args_and_kwargs, or `None` if not successful.
285
+ """
286
+ if kwargs is None:
287
+ kwargs = {}
288
+ new_args_and_kwargs = None
289
+ if not isinstance(target, types.BuiltinFunctionType) and not (
290
+ isinstance(target, (OpOverloadPacket, OpOverload))
291
+ ):
292
+ target_for_analysis = target
293
+ if target in boolean_dispatched:
294
+ # HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
295
+ # a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
296
+ # branches of the dispatch have exactly the same signature. If they do, use the `true`
297
+ # branch signature for analysis. Otherwise, leave this un-normalized
298
+ assert not isinstance(target, str)
299
+ dispatched = boolean_dispatched[target]
300
+ if_true, if_false = dispatched['if_true'], dispatched['if_false']
301
+ if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
302
+ return None
303
+ target_for_analysis = if_true
304
+
305
+ assert callable(target_for_analysis)
306
+ sig = inspect.signature(inspect.unwrap(target_for_analysis))
307
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
308
+ else:
309
+ assert callable(target)
310
+ torch_op_schemas = get_signature_for_torch_op(target)
311
+ matched_schemas = []
312
+ if torch_op_schemas:
313
+ # Iterate through all of the schema until we find one that matches
314
+ # If one matches, populate `new_args_and_kwargs` with the new args/kwargs
315
+ # values. If none matches, `new_args_and_kwargs` will be None
316
+ for candidate_signature in torch_op_schemas:
317
+ try:
318
+ candidate_signature.bind(*args, **kwargs)
319
+ matched_schemas.append(candidate_signature)
320
+ except TypeError as e:
321
+ continue
322
+
323
+ if len(matched_schemas) == 0:
324
+ # Did not match any schema. Cannot normalize
325
+ pass
326
+ elif len(matched_schemas) == 1:
327
+ # Matched exactly one schema, unambiguous
328
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs,
329
+ normalize_to_only_use_kwargs)
330
+ else:
331
+ if arg_types is not None or kwarg_types is not None:
332
+ arg_types = arg_types if arg_types else cast(Tuple[Any], ())
333
+ kwarg_types = kwarg_types if kwarg_types else {}
334
+ for candidate_signature in torch_op_schemas:
335
+ sig_matches = True
336
+ try:
337
+ bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
338
+ for arg_name, arg_type in bound_types.arguments.items():
339
+ param = candidate_signature.parameters[arg_name]
340
+ sig_matches = sig_matches and type_matches(param.annotation, arg_type)
341
+ except TypeError as e:
342
+ sig_matches = False
343
+ if sig_matches:
344
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs,
345
+ normalize_to_only_use_kwargs)
346
+ break
347
+ else:
348
+ # Matched more than one schema. In this situation, the caller must provide the types of
349
+ # the arguments of the overload they expect.
350
+ schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
351
+ raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
352
+ f'the schema match was ambiguous! Please provide argument types to '
353
+ f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')
354
+
355
+ return new_args_and_kwargs
356
+
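A sketch of the two situations handled above: an op with a single matching schema needs no types, while an op where several schemas bind needs arg_types to select the overload. The commented outputs are indicative, not guaranteed across versions:

import torch
from torch.fx.operator_schemas import normalize_function

# Single matching schema: no types needed.
pair = normalize_function(torch.relu, (torch.randn(3),),
                          normalize_to_only_use_kwargs=True)
print(pair.kwargs.keys())    # dict_keys(['input'])

# Multiple schemas bind; arg_types disambiguates the overload.
pair = normalize_function(torch.add, (torch.randn(3), 1.0),
                          arg_types=(torch.Tensor, float),
                          normalize_to_only_use_kwargs=True)
print(pair.kwargs.keys())    # e.g. dict_keys(['input', 'other', 'alpha'])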
357
+ @compatibility(is_backward_compatible=False)
358
+ def normalize_module(
359
+ root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
360
+ normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
361
+ """
362
+ Returns normalized arguments to PyTorch modules. This means that
+ `args/kwargs` will be matched up to the module's forward()
+ signature and returned exclusively as kwargs, in positional order, if
+ `normalize_to_only_use_kwargs` is True.
366
+ Also populates default values. Does not support positional-only
367
+ parameters or varargs parameters (*args, **kwargs).
368
+
369
+ Args:
370
+ root (nn.Module): root module upon which we query modules
371
+ target (str): Qualified name of the submodule whose call we are normalizing
372
+ args (Tuple[Any]): Tuple of args to the function
373
+ kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
374
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
375
+
376
+ Returns:
377
+
378
+ Returns normalized_args_and_kwargs, or `None` if not successful.
379
+ """
380
+ try:
381
+ submod = root.get_submodule(target)
382
+ except AttributeError as e:
383
+ raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
384
+ f"have that target!") from e
385
+ if hasattr(submod.__class__, '__name__'):
386
+ classname = submod.__class__.__name__
387
+ if getattr(torch.nn, classname, None) == submod.__class__:
388
+ sig = inspect.signature(inspect.unwrap(submod.forward))
389
+ if kwargs is None:
390
+ kwargs = {}
391
+ new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs,
392
+ normalize_to_only_use_kwargs)
393
+ return new_args_and_kwargs
394
+ return None
395
+
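A small sketch (the module layout is invented) of normalizing a submodule call by its qualified name:

import torch
from torch.fx.operator_schemas import normalize_module

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, 3)

    def forward(self, x):
        return self.conv(x)

pair = normalize_module(M(), 'conv', (torch.randn(1, 3, 8, 8),),
                        normalize_to_only_use_kwargs=True)
if pair is not None:
    print(pair.kwargs.keys())   # dict_keys(['input'])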
396
+ def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
397
+ kwargs : Dict[str, Any],
398
+ normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
399
+ """
400
+ Given a call target, args, and kwargs, return the arguments normalized into
401
+ an ArgsKwargsPair, or None if the type signature is not supported by
402
+ this normalization.
403
+
404
+ Args:
405
+
406
+ sig (inspect.Signature): Signature object for the target
407
+ args (Tuple): Arguments that appear at the callsite for `target`
408
+ kwargs (Dict): Keyword arguments that appear at the callsite for `target`
409
+ normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
410
+
411
+ Returns:
412
+
413
+ Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
414
+ this target is not supported.
415
+ """
416
+
417
+ # Don't currently support positional-only
418
+ # or varargs (*args, **kwargs) signatures
419
+ supported_parameter_types = {
420
+ inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
421
+ if any(p.kind not in supported_parameter_types for p in sig.parameters.values()):
422
+ # Add an exception for one signature, which is common for random/uniform, i.e.:
423
+ # Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None
424
+ # `from` is Python keyword and as such functions with that signature should have
425
+ # positional-only args, but at the same time they could be dispatched as kwargs
426
+ if list(sig.parameters.keys()) != ['input', 'from', 'to', 'generator']:
427
+ return None
428
+
429
+ bound_args = sig.bind(*args, **kwargs)
430
+ bound_args.apply_defaults()
431
+
432
+ new_kwargs : Dict[str, Any] = {}
433
+ new_args : List[Any] = []
434
+ for i, param in enumerate(sig.parameters):
435
+ if not normalize_to_only_use_kwargs and i < len(args):
436
+ new_args.append(bound_args.arguments[param])
437
+ else:
438
+ new_kwargs[param] = bound_args.arguments[param]
439
+
440
+ return ArgsKwargsPair(tuple(new_args), new_kwargs)
env-llmeval/lib/python3.10/site-packages/torch/fx/passes/graph_drawer.py ADDED
@@ -0,0 +1,418 @@
1
+
2
+ import hashlib
3
+ import torch
4
+ import torch.fx
5
+ from typing import Any, Dict, Optional, TYPE_CHECKING
6
+ from torch.fx.node import _get_qualified_name, _format_arg
7
+ from torch.fx.graph import _parse_stack_trace
8
+ from torch.fx.passes.shape_prop import TensorMetadata
9
+ from torch.fx._compatibility import compatibility
10
+ from itertools import chain
11
+
12
+ __all__ = ['FxGraphDrawer']
13
+ try:
14
+ import pydot
15
+ HAS_PYDOT = True
16
+ except ImportError:
17
+ HAS_PYDOT = False
18
+
19
+ _COLOR_MAP = {
20
+ "placeholder": '"AliceBlue"',
21
+ "call_module": "LemonChiffon1",
22
+ "get_param": "Yellow2",
23
+ "get_attr": "LightGrey",
24
+ "output": "PowderBlue",
25
+ }
26
+
27
+ _HASH_COLOR_MAP = [
28
+ "CadetBlue1",
29
+ "Coral",
30
+ "DarkOliveGreen1",
31
+ "DarkSeaGreen1",
32
+ "GhostWhite",
33
+ "Khaki1",
34
+ "LavenderBlush1",
35
+ "LightSkyBlue",
36
+ "MistyRose1",
37
+ "MistyRose2",
38
+ "PaleTurquoise2",
39
+ "PeachPuff1",
40
+ "Salmon",
41
+ "Thistle1",
42
+ "Thistle3",
43
+ "Wheat1",
44
+ ]
45
+
46
+ _WEIGHT_TEMPLATE = {
47
+ "fillcolor": "Salmon",
48
+ "style": '"filled,rounded"',
49
+ "fontcolor": "#000000",
50
+ }
51
+
52
+ if HAS_PYDOT:
53
+ @compatibility(is_backward_compatible=False)
54
+ class FxGraphDrawer:
55
+ """
56
+ Visualize a torch.fx.Graph with graphviz
57
+ Basic usage:
58
+ g = FxGraphDrawer(symbolic_traced, "resnet18")
59
+ g.get_dot_graph().write_svg("a.svg")
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ graph_module: torch.fx.GraphModule,
65
+ name: str,
66
+ ignore_getattr: bool = False,
67
+ ignore_parameters_and_buffers: bool = False,
68
+ skip_node_names_in_args: bool = True,
69
+ parse_stack_trace: bool = False,
70
+ dot_graph_shape: Optional[str] = None,
71
+ ):
72
+ self._name = name
73
+ self.dot_graph_shape = (
74
+ dot_graph_shape if dot_graph_shape is not None else "record"
75
+ )
76
+ _WEIGHT_TEMPLATE["shape"] = self.dot_graph_shape
77
+
78
+ self._dot_graphs = {
79
+ name: self._to_dot(
80
+ graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args, parse_stack_trace
81
+ )
82
+ }
83
+
84
+ for node in graph_module.graph.nodes:
85
+ if node.op != "call_module":
86
+ continue
87
+
88
+ leaf_node = self._get_leaf_node(graph_module, node)
89
+
90
+ if not isinstance(leaf_node, torch.fx.GraphModule):
91
+ continue
92
+
93
+
94
+ self._dot_graphs[f"{name}_{node.target}"] = self._to_dot(
95
+ leaf_node,
96
+ f"{name}_{node.target}",
97
+ ignore_getattr,
98
+ ignore_parameters_and_buffers,
99
+ skip_node_names_in_args,
100
+ parse_stack_trace,
101
+ )
102
+
103
+ def get_dot_graph(self, submod_name=None) -> pydot.Dot:
104
+ """
105
+ Visualize a torch.fx.Graph with graphviz
106
+ Example:
107
+ >>> # xdoctest: +REQUIRES(module:pydot)
108
+ >>> # define module
109
+ >>> class MyModule(torch.nn.Module):
110
+ >>> def __init__(self):
111
+ >>> super().__init__()
112
+ >>> self.linear = torch.nn.Linear(4, 5)
113
+ >>> def forward(self, x):
114
+ >>> return self.linear(x).clamp(min=0.0, max=1.0)
115
+ >>> module = MyModule()
116
+ >>> # trace the module
117
+ >>> symbolic_traced = torch.fx.symbolic_trace(module)
118
+ >>> # setup output file
119
+ >>> import ubelt as ub
120
+ >>> dpath = ub.Path.appdir('torch/tests/FxGraphDrawer').ensuredir()
121
+ >>> fpath = dpath / 'linear.svg'
122
+ >>> # draw the graph
123
+ >>> g = FxGraphDrawer(symbolic_traced, "linear")
124
+ >>> g.get_dot_graph().write_svg(fpath)
125
+ """
126
+ if submod_name is None:
127
+ return self.get_main_dot_graph()
128
+ else:
129
+ return self.get_submod_dot_graph(submod_name)
130
+
131
+ def get_main_dot_graph(self) -> pydot.Dot:
132
+ return self._dot_graphs[self._name]
133
+
134
+ def get_submod_dot_graph(self, submod_name) -> pydot.Dot:
135
+ return self._dot_graphs[f"{self._name}_{submod_name}"]
136
+
137
+ def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]:
138
+ return self._dot_graphs
139
+
140
+ def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]:
141
+
142
+ template = {
143
+ "shape": self.dot_graph_shape,
144
+ "fillcolor": "#CAFFE3",
145
+ "style": '"filled,rounded"',
146
+ "fontcolor": "#000000",
147
+ }
148
+ if node.op in _COLOR_MAP:
149
+ template["fillcolor"] = _COLOR_MAP[node.op]
150
+ else:
151
+ # Use a pseudo-random color for each node, based on its name so it's stable.
152
+ target_name = node._pretty_print_target(node.target)
153
+ target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16)
154
+ template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)]
155
+ return template
156
+
157
+ def _get_leaf_node(
158
+ self, module: torch.nn.Module, node: torch.fx.Node
159
+ ) -> torch.nn.Module:
160
+ py_obj = module
161
+ assert isinstance(node.target, str)
162
+ atoms = node.target.split(".")
163
+ for atom in atoms:
164
+ if not hasattr(py_obj, atom):
165
+ raise RuntimeError(
166
+ str(py_obj) + " does not have attribute " + atom + "!"
167
+ )
168
+ py_obj = getattr(py_obj, atom)
169
+ return py_obj
170
+
171
+ def _typename(self, target: Any) -> str:
172
+ if isinstance(target, torch.nn.Module):
173
+ ret = torch.typename(target)
174
+ elif isinstance(target, str):
175
+ ret = target
176
+ else:
177
+ ret = _get_qualified_name(target)
178
+
179
+ # Escape "{" and "}" to prevent dot files like:
180
+ # https://gist.github.com/SungMinCho/1a017aab662c75d805c5954d62c5aabc
181
+ # which triggers `Error: bad label format (...)` from dot
182
+ return ret.replace("{", r"\{").replace("}", r"\}")
183
+
184
+ # shorten path to avoid drawing long boxes
185
+ # for full path = '/home/weif/pytorch/test.py'
186
+ # return short path = 'pytorch/test.py'
187
+ def _shorten_file_name(
188
+ self,
189
+ full_file_name: str,
190
+ truncate_to_last_n: int = 2,
191
+ ):
192
+ splits = full_file_name.split('/')
193
+ if len(splits) >= truncate_to_last_n:
194
+ return '/'.join(splits[-truncate_to_last_n:])
195
+ return full_file_name
196
+
197
+
198
+ def _get_node_label(
199
+ self,
200
+ module: torch.fx.GraphModule,
201
+ node: torch.fx.Node,
202
+ skip_node_names_in_args: bool,
203
+ parse_stack_trace: bool,
204
+ ) -> str:
205
+ def _get_str_for_args_kwargs(arg):
206
+ if isinstance(arg, tuple):
207
+ prefix, suffix = r"|args=(\l", r",\n)\l"
208
+ arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg]
209
+ elif isinstance(arg, dict):
210
+ prefix, suffix = r"|kwargs={\l", r",\n}\l"
211
+ arg_strs_list = [
212
+ f"{k}: {_format_arg(v, max_list_len=8)}"
213
+ for k, v in arg.items()
214
+ ]
215
+ else: # Fall back to nothing in unexpected case.
216
+ return ""
217
+
218
+ # Strip out node names if requested.
219
+ if skip_node_names_in_args:
220
+ arg_strs_list = [a for a in arg_strs_list if "%" not in a]
221
+ if len(arg_strs_list) == 0:
222
+ return ""
223
+ arg_strs = prefix + r",\n".join(arg_strs_list) + suffix
224
+ if len(arg_strs_list) == 1:
225
+ arg_strs = arg_strs.replace(r"\l", "").replace(r"\n", "")
226
+ return arg_strs.replace("{", r"\{").replace("}", r"\}")
227
+
228
+
229
+ label = "{" + f"name=%{node.name}|op_code={node.op}\n"
230
+
231
+ if node.op == "call_module":
232
+ leaf_module = self._get_leaf_node(module, node)
233
+ label += r"\n" + self._typename(leaf_module) + r"\n|"
234
+ extra = ""
235
+ if hasattr(leaf_module, "__constants__"):
236
+ extra = r"\n".join(
237
+ [f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr]
238
+ )
239
+ label += extra + r"\n"
240
+ else:
241
+ label += f"|target={self._typename(node.target)}" + r"\n"
242
+ if len(node.args) > 0:
243
+ label += _get_str_for_args_kwargs(node.args)
244
+ if len(node.kwargs) > 0:
245
+ label += _get_str_for_args_kwargs(node.kwargs)
246
+ label += f"|num_users={len(node.users)}" + r"\n"
247
+
248
+ tensor_meta = node.meta.get('tensor_meta')
249
+ label += self._tensor_meta_to_label(tensor_meta)
250
+
251
+ # for original fx graph
252
+ # print buf=buf0, n_origin=6
253
+ buf_meta = node.meta.get('buf_meta', None)
254
+ if buf_meta is not None:
255
+ label += f"|buf={buf_meta.name}" + r"\n"
256
+ label += f"|n_origin={buf_meta.n_origin}" + r"\n"
257
+
258
+ # for original fx graph
259
+ # print file:lineno code
260
+ if parse_stack_trace and node.stack_trace is not None:
261
+ parsed_stack_trace = _parse_stack_trace(node.stack_trace)
262
+ fname = self._shorten_file_name(parsed_stack_trace.file)
263
+ label += f"|file={fname}:{parsed_stack_trace.lineno} {parsed_stack_trace.code}" + r"\n"
264
+
265
+
266
+ return label + "}"
267
+
268
+ def _tensor_meta_to_label(self, tm) -> str:
269
+ if tm is None:
270
+ return ""
271
+ elif isinstance(tm, TensorMetadata):
272
+ return self._stringify_tensor_meta(tm)
273
+ elif isinstance(tm, list):
274
+ result = ""
275
+ for item in tm:
276
+ result += self._tensor_meta_to_label(item)
277
+ return result
278
+ elif isinstance(tm, dict):
279
+ result = ""
280
+ for v in tm.values():
281
+ result += self._tensor_meta_to_label(v)
282
+ return result
283
+ elif isinstance(tm, tuple):
284
+ result = ""
285
+ for item in tm:
286
+ result += self._tensor_meta_to_label(item)
287
+ return result
288
+ else:
289
+ raise RuntimeError(f"Unsupported tensor meta type {type(tm)}")
290
+
291
+ def _stringify_tensor_meta(self, tm: TensorMetadata) -> str:
292
+ result = ""
293
+ if not hasattr(tm, "dtype"):
294
+ print("tm", tm)
295
+ result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n"
296
+ result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n"
297
+ result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n"
298
+ result += "|" + "stride" + "=" + str(tm.stride) + r"\n"
299
+ if tm.is_quantized:
300
+ assert tm.qparams is not None
301
+ assert "qscheme" in tm.qparams
302
+ qscheme = tm.qparams["qscheme"]
303
+ if qscheme in {
304
+ torch.per_tensor_affine,
305
+ torch.per_tensor_symmetric,
306
+ }:
307
+ result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
308
+ result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
309
+ elif qscheme in {
310
+ torch.per_channel_affine,
311
+ torch.per_channel_symmetric,
312
+ torch.per_channel_affine_float_qparams,
313
+ }:
314
+ result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
315
+ result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
316
+ result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n"
317
+ else:
318
+ raise RuntimeError(f"Unsupported qscheme: {qscheme}")
319
+ result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n"
320
+ return result
321
+
322
+ def _get_tensor_label(self, t: torch.Tensor) -> str:
323
+ return str(t.dtype) + str(list(t.shape)) + r"\n"
324
+
325
+ # when parse_stack_trace=True
326
+ # print file:lineno code
327
+ def _to_dot(
328
+ self,
329
+ graph_module: torch.fx.GraphModule,
330
+ name: str,
331
+ ignore_getattr: bool,
332
+ ignore_parameters_and_buffers: bool,
333
+ skip_node_names_in_args: bool,
334
+ parse_stack_trace: bool,
335
+ ) -> pydot.Dot:
336
+ """
337
+ Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph.
338
+ If ignore_parameters_and_buffers is True, the parameters and buffers
339
+ created with the module will not be added as nodes and edges.
340
+ """
341
+
342
+ # "TB" means top-to-bottom rank direction in layout
343
+ dot_graph = pydot.Dot(name, rankdir="TB")
344
+
345
+
346
+ buf_name_to_subgraph = {}
347
+
348
+ for node in graph_module.graph.nodes:
349
+ if ignore_getattr and node.op == "get_attr":
350
+ continue
351
+
352
+ style = self._get_node_style(node)
353
+ dot_node = pydot.Node(
354
+ node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args, parse_stack_trace), **style
355
+ )
356
+
357
+ current_graph = dot_graph
358
+
359
+ buf_meta = node.meta.get('buf_meta', None)
360
+ if buf_meta is not None and buf_meta.n_origin > 1:
361
+ buf_name = buf_meta.name
362
+ if buf_name not in buf_name_to_subgraph:
363
+ buf_name_to_subgraph[buf_name] = pydot.Cluster(buf_name, label=buf_name)
364
+ current_graph = buf_name_to_subgraph.get(buf_name)
365
+
366
+ current_graph.add_node(dot_node)
367
+
368
+ def get_module_params_or_buffers():
369
+ for pname, ptensor in chain(
370
+ leaf_module.named_parameters(), leaf_module.named_buffers()
371
+ ):
372
+ pname1 = node.name + "." + pname
373
+ label1 = (
374
+ pname1 + "|op_code=get_" + "parameter"
375
+ if isinstance(ptensor, torch.nn.Parameter)
376
+ else "buffer" + r"\l"
377
+ )
378
+ dot_w_node = pydot.Node(
379
+ pname1,
380
+ label="{" + label1 + self._get_tensor_label(ptensor) + "}",
381
+ **_WEIGHT_TEMPLATE,
382
+ )
383
+ dot_graph.add_node(dot_w_node)
384
+ dot_graph.add_edge(pydot.Edge(pname1, node.name))
385
+
386
+ if node.op == "call_module":
387
+ leaf_module = self._get_leaf_node(graph_module, node)
388
+
389
+ if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule):
390
+ get_module_params_or_buffers()
391
+
392
+ for subgraph in buf_name_to_subgraph.values():
393
+ subgraph.set('color', 'royalblue')
394
+ subgraph.set('penwidth', '2')
395
+ dot_graph.add_subgraph(subgraph)
396
+
397
+ for node in graph_module.graph.nodes:
398
+ if ignore_getattr and node.op == "get_attr":
399
+ continue
400
+
401
+ for user in node.users:
402
+ dot_graph.add_edge(pydot.Edge(node.name, user.name))
403
+
404
+ return dot_graph
405
+
406
+ else:
407
+ if not TYPE_CHECKING:
408
+ @compatibility(is_backward_compatible=False)
409
+ class FxGraphDrawer:
410
+ def __init__(
411
+ self,
412
+ graph_module: torch.fx.GraphModule,
413
+ name: str,
414
+ ignore_getattr: bool = False,
415
+ parse_stack_trace: bool = False,
416
+ ):
417
+ raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install '
418
+ 'pydot through your favorite Python package manager.')
env-llmeval/lib/python3.10/site-packages/torch/fx/proxy.py ADDED
@@ -0,0 +1,563 @@
1
+ import enum
2
+ import dis
3
+ import copy
4
+ import sys
5
+ import torch
6
+ import inspect
7
+ import operator
8
+ import traceback
9
+ import collections
10
+
11
+ from dataclasses import is_dataclass, fields
12
+
13
+
14
+ from .graph import magic_methods, reflectable_magic_methods, Graph
15
+ from typing import Tuple, Dict, OrderedDict, Optional, Any, Iterator, Callable
16
+ from .node import Target, Node, Argument, base_types, map_aggregate
17
+ from ._compatibility import compatibility
18
+ from .operator_schemas import check_for_mutable_operation
19
+ import torch.fx.traceback as fx_traceback
20
+
21
+ __all__ = ['TracerBase', 'GraphAppendingTracer', 'TraceError',
22
+ 'Proxy', 'Attribute', 'ParameterProxy', 'Scope',
23
+ 'ScopeContextManager']
24
+
25
+
26
+ @compatibility(is_backward_compatible=False)
27
+ class Scope:
28
+ """ Scope object that records the module path and the module type
29
+ of a module. Scope is used to track the information of the module
30
+ that contains a Node in a Graph of GraphModule. For example::
31
+
32
+ class Sub(torch.nn.Module):
33
+ def forward(self, x):
34
+ # This will be a call_method Node in GraphModule,
35
+ # scope for this would be (module_path="sub", module_type=Sub)
36
+ return x.transpose(1, 2)
37
+
38
+ class M(torch.nn.Module):
39
+ def __init__(self):
40
+ self.sub = Sub()
41
+
42
+ def forward(self, x):
43
+ # This will be a call_method Node as well,
44
+ # scope for this would be (module_path="", module_type=None)
45
+ x = x.transpose(1, 2)
46
+ x = self.sub(x)
47
+ return x
48
+
49
+ """
50
+
51
+ def __init__(self, module_path: str, module_type: Any):
52
+ super().__init__()
53
+ self.module_path = module_path
54
+ self.module_type = module_type
55
+
56
+
57
+ @compatibility(is_backward_compatible=False)
58
+ class ScopeContextManager:
59
+ """ A context manager to track the Scope of Node during symbolic tracing.
60
+ When entering a forward function of a Module, we'll update the scope information of
61
+ the current module, and when we exit, we'll restore the previous scope information.
62
+ """
63
+
64
+ def __init__(
65
+ self,
66
+ scope: Scope,
67
+ current_scope: Scope,
68
+ ):
69
+ super().__init__()
70
+ # Keep a copy of prev scope to restore on exit
71
+ self._prev_scope = copy.copy(scope)
72
+ # Update scope to current scope
73
+ scope.module_path = current_scope.module_path
74
+ scope.module_type = current_scope.module_type
75
+ # Save a reference so we can restore it
76
+ self._scope = scope
77
+
78
+ def __enter__(self):
79
+ return self._scope
80
+
81
+ def __exit__(self, *args):
82
+ self._scope.module_path = self._prev_scope.module_path
83
+ self._scope.module_type = self._prev_scope.module_type
84
+ return
85
+
86
+
87
+ _COPY_META_FIELDS = ["nn_module_stack", "source_fn_stack", "original_aten", "recompute", "from_node", "quantization_tag"]
88
+
89
+
90
+ @compatibility(is_backward_compatible=True)
91
+ class TracerBase:
92
+ graph: Graph
93
+ record_stack_traces : bool = False
94
+ # Feature flag for mutable schema checking
95
+ # Enableby default in 1.12
96
+ check_mutable_operations : bool = False
97
+ # Feature flag for assert tracing
98
+ trace_asserts : bool = False
99
+ # Feature flag for proxying accesses to buffer values
100
+ proxy_buffer_attributes : bool = False
101
+
102
+ # Name of the function to be traced. It will only be used when
103
+ # ``root`` is an instance of ``nn.Module``
104
+ traced_func_name: str = "forward"
105
+
106
+ # Maps the containing module's name to the operator name
107
+ scope : Scope
108
+
109
+ # Records the module call stack
110
+ module_stack: OrderedDict[str, Tuple[str, Any]]
111
+
112
+ # Mapping of node name to module scope
113
+ node_name_to_scope: Dict[str, Tuple[str, type]]
114
+
115
+ @compatibility(is_backward_compatible=True)
116
+ def create_node(self, kind : str, target : Target,
117
+ args : Tuple[Argument, ...], kwargs : Dict[str, Argument], name : Optional[str] = None,
118
+ type_expr : Optional[Any] = None) -> Node:
119
+ """
120
+ Inserts a graph node given target, args, kwargs, and name.
121
+
122
+ This method can be overridden to do extra checking, validation, or
123
+ modification of values used in node creation. For example, one might
124
+ want to disallow in-place operations from being recorded.
125
+ """
126
+ if kind == 'call_function' and self.check_mutable_operations:
127
+ check_for_mutable_operation(target, args, kwargs)
128
+
129
+ node = self.graph.create_node(kind, target, args, kwargs, name, type_expr)
130
+ # TODO node_name_to_scope will be depreciated in favor of
131
+ # node.meta['nn_module_stack']
132
+ self.node_name_to_scope[node.name] = (
133
+ self.scope.module_path,
134
+ self.scope.module_type,
135
+ )
136
+ # Optionally set stack trace on the created Node for debugging purposes
137
+ if fx_traceback.has_preserved_node_meta():
138
+ current_meta: Dict[str, Any] = fx_traceback.get_current_meta()
139
+
140
+ stack_trace = current_meta.get("stack_trace")
141
+ if stack_trace:
142
+ node.stack_trace = stack_trace
143
+ # Explicitly set the stack_trace, nn_module_stack and source_fn on the node.meta
144
+ # If other meta fields are needed, they can be added here
145
+ for field in _COPY_META_FIELDS:
146
+ if field in current_meta:
147
+ node.meta[field] = copy.copy(current_meta[field])
148
+
149
+ # Here we decrement to account for the sequence_nr having
150
+ # just been incremented while tracing this lowered aten op.
151
+ new_seq_nr = torch.autograd._get_sequence_nr() - 1
152
+ # The sequence_nr increments every time a new autograd Node
153
+ # is created. During the FWD pass we store the sequence_nr
154
+ # corresponding to the last autograd Node created on this fx
155
+ # node's meta. A single aten op can create multiple autograd
156
+ # nodes as is the case with in-place foreach ops. During the
157
+ # BWD pass we retrieve the sequence_nr stored on the current
158
+ # executing autograd Node. See NOTE [ Sequence Number ].
159
+ if current_meta.get("in_grad_fn", False):
160
+ new_seq_nr = current_meta["grad_fn_seq_nr"]
161
+ node.meta["seq_nr"] = new_seq_nr
162
+
163
+ elif self.module_stack:
164
+ node.meta['nn_module_stack'] = copy.copy(self.module_stack)
165
+ return node
166
+
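To make the override hook concrete, here is a hedged sketch of the "disallow in-place operations" idea mentioned in the docstring; NoInplaceTracer and its trailing-underscore heuristic are invented for this example:

import torch
import torch.fx

class NoInplaceTracer(torch.fx.Tracer):
    def create_node(self, kind, target, args, kwargs, name=None, type_expr=None):
        # Heuristic: in-place tensor methods conventionally end with '_'.
        if (kind == 'call_method' and isinstance(target, str)
                and target.endswith('_') and not target.endswith('__')):
            raise RuntimeError(f"in-place method '{target}' is not allowed in this trace")
        return super().create_node(kind, target, args, kwargs, name, type_expr)

class M(torch.nn.Module):
    def forward(self, x):
        return x.add_(1)

try:
    NoInplaceTracer().trace(M())
except RuntimeError as e:
    print(e)   # in-place method 'add_' is not allowed in this trace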
167
+ @compatibility(is_backward_compatible=True)
168
+ def proxy(self, node: Node) -> 'Proxy':
169
+ return Proxy(node, self)
170
+
171
+ @compatibility(is_backward_compatible=True)
172
+ def create_proxy(self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any],
173
+ name: Optional[str] = None, type_expr : Optional[Any] = None,
174
+ proxy_factory_fn: Callable[[Node], 'Proxy'] = None):
175
+ '''
176
+ Create a Node from the given arguments, then return the Node
177
+ wrapped in a Proxy object.
178
+
179
+ If kind = 'placeholder', then we're creating a Node that
180
+ represents the parameter of a function. If we need to encode
181
+ a default parameter, we use the ``args`` tuple. ``args`` is
182
+ otherwise empty for ``placeholder`` Nodes.
183
+ '''
184
+
185
+ args_ = self.create_arg(args)
186
+ kwargs_ = self.create_arg(kwargs)
187
+ assert isinstance(args_, tuple)
188
+ assert isinstance(kwargs_, dict)
189
+
190
+ node = self.create_node(kind, target, args_, kwargs_, name, type_expr)
191
+
192
+ if not proxy_factory_fn:
193
+ proxy = self.proxy(node)
194
+ else:
195
+ proxy = proxy_factory_fn(node)
196
+
197
+ if self.record_stack_traces and not proxy.node.stack_trace:
198
+ user_frame = self._find_user_frame()
199
+ if user_frame:
200
+ summary = traceback.extract_stack(user_frame)
201
+ tb_lines = summary.format()
202
+ # stack_trace would have innermost frame at the bottom
203
+ proxy.node.stack_trace = ''.join(tb_lines)
204
+
205
+ return proxy
206
+
207
+ def _find_user_frame(self):
208
+ """
209
+ Find the Python stack frame executing the user code during
210
+ symbolic tracing.
211
+ """
212
+ # We have to do a little dance here. Basically, walk up the callstack and
213
+ # record the first frame not in the pytorch source. This is the frame executing
214
+ # the user code during tracing.
215
+ frame = inspect.currentframe()
216
+
217
+ pt_files = ['torch/fx/proxy.py',
218
+ 'torch/fx/_symbolic_trace.py',
219
+ 'torch/fx/experimental/proxy_tensor.py',
220
+ 'torch/_ops.py',
221
+ 'torch/_tensor.py',
222
+ 'torch/utils/_python_dispatch.py',
223
+ 'torch/_prims_common/wrappers.py',
224
+ 'torch/_refs/__init__.py',
225
+ 'torch/_refs/nn/functional/__init__.py',
226
+ 'torch/utils/_stats.py',
227
+ ]
228
+ while frame:
229
+ frame = frame.f_back
230
+ if frame and all(not frame.f_code.co_filename.endswith(file) for file in pt_files):
231
+ break
232
+
233
+ if not frame:
234
+ return None
235
+
236
+ return frame
237
+
238
+ @compatibility(is_backward_compatible=True)
239
+ def create_arg(self, a: Any) -> Argument:
240
+ """
241
+ A method that lowers the objects seen as arguments during symbolic evaluation
242
+ into Argument types that can be stored in IR.
243
+
244
+ Can be override to support more trace-specific types.
245
+ """
246
+ if not isinstance(a, Proxy) and hasattr(a, '__fx_create_arg__'):
247
+ return a.__fx_create_arg__(self)
248
+ # aggregates
249
+ elif isinstance(a, tuple) and hasattr(a, '_fields'):
250
+ # NamedTuple constructors don't seem to like getting a generator
251
+ # expression as an argument to their constructor, so build this
252
+ # intermediate tuple and unpack it into the NamedTuple constructor
253
+ args = tuple(self.create_arg(elem) for elem in a)
254
+ return type(a)(*args) # type: ignore[arg-type]
255
+ elif isinstance(a, (tuple, list)):
256
+ return type(a)(self.create_arg(elem) for elem in a)
257
+ elif isinstance(a, dict):
258
+ r = {}
259
+ for k, v in a.items():
260
+ # Check for invalid dict keys. We do not want a Proxy to appear
261
+ # anywhere within the key. Since keys can be collection types,
262
+ # we iterate through the key with map_aggregate
263
+ k = self.create_arg(k)
264
+
265
+ def no_node(arg):
266
+ if isinstance(arg, Node):
267
+ raise RuntimeError("Keys for dictionaries used as an argument cannot contain a "
268
+ f"Node. Got key: {k}")
269
+ map_aggregate(k, no_node)
270
+
271
+ r[k] = self.create_arg(v)
272
+ return r
273
+ elif isinstance(a, slice):
274
+ return slice(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
275
+
276
+ elif isinstance(a, range):
277
+ return range(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
278
+
279
+ elif isinstance(a, torch._ops.OpOverload):
280
+ return a
281
+
282
+ if isinstance(a, Proxy):
283
+ # base case: we unwrap the Proxy object
284
+ return a.node
285
+
286
+ if is_dataclass(a):
287
+ kwargs = {field.name: self.create_arg(getattr(a, field.name)) for field in fields(a)}
288
+ return self.create_node("call_function", a.__class__, (), kwargs)
289
+
290
+ elif isinstance(a, (*base_types, enum.Enum)) or a is None or a is ...:
291
+ return a
292
+ raise NotImplementedError(f"argument of type: {type(a)}")
293
+
294
+ @compatibility(is_backward_compatible=True)
295
+ def to_bool(self, obj: 'Proxy') -> bool:
296
+ """Called when a proxy object is being converted to a boolean, such as
297
+ when used in control flow. Normally we don't know what to do because
298
+ we don't know the value of the proxy, but a custom tracer can attach more
299
+ information to the graph node using create_node and can choose to return a value.
300
+ """
301
+ raise TraceError('symbolically traced variables cannot be used as inputs to control flow')
302
+
303
+ @compatibility(is_backward_compatible=True)
304
+ def iter(self, obj: 'Proxy') -> Iterator:
305
+ """Called when a proxy object is being iterated over, such as
306
+ when used in control flow. Normally we don't know what to do because
307
+ we don't know the value of the proxy, but a custom tracer can attach more
308
+ information to the graph node using create_node and can choose to return an iterator.
309
+ """
310
+ raise TraceError('Proxy object cannot be iterated. This can be '
311
+ 'attempted when the Proxy is used in a loop or'
312
+ ' as a *args or **kwargs function argument. '
313
+ 'See the torch.fx docs on pytorch.org for a '
314
+ 'more detailed explanation of what types of '
315
+ 'control flow can be traced, and check out the'
316
+ ' Proxy docstring for help troubleshooting '
317
+ 'Proxy iteration errors')
318
+
319
+ @compatibility(is_backward_compatible=True)
320
+ def keys(self, obj: 'Proxy') -> Any:
321
+ """Called when a proxy object is has the keys() method called.
322
+ This is what happens when ** is called on a proxy. This should return an
323
+ iterator it ** is suppose to work in your custom tracer.
324
+ """
325
+ return Attribute(obj, 'keys')()
326
+
327
+
328
+ # used in Proxy object when just appending to the graph while not tracing.
329
+ @compatibility(is_backward_compatible=True)
330
+ class GraphAppendingTracer(TracerBase):
331
+ def __init__(self, graph: Graph):
332
+ super().__init__()
333
+ self.graph = graph
334
+ self.scope = Scope("", None)
335
+ self.module_stack = collections.OrderedDict()
336
+ self.node_name_to_scope = {}
337
+
338
+ @compatibility(is_backward_compatible=False)
339
+ def assert_fn(x):
340
+ assert x
341
+
342
+ @compatibility(is_backward_compatible=True)
343
+ class TraceError(ValueError):
344
+ pass
345
+
346
+ @compatibility(is_backward_compatible=True)
347
+ class Proxy:
348
+ """
349
+ ``Proxy`` objects are ``Node`` wrappers that flow through the
350
+ program during symbolic tracing and record all the operations
351
+ (``torch`` function calls, method calls, operators) that they touch
352
+ into the growing FX Graph.
353
+
354
+ If you're doing graph transforms, you can wrap your own ``Proxy``
355
+ method around a raw ``Node`` so that you can use the overloaded
356
+ operators to add additional things to a ``Graph``.
357
+
358
+ ``Proxy`` objects cannot be iterated. In other words, the symbolic
359
+ tracer will throw an error if a ``Proxy`` is used in a loop or as
360
+ an ``*args``/``**kwargs`` function argument.
361
+
362
+ There are two main ways around this:
363
+ 1. Factor out the untraceable logic into a top-level function and
364
+ use ``fx.wrap`` on it.
365
+ 2. If the control flow is static (i.e. the loop trip count is
366
+ based on some hyperparameter), the code can be kept in its original
367
+ position and refactored into something like::
368
+
369
+ for i in range(self.some_hyperparameter):
370
+ indexed_item = proxied_value[i]
371
+
372
+ For a more detailed description into the Proxy internals, check out
373
+ the "Proxy" section in `torch/fx/OVERVIEW.md`
374
+ """
375
+
376
+ @compatibility(is_backward_compatible=True)
377
+ def __init__(self, node: Node, tracer: 'Optional[TracerBase]' = None):
378
+ if tracer is None:
379
+ # This allows you to create a Proxy object around a raw Node
380
+ tracer = GraphAppendingTracer(node.graph)
381
+ self.tracer = tracer
382
+ self.node = node
383
+
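The "wrap a raw Node in a Proxy" pattern from the class docstring, as a minimal sketch that appends extra arithmetic to an already-traced graph; the module and the `* 2 + 1` rewrite are illustrative:

import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return torch.sin(x)

gm = torch.fx.symbolic_trace(M())
graph = gm.graph
output_node = next(n for n in graph.nodes if n.op == 'output')
old_result = output_node.args[0]

# Wrapping the Node in a Proxy (which creates a GraphAppendingTracer under the
# hood) lets the overloaded operators record new nodes into the same graph.
with graph.inserting_before(output_node):
    new_result = (torch.fx.Proxy(old_result) * 2 + 1).node

output_node.args = (new_result,)
gm.recompile()
print(gm.code)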
384
+ def __repr__(self) -> str:
385
+ return f'Proxy({self.node.name})'
386
+
387
+ def __getattr__(self, k) -> 'Attribute':
388
+ # note: not added to the graph yet, if this is a method call
389
+ # we peephole optimize to the method invocation
390
+ return Attribute(self, k)
391
+
392
+ def __call__(self, *args, **kwargs) -> 'Proxy':
393
+ return self.tracer.create_proxy('call_method', '__call__', (self,) + args, kwargs)
394
+
395
+ def __iter__(self) -> Iterator['Proxy']:
396
+ frame = inspect.currentframe()
397
+ assert frame is not None
398
+ calling_frame = frame.f_back
399
+ assert calling_frame is not None
400
+ inst_list = list(dis.get_instructions(calling_frame.f_code))
401
+ if sys.version_info >= (3, 11):
402
+ from bisect import bisect_left
403
+ inst_idx = bisect_left(inst_list, calling_frame.f_lasti, key=lambda x: x.offset)
404
+ else:
405
+ inst_idx = calling_frame.f_lasti // 2
406
+ inst = inst_list[inst_idx]
407
+ if inst.opname == 'UNPACK_SEQUENCE':
408
+ return (self[i] for i in range(inst.argval)) # type: ignore[index]
409
+
410
+ return self.tracer.iter(self)
411
+
412
+ def __abs__(self):
413
+ return self.tracer.create_proxy('call_function', operator.abs, (self,), {})
414
+
415
+ def __bool__(self) -> bool:
416
+ if self.tracer.trace_asserts:
417
+ # check if this boolean is used in an assertion, bytecode pattern for assertions
418
+ # is pretty stable for Python 3.7--3.9
419
+ frame = inspect.currentframe()
420
+ assert frame is not None
421
+ calling_frame = frame.f_back
422
+ assert calling_frame is not None
423
+ insts = list(dis.get_instructions(calling_frame.f_code))
424
+ if sys.version_info >= (3, 11):
425
+ from bisect import bisect_left
426
+ cur = bisect_left(insts, calling_frame.f_lasti, key=lambda x: x.offset)
427
+ else:
428
+ cur = calling_frame.f_lasti // 2
429
+ inst = insts[cur]
430
+
431
+ if inst.opname == 'POP_JUMP_IF_TRUE':
432
+ first = insts[cur + 1]
433
+ assert inst.arg is not None
434
+ last = insts[inst.arg // 2 - 1]
435
+ starts_with_assert = (first.opname == 'LOAD_GLOBAL' and first.argval == 'AssertionError'
436
+ or first.opname == 'LOAD_ASSERTION_ERROR')
437
+ if starts_with_assert and last.opname == 'RAISE_VARARGS':
438
+ self.tracer.create_proxy('call_function', assert_fn, (self,), {})
439
+ return True
440
+
441
+ return self.tracer.to_bool(self)
442
+
443
+ @compatibility(is_backward_compatible=True)
444
+ def keys(self):
445
+ return self.tracer.keys(self)
446
+
447
+ def __len__(self):
448
+ raise RuntimeError("'len' is not supported in symbolic tracing by default. If you want "
449
+ "this call to be recorded, please call torch.fx.wrap('len') at "
450
+ "module scope")
451
+
452
+ @classmethod
453
+ def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
454
+ args = args if args else ()
455
+ kwargs = kwargs if kwargs else {}
456
+
457
+ tracers : Dict[Any, None] = {}
458
+
459
+ def find_tracer(a):
460
+ if isinstance(a, cls):
461
+ tracers[a.tracer] = None
462
+ torch.fx.node.map_aggregate(args, find_tracer)
463
+ torch.fx.node.map_aggregate(kwargs, find_tracer)
464
+
465
+ if len(tracers) > 1:
466
+ raise RuntimeError(f'Found multiple different tracers {list(tracers.keys())} while '
467
+ f'trying to trace operations {orig_method}')
468
+ tracer = next(iter(tracers.keys()))
469
+
470
+ if isinstance(orig_method, torch._C.ScriptMethod):
471
+ args = (orig_method.owner,) + args
472
+ return tracer.create_proxy('call_method', orig_method.name, args, kwargs)
473
+ if torch.overrides.is_tensor_method_or_property(orig_method):
474
+ return tracer.create_proxy('call_method', orig_method.__name__, args, kwargs)
475
+ else:
476
+ if isinstance(orig_method, torch._ops.HigherOrderOperator):
477
+ # TODO: Define how to symbolically trace HigherOrderOperators
478
+ raise RuntimeError("Unable to symbolically trace HigherOrderOperators")
479
+ return tracer.create_proxy('call_function', orig_method, args, kwargs,
480
+ name=tracer.graph._target_to_str(orig_method.__name__))
481
+
482
+
483
+ @compatibility(is_backward_compatible=True)
484
+ class Attribute(Proxy):
485
+ @compatibility(is_backward_compatible=True)
486
+ def __init__(self, root: Proxy, attr: str):
487
+ self.root = root
488
+ self.attr = attr
489
+ self.tracer = root.tracer
490
+ self._node: Optional[Node] = None
491
+
492
+ @property
493
+ def node(self):
494
+ # the node for attributes is added lazily, since most will just be method calls
495
+ # which do not rely on the getattr call
496
+ if self._node is None:
497
+ self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
498
+ return self._node
499
+
500
+ def __call__(self, *args, **kwargs):
501
+ return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
502
+
503
+
504
+ @compatibility(is_backward_compatible=False)
505
+ class ParameterProxy(Proxy):
506
+ """
507
+ A special proxy which lets "shape", "size", "dim", and a few other
508
+ attribute accesses pass through to the underlying module parameter object,
509
+ so that conditional tests on these attributes will not throw an exception during tracing
510
+ """
511
+ def __init__(self, tracer: TracerBase, node: Node, name, param):
512
+ super().__init__(node, tracer)
513
+ assert isinstance(param, torch.nn.Parameter)
514
+ self.param = param
515
+ self.name = name
516
+
517
+ def __repr__(self) -> str:
518
+ return f'ParameterProxy({self.name})'
519
+
520
+ @property
521
+ def shape(self):
522
+ return self.param.shape
523
+
524
+ def size(self):
525
+ return self.param.size()
526
+
527
+ def dim(self):
528
+ return self.param.dim()
529
+
530
+ @property
531
+ def ndim(self):
532
+ return self.param.ndim
533
+
534
+ def numel(self):
535
+ return self.param.numel()
536
+
537
+ def nelement(self):
538
+ return self.param.nelement()
539
+
540
+
541
+ for method in magic_methods:
542
+ def _scope(method):
543
+ def impl(*args, **kwargs):
544
+ tracer = args[0].tracer
545
+ target = getattr(operator, method)
546
+ return tracer.create_proxy('call_function', target, args, kwargs)
547
+ impl.__name__ = method
548
+ as_magic = f'__{method.strip("_")}__'
549
+ setattr(Proxy, as_magic, impl)
550
+ _scope(method)
551
+
552
+ def _define_reflectable(orig_method_name):
553
+ method_name = f'__r{orig_method_name.strip("_")}__'
554
+
555
+ def impl(self, rhs):
556
+ target = getattr(operator, orig_method_name)
557
+ return self.tracer.create_proxy('call_function', target, (rhs, self), {})
558
+ impl.__name__ = method_name
559
+ impl.__qualname__ = method_name
560
+ setattr(Proxy, method_name, impl)
561
+
562
+ for orig_method_name in reflectable_magic_methods:
563
+ _define_reflectable(orig_method_name)
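A minimal illustrative sketch (not part of the diff above) of what the magic-method wrappers installed by the loop above do during tracing; the module name AddMul is invented for illustration, and the operators hit the registered wrappers and become call_function nodes.

import torch
import torch.fx

class AddMul(torch.nn.Module):
    def forward(self, x):
        # x is a Proxy during symbolic tracing; `*` and `+` are recorded
        # through the operator wrappers installed on Proxy above
        return x * 2 + 1

print(torch.fx.symbolic_trace(AddMul()).graph)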
env-llmeval/lib/python3.10/site-packages/torch/fx/subgraph_rewriter.py ADDED
@@ -0,0 +1,343 @@
1
+ from .graph_module import GraphModule
2
+ from .graph import Graph
3
+ from .node import Node
4
+ from ._symbolic_trace import symbolic_trace
5
+ from ._compatibility import compatibility
6
+
7
+ import copy
8
+ from dataclasses import dataclass
9
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union
10
+ import torch
11
+
12
+ __all__ = ['Match', 'replace_pattern', 'replace_pattern_with_filters', "ReplacedPatterns"]
13
+
14
+ @compatibility(is_backward_compatible=True)
15
+ class Match(NamedTuple):
16
+ # Node from which the match was found
17
+ anchor: Node
18
+ # Maps nodes in the pattern subgraph to nodes in the larger graph
19
+ nodes_map: Dict[Node, Node]
20
+
21
+ @compatibility(is_backward_compatible=False)
22
+ @dataclass
23
+ class ReplacedPatterns:
24
+ # Node from which the match was found
25
+ anchor: Node
26
+ # Maps nodes in the pattern subgraph to nodes in the larger graph
27
+ nodes_map: Dict[Node, Node]
28
+ # List of nodes that were added into the graph
29
+ replacements: List[Node]
30
+
31
+ def _replace_attributes(gm: GraphModule, replacement: torch.nn.Module) -> None:
32
+ gm.delete_all_unused_submodules()
33
+
34
+ if isinstance(replacement, GraphModule):
35
+ replacement.graph.lint()
36
+
37
+ def try_get_attr(gm: torch.nn.Module, target: str) -> Optional[Any]:
38
+ module_path, _, attr_name = target.rpartition(".")
39
+ mod: torch.nn.Module = gm.get_submodule(module_path)
40
+ attr = getattr(mod, attr_name, None)
41
+ return attr
42
+
43
+ for node in gm.graph.nodes:
44
+ if node.op == "call_module" or node.op == "get_attr":
45
+
46
+ gm_attr = try_get_attr(gm, node.target)
47
+ replacement_attr = try_get_attr(replacement, node.target)
48
+
49
+ # CASE 1: This target already exists as an attribute in our
50
+ # result GraphModule. Whether or not it exists in
51
+ # `replacement`, the existing submodule takes precedence.
52
+ if gm_attr is not None:
53
+ continue
54
+
55
+ # CASE 2: The target exists as an attribute in `replacement`
56
+ # only, so we need to copy it over.
57
+ elif replacement_attr is not None:
58
+ new_attr = copy.deepcopy(replacement_attr)
59
+ if isinstance(replacement_attr, torch.nn.Module):
60
+ gm.add_submodule(node.target, new_attr)
61
+ else:
62
+ setattr(gm, node.target, new_attr)
63
+
64
+ # CASE 3: The target doesn't exist as an attribute in `gm`
65
+ # or `replacement`
66
+ else:
67
+ raise RuntimeError("Attempted to create a \"", node.op,
68
+ "\" node during subgraph rewriting "
69
+ f"with target {node.target}, but "
70
+ "the referenced attribute does not "
71
+ "exist in the replacement GraphModule")
72
+
73
+ gm.graph.lint()
74
+
75
+
76
+ @compatibility(is_backward_compatible=True)
77
+ def replace_pattern(
78
+ gm: GraphModule,
79
+ pattern: Union[Callable, GraphModule],
80
+ replacement: Union[Callable, GraphModule]
81
+ ) -> List[Match]:
82
+ """
83
+ Matches all possible non-overlapping sets of operators and their
84
+ data dependencies (``pattern``) in the Graph of a GraphModule
85
+ (``gm``), then replaces each of these matched subgraphs with another
86
+ subgraph (``replacement``).
87
+
88
+ Args:
89
+ ``gm``: The GraphModule that wraps the Graph to operate on
90
+ ``pattern``: The subgraph to match in ``gm`` for replacement
91
+ ``replacement``: The subgraph to replace ``pattern`` with
92
+
93
+ Returns:
94
+ List[Match]: A list of ``Match`` objects representing the places
95
+ in the original graph that ``pattern`` was matched to. The list
96
+ is empty if there are no matches. ``Match`` is defined as:
97
+
98
+ .. code-block:: python
99
+
100
+ class Match(NamedTuple):
101
+ # Node from which the match was found
102
+ anchor: Node
103
+ # Maps nodes in the pattern subgraph to nodes in the larger graph
104
+ nodes_map: Dict[Node, Node]
105
+
106
+ Examples:
107
+
108
+ .. code-block:: python
109
+
110
+ import torch
111
+ from torch.fx import symbolic_trace, subgraph_rewriter
112
+
113
+ class M(torch.nn.Module):
114
+ def __init__(self):
115
+ super().__init__()
116
+
117
+ def forward(self, x, w1, w2):
118
+ m1 = torch.cat([w1, w2]).sum()
119
+ m2 = torch.cat([w1, w2]).sum()
120
+ return x + torch.max(m1) + torch.max(m2)
121
+
122
+ def pattern(w1, w2):
123
+ return torch.cat([w1, w2]).sum()
124
+
125
+ def replacement(w1, w2):
126
+ return torch.stack([w1, w2])
127
+
128
+ traced_module = symbolic_trace(M())
129
+
130
+ subgraph_rewriter.replace_pattern(traced_module, pattern, replacement)
131
+
132
+ The above code will first match ``pattern`` in the ``forward``
133
+ method of ``traced_module``. Pattern-matching is done based on
134
+ use-def relationships, not node names. For example, if you had
135
+ ``p = torch.cat([a, b])`` in ``pattern``, you could match
136
+ ``m = torch.cat([a, b])`` in the original ``forward`` function,
137
+ despite the variable names being different (``p`` vs ``m``).
138
+
139
+ The ``return`` statement in ``pattern`` is matched based on its
140
+ value only; it may or may not match to the ``return`` statement in
141
+ the larger graph. In other words, the pattern doesn't have to extend
142
+ to the end of the larger graph.
143
+
144
+ When the pattern is matched, it will be removed from the larger
145
+ function and replaced by ``replacement``. If there are multiple
146
+ matches for ``pattern`` in the larger function, each non-overlapping
147
+ match will be replaced. In the case of a match overlap, the first
148
+ found match in the set of overlapping matches will be replaced.
149
+ ("First" here being defined as the first in a topological ordering
150
+ of the Nodes' use-def relationships. In most cases, the first Node
151
+ is the parameter that appears directly after ``self``, while the
152
+ last Node is whatever the function returns.)
153
+
154
+ One important thing to note is that the parameters of the
155
+ ``pattern`` Callable must be used in the Callable itself,
156
+ and the parameters of the ``replacement`` Callable must match
157
+ the pattern. The first rule is why, in the above code block, the
158
+ ``forward`` function has parameters ``x, w1, w2``, but the
159
+ ``pattern`` function only has parameters ``w1, w2``. ``pattern``
160
+ doesn't use ``x``, so it shouldn't specify ``x`` as a parameter.
161
+ As an example of the second rule, consider replacing
162
+
163
+ .. code-block:: python
164
+
165
+ def pattern(x, y):
166
+ return torch.neg(x) + torch.relu(y)
167
+
168
+ with
169
+
170
+ .. code-block:: python
171
+
172
+ def replacement(x, y):
173
+ return torch.relu(x)
174
+
175
+ In this case, ``replacement`` needs the same number of parameters
176
+ as ``pattern`` (both ``x`` and ``y``), even though the parameter
177
+ ``y`` isn't used in ``replacement``.
178
+
179
+ After calling ``subgraph_rewriter.replace_pattern``, the generated
180
+ Python code looks like this:
181
+
182
+ .. code-block:: python
183
+
184
+ def forward(self, x, w1, w2):
185
+ stack_1 = torch.stack([w1, w2])
186
+ sum_1 = stack_1.sum()
187
+ stack_2 = torch.stack([w1, w2])
188
+ sum_2 = stack_2.sum()
189
+ max_1 = torch.max(sum_1)
190
+ add_1 = x + max_1
191
+ max_2 = torch.max(sum_2)
192
+ add_2 = add_1 + max_2
193
+ return add_2
194
+ """
195
+ match_and_replacements = _replace_pattern(gm, pattern, replacement)
196
+ return [Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements]
197
+
198
+
199
+ # Experimental API, not backward compatible
200
+ @compatibility(is_backward_compatible=False)
201
+ def replace_pattern_with_filters(
202
+ gm: GraphModule,
203
+ pattern: Union[Callable, Graph, GraphModule],
204
+ replacement: Union[Callable, Graph, GraphModule],
205
+ match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None, # type: ignore[name-defined]
206
+ ignore_literals: bool = False,
207
+ ) -> List[ReplacedPatterns]:
208
+ """
209
+ See replace_pattern for documentation. This function is an overload with an additional match_filter argument.
210
+
211
+ Args:
212
+ ``match_filters``: A list of functions that take in
213
+ (match: InternalMatch, original_graph: Graph, pattern_graph: Graph) and return a boolean indicating
214
+ whether the match satisfies the condition.
215
+ See matcher_utils.py for definition of InternalMatch.
216
+ """
217
+
218
+ return _replace_pattern(gm, pattern, replacement, match_filters, ignore_literals)
219
+
220
+
221
+ def _replace_pattern(
222
+ gm: GraphModule,
223
+ pattern: Union[Callable, Graph, GraphModule],
224
+ replacement: Union[Callable, Graph, GraphModule],
225
+ match_filters: Optional[List[Callable[["InternalMatch", Graph, Graph], bool]]] = None, # type: ignore[name-defined]
226
+ ignore_literals: bool = False,
227
+ ) -> List[ReplacedPatterns]:
228
+
229
+ from torch.fx.passes.utils.matcher_utils import SubgraphMatcher, InternalMatch
230
+
231
+ if match_filters is None:
232
+ match_filters = []
233
+
234
+ # Get the graphs for `gm`, `pattern`, `replacement`
235
+ original_graph: Graph = gm.graph
236
+
237
+ if isinstance(pattern, GraphModule):
238
+ pattern_graph = pattern.graph
239
+ elif isinstance(pattern, Graph):
240
+ pattern_graph = pattern
241
+ else:
242
+ pattern_graph = symbolic_trace(pattern).graph
243
+
244
+ if isinstance(replacement, GraphModule):
245
+ replacement_graph = replacement.graph
246
+ elif isinstance(replacement, Graph):
247
+ replacement_graph = replacement
248
+ else:
249
+ replacement_graph = symbolic_trace(replacement).graph
250
+
251
+ matcher = SubgraphMatcher(pattern_graph, match_output=False, match_placeholder=False,
252
+ remove_overlapping_matches=True, ignore_literals=ignore_literals)
253
+ _matches: List[InternalMatch] = matcher.match(original_graph)
254
+
255
+ # Filter out matches that don't match the filter
256
+ _matches = [
257
+ m for m in _matches
258
+ if all(match_filter(m, original_graph, pattern_graph)
259
+ for match_filter in match_filters)
260
+ ]
261
+
262
+ replacement_placeholders = [n for n in replacement_graph.nodes if n.op == "placeholder"]
263
+
264
+ # As we progressively replace nodes, we'll need to keep track of how the match results should change
265
+ match_changed_node: Dict[Node, Node] = {}
266
+
267
+ match_and_replacements = []
268
+ for match in _matches:
269
+
270
+ # Build connections between the replacement graph's inputs and the producer nodes of the original graph's inputs
271
+
272
+ # Initialize `val_map` with mappings from placeholder nodes in
273
+ # `replacement` to their corresponding node in `original_graph`
274
+ assert len(match.placeholder_nodes) == len(replacement_placeholders)
275
+ val_map: Dict[Node, Node] = {}
276
+ for rn, gn in zip(replacement_placeholders, match.placeholder_nodes):
277
+ if isinstance(gn, Node):
278
+ val_map[rn] = match_changed_node.get(gn, gn)
279
+ if gn != val_map[rn]:
280
+ # Update match.placeholder_nodes and match.nodes_map with the node that replaced gn
281
+ gn_ind = match.placeholder_nodes.index(gn)
282
+ match.placeholder_nodes[gn_ind] = match_changed_node[gn]
283
+ map_key = list(match.nodes_map.keys())[list(match.nodes_map.values()).index(gn)]
284
+ match.nodes_map[map_key] = match_changed_node[gn]
285
+ else:
286
+ val_map[rn] = gn
287
+
288
+ # Copy the replacement graph over
289
+ user_nodes: Set[Node] = set()
290
+ for n in match.returning_nodes:
291
+ for user in n.users:
292
+ user_nodes.add(user)
293
+ assert user_nodes, "The returning_nodes should have at least one user node"
294
+
295
+ if len(user_nodes) == 1:
296
+ first_user_node = next(iter(user_nodes))
297
+ else:
298
+ # If there are multiple user nodes, we need to find the first user node
299
+ # in the current execution order of the `original_graph`
300
+ for n in original_graph.nodes:
301
+ if n in user_nodes:
302
+ first_user_node = n
303
+ break
304
+
305
+ with original_graph.inserting_before(first_user_node):
306
+ copied_returning_nodes = original_graph.graph_copy(replacement_graph, val_map)
307
+
308
+ if isinstance(copied_returning_nodes, Node):
309
+ copied_returning_nodes = (copied_returning_nodes, )
310
+
311
+ # Get a list of nodes that have been replaced into the graph
312
+ replacement_nodes: List[Node] = [v for v in val_map.values() if v not in match.placeholder_nodes]
313
+
314
+ # Hook the output Node of the replacement subgraph in to the
315
+ # original Graph at the correct location
316
+ assert len(match.returning_nodes) == len(copied_returning_nodes)
317
+ for gn, copied_node in zip(match.returning_nodes, copied_returning_nodes):
318
+ gn.replace_all_uses_with(copied_node)
319
+ match_changed_node[gn] = copied_node
320
+ # Remove the original nodes
321
+ for node in reversed(pattern_graph.nodes):
322
+ if node.op != "placeholder" and node.op != "output":
323
+ gn = match.nodes_map[node]
324
+ gm.graph.erase_node(gn)
325
+
326
+ match_and_replacements.append(
327
+ ReplacedPatterns(
328
+ anchor=match.anchors[0],
329
+ nodes_map=match.nodes_map,
330
+ replacements=replacement_nodes
331
+ )
332
+ )
333
+
334
+ # Update the passed-in GraphModule to reflect the new state of
335
+ # `original_graph`
336
+ gm.recompile()
337
+
338
+ # If `replacement` was an nn.Module, we'll need to make sure that
339
+ # all the submodules have been copied over correctly
340
+ if isinstance(replacement, torch.nn.Module):
341
+ _replace_attributes(gm, replacement)
342
+
343
+ return match_and_replacements
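A minimal sketch of the filtered variant defined above; the toy module M, pattern, and replacement below are invented for illustration, and the trivial lambda filter accepts every candidate match using the (InternalMatch, original_graph, pattern_graph) signature documented in replace_pattern_with_filters.

import torch
from torch.fx import symbolic_trace, subgraph_rewriter

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(torch.neg(x))

def pattern(x):
    return torch.relu(torch.neg(x))

def replacement(x):
    return torch.neg(torch.relu(x))

traced = symbolic_trace(M())
# each match_filters entry receives (match, original_graph, pattern_graph)
replaced = subgraph_rewriter.replace_pattern_with_filters(
    traced, pattern, replacement,
    match_filters=[lambda match, original_graph, pattern_graph: True],
)
print([r.anchor for r in replaced])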
env-llmeval/lib/python3.10/site-packages/torch/fx/tensor_type.py ADDED
@@ -0,0 +1,104 @@
1
+ from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
2
+
3
+ from ._compatibility import compatibility
4
+
5
+
6
+ @compatibility(is_backward_compatible=False)
7
+ class TensorType:
8
+ """
9
+ TensorType defines a type for tensors, which consists of a list of dimensions.
10
+ Example:
11
+ class M(torch.nn.Module):
12
+ def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))):
13
+ return torch.add(x, y)
14
+ """
15
+
16
+ def __init__(self, dim):
17
+ self.__origin__ = TensorType
18
+ self.__args__ = dim
19
+
20
+ def __repr__(self):
21
+ return f'TensorType[{self.__args__}]'
22
+
23
+ def __eq__(self, other):
24
+ if isinstance(other, self.__class__):
25
+ return list(self.__args__) == list(other.__args__)
26
+ else:
27
+ return False
28
+
29
+ @staticmethod
30
+ def __class_getitem__(*args):
31
+ if len(args) == 1 and isinstance(args[0], tuple):
32
+ args = args[0]
33
+ return TensorType(tuple(args))
34
+
35
+
36
+ class _DynType:
37
+ """
38
+ _DynType defines a type which stands for the absence of type information.
39
+ """
40
+ def __init__(self):
41
+ self.__name__ = '_DynType'
42
+
43
+ def __eq__(self, other):
44
+ return isinstance(other, self.__class__)
45
+
46
+ def __str__(self):
47
+ return "Dyn"
48
+
49
+ def __repr__(self):
50
+ return "Dyn"
51
+
52
+
53
+ Dyn = _DynType()
54
+
55
+ @compatibility(is_backward_compatible=False)
56
+ def is_consistent(t1, t2):
57
+ """
58
+ A binary relation denoted by ~ that determines if t1 is consistent with t2.
59
+ The relation is reflexive, symmetric but not transitive.
60
+ returns True if t1 and t2 are consistent and False otherwise.
61
+ Example:
62
+ Dyn ~ TensorType((1,2,3))
63
+ int ~ Dyn
64
+ int ~ int
65
+ TensorType((1,Dyn,3)) ~ TensorType((1,2,3))
66
+ """
67
+
68
+ if t1 == t2:
69
+ return True
70
+
71
+ if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
72
+ return True
73
+
74
+ if isinstance(t1, TensorType) and isinstance(t2, TensorType):
75
+ return len(t1.__args__) == len(t2.__args__) and \
76
+ all(is_consistent(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__))
77
+ else:
78
+ return False
79
+
80
+
81
+ @compatibility(is_backward_compatible=False)
82
+ def is_more_precise(t1, t2):
83
+ """
84
+ A binary relation denoted by <= that determines if t1 is more precise than t2.
85
+ The relation is reflexive and transitive.
86
+ returns True if t1 is more precise than t2 and False otherwise.
87
+ Example:
88
+ Dyn >= TensorType((1,2,3))
89
+ int >= Dyn
90
+ int >= int
91
+ TensorType((1,Dyn,3)) <= TensorType((1,2,3))
92
+ """
93
+ if t1 == t2:
94
+ return True
95
+
96
+ if isinstance(t2, _DynType):
97
+ return True
98
+
99
+ if isinstance(t1, TensorType) and isinstance(t2, TensorType):
100
+ return len(t1.__args__) == len(t2.__args__) and \
101
+ all(is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__))
102
+
103
+ else:
104
+ return False
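A small sketch exercising the annotations defined above, assuming the module is importable as torch.fx.tensor_type; it checks the consistency and precision relations against the docstring examples.

from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise

t1 = TensorType((1, Dyn, 3))
t2 = TensorType((1, 2, 3))
assert is_consistent(t1, t2)        # Dyn is consistent with any dimension
assert is_more_precise(t2, Dyn)     # every type is more precise than Dyn
assert TensorType[1, 2, 3] == t2    # __class_getitem__ builds the same type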
env-llmeval/lib/python3.10/site-packages/torch/fx/traceback.py ADDED
@@ -0,0 +1,100 @@
1
+ import traceback
2
+ from contextlib import contextmanager
3
+ from typing import List, Any, Dict
4
+ from ._compatibility import compatibility
5
+
6
+ __all__ = ['preserve_node_meta', 'has_preserved_node_meta',
7
+ 'set_stack_trace', 'set_grad_fn_seq_nr', 'reset_grad_fn_seq_nr',
8
+ 'format_stack', 'set_current_meta', 'get_current_meta']
9
+
10
+ current_meta: Dict[str, Any] = {}
11
+ should_preserve_node_meta = False
12
+
13
+
14
+ @compatibility(is_backward_compatible=False)
15
+ @contextmanager
16
+ def preserve_node_meta():
17
+ global should_preserve_node_meta
18
+
19
+ saved_should_preserve_node_meta = should_preserve_node_meta
20
+ try:
21
+ should_preserve_node_meta = True
22
+ yield
23
+ finally:
24
+ should_preserve_node_meta = saved_should_preserve_node_meta
25
+
26
+
27
+ @compatibility(is_backward_compatible=False)
28
+ def set_stack_trace(stack : List[str]):
29
+ global current_meta
30
+
31
+ if should_preserve_node_meta and stack:
32
+ current_meta["stack_trace"] = "".join(stack)
33
+
34
+
35
+ @compatibility(is_backward_compatible=False)
36
+ def set_grad_fn_seq_nr(seq_nr):
37
+ global current_meta
38
+
39
+ if should_preserve_node_meta:
40
+ # The seq_nr is captured by eager mode in the grad_fn during forward
41
+ current_meta["prev_grad_fn_seq_nr"] = current_meta.get("grad_fn_seq_nr", None)
42
+ current_meta["prev_in_grad_fn"] = current_meta.get("in_grad_fn", None)
43
+ current_meta["grad_fn_seq_nr"] = seq_nr
44
+ current_meta["in_grad_fn"] = True
45
+
46
+
47
+ @compatibility(is_backward_compatible=False)
48
+ def reset_grad_fn_seq_nr():
49
+ # NB: reset state properly, this would be helpful towards supporting
50
+ # reentrant autograd if we actually wanted to do that.
51
+ global current_meta
52
+
53
+ if should_preserve_node_meta:
54
+ if current_meta["prev_grad_fn_seq_nr"] is None:
55
+ assert current_meta["prev_in_grad_fn"] is None
56
+ del current_meta["grad_fn_seq_nr"]
57
+ del current_meta["in_grad_fn"]
58
+ current_meta["grad_fn_seq_nr"] = current_meta["prev_grad_fn_seq_nr"]
59
+ current_meta["in_grad_fn"] = current_meta["prev_in_grad_fn"]
60
+
61
+
62
+ @compatibility(is_backward_compatible=False)
63
+ def format_stack() -> List[str]:
64
+ if should_preserve_node_meta:
65
+ return [current_meta.get("stack_trace", "")]
66
+ else:
67
+ # fallback to traceback.format_stack()
68
+ return traceback.format_list(traceback.extract_stack()[:-1])
69
+
70
+
71
+ @compatibility(is_backward_compatible=False)
72
+ def has_preserved_node_meta() -> bool:
73
+ return should_preserve_node_meta
74
+
75
+
76
+ @compatibility(is_backward_compatible=False)
77
+ @contextmanager
78
+ def set_current_meta(node):
79
+ global current_meta
80
+ if should_preserve_node_meta and node.meta:
81
+ saved_meta = current_meta
82
+ try:
83
+ current_meta = node.meta.copy()
84
+
85
+ # Append (node.name, node.target) onto "from_node" for provenance tracking
86
+ if "from_node" not in current_meta:
87
+ current_meta["from_node"] = [(node.name, node.target)]
88
+ elif current_meta["from_node"][-1][0] != node.name:
89
+ current_meta["from_node"].append((node.name, node.target))
90
+
91
+ yield
92
+ finally:
93
+ current_meta = saved_meta
94
+ else:
95
+ yield
96
+
97
+
98
+ @compatibility(is_backward_compatible=False)
99
+ def get_current_meta() -> Dict[str, Any]:
100
+ return current_meta
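A short sketch of how the helpers above interact, assuming the module is importable as torch.fx.traceback: inside preserve_node_meta(), a stack set via set_stack_trace() is what format_stack() returns instead of the live Python stack (the fake stack string is invented for illustration).

from torch.fx import traceback as fx_traceback

fake_stack = ["File \"example.py\", line 1, in forward\n"]
with fx_traceback.preserve_node_meta():
    fx_traceback.set_stack_trace(fake_stack)
    # set_stack_trace stores "".join(fake_stack) under "stack_trace"
    assert fx_traceback.format_stack() == ["".join(fake_stack)]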
env-llmeval/lib/python3.10/site-packages/torch/quantization/__init__.py ADDED
@@ -0,0 +1,87 @@
1
+ from .quantize import * # noqa: F403
2
+ from .observer import * # noqa: F403
3
+ from .qconfig import * # noqa: F403
4
+ from .fake_quantize import * # noqa: F403
5
+ from .fuse_modules import fuse_modules
6
+ from .stubs import * # noqa: F403
7
+ from .quant_type import * # noqa: F403
8
+ from .quantize_jit import * # noqa: F403
9
+
10
+ # from .quantize_fx import *
11
+ from .quantization_mappings import * # noqa: F403
12
+ from .fuser_method_mappings import * # noqa: F403
13
+
14
+
15
+ def default_eval_fn(model, calib_data):
16
+ r"""
17
+ Default evaluation function takes a torch.utils.data.Dataset or a list of
18
+ input Tensors and runs the model on the dataset
19
+ """
20
+ for data, target in calib_data:
21
+ model(data)
22
+
23
+
24
+ __all__ = [
25
+ "QuantWrapper",
26
+ "QuantStub",
27
+ "DeQuantStub",
28
+ # Top level API for eager mode quantization
29
+ "quantize",
30
+ "quantize_dynamic",
31
+ "quantize_qat",
32
+ "prepare",
33
+ "convert",
34
+ "prepare_qat",
35
+ # Top level API for graph mode quantization on TorchScript
36
+ "quantize_jit",
37
+ "quantize_dynamic_jit",
38
+ "_prepare_ondevice_dynamic_jit",
39
+ "_convert_ondevice_dynamic_jit",
40
+ "_quantize_ondevice_dynamic_jit",
41
+ # Top level API for graph mode quantization on GraphModule(torch.fx)
42
+ # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
43
+ # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
44
+ "QuantType", # quantization type
45
+ # custom module APIs
46
+ "get_default_static_quant_module_mappings",
47
+ "get_static_quant_module_class",
48
+ "get_default_dynamic_quant_module_mappings",
49
+ "get_default_qat_module_mappings",
50
+ "get_default_qconfig_propagation_list",
51
+ "get_default_compare_output_module_list",
52
+ "get_quantized_operator",
53
+ "get_fuser_method",
54
+ # Sub functions for `prepare` and `swap_module`
55
+ "propagate_qconfig_",
56
+ "add_quant_dequant",
57
+ "swap_module",
58
+ "default_eval_fn",
59
+ # Observers
60
+ "ObserverBase",
61
+ "WeightObserver",
62
+ "HistogramObserver",
63
+ "observer",
64
+ "default_observer",
65
+ "default_weight_observer",
66
+ "default_placeholder_observer",
67
+ "default_per_channel_weight_observer",
68
+ # FakeQuantize (for qat)
69
+ "default_fake_quant",
70
+ "default_weight_fake_quant",
71
+ "default_fixed_qparams_range_neg1to1_fake_quant",
72
+ "default_fixed_qparams_range_0to1_fake_quant",
73
+ "default_per_channel_weight_fake_quant",
74
+ "default_histogram_fake_quant",
75
+ # QConfig
76
+ "QConfig",
77
+ "default_qconfig",
78
+ "default_dynamic_qconfig",
79
+ "float16_dynamic_qconfig",
80
+ "float_qparams_weight_only_qconfig",
81
+ # QAT utilities
82
+ "default_qat_qconfig",
83
+ "prepare_qat",
84
+ "quantize_qat",
85
+ # module transformations
86
+ "fuse_modules",
87
+ ]
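The re-exports above keep the legacy torch.quantization namespace usable; below is a minimal dynamic-quantization sketch (the Sequential model is invented for illustration, and the call mirrors the re-exported quantize_dynamic API).

import torch
import torch.quantization as tq

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
# Replace Linear modules with dynamically quantized versions (int8 weights)
qmodel = tq.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
print(qmodel)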
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.84 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (1.01 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc ADDED
Binary file (2.69 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc ADDED
Binary file (1.3 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc ADDED
Binary file (802 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc ADDED
Binary file (720 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc ADDED
Binary file (1.38 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc ADDED
Binary file (1.18 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc ADDED
Binary file (594 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc ADDED
Binary file (990 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc ADDED
Binary file (963 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc ADDED
Binary file (591 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py ADDED
@@ -0,0 +1,28 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite import (
11
+ _convert_tuple_to_list,
12
+ _dequantize_tensor_list,
13
+ _find_match,
14
+ _get_logger_dict_helper,
15
+ _is_identical_module_type,
16
+ compare_model_outputs,
17
+ compare_model_stub,
18
+ compare_weights,
19
+ get_logger_dict,
20
+ get_matching_activations,
21
+ Logger,
22
+ NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
23
+ OutputLogger,
24
+ prepare_model_outputs,
25
+ prepare_model_with_stubs,
26
+ Shadow,
27
+ ShadowLogger,
28
+ )
env-llmeval/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py ADDED
@@ -0,0 +1,26 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite_fx import (
11
+ _add_loggers_impl,
12
+ _add_loggers_one_model,
13
+ _add_shadow_loggers_impl,
14
+ _extract_logger_info_one_model,
15
+ _extract_weights_impl,
16
+ _extract_weights_one_model,
17
+ add_loggers,
18
+ add_shadow_loggers,
19
+ extend_logger_results_with_comparison,
20
+ extract_logger_info,
21
+ extract_shadow_logger_info,
22
+ extract_weights,
23
+ NSTracer,
24
+ OutputLogger,
25
+ RNNReturnType,
26
+ )
env-llmeval/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py ADDED
@@ -0,0 +1,132 @@
1
+ import torch
2
+
3
+
4
+ # Pack pairs of int4 values into int8, in row major order; the first int4
5
+ # value goes into the lower-order bits, and the second int4 value into the
6
+ # higher-order bits of the resulting int8 value.
7
+ def pack_int4_to_int8(weight):
8
+ assert weight.dim() == 2
9
+ assert weight.shape[1] % 2 == 0
10
+ assert weight.dtype == torch.int8
11
+ return ((weight[:, 1::2] & 0xF) << 4) | (weight[:, 0::2] & 0xF)
12
+
13
+
14
+ # Unpack pairs of int4 values from each int8 value, in row
15
+ # major order; the lower 4 bits go into the first int4 value, and the
16
+ # upper 4 bits go into the second int4 value.
17
+ def unpack_int8_to_int4(weight):
18
+ assert weight.dim() == 2
19
+ assert weight.dtype == torch.int8
20
+ return torch.stack((weight & 0xF, (weight >> 4) & 0xF), dim=2).view(
21
+ weight.shape[0], 2 * weight.shape[1]
22
+ )
23
+
24
+
25
+ # Transpose the weight matrix, and then reorder its elements according
26
+ # to underlying requirements of CUTLASS library, so that it could be
27
+ # used for CUTLASS-based mixed datatypes linear operation.
28
+ def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
29
+ weight, dtypeq, transpose=False
30
+ ):
31
+ assert weight.dim() == 2
32
+ assert weight.dtype == torch.int8
33
+ assert dtypeq == torch.int8 or dtypeq == torch.quint4x2
34
+ assert weight.device.type == "cuda"
35
+
36
+ device = weight.device
37
+
38
+ # subbyte_transpose
39
+ if not transpose:
40
+ if dtypeq == torch.int8:
41
+ outp = weight.T
42
+ elif dtypeq == torch.quint4x2:
43
+ outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T)
44
+ else:
45
+ outp = weight
46
+
47
+ ncols, nrows = outp.shape
48
+ assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0
49
+ assert ncols % 64 == 0
50
+
51
+ # permute_B_rows_for_mixed_gemm
52
+ # (permute cols actually, as transpose is applied first here)
53
+ if dtypeq == torch.quint4x2:
54
+ cols_permuted = (
55
+ torch.tensor(
56
+ [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15],
57
+ device=device,
58
+ )
59
+ + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
60
+ nrows // 16, 16
61
+ )
62
+ ).view(-1)
63
+ else:
64
+ cols_permuted = (
65
+ torch.tensor(
66
+ [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15],
67
+ device=device,
68
+ )
69
+ + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
70
+ nrows // 16, 16
71
+ )
72
+ ).view(-1)
73
+ outp = outp.index_copy(1, cols_permuted, outp)
74
+
75
+ # interleave_column_major_tensor
76
+ magic0 = 4 if dtypeq == torch.quint4x2 else 2
77
+ magic1 = 32 // magic0
78
+
79
+ tmp0 = (
80
+ (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0))
81
+ .view(-1, 1)
82
+ .repeat(1, nrows // 4 * magic0)
83
+ .view(-1)
84
+ )
85
+ tmp1 = (
86
+ (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1))
87
+ .view(-1, 1)
88
+ .repeat(1, magic1)
89
+ .view(-1)
90
+ .repeat(ncols)
91
+ )
92
+ tmp2 = (
93
+ (torch.arange(0, magic0, device=device) * magic1)
94
+ .view(-1, 1)
95
+ .repeat(1, nrows // 4)
96
+ .view(-1)
97
+ .repeat(ncols // magic0)
98
+ )
99
+ tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1)
100
+
101
+ outp_offsets = tmp0 + tmp1 + tmp2 + tmp3
102
+
103
+ tmp = outp.view(-1).view(torch.int32)
104
+ outp = torch.zeros_like(tmp)
105
+ outp.scatter_(0, outp_offsets, tmp)
106
+ outp = outp.view(weight.dtype)
107
+
108
+ # add_bias_and_interleave_quantized_tensor_inplace
109
+ tmp = outp.view(-1)
110
+
111
+ outp = torch.empty_like(tmp)
112
+ if dtypeq == torch.int8:
113
+ tmp = (tmp.to(torch.int) + 128).to(tmp.dtype)
114
+ outp[0::4] = tmp[0::4]
115
+ outp[1::4] = tmp[2::4]
116
+ outp[2::4] = tmp[1::4]
117
+ outp[3::4] = tmp[3::4]
118
+ elif dtypeq == torch.quint4x2:
119
+ tmp0 = ((tmp & 0xF) + 8) & 0xF
120
+ tmp0 = (tmp0[1::2] << 4) | tmp0[0::2]
121
+ tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF
122
+ tmp1 = (tmp1[1::2] << 4) | tmp1[0::2]
123
+ outp[0::4] = tmp0[0::2]
124
+ outp[1::4] = tmp0[1::2]
125
+ outp[2::4] = tmp1[0::2]
126
+ outp[3::4] = tmp1[1::2]
127
+
128
+ if dtypeq == torch.quint4x2:
129
+ nrows *= 2
130
+ ncols //= 2
131
+
132
+ return outp.view(nrows, ncols).view(torch.uint8)
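A quick sketch showing that pack_int4_to_int8 and unpack_int8_to_int4 defined above should round-trip 4-bit values stored in an int8 tensor (assuming this module path is importable; the random tensor is purely illustrative).

import torch
from torch.quantization._quantized_conversions import (
    pack_int4_to_int8,
    unpack_int8_to_int4,
)

w = torch.randint(0, 16, (2, 8), dtype=torch.int8)   # int4 values in int8 storage
packed = pack_int4_to_int8(w)                        # shape (2, 4), two nibbles per byte
assert torch.equal(unpack_int8_to_int4(packed), w)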
env-llmeval/lib/python3.10/site-packages/torch/quantization/fake_quantize.py ADDED
@@ -0,0 +1,32 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fake_quantize.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.quantization.fake_quantize import (
11
+ _is_fake_quant_script_module,
12
+ _is_per_channel,
13
+ _is_per_tensor,
14
+ _is_symmetric_quant,
15
+ default_fake_quant,
16
+ default_fixed_qparams_range_0to1_fake_quant,
17
+ default_fixed_qparams_range_neg1to1_fake_quant,
18
+ default_fused_act_fake_quant,
19
+ default_fused_per_channel_wt_fake_quant,
20
+ default_fused_wt_fake_quant,
21
+ default_histogram_fake_quant,
22
+ default_per_channel_weight_fake_quant,
23
+ default_weight_fake_quant,
24
+ disable_fake_quant,
25
+ disable_observer,
26
+ enable_fake_quant,
27
+ enable_observer,
28
+ FakeQuantize,
29
+ FakeQuantizeBase,
30
+ FixedQParamsFakeQuantize,
31
+ FusedMovingAvgObsFakeQuantize,
32
+ )
env-llmeval/lib/python3.10/site-packages/torch/quantization/fuse_modules.py ADDED
@@ -0,0 +1,22 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fuse_modules.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ # TODO: These functions are not used outside the `fuse_modules.py`
11
+ # Keeping here for now, need to remove them later.
12
+ from torch.ao.quantization.fuse_modules import (
13
+ _fuse_modules,
14
+ _get_module,
15
+ _set_module,
16
+ fuse_known_modules,
17
+ fuse_modules,
18
+ get_fuser_method,
19
+ )
20
+
21
+ # for backward compatibility
22
+ from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu
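A minimal sketch of the re-exported fuse_modules entry point; the Sequential model and the submodule names "0"/"1"/"2" are invented for illustration, and the conv-bn-relu triple is fused in eval mode.

import torch
from torch.quantization import fuse_modules

m = torch.nn.Sequential(
    torch.nn.Conv2d(3, 3, 1),
    torch.nn.BatchNorm2d(3),
    torch.nn.ReLU(),
).eval()
fused = fuse_modules(m, [["0", "1", "2"]])  # fuse the conv/bn/relu triple
print(fused)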
env-llmeval/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py ADDED
@@ -0,0 +1,15 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement
7
+ here.
8
+ """
9
+ from torch.ao.quantization.fuser_method_mappings import (
10
+ _DEFAULT_OP_LIST_TO_FUSER_METHOD,
11
+ fuse_conv_bn,
12
+ fuse_conv_bn_relu,
13
+ fuse_linear_bn,
14
+ get_fuser_method,
15
+ )