applied-ai-018 committed
Commit faf39dd · verified · Parent: c351b7f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/_inductor/__init__.py +150 -0
  7. venv/lib/python3.10/site-packages/torch/_inductor/autotune_process.py +656 -0
  8. venv/lib/python3.10/site-packages/torch/_inductor/bounds.py +124 -0
  9. venv/lib/python3.10/site-packages/torch/_inductor/codecache.py +0 -0
  10. venv/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py +273 -0
  11. venv/lib/python3.10/site-packages/torch/_inductor/comms.py +363 -0
  12. venv/lib/python3.10/site-packages/torch/_inductor/compile_fx.py +1451 -0
  13. venv/lib/python3.10/site-packages/torch/_inductor/config.py +752 -0
  14. venv/lib/python3.10/site-packages/torch/_inductor/constant_folding.py +264 -0
  15. venv/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py +315 -0
  16. venv/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py +2159 -0
  17. venv/lib/python3.10/site-packages/torch/_inductor/cudagraph_utils.py +105 -0
  18. venv/lib/python3.10/site-packages/torch/_inductor/debug.py +655 -0
  19. venv/lib/python3.10/site-packages/torch/_inductor/decomposition.py +678 -0
  20. venv/lib/python3.10/site-packages/torch/_inductor/dependencies.py +506 -0
  21. venv/lib/python3.10/site-packages/torch/_inductor/exc.py +98 -0
  22. venv/lib/python3.10/site-packages/torch/_inductor/freezing.py +266 -0
  23. venv/lib/python3.10/site-packages/torch/_inductor/fx_utils.py +220 -0
  24. venv/lib/python3.10/site-packages/torch/_inductor/graph.py +1324 -0
  25. venv/lib/python3.10/site-packages/torch/_inductor/hooks.py +28 -0
  26. venv/lib/python3.10/site-packages/torch/_inductor/index_propagation.py +277 -0
  27. venv/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py +90 -0
  28. venv/lib/python3.10/site-packages/torch/_inductor/ir.py +0 -0
  29. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py +1 -0
  30. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py +128 -0
  37. venv/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py +495 -0
  38. venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py +312 -0
  39. venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py +262 -0
  40. venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm_plus_mm.py +235 -0
  41. venv/lib/python3.10/site-packages/torch/_inductor/kernel/unpack_mixed_mm.py +82 -0
  42. venv/lib/python3.10/site-packages/torch/_inductor/lowering.py +0 -0
  43. venv/lib/python3.10/site-packages/torch/_inductor/metrics.py +419 -0
  44. venv/lib/python3.10/site-packages/torch/_inductor/ops_handler.py +655 -0
  45. venv/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py +118 -0
  46. venv/lib/python3.10/site-packages/torch/_inductor/pattern_matcher.py +1524 -0
  47. venv/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py +15 -0
  48. venv/lib/python3.10/site-packages/torch/_inductor/scheduler.py +2445 -0
  49. venv/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py +1156 -0
  50. venv/lib/python3.10/site-packages/torch/_inductor/sizevars.py +643 -0
ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a57f2e5ed8bac19952da2492b9c6e7474d5646223c8bc39b418231fb040d26e4
+size 50332828
ckpts/universal/global_step120/zero/21.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13859b1aeecf63d26696c8427a5136010a9c894958f66ead97c45bad199eb8d3
+size 50332749
ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cdffe308a6b5f37ba184dee9c4a041a92d139efb1ac5bb3e0803ec648a3a544
+size 9372
ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca96de0a159456633f25b4688b57c3e84a65d65a621aca732d5a605f41a4000f
+size 9387
ckpts/universal/global_step120/zero/22.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dff9eca88a9bc9e2d0463d6e407d03a1554d99a19628a8b0a0ecd4b3d64b3319
+size 9293
venv/lib/python3.10/site-packages/torch/_inductor/__init__.py ADDED
@@ -0,0 +1,150 @@
+from typing import Any, Dict, List, Optional
+
+import torch.fx
+import torch.utils._pytree as pytree
+
+__all__ = ["compile", "list_mode_options", "list_options", "cudagraph_mark_step_begin"]
+
+
+def compile(
+    gm: torch.fx.GraphModule,
+    example_inputs: List[torch.Tensor],
+    options: Optional[Dict[str, Any]] = None,
+):
+    """
+    Compile a given FX graph with TorchInductor. This allows compiling
+    FX graphs captured without using TorchDynamo.
+
+    Args:
+        gm: The FX graph to compile.
+        example_inputs: List of tensor inputs.
+        options: Optional dict of config options. See `torch._inductor.config`.
+
+    Returns:
+        Callable with same behavior as gm but faster.
+    """
+    from .compile_fx import compile_fx
+
+    return compile_fx(gm, example_inputs, config_patches=options)
+
+
+def aot_compile(
+    gm: torch.fx.GraphModule,
+    example_inputs: List[torch.Tensor],
+    options: Optional[Dict[str, Any]] = None,
+) -> str:
+    """
+    Ahead-of-time compile a given FX graph with TorchInductor into a shared library.
+
+    Args:
+        gm: The FX graph to compile.
+        example_inputs: List of tensor inputs.
+        options: Optional dict of config options. See `torch._inductor.config`.
+
+    Returns:
+        Path to the generated shared library
+    """
+    from .compile_fx import compile_fx_aot
+
+    # We will serialize the pytree info into the .so as constant strings
+    in_spec = None
+    out_spec = None
+    if isinstance(gm.graph._codegen, torch.fx.graph._PyTreeCodeGen):
+        codegen = gm.graph._codegen
+        gm.graph._codegen = torch.fx.graph.CodeGen()
+        gm.recompile()
+
+        if codegen.pytree_info.in_spec is not None:
+            in_spec = codegen.pytree_info.in_spec
+        if codegen.pytree_info.out_spec is not None:
+            out_spec = codegen.pytree_info.out_spec
+
+    else:
+        if hasattr(gm, "_in_spec"):
+            in_spec = gm._in_spec
+        if hasattr(gm, "_out_spec"):
+            out_spec = gm._out_spec
+
+    serialized_in_spec = pytree.treespec_dumps(in_spec) if in_spec is not None else ""
+    serialized_out_spec = (
+        pytree.treespec_dumps(out_spec) if out_spec is not None else ""
+    )
+
+    options = (
+        {
+            "aot_inductor.serialized_in_spec": serialized_in_spec,
+            "aot_inductor.serialized_out_spec": serialized_out_spec,
+        }
+        if options is None
+        else {
+            **options,
+            "aot_inductor.serialized_in_spec": serialized_in_spec,
+            "aot_inductor.serialized_out_spec": serialized_out_spec,
+        }
+    )
+
+    return compile_fx_aot(
+        gm,
+        example_inputs,
+        config_patches=options,
+    )
+
+
+def list_mode_options(
+    mode: Optional[str] = None, dynamic: Optional[bool] = None
+) -> Dict[str, Any]:
+    r"""Returns a dictionary describing the optimizations that each of the available
+    modes passed to `torch.compile()` performs.
+
+    Args:
+        mode (str, optional): The mode to return the optimizations for.
+            If None, returns optimizations for all modes
+        dynamic (bool, optional): Whether dynamic shape is enabled.
+
+    Example::
+        >>> torch._inductor.list_mode_options()
+    """
+
+    mode_options: Dict[str, Dict[str, bool]] = {
+        "default": {},
+        # enable cudagraphs
+        "reduce-overhead": {
+            "triton.cudagraphs": True,
+        },
+        # enable max-autotune
+        "max-autotune-no-cudagraphs": {
+            "max_autotune": True,
+        },
+        # enable max-autotune
+        # enable cudagraphs
+        "max-autotune": {
+            "max_autotune": True,
+            "triton.cudagraphs": True,
+        },
+    }
+    return mode_options[mode] if mode else mode_options  # type: ignore[return-value]
+
+
+def list_options() -> List[str]:
+    r"""Returns a dictionary describing the optimizations and debug configurations
+    that are available to `torch.compile()`.
+
+    The options are documented in `torch._inductor.config`.
+
+    Example::
+
+        >>> torch._inductor.list_options()
+    """
+
+    from torch._inductor import config
+
+    current_config: Dict[str, Any] = config.shallow_copy_dict()
+
+    return list(current_config.keys())
+
+
+def cudagraph_mark_step_begin():
+    "Indicates that a new iteration of inference or training is about to begin."
+    from .cudagraph_trees import mark_step_begin
+
+    mark_step_begin()
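The functions above are the public entry points of `torch._inductor`. A minimal usage sketch (not part of this commit; the toy function and shapes are illustrative assumptions) of `list_mode_options` and `compile` on an FX graph captured without TorchDynamo:

import torch
import torch._inductor as inductor

# Inspect the config deltas behind torch.compile()'s mode strings.
print(inductor.list_mode_options("max-autotune"))
# {'max_autotune': True, 'triton.cudagraphs': True}

# Compile an FX graph captured without TorchDynamo.
def f(x, y):
    return torch.relu(x) + y

gm = torch.fx.symbolic_trace(f)                  # toy graph for illustration
example_inputs = [torch.randn(8, 8), torch.randn(8, 8)]
compiled = inductor.compile(gm, example_inputs)  # per the docstring: a callable with the same behavior as gm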
venv/lib/python3.10/site-packages/torch/_inductor/autotune_process.py ADDED
@@ -0,0 +1,656 @@
+from __future__ import annotations
+
+import contextlib
+import dataclasses
+import functools
+import logging
+import os
+import queue
+import time
+import warnings
+from concurrent.futures import ThreadPoolExecutor
+from ctypes import byref, c_size_t, c_void_p
+from multiprocessing.process import BaseProcess
+from multiprocessing.queues import Queue
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    TYPE_CHECKING,
+    Union,
+)
+
+import torch
+from torch import multiprocessing
+from torch._dynamo.testing import rand_strided
+
+from torch._inductor import ir
+from torch._inductor.codecache import CUDACodeCache, DLLWrapper, PyCodeCache
+
+if TYPE_CHECKING:
+    from torch._inductor.select_algorithm import TritonTemplateCaller
+
+from . import config
+from .utils import do_bench
+from .virtualized import V
+
+CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES"
+EXIT_HANDLER_REGISTERED = False
+
+log = logging.getLogger(__name__)
+
+
+# Used to synchronize between parent and child processes
+class Ping:
+    pass
+
+
+class Pong:
+    pass
+
+
+@contextlib.contextmanager
+def set_cuda_visible_device(device: Optional[int]):
+    """
+    Context manager to set the CUDA_VISIBLE_DEVICES environment variable to the
+    specified single device. If device is None, don't manipulate the environment.
+    """
+    if device is None:
+        yield
+        return
+
+    current = os.environ.get(CUDA_VISIBLE_DEVICES)
+    os.environ[CUDA_VISIBLE_DEVICES] = str(device)
+    try:
+        yield
+    finally:
+        if current is None:
+            del os.environ[CUDA_VISIBLE_DEVICES]
+        else:
+            os.environ[CUDA_VISIBLE_DEVICES] = current
+
+
+@dataclasses.dataclass
+class TuningProcess:
+    """
+    Abstraction for launching a helper process to benchmark kernels. Spawns
+    the parent process and uses multiprocessing queues to send benchmark
+    requests and return results.
+    """
+
+    device: Optional[int] = None
+    process: Optional[BaseProcess] = None
+    request_queue: Optional[Queue[Any]] = None
+    response_queue: Optional[Queue[Any]] = None
+
+    @staticmethod
+    def process_main(
+        request_queue: Queue[Any],
+        response_queue: Queue[Any],
+    ) -> None:
+        """
+        Entry point for the child process.
+        """
+        log.debug(
+            "Entering TuningProcess child. Visible devices = %s",
+            os.environ.get(CUDA_VISIBLE_DEVICES),
+        )
+        try:
+            TuningProcess.workloop(request_queue, response_queue)
+        except Exception as ex:
+            log.exception("Exception in TuningProcess: %s", ex)
+
+    @staticmethod
+    def workloop(request_queue: Queue[Any], response_queue: Queue[Any]) -> None:
+        """
+        Work loop for the benchmarking subprocess.
+        """
+        while True:
+            obj = request_queue.get()
+
+            if obj is None:
+                break  # None is a sentinel for the child to terminate
+            elif isinstance(obj, Ping):
+                response_queue.put(Pong())
+            elif isinstance(obj, BenchmarkRequest):
+                response_queue.put(obj.benchmark())
+            else:
+                raise RuntimeError(f"Invalid request type {type(obj)}")
+
+    def valid(self) -> bool:
+        """
+        True if the sub-process has been initialized.
+        """
+        return (
+            self.process is not None
+            and self.request_queue is not None
+            and self.response_queue is not None
+        )
+
+    def clear(self) -> None:
+        """
+        Reset to an uninitialized state.
+        """
+        self.process = self.request_queue = self.response_queue = None
+
+    def initialize(self) -> None:
+        """
+        Create child process, request/response queues, and do the warm up.
+        Set the environment to make only the provided GPU device visible
+        to the process.
+        """
+        if self.valid():
+            return
+
+        # cuda runtime does not work with "fork", use "spawn" to start processes.
+        ctx = multiprocessing.get_context("spawn")
+        self.request_queue = ctx.Queue()
+        self.response_queue = ctx.Queue()
+
+        self.process = ctx.Process(
+            target=self.process_main,
+            args=(
+                self.request_queue,
+                self.response_queue,
+            ),
+        )
+        assert self.process is not None
+        with set_cuda_visible_device(self.device):
+            self.process.start()
+
+    def put(self, obj: Any) -> None:
+        """
+        Push a work item to the child process.
+        """
+        # In case of a prior crash, ensure the subprocess is running
+        self.initialize()
+        assert self.request_queue is not None
+        self.request_queue.put(obj)
+
+    def get(self) -> Any:
+        """
+        Get a response from the child process.
+        """
+        assert self.process is not None
+        assert self.response_queue is not None
+        while True:
+            try:
+                return self.response_queue.get(timeout=1.0)
+            except queue.Empty:
+                status = self.process.exitcode
+                if status is None:
+                    # child process is still running
+                    continue
+                # child process crashed
+                self.clear()
+                raise
+
+    def terminate(self) -> None:
+        """
+        Signal the child process to terminate.
+        """
+        if self.valid():
+            assert self.process is not None
+            assert self.request_queue is not None
+            self.request_queue.put(None)
+
+    def wait(self) -> None:
+        """
+        Wait for the child process to exit.
+        """
+        if self.process is not None:
+            self.process.join()
+            self.clear()
+
+
+@dataclasses.dataclass
+class TuningProcessPool:
+    """
+    Maintains a pool of TuningProcesses to benchmark kernels in parallel
+    across devices. By default, we create one TuningProcess per device and
+    set the sub-process environment to make only that device visible.
+    """
+
+    processes: Optional[queue.Queue[TuningProcess]] = None
+    executor: Optional[ThreadPoolExecutor] = None
+
+    def initialize(self) -> None:
+        """
+        Start the child processes.
+        """
+        assert (self.processes is None) == (self.executor is None)
+        if self.processes is not None:
+            return
+
+        devices = self.get_device_list()
+        log.debug("Sub-process autotune device list: %s", devices)
+
+        # Launch the child processes and push a msg to "warm up"
+        self.processes = queue.Queue()
+        for device in devices:
+            p = TuningProcess(device=device)
+            p.initialize()
+            p.put(Ping())
+            self.processes.put(p)
+
+        # Wait for the initialization to finish
+        for p in self.processes.queue:
+            assert isinstance(p.get(), Pong)
+
+        # Use a thread pool to manage distributing work to the subprocesses.
+        # Threads block on an available process, so it makes sense to match
+        # the number of threads with the number of devices.
+        self.executor = ThreadPoolExecutor(max_workers=len(devices))
+
+        # Register the exit handler for the parent process so it will terminate
+        # the child processes.
+        global EXIT_HANDLER_REGISTERED
+        if not EXIT_HANDLER_REGISTERED:
+            EXIT_HANDLER_REGISTERED = True
+            import atexit
+
+            atexit.register(self.terminate)
+
+    def get_device_list(self) -> Sequence[Optional[int]]:
+        """
+        Gather the list of devices to be used in the pool.
+        """
+        if not config.autotune_multi_device:
+            # Don't use multiple devices
+            return [None]
+
+        count = torch.cuda.device_count()
+
+        # If the user specified the visible devices in the env, use those.
+        if CUDA_VISIBLE_DEVICES in os.environ:
+            devices = [int(d) for d in os.environ[CUDA_VISIBLE_DEVICES].split(",")]
+            assert len(devices) <= count
+            return devices
+
+        return list(range(count))
+
+    def terminate(self) -> None:
+        """
+        Signal all child processes to terminate.
+        """
+        if self.executor is not None:
+            self.executor.shutdown()
+            self.executor = None
+
+        if self.processes is not None:
+            for p in self.processes.queue:
+                p.terminate()
+            for p in self.processes.queue:
+                p.wait()
+            self.processes = None
+
+    def target(self, choice: TritonTemplateCaller) -> float:
+        """
+        Entry point for the thread-pool helper threads: Wait for an open TuningProcess,
+        remove it from the queue, execute the benchmark in that subprocess, and return
+        the TuningProcess to the queue.
+        """
+        assert choice.bmreq is not None
+        assert self.processes is not None
+
+        process = self.processes.get()
+        process.put(choice.bmreq)
+        try:
+            return process.get()
+        except queue.Empty:
+            warnings.warn(
+                f"Failed to benchmark choice '{choice}'. It will be ignored. "
+                "Please debug the root cause in case the choice can bring perf gains."
+            )
+            # set to INF so this choice will be ignored
+            return float("inf")
+        finally:
+            self.processes.put(process)
+
+    def benchmark(
+        self,
+        choices: List[TritonTemplateCaller],
+    ) -> Dict[TritonTemplateCaller, float]:
+        """
+        Benchmark each choice in a separate process.
+        """
+        assert self.processes is not None, "Tuning process pool is not initialized"
+        assert self.executor is not None
+
+        results = {}
+
+        # Use a ThreadExecutorPool to spread the work across the subprocesses and
+        # to grab subprocesses as soon as they're free.
+        for choice, result in zip(choices, self.executor.map(self.target, choices)):
+            results[choice] = result
+
+        return results
+
+
+tuning_pool = TuningProcessPool()
+
+
+LayoutOrBuffer = Union[ir.Layout, ir.Buffer]
+
+
+@dataclasses.dataclass
+class TensorMeta:
+    device: torch.device
+    dtype: torch.dtype
+    sizes: torch._prims_common.ShapeType
+    strides: torch._prims_common.StrideType
+    offset: int
+
+    @classmethod
+    def from_irnodes(
+        cls, irnodes: Union[LayoutOrBuffer, Sequence[LayoutOrBuffer]]
+    ) -> Union[TensorMeta, List[TensorMeta]]:
+        if isinstance(irnodes, Sequence):
+            result: List[Any] = [cls.from_irnodes(x) for x in irnodes]
+            assert all(isinstance(x, TensorMeta) for x in result)
+            return result
+
+        node = irnodes
+        if isinstance(node, ir.Layout):
+            node = ir.Buffer("fake", node)
+
+        dtype = node.get_dtype()
+        assert dtype is not None
+
+        return TensorMeta(
+            device=node.get_device(),
+            dtype=dtype,
+            sizes=V.graph.sizevars.size_hints(
+                node.get_size(),
+                fallback=config.unbacked_symint_fallback,
+            ),
+            strides=V.graph.sizevars.size_hints(
+                node.get_stride(),
+                fallback=config.unbacked_symint_fallback,
+            ),
+            offset=V.graph.sizevars.size_hint(
+                node.get_layout().offset,
+                fallback=config.unbacked_symint_fallback,
+            ),
+        )
+
+    def to_tensor(self) -> torch.Tensor:
+        return rand_strided(
+            self.sizes,
+            self.strides,
+            device=self.device,
+            dtype=self.dtype,
+            extra_size=self.offset,
+        )
+
+
+@dataclasses.dataclass
+class BenchmarkRequest:
+    """
+    Only handle triton template benchmark for now. The extern kernel benchmark
+    can be done inside the same process since they usually don't cause crash.
+
+    Important: Instances of this class and subclasses have to be serializable
+    across process boundaries. Do not put CUDA Tensors in here!
+    """
+
+    def __init__(
+        self,
+        kernel_name: str,
+        input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        extra_args: Iterable[Any],
+    ):
+        # the kernel name defined in the module
+        self.kernel_name = kernel_name
+
+        if isinstance(input_tensor_meta, TensorMeta):
+            input_tensor_meta = [input_tensor_meta]
+        self.input_tensor_meta = input_tensor_meta
+
+        if isinstance(output_tensor_meta, (tuple, list)):
+            assert len(output_tensor_meta) == 1
+            output_tensor_meta = output_tensor_meta[0]
+        self.output_tensor_meta = output_tensor_meta
+
+        self.extra_args = extra_args
+
+    def make_run_fn(
+        self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
+    ) -> Callable[[], None]:
+        raise NotImplementedError()
+
+    def cleanup_run_fn(self) -> None:
+        pass
+
+    def benchmark(
+        self,
+        *input_tensors: torch.Tensor,
+        output_tensor: Optional[torch.Tensor] = None,
+    ) -> float:
+        debug = log.isEnabledFor(logging.DEBUG)
+        if debug:
+            start_ts = time.time()
+
+        # create args and out tensor
+        if output_tensor is None:
+            assert len(input_tensors) == 0
+            input_tensors = tuple(x.to_tensor() for x in self.input_tensor_meta)
+            output_tensor = self.output_tensor_meta.to_tensor()
+
+        if debug:
+            create_tensor_elapse = time.time() - start_ts  # type: ignore[possibly-undefined]
+            start_ts = time.time()
+
+        fn = self.make_run_fn(*input_tensors, output_tensor=output_tensor)
+
+        if debug:
+            load_elapse = time.time() - start_ts  # type: ignore[possibly-undefined]
+            start_ts = time.time()
+
+        out = do_bench(fn)
+        torch.cuda.synchronize()  # shake out any CUDA errors
+
+        if debug:
+            bench_elapse = time.time() - start_ts  # type: ignore[possibly-undefined]
+            log.debug(
+                "InChildProcess %s: load %f, create tensor %f, bench %f",
+                str(self),
+                load_elapse,  # type: ignore[possibly-undefined]
+                create_tensor_elapse,  # type: ignore[possibly-undefined]
+                bench_elapse,
+            )
+        self.cleanup_run_fn()
+        return out
+
+
+class TestBenchmarkRequest(BenchmarkRequest):
+    """
+    Supports unit testing. Defined in this file so that the TuningProcess
+    sub-process knows how to unpickle these objects.
+    """
+
+    def __init__(self, value: Optional[float] = None) -> None:
+        self.value = value
+
+    def benchmark(
+        self, *input_tensors: torch.Tensor, output_tensor: Optional[torch.Tensor] = None
+    ) -> float:
+        if self.value is None:
+            raise Exception("Failed to run")
+        return self.value
+
+
+class TritonBenchmarkRequest(BenchmarkRequest):
+    # Important: Instances of this class have to be serializable
+    # across process boundaries. Do not put CUDA Tensors in here!
+
+    def __init__(
+        self,
+        kernel_name: str,
+        input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        extra_args: Iterable[Any],
+        module_path: str,  # the path of the module defining the triton kernel
+        module_cache_key: str,
+        grid: List[int],
+        num_stages: int,
+        num_warps: int,
+        matrix_instr_nonkdim: int = 0,  # only used for hip to choose the shape of mfma instruction.
+    ):
+        super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
+        self.module_path = module_path
+        self.module_cache_key = module_cache_key
+        self.grid = grid
+        self.num_stages = num_stages
+        self.num_warps = num_warps
+        self.matrix_instr_nonkdim = matrix_instr_nonkdim
+
+    def make_run_fn(
+        self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
+    ) -> Callable[[], None]:
+        mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path)
+        log.debug(
+            "benchmark module key: %s, path: %s",
+            self.module_cache_key,
+            self.module_path,
+        )
+
+        run_method = getattr(mod, self.kernel_name).run
+        extra_args = list(self.extra_args)
+
+        # Newer version of triton add warmup argument to JITFunction.run.
+        # This code handles backward-compatibility.
+        warmup_arg = {}
+        import inspect
+
+        if "warmup" in inspect.signature(run_method).parameters:
+            warmup_arg["warmup"] = False
+
+        if torch.version.hip and self.matrix_instr_nonkdim != 0:
+            return functools.partial(
+                run_method,
+                *input_tensors,
+                output_tensor,
+                *self.extra_args,
+                grid=self.grid,
+                **warmup_arg,
+                num_stages=self.num_stages,
+                num_warps=self.num_warps,
+                matrix_instr_nonkdim=self.matrix_instr_nonkdim,
+            )
+        else:
+            return functools.partial(
+                run_method,
+                *input_tensors,
+                output_tensor,
+                *self.extra_args,
+                grid=self.grid,
+                **warmup_arg,
+                num_stages=self.num_stages,
+                num_warps=self.num_warps,
+            )
+
+    def __str__(self) -> str:
+        return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}"
+
+
+class CUDABenchmarkRequest(BenchmarkRequest):
+    # Important: Instances of this class have to be serializable
+    # across process boundaries. Do not put CUDA Tensors in here!
+
+    def __init__(
+        self,
+        kernel_name: str,
+        input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
+        extra_args: Iterable[Any],
+        source_code: str,
+    ):
+        super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
+        self.source_code = source_code
+        self.workspace_size: int = 0
+        self.workspace: Optional[torch.Tensor] = None
+        self.DLL: Optional[DLLWrapper] = None
+        self.hash_key: str = ""
+        self.source_file: str = ""
+        self.hash_key, self.source_file = CUDACodeCache.write(self.source_code, "so")
+
+    def precompile(self):
+        # Prepopulate CUDACodeCache
+        # may happen in separate Threadpool
+        log.debug("Precompiling %s", self)
+        CUDACodeCache.load(self.source_code, "so")
+        log.debug("Done precompiling %s", self)
+
+    def make_run_fn(
+        self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
+    ) -> Callable[[], None]:
+        self.DLL, self.hash_key, self.source_file = CUDACodeCache.load(
+            self.source_code, "so"
+        )
+        args = [
+            c_void_p(tensor.data_ptr())
+            for tensor in list(input_tensors) + [output_tensor]
+        ]
+        log.debug(
+            "make_run_fn: self.kernel_name=%s, self.source_file=%s, self.hash_key=%s, self.DLL=%s, args=%s, self.extra_args=%s",
+            self.kernel_name,
+            self.source_file,
+            self.hash_key,
+            self.DLL,
+            args,
+            self.extra_args,
+        )
+        run_method = getattr(self.DLL, self.kernel_name)
+        stream_ptr = c_void_p(torch.cuda.current_stream().cuda_stream)
+
+        # Retrieve workspace_size and initialize workspace.
+        c_workspace_size = c_size_t()
+        run_method(
+            *args,  # input ptrs and output ptrs
+            *self.extra_args,
+            byref(
+                c_workspace_size
+            ),  # set workspace size ptr to retrieve workspace size
+            None,  # null workspace ptr
+            stream_ptr,
+        )
+        self.workspace_size = c_workspace_size.value
+        # TODO: Support non-zero workspace_size.
+        assert self.workspace_size == 0, (
+            "Things need to be fixed to support non-zero workspace_size: "
+            "1) max autotune cache needs to store workspace size; "
+            "2) memory allocation needs to allocate / deallocate workspace correctly; "
+        )
+
+        # Generate partial function.
+        return functools.partial(
+            run_method,
+            *args,
+            *self.extra_args,
+            None,  # null workspace size ptr
+            None,  # set workspace ptr, TODO: update it to a real ptr if workspace_size > 0
+            stream_ptr,
+        )
+
+    def cleanup_run_fn(self) -> None:
+        if self.DLL is not None:
+            self.DLL.close()
+        self.workspace = None
+
+    def __str__(self) -> str:
+        return f"{self.kernel_name=}, {self.source_file=}, {self.hash_key=}"
+
+
+def benchmark_in_sub_process(
+    choices: List[TritonTemplateCaller],
+) -> Dict[TritonTemplateCaller, float]:
+    """
+    Do benchmarking in a subprocess and return the perf number (latency).
+    """
+    return tuning_pool.benchmark(choices)
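The parent/child protocol implemented by TuningProcess above is small: the parent sends Ping for warm-up, pickled BenchmarkRequest objects for work, and None as a shutdown sentinel; the child replies with Pong or a float latency. A self-contained sketch of that round trip with simplified stand-ins (no CUDA, Triton, or inductor imports; the names below are illustrative, not the inductor classes):

import multiprocessing as mp

class Ping: pass     # warm-up request
class Pong: pass     # warm-up reply

def child_main(requests, responses):
    while True:
        obj = requests.get()
        if obj is None:                 # sentinel: terminate the work loop
            break
        elif isinstance(obj, Ping):
            responses.put(Pong())       # handshake so the parent knows we're up
        else:
            responses.put(obj())        # "benchmark" the pickled callable, return a float

def fake_benchmark():
    return 0.123                        # pretend latency in ms

if __name__ == "__main__":
    ctx = mp.get_context("spawn")       # CUDA-safe start method, as in the file above
    req, resp = ctx.Queue(), ctx.Queue()
    worker = ctx.Process(target=child_main, args=(req, resp))
    worker.start()
    req.put(Ping())
    assert isinstance(resp.get(), Pong)
    req.put(fake_benchmark)
    print("latency:", resp.get())       # 0.123
    req.put(None)
    worker.join()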
venv/lib/python3.10/site-packages/torch/_inductor/bounds.py ADDED
@@ -0,0 +1,124 @@
+import operator
+from functools import partial
+from typing import Any, Callable, Dict
+
+from sympy import Expr
+
+import torch
+from torch.utils._sympy.value_ranges import bound_sympy, ValueRangeAnalysis, ValueRanges
+from .ir import InterpreterShim, LoopBody, LoopBodyBlock
+from .utils import cache_on_self, dominated_nodes
+from .virtualized import V
+
+
+class BoundVars:
+    """
+    Performs Value Range Analysis on LoopBody's fx graph by calling BoundVars.run()
+    It exposes the ranges of the nodes in the `bounds` variable
+
+    Note. A current limitation of this analysis is that it just works on a per-loop basis.
+    We should be able to propagate the bounds between across the whole graph. This may benefit
+    the case a bounded variable is returned by a kernel and fed into another.
+    """
+
+    def __init__(self, loop_body: LoopBody) -> None:
+        self.loop_body = loop_body
+        self.replacement_vals = {
+            k: ValueRanges[Expr](0, v - 1)
+            if (isinstance(v, int) or v.is_number)
+            else bound_sympy(v)
+            for k, v in loop_body.var_ranges.items()
+        }
+        # avoid computing these values, pessimistically assume that they are unbounded
+        self.unbounded_vars = dominated_nodes(
+            node
+            for node in self.loop_body.get_nodes()
+            if node.target in ["load", "reduction", operator.getitem]
+            or "masked_subblock" in node.target
+        )
+        # To access this variable call `get_bounds()`
+        self._bounds: Dict[torch.fx.Node, ValueRanges[Expr]] = {}
+
+    @cache_on_self
+    def get_bounds(self) -> Dict[torch.fx.Node, ValueRanges[Expr]]:
+        submodules = self.swap_submodules(self.loop_body.submodules)
+
+        # Initialize the environment with the unbounded variables
+        for node in self.unbounded_vars:
+            # we need to evaluate masked_subblock to recurse, and we need to set indirect values
+            if not isinstance(node.target, str) or (
+                "masked_subblock" not in node.target
+                and "set_indirect" not in node.target
+            ):
+                self._bounds[node] = ValueRanges[Expr].unknown()
+
+        with V.set_ops_handler(ValueRangeAnalysis()):
+            interpreter = InterpreterShim(self.loop_body.root_block.graph, submodules)
+            interpreter.run(V.get_ops_handler(), initial_env=self._bounds)
+        return self._bounds
+
+    def swap_submodules(
+        self, submodules: Dict[str, Callable[..., Any]]
+    ) -> Dict[str, Callable[..., ValueRanges[Expr]]]:
+        result: Dict[str, Callable[..., ValueRanges[Expr]]] = {}
+        for key in submodules.keys():
+            if key == "get_index":
+                result[key] = self.get_index
+            elif "masked_subblock" in key:
+                subblock = self.loop_body.subblocks[key]
+                # The result within the lambda will reference to the final
+                # set of modules at the end of the for-loop as it stores a reference to it
+
+                # bind subblock in a function because python lambdas close over by reference
+                # moving the lambda out of make_fn would close over the reference to subblock,
+                # so all lambdas would have the same subblock reference that is the final
+                # subblock in the loop
+                def make_fn(subblock):
+                    return lambda mask, value: self.masked_subblock(
+                        subblock, self._bounds, mask, value, result
+                    )
+
+                result[key] = make_fn(subblock)
+
+            elif "set_indirect" in key:
+                idx = int(key[len("set_indirect") :])
+                var = self.loop_body.indirect_vars[idx]
+                indirect = partial(self.set_indirect, var)
+                result[key] = indirect
+            else:
+                assert "scan" in key
+                result[key] = submodules[key]
+
+        return result
+
+    def masked_subblock(
+        self,
+        subblock: LoopBodyBlock,
+        env: Dict[torch.fx.Node, ValueRanges[Expr]],
+        mask: Any,
+        value: Any,
+        submodules: Dict[str, Callable[..., Any]],
+    ) -> ValueRanges[Expr]:
+        interp = InterpreterShim(subblock.graph, submodules)
+        interp.run(V.get_ops_handler(), initial_env=env)
+        output = [node for node in subblock.graph.nodes if node.target == "output"]
+        assert len(output) == 1
+        # dont bother unioning with value since the load from buffer will be
+        # pessimistically assumed to be inf anyway
+        return interp.env[output[0]]
+
+    def set_indirect(self, old: Expr, new: ValueRanges[Expr]) -> ValueRanges[Expr]:
+        assert isinstance(new, ValueRanges)
+        self.replacement_vals[old] = new
+        return new
+
+    def get_index(self, name: Expr) -> ValueRanges[Expr]:
+        expr = self.loop_body.indexing_exprs[name]
+        bound = self.replacement_vals.get(expr)
+        if bound is None:
+            bound = bound_sympy(expr, self.replacement_vals)
+        # The following assertion is true at the time of this writing
+        # We don't assert is as to not execute bound_sympy when bound is not None
+        # assert bound is None or bound == bound_sympy(expr, self.replacement_vals)
+        self.replacement_vals[name] = bound
+        return bound
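A small sketch (the symbols and ranges are made up for illustration) of the sympy value-range utility that BoundVars builds on: given interval bounds for the free symbols of an index expression, bound_sympy returns bounds for the whole expression.

import sympy
from torch.utils._sympy.value_ranges import ValueRanges, bound_sympy

i, j = sympy.symbols("i j", integer=True)
ranges = {i: ValueRanges(0, 127), j: ValueRanges(0, 7)}
print(bound_sympy(8 * i + j, ranges))   # bounds of 8*i + j over those ranges: [0, 1023]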
venv/lib/python3.10/site-packages/torch/_inductor/codecache.py ADDED
The diff for this file is too large to render.
venv/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py ADDED
@@ -0,0 +1,273 @@
+import math
+from enum import IntEnum
+
+import sympy
+
+import torch
+from . import ir
+
+from .utils import get_dtype_size, sympy_product
+from .virtualized import V
+
+
+class NCCL_COLL(IntEnum):
+    ALL_REDUCE = 0
+    ALL_GATHER = 1
+    REDUCE_SCATTER = 2
+
+
+class NVIDIA_GPU_TYPE(IntEnum):
+    VOLTA = 0
+    AMPERE = 1
+    HOPPER = 2
+
+
+def get_gpu_type() -> NVIDIA_GPU_TYPE:
+    gpu_info = torch.utils.collect_env.get_gpu_info(torch.utils.collect_env.run) or ""
+    if "V100" in gpu_info:
+        return NVIDIA_GPU_TYPE.VOLTA
+    elif "A100" in gpu_info:
+        return NVIDIA_GPU_TYPE.AMPERE
+    elif "H100" in gpu_info:
+        return NVIDIA_GPU_TYPE.HOPPER
+    else:
+        # for other gpu types, assume Ampere
+        return NVIDIA_GPU_TYPE.AMPERE
+
+
+def get_collective_type(node: ir.IRNode) -> NCCL_COLL:
+    if isinstance(node, ir._CollectiveKernel):
+        kernel_name = node.python_kernel_name
+        assert kernel_name is not None
+        if "all_reduce" in kernel_name:
+            return NCCL_COLL.ALL_REDUCE
+        elif "all_gather" in kernel_name:
+            return NCCL_COLL.ALL_GATHER
+        elif "reduce_scatter" in kernel_name:
+            return NCCL_COLL.REDUCE_SCATTER
+        else:
+            raise Exception(f"Unsupported collective kernel: {kernel_name}")
+
+    if isinstance(node, (ir.AllReduce, ir.AllReduceCoalesced)):
+        return NCCL_COLL.ALL_REDUCE
+    elif isinstance(node, (ir.AllGatherIntoTensor, ir.AllGatherIntoTensorCoalesced)):
+        return NCCL_COLL.ALL_GATHER
+    elif isinstance(node, (ir.ReduceScatterTensor, ir.ReduceScatterTensorCoalesced)):
+        return NCCL_COLL.REDUCE_SCATTER
+    else:
+        raise Exception(f"Unsupported collective type: {node}")
+
+
+def get_collective_input_size_bytes(node: ir.IRNode) -> int:
+    sz_bytes = 0
+    for inp in node.inputs:  # type: ignore[attr-defined]
+        shape = inp.layout.size
+        numel = sympy_product(inp.layout.size)
+        if isinstance(numel, sympy.Integer):
+            # For ease of testing
+            numel = int(numel)
+        else:
+            numel = V.graph.sizevars.size_hint(numel)
+        sz_bytes += numel * get_dtype_size(inp.layout.dtype)
+    return sz_bytes
+
+
+def get_collective_group_size(node: ir.IRNode) -> int:
+    if type(node) == ir._CollectiveKernel:
+        from torch.distributed.distributed_c10d import _get_group_size_by_name
+
+        return _get_group_size_by_name(node.constant_args[-1])
+    elif isinstance(node, ir.CollectiveKernel):
+        return node.constant_args[2]  # type: ignore[attr-defined]
+    else:
+        raise TypeError(f"Unsupported collective type: {node}")
+
+
+####################################################################################################################
+# The following code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
+####################################################################################################################
+
+
+class NCCL_HW(IntEnum):
+    NVLINK = 0
+    PCI = 1
+    NET = 2
+
+
+class NCCL_ALGO(IntEnum):
+    TREE = 0
+    RING = 1
+
+
+class NCCL_PROTO(IntEnum):
+    # The ordering and enum values here matches original in
+    # https://github.com/NVIDIA/nccl/blob/0b083e52096c387bad7a5c5c65b26a9dca54de8c/src/include/devcomm.h#L28
+    # For difference between these protocols, see https://github.com/NVIDIA/nccl/issues/281#issuecomment-571816990
+    LL = 0  # Low-latency
+    # LL128 = 1 # Low-latency 128-byte
+    # SIMPLE = 2
+
+
+# Latencies in us
+# len(NCCL_ALGO) x len(NCCL_PROTO)
+# NOTE: use array instead of tensor to prevent incompatibility with fake mode
+baseLat = [
+    # Tree
+    [
+        6.8,  # LL
+    ],
+    # Ring
+    [
+        6.6,  # LL
+    ],
+]
+
+# Latencies in us
+# len(NCCL_HW) x len(NCCL_ALGO) x len(NCCL_PROTO)
+hwLat = [
+    # NVLINK
+    [
+        [0.6],  # Tree (LL)
+        [0.6],  # Ring (LL)
+    ],
+    # PCI
+    [
+        [1.0],  # Tree (LL)
+        [1.0],  # Ring (LL)
+    ],
+    # NET
+    [
+        [5.0],  # Tree (LL)
+        [2.7],  # Ring (LL)
+    ],
+]
+
+
+# LL128 max BW per channel
+llMaxBws = [
+    # Volta-N1/Intel-N2/Intel-N4
+    [
+        39.0,
+        39.0,
+        20.4,
+    ],
+    # Ampere-N1/AMD-N2/AMD-N4
+    [
+        87.7,
+        22.5,  # avg of ring & tree
+        19.0,
+    ],
+    # Hopper-N1/AMD-N2/AMD-N4
+    [
+        87.7,
+        22.5,  # avg of ring & tree
+        19.0,
+    ],
+]
+
+
+def estimate_nccl_collective_runtime(node: ir.IRNode) -> float:
+    """
+    Returns estimated NCCL collective runtime in nanoseconds (ns).
+
+    The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
+    We aim to estimate the runtime as accurately as possible.
+
+    Assumptions:
+    - only ring algorithm (NCCL_ALGO_RING) is used
+    - only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
+    - 8 gpus per node  # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
+    - collective is one of: allreduce, reducescatter, allgather
+    """
+    tensor_storage_size_bytes = get_collective_input_size_bytes(node)
+    # Convert bytes to GB
+    tensor_storage_size_GB = tensor_storage_size_bytes / 1024 / 1024 / 1024
+
+    # Currently assumes each node has 8 gpus. And when >1 node is used, assumes each node uses all 8 gpus.
+    # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
+    num_gpus_per_node = 8
+    group_size = get_collective_group_size(node)
+    nNodes = math.ceil(group_size / num_gpus_per_node)
+    nRanks = group_size  # this is total # of gpus globally that participate in this collective op
+
+    if nRanks <= 1:
+        return 0
+
+    # Assumes ring algorithm
+    nccl_algo = NCCL_ALGO.RING
+    nccl_proto = NCCL_PROTO.LL
+    coll = get_collective_type(node)
+
+    # =============== bandwidth computation ===============
+    # First compute bandwidth in GB/s; then at the end, convert it to GB/ns
+
+    bwIntra = torch._inductor.config.intra_node_bw
+    bwInter = torch._inductor.config.inter_node_bw
+
+    compCapIndex = get_gpu_type()
+    index2 = nNodes - 1 if nNodes <= 2 else 2
+    # LL: for single node, we look at GPU type; for multi-node, we look at CPU type
+    index1 = compCapIndex if nNodes == 1 else 0
+    llMaxBw = llMaxBws[index1][index2]
+
+    # NOTE: each step of ring algorithm is synchronized,
+    # and is bottlenecked by the slowest link which is the inter-node interconnect.
+    # hence when nNodes >= 2, bw is inter-node bandwidth.
+    # NOTE: the original code in https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc
+    # have this as `if nNodes <= 2` which seems wrong. Corrected it here.
+    bw = bwIntra if nNodes == 1 else bwInter
+    nChannels = 2  # Assume # channels is 2
+    busBw = nChannels * bw
+
+    # Various model refinements
+    busBw = min(
+        llMaxBw,
+        busBw
+        * (1.0 / 4.0 if (nNodes > 1 or coll == NCCL_COLL.ALL_REDUCE) else 1.0 / 3.0),
+    )
+
+    if coll == NCCL_COLL.ALL_REDUCE:
+        nsteps = 2 * (nRanks - 1)
+    elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
+        nsteps = nRanks - 1
+
+    # Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
+    ratio = (1.0 * nRanks) / nsteps  # type: ignore[possibly-undefined]
+    bandwidth = busBw * ratio
+    # Convert GB/s to GB/ns
+    bandwidth_GB_per_ns = bandwidth / 1e9
+
+    # =============== latency computation ===============
+    intraHw = NCCL_HW.NVLINK
+    hw = intraHw if nNodes == 1 else NCCL_HW.NET
+
+    if coll == NCCL_COLL.ALL_REDUCE:
+        if nNodes > 1:
+            nInterSteps = 2 * nNodes
+        else:
+            nInterSteps = 0
+    elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
+        nInterSteps = nNodes - 1
+
+    # First compute latency in us; then at the end, convert it to ns
+    latency = baseLat[nccl_algo][nccl_proto]
+    intraLat = hwLat[intraHw][nccl_algo][nccl_proto]
+    interLat = hwLat[NCCL_HW.NET][nccl_algo][nccl_proto]
+
+    # Inter-node rings still have to launch nsteps * net overhead.
+    netOverhead = 0.0
+    if nNodes > 1:
+        netOverhead = 1.0  # getNetOverhead(comm);
+    intraLat = max(intraLat, netOverhead)
+    latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat  # type: ignore[possibly-undefined]
+    # Convert us to ns
+    latency_ns = latency * 1e3
+
+    # =============== final result ===============
+    transport_ns = tensor_storage_size_GB / bandwidth_GB_per_ns
+    return transport_ns + latency_ns
+
+
+################################################################################################################
+# The above code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
+################################################################################################################
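To make the model above concrete, a worked example with assumed numbers (8 GPUs on a single Ampere node, a 1 GiB all_reduce, 2 channels, and an assumed 300 GB/s intra-node bandwidth; only the table constants come from the file):

# ring all_reduce of 1 GiB across 8 GPUs on one node
ranks, size_gib = 8, 1.0
bus_bw = min(87.7, 2 * 300 * 0.25)          # nChannels * bwIntra * 1/4, capped by llMaxBw -> 87.7 GB/s
nsteps = 2 * (ranks - 1)                     # 14 ring steps for all_reduce
algo_bw = bus_bw * ranks / nsteps            # ~50.1 GB/s
transport_ns = size_gib / (algo_bw / 1e9)    # ~2.0e7 ns
latency_ns = (6.6 + nsteps * 0.6) * 1e3      # baseLat + nsteps * NVLink hwLat -> 15,000 ns
print(f"~{(transport_ns + latency_ns) / 1e6:.1f} ms")   # ~20.0 ms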
venv/lib/python3.10/site-packages/torch/_inductor/comms.py ADDED
@@ -0,0 +1,363 @@
+# pyre-strict
+
+from typing import List
+
+import torch
+
+from . import config, ir, scheduler
+from .dependencies import WeakDep
+from .utils import tuple_sorted
+
+overlap_log = torch._logging.getArtifactLogger(__name__, "overlap")
+
+
+def sink_waits(
+    snodes: List["scheduler.BaseSchedulerNode"],
+) -> List["scheduler.BaseSchedulerNode"]:
+    """
+    Greedily moves waits as late as possible (i.e. until we reach a use). Optimal in terms of
+    communication overlap.
+    """
+    new_order = []
+    cur_waits = set()
+    for snode in snodes:
+        if isinstance(snode.node, ir.Wait):
+            cur_waits.add(snode)
+        else:
+            for wait in tuple_sorted(cur_waits):
+                if snode in wait.node_users:
+                    new_order.append(wait)
+                    cur_waits.remove(wait)
+            new_order.append(snode)
+    new_order.extend(tuple_sorted(cur_waits))
+    return new_order
+
+
+def raise_comms(
+    snodes: List["scheduler.BaseSchedulerNode"],
+) -> List["scheduler.BaseSchedulerNode"]:
+    """
+    Greedily moves comms as early as possible (i.e. until we reach an input).
+    Optimal in terms of communication overlap.
+
+    TODO: We might want to adjust this in the future to account for memory limitations.
+    e.g. when we are compiling FSDP, this heuristics will cause the all-gathers to be prefetched as soon as possible,
+    which is the beginning of the forwards pass. We'll have to either do a special pass for FSDP,
+    or we'll want to redo this pass with memory considerations so we handle the FSDP case in a general way.
+    """
+    new_order_reversed: List["scheduler.BaseSchedulerNode"] = []
+    cur_comms: List["scheduler.BaseSchedulerNode"] = []
+    for snode in reversed(snodes):
+        if isinstance(snode.node, ir.CollectiveKernel):
+            cur_comms.append(snode)
+        else:
+            for comm in cur_comms:
+                assert len(comm.inverse_users) > 0
+            while len(cur_comms) > 0 and any(
+                snode in comm.inverse_users for comm in cur_comms
+            ):
+                comm = cur_comms.pop(0)
+                new_order_reversed.append(comm)
+            new_order_reversed.append(snode)
+    assert len(cur_comms) <= 1
+    new_order_reversed.extend(tuple_sorted(cur_comms))
+    return new_order_reversed[::-1]
+
+
+def get_ancestors(node):
+    ancestors = set()
+    cur_nodes = [node]
+    while len(cur_nodes) > 0:
+        new_nodes = []
+        for node in cur_nodes:
+            for inp in node.inverse_users:
+                if inp not in ancestors:
+                    ancestors.add(inp)
+                    new_nodes.append(inp)
+        cur_nodes = new_nodes
+    return ancestors
+
+
+def get_descendants(node):
+    descendants = set()
+    cur_nodes = [node]
+    while len(cur_nodes) > 0:
+        new_nodes = []
+        for node in cur_nodes:
+            for inp in node.node_users:
+                if inp not in descendants:
+                    descendants.add(inp)
+                    new_nodes.append(inp)
+        cur_nodes = new_nodes
+    return descendants
+
+
+def decide_global_ordering_of_comms(nodes: List["scheduler.BaseSchedulerNode"]):
+    """
+    Decide global ordering of comms, by just enforcing the ordering that's in the input graph
+    (might not be the same ordering as the eager mode program).
+    TODO: Come up with a better approach
+    """
+    comm_nodes = [n for n in nodes if isinstance(n.node, ir.CollectiveKernel)]
+    for i in range(1, len(comm_nodes)):
+        # Enforce ordering by making previous comm a `WeakDep` dependency of the next comm
+        comm_nodes[i].add_fake_dep(WeakDep(comm_nodes[i - 1].get_name()))
+
+
+def assert_no_comm_nodes(snodes: List["scheduler.BaseSchedulerNode"]) -> None:
+    assert not any(isinstance(snode.node, ir.CollectiveKernel) for snode in snodes)
+
+
+def estimate_op_runtime(snode: "scheduler.BaseSchedulerNode") -> float:
+    """
+    Returns estimated op runtime in nanoseconds (ns)
+    """
+    if config.estimate_op_runtime == "default":
+        runtime = snode.get_estimated_runtime()
+    else:
+        assert callable(config.estimate_op_runtime)
+        runtime = config.estimate_op_runtime(snode)
+    return runtime
+
+
+def reorder_compute_for_overlap(
+    snodes: List["scheduler.BaseSchedulerNode"],
+) -> List["scheduler.BaseSchedulerNode"]:
+    """
+    Decides a global ordering of all compute and communication nodes,
+    assuming that we already have a global ordering of communication nodes.
+
+    Overall scheduling procedure is:
+        Step 1: Given that we've currently scheduled comm N, we now schedule all compute nodes
+            that are required for comm N + 1 but do not depend on comm N, to run at the same time with comm N.
+        Step 2: If all those compute nodes are sufficient to overlap comm N, we're done.
+            Otherwise, we now need to look elsewhere to find compute that overlaps with comm N.
+            We prioritize compute nodes that are needed sooner.
+        Step 3: We schedule the compute nodes dependent on comm N and required for comm N + 1.
+        Step 4: We schedule comm N + 1.
+    Repeat this for subsequent comm nodes.
+    """
+    final_order = []
+
+    comm_nodes = []
+    for snode in snodes:
+        if isinstance(snode.node, ir.CollectiveKernel):
+            comm_nodes.append(snode)
+    if len(comm_nodes) == 0:
+        # if there is no comm nodes, return the current order
+        return snodes
+
+    comm_ancestors = {node: get_ancestors(node) for node in comm_nodes}
+    comm_descendants = {node: get_descendants(node) for node in comm_nodes}
+
+    indeg = dict.fromkeys(snodes, 0)
+    for snode in snodes:
+        for user in snode.node_users:
+            if user in indeg:
+                indeg[user] += 1
+    ready_to_schedule_nodes = {node for node in snodes if indeg[node] == 0}
+
+    unscheduled_nodes = set()
+    unscheduled_nodes = set(snodes)
+
+    def schedule_node(snode):
+        """
+        Schedule a single node.
+        """
+        assert snode in unscheduled_nodes
+        assert snode in ready_to_schedule_nodes
+        ready_to_schedule_nodes.remove(snode)
+        unscheduled_nodes.remove(snode)
+        final_order.append(snode)
+        for user in tuple_sorted(snode.node_users):
+            if user in indeg:
+                indeg[user] -= 1
+                if indeg[user] == 0:
+                    ready_to_schedule_nodes.add(user)
+
+    def schedule_nodes(snodes):
+        """
+        Schedules all nodes in `snodes` in an arbitrary topologically valid order.
+        """
+        all_nodes = set(snodes)
+        assert all(node in unscheduled_nodes for node in all_nodes)
+        while len(all_nodes) > 0:
+            # NOTE: since model graph is always a DAG and does not have circular dependency inside,
+            # there should be at least one node that is a "free node" (i.e. indeg == 0),
+            # hence infinite loop is not possible. But we check here just to be safe.
+            progress = False
+            for node in tuple_sorted(all_nodes):
+                if node in ready_to_schedule_nodes:
+                    schedule_node(node)
+                    all_nodes.remove(node)
+                    progress = True
+            if not progress:
+                raise Exception(
+                    "Unable to find a free node (indeg == 0). This is an impossible state to reach. "
+                    "Please report a bug to PyTorch."
+                )
+
+    # First, schedule all compute nodes that are required by first comm node,
+    # as well as the first comm node itself.
+    assert len(comm_nodes) > 0
+    schedule_nodes(
+        list(comm_ancestors[comm_nodes[0]]) + [comm_nodes[0]],
+    )
+
+    rolled_over_compute_cost = 0
+    for idx in range(1, len(comm_ancestors)):
+        # Step 1: Given that we've currently scheduled comm `idx-1`, we now schedule
+        # all compute nodes that are required for comm `idx` but do not depend on comm `idx-1`,
+        # to run at the same time with comm `idx-1`.
+        needed_by_next_comm_and_ready_compute_nodes = unscheduled_nodes & (
+            comm_ancestors[comm_nodes[idx]] - comm_descendants[comm_nodes[idx - 1]]
+        )
+        assert_no_comm_nodes(needed_by_next_comm_and_ready_compute_nodes)
+
+        total_compute_runtime_cost = rolled_over_compute_cost + sum(
+            [
+                estimate_op_runtime(node)
+                for node in needed_by_next_comm_and_ready_compute_nodes
+            ]
+        )
+        prev_comm_runtime_cost = estimate_op_runtime(comm_nodes[idx - 1])
+        schedule_nodes(tuple_sorted(needed_by_next_comm_and_ready_compute_nodes))
+
+        # Step 2: If all those compute nodes are sufficient to overlap comm `idx-1`, we're done.
+        # Otherwise, we now need to look elsewhere to find compute that overlaps with comm `idx`.
+        # We prioritize compute nodes that are needed sooner.
+        step1_runtime_cost = total_compute_runtime_cost
+        if step1_runtime_cost >= prev_comm_runtime_cost:
+            pass
+        else:
+            # Find all ready to schedule compute nodes that do not depend on comm `idx-1`.
+            ready_to_schedule_compute_nodes = tuple_sorted(
+                ready_to_schedule_nodes - comm_descendants[comm_nodes[idx - 1]]
+            )
+            assert_no_comm_nodes(ready_to_schedule_compute_nodes)
+
+            def earliest_comm_descendant(node):
+                for idx in range(len(comm_nodes)):
+                    if node in comm_ancestors[comm_nodes[idx]]:
+                        return idx
+                return len(comm_nodes)
+
+            # Prioritize compute nodes that are needed sooner.
+            ready_to_schedule_compute_nodes = sorted(
+                ready_to_schedule_compute_nodes, key=earliest_comm_descendant
+            )
+
+            for snode in ready_to_schedule_compute_nodes:
+                if total_compute_runtime_cost >= prev_comm_runtime_cost:
+                    # If accumulated compute runtime cost is greater than comm `idx-1` runtime cost,
+                    # it means we have maximized overlap for comm `idx-1`, and hence we stop looking
+                    # for more compute to schedule.
+                    break
+                compute_runtime_cost = estimate_op_runtime(snode)
+                # If we're not able to leverage more than half of this
+                # node's compute to overlap, we skip it.
+                # TODO: Smarter heuristics here
+                if (
+                    prev_comm_runtime_cost - total_compute_runtime_cost
+                ) <= compute_runtime_cost / 2:
+                    continue
+                schedule_node(snode)
+                total_compute_runtime_cost += compute_runtime_cost
+        rollable_compute_cost = total_compute_runtime_cost - step1_runtime_cost
+
+        # Step 3: We schedule the compute nodes dependent on comm `idx-1` and required for comm `idx`.
+        needed_by_next_comm_nodes = unscheduled_nodes & comm_ancestors[comm_nodes[idx]]
+        schedule_nodes(list(needed_by_next_comm_nodes))
+
+        # Step 4: We schedule comm `idx`.
+        schedule_nodes([comm_nodes[idx]])
+
+        is_prev_comm_blocking_next_comm = len(needed_by_next_comm_nodes) > 0
+        # The idea here is that if there are no compute nodes from Step 3
+        # (i.e. if prev comm is not blocking next comm), we can roll over the compute nodes
+        # in Step 2 to overlap with the next comm, since they're not required to finish
+        # before the next comm starts.
+        if is_prev_comm_blocking_next_comm:
+            rolled_over_compute_cost = 0
+        else:
+            rolled_over_compute_cost = rollable_compute_cost  # type: ignore[assignment]
+
+    schedule_nodes(unscheduled_nodes)
+    return final_order
+
+
+def node_summary(snode):
+    detail = ""
+    if isinstance(snode.node, ir.ExternKernelOut):
+        detail = f" ({snode.node.python_kernel_name})"
+    out_tensor_info = ""
+    if (
+        hasattr(snode.node, "layout")
+        and hasattr(snode.node.layout, "size")
+        and hasattr(snode.node.layout, "stride")
+    ):
+        out_tensor_info = (
+            f" (size={snode.node.layout.size}, stride={snode.node.layout.stride})"
+        )
+    node_name = ""
+    if hasattr(snode.node, "name"):
+        node_name = snode.node.name
+    return f"{snode.node.__class__.__name__}{detail}{out_tensor_info} ({node_name})"
+
+
+def visualize_overlap(order):
+    total_est_runtime: float = 0.0
+    cur_comm_node = None
+    for snode in order:
+        if cur_comm_node is None:
+            if isinstance(snode.node, ir.CollectiveKernel):
+                total_est_runtime += estimate_op_runtime(snode)
+                cur_comm_node = snode.node
+            elif isinstance(snode.node, ir.Wait):
+                raise Exception(
+                    "Wait is not expected when there is no collective running
319
+ )
320
+ else: # exposed compute op
321
+ total_est_runtime += estimate_op_runtime(snode)
322
+ overlap_log.debug(f"{node_summary(snode)}") # noqa: G004
323
+ else: # cur_comm_node is not None
324
+ if isinstance(snode.node, ir.CollectiveKernel):
325
+ raise Exception(
326
+ "Found two collectives running at the same time. "
327
+ "`visualize_overlap` needs to be updated to handle this case"
328
+ )
329
+ elif isinstance(snode.node, ir.Wait): # end of this comm op
330
+ overlap_log.debug(f"{node_summary(snode)}") # noqa: G004
331
+ cur_comm_node = None
332
+ else: # overlapped compute op
333
+ overlap_log.debug(f"| {node_summary(snode)}") # noqa: G004
334
+ overlap_log.debug(
335
+ f"Est. runtime (ms): {total_est_runtime / 1000 / 1000}" # noqa: G004
336
+ )
337
+
338
+
339
+ def reorder_compute_and_comm_for_overlap(
340
+ snodes: List["scheduler.BaseSchedulerNode"],
341
+ ) -> List["scheduler.BaseSchedulerNode"]:
342
+ order = snodes
343
+ for p in config.reorder_for_compute_comm_overlap_passes:
344
+ if isinstance(p, str) and p in globals():
345
+ p = globals()[p] # it is a builtin pass
346
+ if torch.distributed.get_rank() == 0:
347
+ overlap_log.debug(
348
+ f"==== Visualize overlap before reordering pass {p} ====" # noqa: G004
349
+ )
350
+ try:
351
+ visualize_overlap(order)
352
+ except Exception as e:
353
+ overlap_log.debug(str(e))
354
+ order = p(order) # type: ignore[operator]
355
+ if torch.distributed.get_rank() == 0:
356
+ overlap_log.debug(
357
+ f"==== Visualize overlap after reordering pass {p} ====" # noqa: G004
358
+ )
359
+ try:
360
+ visualize_overlap(order)
361
+ except Exception as e:
362
+ overlap_log.debug(str(e))
363
+ return order
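
The loop above resolves each entry of `config.reorder_for_compute_comm_overlap_passes` either to a built-in pass looked up in `globals()` or to a user-supplied callable. Below is a minimal sketch of wiring in a custom pass, assuming only what the loop itself requires (a callable from a list of scheduler nodes to a reordered list); `my_noop_pass` is a hypothetical name, and the companion `reorder_for_compute_comm_overlap` boolean flag is assumed to exist in `torch._inductor.config`:

    from typing import List

    import torch._inductor.config as inductor_config


    def my_noop_pass(snodes: List["torch._inductor.scheduler.BaseSchedulerNode"]):
        # Must return the nodes in some topologically valid order;
        # returning them unchanged is the trivial case.
        return snodes


    # Passes run in list order; built-in passes can also be named as strings.
    inductor_config.reorder_for_compute_comm_overlap = True
    inductor_config.reorder_for_compute_comm_overlap_passes = [my_noop_pass]
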
venv/lib/python3.10/site-packages/torch/_inductor/compile_fx.py ADDED
@@ -0,0 +1,1451 @@
1
+ import contextlib
2
+ import functools
3
+ import logging
4
+ import os
5
+ import sys
6
+ import time
7
+ import warnings
8
+ from itertools import count
9
+
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ Dict,
14
+ FrozenSet,
15
+ List,
16
+ Optional,
17
+ Sequence,
18
+ Tuple,
19
+ Union,
20
+ )
21
+ from unittest import mock
22
+
23
+ from functorch.compile import min_cut_rematerialization_partition
24
+
25
+ import torch.fx
26
+ import torch.utils._pytree as pytree
27
+ from torch._dynamo import (
28
+ compiled_autograd,
29
+ config as dynamo_config,
30
+ logging as dynamo_logging,
31
+ utils as dynamo_utils,
32
+ )
33
+ from torch._dynamo.utils import (
34
+ counters,
35
+ detect_fake_mode,
36
+ lazy_format_graph_code,
37
+ optimus_scuba_log,
38
+ )
39
+ from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
40
+ from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
41
+ from torch._inductor.cudagraph_utils import BoxedDeviceIndex
42
+
43
+ from torch._inductor.debug import save_args_for_compile_fx_inner
44
+ from torch._inductor.utils import BoxedBool, count_tangents
45
+ from torch._logging import trace_structured
46
+ from torch._ops import OpOverload
47
+ from torch._subclasses.fake_tensor import FakeTensor
48
+ from torch._utils_internal import signpost_event
49
+ from torch.fx.passes.fake_tensor_prop import FakeTensorProp
50
+
51
+ from .._dynamo.backends.common import aot_autograd
52
+ from ..fx._lazy_graph_module import _use_lazy_graph_module # type: ignore[attr-defined]
53
+ from ..fx.graph import _PyTreeCodeGen
54
+ from . import config, metrics
55
+ from .debug import DebugContext
56
+ from .decomposition import select_decomp_table
57
+ from .fx_passes.joint_graph import joint_graph_passes
58
+ from .fx_passes.post_grad import post_grad_passes, view_to_reshape
59
+ from .fx_passes.pre_grad import pre_grad_passes
60
+ from .graph import GraphLowering
61
+ from .ir import ExternKernelNode
62
+ from .utils import get_dtype_size, has_incompatible_cudagraph_ops, output_node
63
+ from .virtualized import V
64
+
65
+ if config.is_fbcode():
66
+ from torch._inductor.fb.utils import time_and_log
67
+ else:
68
+ # no-op decorator
69
+ def time_and_log(attr: str, extra_loggings: Optional[Dict[str, str]] = None):
70
+ return dynamo_utils.identity
71
+
72
+
73
+ log = logging.getLogger(__name__)
74
+ perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
75
+ post_grad_graphs_log = torch._logging.getArtifactLogger(__name__, "post_grad_graphs")
76
+ ALIGNMENT = 16
77
+
78
+
79
+ # copy_ fails when trying to write to tensors with internal memory overlap.
80
+ # For expanded dimensions (a dimension which used to have size 1 and was
81
+ # broadcast to a larger size), we can select one element from that dimension
82
+ # and write to it, which effectively writes to all values of that dimension of the input tensor
83
+ def get_expanded_dims(t):
84
+ if not isinstance(t, torch.Tensor):
85
+ return None
86
+ return [i for i in range(t.ndim) if t.stride(i) == 0 and t.size(i) != 1]
87
+
88
+
89
+ def index_expanded_dims(t: torch.Tensor, expanded_dims: List[int]) -> torch.Tensor:
90
+ for expanded_dim in expanded_dims:
91
+ t = torch.ops.aten.slice(t, expanded_dim, 0, 1)
92
+ return t
93
+
94
+
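
A small sketch of the trick described in the comment above get_expanded_dims: selecting a single element along an expanded (stride-0) dimension gives a view that copy_ accepts, yet the write is visible across the whole expanded dimension because it shares storage. The toy shapes are arbitrary:

    import torch

    base = torch.zeros(3, 1)
    view = base.expand(3, 4)                      # dim 1 expanded: stride 0, size 4

    narrow = torch.ops.aten.slice(view, 1, 0, 1)  # what index_expanded_dims does
    narrow.copy_(torch.ones(3, 1))                # safe: no internal overlap left

    print(view)                                   # every column now holds 1.0
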
95
+ def complex_memory_overlap(t: torch.Tensor) -> bool:
96
+ # if torch._debug_has_internal_overlap thinks this tensor potentially has
97
+ # memory overlap internally, let's dig deeper to find out whether it's true.
98
+ t = index_expanded_dims(t, get_expanded_dims(t))
99
+ if torch._debug_has_internal_overlap(t) != 0:
100
+ strides = t.stride()
101
+ sizes = t.shape
102
+ indices = list(range(len(strides)))
103
+ indices = [x for _, x in sorted(zip(strides, indices))]
104
+ for i in range(len(strides)):
105
+ prev_stride = 1 if i == 0 else strides[indices[i - 1]]
106
+ prev_size = 1 if i == 0 else sizes[indices[i - 1]]
107
+ if strides[indices[i]] < prev_stride * prev_size:
108
+ return True
109
+ return False
110
+
111
+
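
To see why complex_memory_overlap slices expanded dimensions away before consulting torch._debug_has_internal_overlap, consider a broadcasted view: the raw check flags the zero stride, but after the slice there is no real overlap left. A quick illustration; the printed return codes (1 for "overlap", 0 for "no overlap") are an assumption about the internal helper's current encoding:

    import torch

    expanded = torch.randn(4, 1).expand(4, 8)            # stride 0 along dim 1
    print(torch._debug_has_internal_overlap(expanded))   # 1: flagged as overlapping

    sliced = torch.ops.aten.slice(expanded, 1, 0, 1)     # drop the expanded dim
    print(torch._debug_has_internal_overlap(sliced))     # 0: no overlap after slicing
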
112
+ @functools.lru_cache(None)
113
+ def _step_logger():
114
+ return dynamo_logging.get_step_logger(log)
115
+
116
+
117
+ @functools.lru_cache(None)
118
+ def _warn_tf32_disabled():
119
+ if (
120
+ torch.cuda.is_available()
121
+ and not torch.backends.cuda.matmul.allow_tf32
122
+ and torch.cuda.get_device_capability() >= (8, 0)
123
+ ):
124
+ warnings.warn(
125
+ "TensorFloat32 tensor cores for float32 matrix multiplication available but not enabled. "
126
+ "Consider setting `torch.set_float32_matmul_precision('high')` for better performance."
127
+ )
128
+
129
+
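
The warning above points users at the standard public knobs for enabling TF32 on Ampere-class GPUs; a minimal sketch:

    import torch

    # High-level hint: allow TF32 for float32 matmuls ...
    torch.set_float32_matmul_precision("high")

    # ... or flip the backend flags directly.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
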
130
+ def _unlift_graph(mod, gm, graph_signature):
131
+ from torch.export.unflatten import _assign_attr, _AttrKind
132
+
133
+ state_dict = {}
134
+ for name, param in mod.named_parameters(remove_duplicate=False):
135
+ state_dict[name] = param
136
+ _assign_attr(
137
+ param,
138
+ gm,
139
+ name,
140
+ attr_kind=_AttrKind.PARAMETER,
141
+ )
142
+ for name, buffer in mod.named_buffers(remove_duplicate=False):
143
+ state_dict[name] = buffer
144
+ _assign_attr(
145
+ buffer,
146
+ gm,
147
+ name,
148
+ attr_kind=_AttrKind.BUFFER,
149
+ )
150
+
151
+ placeholder_nodes = [node for node in gm.graph.nodes if node.op == "placeholder"]
152
+ lifted_inputs = []
153
+ for node in placeholder_nodes:
154
+ node_name = node.name
155
+ if node_name in graph_signature.inputs_to_parameters:
156
+ lifted_inputs.append(graph_signature.inputs_to_parameters[node_name])
157
+ elif node_name in graph_signature.inputs_to_buffers:
158
+ lifted_inputs.append(graph_signature.inputs_to_buffers[node_name])
159
+ else:
160
+ assert node_name in graph_signature.user_inputs
161
+ lifted_inputs.append(None)
162
+
163
+ from torch.export._unlift import _unlift
164
+
165
+ outputs = list(gm.graph.nodes)[-1].args[0]
166
+ mutated_outputs = []
167
+ for out in outputs:
168
+ if out in graph_signature.buffers_to_mutate:
169
+ mutated_outputs.append(graph_signature.buffers_to_mutate[out.name])
170
+ else:
171
+ mutated_outputs.append(None)
172
+
173
+ unlifted_gm = _unlift(
174
+ gm,
175
+ lifted_inputs,
176
+ mutated_outputs,
177
+ pytree.LeafSpec(),
178
+ None,
179
+ state_dict,
180
+ {},
181
+ )
182
+ return unlifted_gm
183
+
184
+
185
+ def _get_subgraph_names(gm):
186
+ for node in gm.graph.nodes:
187
+ if node.target == torch.ops.higher_order.cond:
188
+ true_subgraph_name = node.args[1].name
189
+ false_subgraph_name = node.args[2].name
190
+ yield true_subgraph_name
191
+ yield false_subgraph_name
192
+
193
+
194
+ def _recursive_pre_grad_passes(gm, example_inputs):
195
+ for subgraph_name in _get_subgraph_names(gm):
196
+ subgraph = getattr(gm, subgraph_name)
197
+ # as we don't have recursive example inputs, passing None here
198
+ new_subgraph = _recursive_pre_grad_passes(subgraph, example_inputs=None)
199
+ setattr(gm, subgraph_name, new_subgraph)
200
+ return pre_grad_passes(gm, example_inputs)
201
+
202
+
203
+ def _recursive_joint_graph_passes(gm):
204
+ for subgraph_name in _get_subgraph_names(gm):
205
+ subgraph = getattr(gm, subgraph_name)
206
+ _recursive_joint_graph_passes(subgraph)
207
+ joint_graph_passes(gm)
208
+
209
+
210
+ def _recursive_post_grad_passes(gm, is_inference: bool = False):
211
+ for subgraph_name in _get_subgraph_names(gm):
212
+ subgraph = getattr(gm, subgraph_name)
213
+ _recursive_post_grad_passes(subgraph, is_inference)
214
+ post_grad_passes(gm, is_inference)
215
+
216
+
217
+ def split_const_gm(
218
+ gm: torch.fx.GraphModule,
219
+ ) -> Tuple[torch.fx.GraphModule, Dict[str, int]]:
220
+ """
221
+ This function takes a GraphModule input "gm".
222
+ The gm will be split into 2 components:
223
+ 1) const_gm, which consists of the subgraph of gm that can be constant folded.
224
+ 2) gm (modified in place), which becomes the graph after constant folding.
225
+
226
+ const_output_index is a mapping from the corresponding node name in gm to the
227
+ output index of const_gm.
228
+ Returns (const_gm, const_output_index)
229
+ """
230
+ from torch._inductor.constant_folding import (
231
+ CONST_MODULE_TAG,
232
+ META_TAG,
233
+ MODULE_TAG,
234
+ replace_node_with_constant,
235
+ run_and_get_constant_graph,
236
+ )
237
+
238
+ const_gm = run_and_get_constant_graph(gm)
239
+ const_result = const_gm()
240
+
241
+ const_outputs = {
242
+ x.name: idx for idx, x in enumerate(tuple(const_gm.graph.nodes)[-1].args[0])
243
+ }
244
+
245
+ to_erase_node = []
246
+ to_replace_node = []
247
+ const_output_index = {}
248
+ for node in gm.graph.nodes:
249
+ if node.name in const_outputs:
250
+ to_replace_node.append(node)
251
+ elif node.meta[META_TAG] == CONST_MODULE_TAG:
252
+ to_erase_node.append(node)
253
+
254
+ for node in to_replace_node:
255
+ new_const_name = "_FOLDED_CONST_" + node.name
256
+ replace_node_with_constant(
257
+ gm,
258
+ node,
259
+ const_result[const_outputs[node.name]],
260
+ new_const_name,
261
+ )
262
+ const_output_index[new_const_name] = const_outputs[node.name]
263
+ for node in to_erase_node[::-1]:
264
+ if node.users:
265
+ for n in node.users:
266
+ assert n.meta[META_TAG] == MODULE_TAG, f"node: {node} user not empty."
267
+ else:
268
+ gm.graph.erase_node(node)
269
+ gm.recompile()
270
+
271
+ return const_gm, const_output_index
272
+
273
+
274
+ def is_tf32_warning_applicable(gm: torch.fx.GraphModule):
275
+ aten = torch.ops.aten
276
+ tf32_ops = {
277
+ aten.mm.default,
278
+ aten.addmm.default,
279
+ aten.bmm.default,
280
+ aten.baddbmm.default,
281
+ }
282
+ for node in gm.graph.nodes:
283
+ if (
284
+ node.op == "call_function"
285
+ and node.target in tf32_ops
286
+ and isinstance(node.meta.get("val", None), torch.Tensor)
287
+ and node.meta["val"].dtype == torch.float32
288
+ and node.meta["val"].device.type == "cuda"
289
+ ):
290
+ return True
291
+ return False
292
+
293
+
294
+ @DebugContext.wrap
295
+ def count_bytes_inner(
296
+ gm: torch.fx.GraphModule,
297
+ example_inputs: List[torch.Tensor],
298
+ num_fixed: int = 0,
299
+ **kwargs,
300
+ ):
301
+ shape_env = _shape_env_from_inputs(example_inputs)
302
+ fake_mode = fake_tensor_prop(gm, example_inputs)
303
+
304
+ with V.set_fake_mode(fake_mode):
305
+ _recursive_post_grad_passes(gm, False)
306
+
307
+ graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)
308
+ with V.set_graph_handler(graph), V.set_real_inputs(example_inputs):
309
+ graph.run(*example_inputs)
310
+ num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes()
311
+ metrics.num_bytes_accessed += num_bytes
312
+ metrics.nodes_num_elem += nodes_num_elem
313
+ metrics.node_runtimes += node_runtimes
314
+ return make_boxed_func(gm.forward)
315
+
316
+
317
+ def fake_tensor_prop(
318
+ gm: torch.fx.GraphModule,
319
+ example_inputs: List[torch.Tensor],
320
+ force_allow_non_fake_inputs: bool = False,
321
+ ):
322
+ """
323
+ If we cannot detect a fake mode from the context of the inputs, create one.
324
+
325
+ The detected or newly created fake mode is returned.
326
+ """
327
+ fake_mode = detect_fake_mode(example_inputs)
328
+ if not fake_mode:
329
+ fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
330
+ FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)
331
+ else:
332
+ ctx = (
333
+ contextlib.nullcontext()
334
+ if not force_allow_non_fake_inputs
335
+ else mock.patch.object(fake_mode, "allow_non_fake_inputs", True)
336
+ )
337
+ with ctx: # type: ignore[attr-defined]
338
+ FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(
339
+ *example_inputs
340
+ )
341
+
342
+ return fake_mode
343
+
344
+
345
+ # pass config dict back to user
346
+ def get_patched_config_dict(config_patches=None) -> Dict[str, Any]:
347
+ with config.patch(config_patches):
348
+ return config.get_config_copy()
349
+
350
+
351
+ @DebugContext.wrap
352
+ @torch.utils._python_dispatch._disable_current_modes()
353
+ @time_and_log(
354
+ attr="compilation time (in seconds)",
355
+ extra_loggings={"config_dict": str(get_patched_config_dict())},
356
+ )
357
+ # Need this decorator for compile_fx_inner even if we already have one for
358
+ # compile_fx. The reason is that compilation of the backward graph may happen after
359
+ # compile_fx returns, and we may want to use the _LazyGraphModule for compiling
360
+ # the backward graph as well.
361
+ @_use_lazy_graph_module(dynamo_config.use_lazy_graph_module)
362
+ @dynamo_utils.dynamo_timed(phase_name="inductor_compile")
363
+ def compile_fx_inner(
364
+ gm: torch.fx.GraphModule,
365
+ example_inputs: List[torch.Tensor],
366
+ cudagraphs: Optional[BoxedBool] = None,
367
+ num_fixed: int = 0,
368
+ is_backward: bool = False,
369
+ graph_id: Optional[int] = None,
370
+ cpp_wrapper: bool = False,
371
+ aot_mode: bool = False,
372
+ is_inference: bool = False,
373
+ boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
374
+ user_visible_outputs: FrozenSet[str] = frozenset(),
375
+ layout_opt: Optional[bool] = None,
376
+ extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
377
+ ) -> Union[CompiledFxGraph, str]:
378
+ """
379
+ Inductor API that compiles a single graph.
380
+
381
+ If you change the argument list for this function, make sure you
382
+ also update the call to save_args_for_compile_fx_inner below accordingly.
383
+ """
384
+ if dynamo_utils.count_calls(gm.graph) == 0 and not aot_mode:
385
+ # trigger the real recompilation for _LazyGraphModule before returning
386
+ # the forward method.
387
+ from torch.fx._lazy_graph_module import _LazyGraphModule
388
+
389
+ _LazyGraphModule.force_recompile(gm)
390
+ return make_boxed_func(gm.forward)
391
+
392
+ assert isinstance(
393
+ next(iter(reversed(gm.graph.nodes))).args[0], (tuple, list)
394
+ ), f"inductor can only compile FX graphs which return a tuple/list, but got {gm.graph}"
395
+
396
+ if config.save_args:
397
+ save_args_for_compile_fx_inner(
398
+ gm,
399
+ example_inputs,
400
+ cudagraphs=cudagraphs,
401
+ num_fixed=num_fixed,
402
+ is_backward=is_backward,
403
+ graph_id=graph_id,
404
+ cpp_wrapper=cpp_wrapper,
405
+ aot_mode=aot_mode,
406
+ is_inference=is_inference,
407
+ boxed_forward_device_index=boxed_forward_device_index,
408
+ user_visible_outputs=user_visible_outputs,
409
+ layout_opt=layout_opt,
410
+ )
411
+
412
+ if cudagraphs is None:
413
+ cudagraphs = BoxedBool(config.triton.cudagraphs)
414
+
415
+ # Inputs to fx_codegen_and_compile
416
+ # Anything that affects codegen should go here, so if the signature
417
+ # of fx_codegen_and_compile changes, the dict should be updated accordingly
418
+ graph_kwargs = {
419
+ "cudagraphs": cudagraphs,
420
+ "num_fixed": num_fixed,
421
+ "is_backward": is_backward,
422
+ "graph_id": graph_id,
423
+ "cpp_wrapper": cpp_wrapper,
424
+ "aot_mode": aot_mode,
425
+ "is_inference": is_inference,
426
+ "user_visible_outputs": user_visible_outputs,
427
+ "layout_opt": layout_opt,
428
+ "extern_node_serializer": extern_node_serializer,
429
+ }
430
+
431
+ start = time.time()
432
+
433
+ if config.fx_graph_cache and not aot_mode:
434
+ compiled_graph = FxGraphCache.load(
435
+ fx_codegen_and_compile, gm, example_inputs, graph_kwargs
436
+ )
437
+ else:
438
+ compiled_graph = fx_codegen_and_compile(
439
+ gm, example_inputs, **graph_kwargs # type: ignore[arg-type]
440
+ )
441
+
442
+ log.debug("FX codegen and compilation took %.3fs", time.time() - start)
443
+
444
+ # check cudagraph disabling reasons from inductor lowering
445
+ if cudagraphs and compiled_graph.disabled_cudagraphs_reason:
446
+ perf_hint_log.warning(
447
+ "skipping cudagraphs due to %s", compiled_graph.disabled_cudagraphs_reason
448
+ )
449
+ BoxedBool.disable(cudagraphs)
450
+
451
+ # Return the output strides to the caller via TracingContext
452
+ context = torch._guards.TracingContext.try_get()
453
+ if context is not None and context.output_strides is not None:
454
+ assert len(context.output_strides) == 0
455
+ context.output_strides.extend(compiled_graph.output_strides)
456
+
457
+ if aot_mode:
458
+ return compiled_graph
459
+
460
+ if cudagraphs:
461
+ # the graph outputs are the tuple held in the output node's first argument
462
+ output = output_node(gm)
463
+ assert len(output.args) == 1
464
+ stack_traces = [
465
+ (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
466
+ for arg in output.args[0]
467
+ ]
468
+
469
+ complex_memory_overlap_inputs = any(
470
+ complex_memory_overlap(t)
471
+ for t in example_inputs
472
+ if isinstance(t, torch.Tensor)
473
+ )
474
+
475
+ from torch._inductor.cudagraph_utils import check_for_mutation
476
+
477
+ has_mutation_str = check_for_mutation(gm, compiled_graph, num_fixed)
478
+ has_mutation = has_mutation_str is not None
479
+
480
+ if has_mutation:
481
+ compiled_graph.disabled_cudagraphs_reason = has_mutation_str
482
+
483
+ cudagraph_tests = [
484
+ (not has_mutation, "mutated inputs"),
485
+ (not has_incompatible_cudagraph_ops(gm), "incompatible ops"),
486
+ (not complex_memory_overlap_inputs, "complex memory overlap"),
487
+ (
488
+ all(
489
+ isinstance(t, (torch.Tensor, torch.SymInt)) for t in example_inputs
490
+ ),
491
+ "non-Tensor inputs",
492
+ ),
493
+ ]
494
+ cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]
495
+
496
+ if not cudagraph_fail_reasons:
497
+ if not config.triton.cudagraph_trees:
498
+ # Force specialize all inputs so that CUDA graphs will work
499
+ for t in example_inputs:
500
+ if isinstance(t, torch.SymInt):
501
+ int(t) # guard
502
+
503
+ if (
504
+ boxed_forward_device_index is not None
505
+ and not is_inference
506
+ and not is_backward
507
+ ):
508
+ boxed_forward_device_index.set(next(iter(compiled_graph.device_idxs)))
509
+
510
+ compiled_graph.current_callable = cudagraphify(
511
+ compiled_graph.get_current_callable(),
512
+ example_inputs,
513
+ static_input_idxs=range(num_fixed),
514
+ device_index=next(iter(compiled_graph.device_idxs)),
515
+ stack_traces=stack_traces,
516
+ is_backward=is_backward,
517
+ is_inference=is_inference,
518
+ constants=tuple(compiled_graph.constants.values()),
519
+ )
520
+ else:
521
+ BoxedBool.disable(cudagraphs)
522
+
523
+ # See [Backward Generation Handling]
524
+ # if we cudagraph'd the forward and set the device, we need to let the cudagraph manager
525
+ # know we are running the backward even if we will not run it in cudagraphs
526
+ if is_backward and config.triton.cudagraph_trees:
527
+ assert boxed_forward_device_index is not None
528
+ assert boxed_forward_device_index.value is not None
529
+ compiled_graph_callable = compiled_graph.get_current_callable()
530
+
531
+ manager = torch._inductor.cudagraph_trees.get_manager(
532
+ boxed_forward_device_index.value, create_if_none_exists=False
533
+ )
534
+ # should already exist from forward
535
+ assert manager is not None
536
+
537
+ def compiled_artifact(new_inputs):
538
+ manager.set_to_running_backward()
539
+ return compiled_graph_callable(new_inputs)
540
+
541
+ compiled_graph.current_callable = compiled_artifact
542
+
543
+ if "cuda" in compiled_graph.device_types:
544
+ # prefer the more detailed disable_cudagraphs_reason because it carries a stack trace
545
+ # TODO: migrate all disable reasons to stack trace, refactor
546
+ if compiled_graph.disabled_cudagraphs_reason:
547
+ perf_hint_log.warning(compiled_graph.disabled_cudagraphs_reason)
548
+ else:
549
+ perf_hint_log.warning(
550
+ "skipping cudagraphs due to %s", cudagraph_fail_reasons
551
+ )
552
+
553
+ # cudagraphs does its own aligning of inputs
554
+ if not cudagraphs:
555
+ new_callable = align_inputs(
556
+ compiled_graph.get_current_callable(), example_inputs, range(num_fixed)
557
+ )
558
+ if new_callable is not compiled_graph.get_current_callable():
559
+ compiled_graph.current_callable = new_callable
560
+
561
+ _step_logger()(
562
+ logging.INFO,
563
+ "torchinductor done compiling "
564
+ f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
565
+ f"graph {graph_id}",
566
+ )
567
+
568
+ # aot autograd needs to know to pass in inputs as a list
569
+ compiled_graph._boxed_call = True
570
+ return compiled_graph
571
+
572
+
573
+ def fx_codegen_and_compile(
574
+ gm: torch.fx.GraphModule,
575
+ example_inputs: List[torch.Tensor],
576
+ cudagraphs: Optional[BoxedBool] = None,
577
+ num_fixed: int = 0,
578
+ is_backward: bool = False,
579
+ graph_id: Optional[int] = None,
580
+ cpp_wrapper: bool = False,
581
+ aot_mode: bool = False,
582
+ is_inference: bool = False,
583
+ user_visible_outputs: FrozenSet[str] = frozenset(),
584
+ layout_opt: Optional[bool] = None,
585
+ extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
586
+ ) -> Union[CompiledFxGraph, str]:
587
+ if is_tf32_warning_applicable(gm):
588
+ _warn_tf32_disabled()
589
+
590
+ # lift the maximum depth of the Python interpreter stack
591
+ # to accommodate large/deep models
592
+ sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000))
593
+
594
+ _step_logger()(
595
+ logging.INFO,
596
+ "torchinductor compiling "
597
+ f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
598
+ f"graph {graph_id}",
599
+ )
600
+ V.debug.fx_graph(gm, example_inputs)
601
+ # TODO: Should we actually dump this? It should be redundant with the aot
602
+ # structured logs...
603
+ # trace_structured("inductor_input_graph", payload_fn=lambda: gm.print_readable(print_output=False))
604
+
605
+ shape_env = _shape_env_from_inputs(example_inputs)
606
+
607
+ # Convert view to reshape in the graph. This is necessary primarily for
608
+ # layout optimization. Do it unconditionally for uniformity.
609
+ #
610
+ # It's needed because when we do layout optimization, a contiguous tensor
611
+ # in eager mode may become a channels-last tensor. A view op that could
612
+ # previously be applied to the contiguous tensor may no longer be applicable
613
+ # to the channels-last tensor. An error like
614
+ # RuntimeError: view size is not compatible with input tensor's size and stride
615
+ # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
616
+ # will be printed.
617
+ #
618
+ # Replace view op to reshape op in this case.
619
+ # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this.
620
+ #
621
+ # Also this has to be done before FakeTensorProp below to avoid the failed
622
+ # .view() call.
623
+ view_to_reshape(gm)
624
+
625
+ # It is safe to run FakeTensorProp under no_grad because by the time
626
+ # we're in inductor, we assume that AOTAutograd has already "taken care"
627
+ # of autograd, so there should be no more autograd-related API's in the
628
+ # graph.
629
+ with torch.no_grad():
630
+ fake_mode = fake_tensor_prop(gm, example_inputs)
631
+
632
+ # pattern matcher passes might not preserve striding information
633
+ # on node.meta["val"]. If in the future we rely on these being
634
+ # correct, we will need to fix this.
635
+
636
+ with V.set_fake_mode(fake_mode):
637
+ # has some issues with memory in training
638
+ _recursive_post_grad_passes(gm, is_inference=is_inference)
639
+ V.debug.fx_graph_transformed(gm, example_inputs)
640
+ post_grad_graphs_log.debug("%s", lazy_format_graph_code("AFTER POST GRAD", gm))
641
+ trace_structured(
642
+ "inductor_post_grad_graph",
643
+ payload_fn=lambda: gm.print_readable(print_output=False),
644
+ )
645
+ optimus_scuba_log["inductor_post_grad"] = counters["inductor"]
646
+ signpost_event(
647
+ "optimus",
648
+ "compile_fx.post_grad_passes",
649
+ optimus_scuba_log,
650
+ )
651
+
652
+ with V.set_fake_mode(fake_mode):
653
+ const_output_index = None
654
+ const_graph = None
655
+ const_code = None
656
+
657
+ if aot_mode and config.aot_inductor.use_runtime_constant_folding:
658
+ const_gm, const_output_index = split_const_gm(gm)
659
+
660
+ const_graph = GraphLowering(
661
+ const_gm,
662
+ example_inputs=[],
663
+ shape_env=shape_env,
664
+ num_static_inputs=num_fixed,
665
+ graph_id=graph_id,
666
+ cpp_wrapper=cpp_wrapper,
667
+ aot_mode=aot_mode,
668
+ user_visible_outputs=user_visible_outputs,
669
+ extern_node_serializer=extern_node_serializer,
670
+ is_inference=is_inference,
671
+ is_const_graph=True,
672
+ )
673
+ with V.set_graph_handler(const_graph):
674
+ assert cpp_wrapper, "AOT mode only supports C++ wrapper"
675
+ const_graph.run()
676
+
677
+ const_code, _ = const_graph.codegen_with_cpp_wrapper()
678
+
679
+ graph = GraphLowering(
680
+ gm,
681
+ # example_inputs will be used by AOTInductor to dry-run the generated code for Triton kernel tuning.
682
+ # For the forward pass, we have the real inputs to be used as example_inputs. For the backward pass,
683
+ # we currently use fake tensors and defake them later.
684
+ example_inputs=example_inputs,
685
+ shape_env=shape_env,
686
+ num_static_inputs=num_fixed,
687
+ graph_id=graph_id,
688
+ cpp_wrapper=cpp_wrapper,
689
+ aot_mode=aot_mode,
690
+ user_visible_outputs=user_visible_outputs,
691
+ extern_node_serializer=extern_node_serializer,
692
+ is_inference=is_inference,
693
+ const_output_index=const_output_index,
694
+ const_code=const_code,
695
+ const_module=const_graph,
696
+ )
697
+ with V.set_graph_handler(graph):
698
+ graph.run(*example_inputs)
699
+ output_strides: List[Optional[Tuple[int, ...]]] = []
700
+ if graph.graph_outputs is not None:
701
+ # We'll put the output strides in the compiled graph so we
702
+ # can later return them to the caller via TracingContext
703
+ for out in graph.graph_outputs:
704
+ if hasattr(out, "layout"):
705
+ output_strides.append(
706
+ tuple(
707
+ V.graph.sizevars.size_hint(s) for s in out.layout.stride
708
+ )
709
+ )
710
+ else:
711
+ output_strides.append(None)
712
+
713
+ metrics_helper = metrics.CachedMetricsHelper()
714
+ compiled_fn = graph.compile_to_fn()
715
+
716
+ if V.aot_compilation is True:
717
+ return compiled_fn
718
+
719
+ if cudagraphs and not V.graph.disable_cudagraphs_reason:
720
+ from torch._inductor.cudagraph_utils import (
721
+ check_lowering_disable_cudagraph,
722
+ )
723
+
724
+ V.graph.disable_cudagraphs_reason = check_lowering_disable_cudagraph(
725
+ V.graph.device_node_mapping
726
+ )
727
+
728
+ compiled_graph = CompiledFxGraph(
729
+ compiled_fn,
730
+ graph,
731
+ output_strides,
732
+ V.graph.disable_cudagraphs_reason,
733
+ metrics_helper.get_deltas(),
734
+ )
735
+
736
+ return compiled_graph
737
+
738
+
739
+ def clone_preserve_strides(x: torch.Tensor):
740
+ needed_size = (
741
+ sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1
742
+ )
743
+ buffer = torch.as_strided(x, (needed_size,), (1,)).clone()
744
+ return torch.as_strided(buffer, x.size(), x.stride())
745
+
746
+
747
+ def copy_misaligned_inputs(
748
+ new_inputs: List[torch.Tensor], check_inputs_idxs: Sequence[int]
749
+ ) -> None:
750
+ for i in check_inputs_idxs:
751
+ if new_inputs[i].data_ptr() % ALIGNMENT:
752
+ new_inputs[i] = clone_preserve_strides(new_inputs[i])
753
+
754
+
755
+ def get_input_idxs_to_check(
756
+ inputs: Union[List[torch.Tensor], Sequence[int]],
757
+ static_input_idxs: Sequence[int],
758
+ ) -> Sequence[int]:
759
+ def is_aligned(storage_offset, dtype):
760
+ return (storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0
761
+
762
+ ids_to_check = []
763
+ for i, input in enumerate(inputs):
764
+ if (
765
+ isinstance(input, torch.Tensor)
766
+ and (
767
+ i not in static_input_idxs
768
+ or not is_aligned(input.storage_offset(), input.dtype)
769
+ )
770
+ and input.device.type == "cuda"
771
+ ):
772
+ ids_to_check.append(i)
773
+ return ids_to_check
774
+
775
+
776
+ def align_inputs_from_check_idxs(
777
+ model: Callable[[List[torch.Tensor]], Any], inputs_to_check: Sequence[int]
778
+ ):
779
+ if len(inputs_to_check) == 0:
780
+ return model
781
+
782
+ def run(new_inputs):
783
+ copy_misaligned_inputs(new_inputs, inputs_to_check)
784
+ return model(new_inputs)
785
+
786
+ return run
787
+
788
+
789
+ def align_inputs(
790
+ model: Callable[[List[torch.Tensor]], Any],
791
+ inputs: List[torch.Tensor],
792
+ static_input_idxs: Sequence[int] = (),
793
+ ):
794
+ inputs_to_check = get_input_idxs_to_check(inputs, static_input_idxs)
795
+ return align_inputs_from_check_idxs(model, inputs_to_check)
796
+
797
+
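
All of the alignment helpers above boil down to the same test: the byte offset of the data pointer (storage offset times element size) must be a multiple of ALIGNMENT (16). A small CPU-side sketch of that test on a deliberately misaligned view; the tensor sizes are arbitrary:

    import torch

    ALIGNMENT = 16

    x = torch.randn(64)
    misaligned = x[1:]                       # storage_offset() == 1, i.e. 4 bytes in

    offset_bytes = misaligned.storage_offset() * misaligned.element_size()
    print(offset_bytes % ALIGNMENT == 0)     # False: this input would get cloned

    realigned = misaligned.clone()           # fresh allocation with its own storage
    print(realigned.storage_offset())        # 0
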
798
+ @dynamo_utils.dynamo_timed
799
+ def cudagraphify(
800
+ model: torch.fx.GraphModule,
801
+ inputs: List[torch.Tensor],
802
+ static_input_idxs: Sequence[int] = (),
803
+ *,
804
+ device_index: int,
805
+ stack_traces: List[Optional[str]],
806
+ is_backward: bool,
807
+ is_inference: bool,
808
+ constants: Tuple[torch.Tensor, ...] = (),
809
+ ):
810
+ from torch._inductor.cudagraph_trees import (
811
+ cudagraphify_impl as new_cudagraphify_impl,
812
+ )
813
+
814
+ cudagraphify_fn: Callable[..., Any]
815
+ if config.triton.cudagraph_trees:
816
+ cudagraphify_fn = functools.partial(
817
+ new_cudagraphify_impl,
818
+ device_index=device_index,
819
+ stack_traces=stack_traces,
820
+ is_backward=is_backward,
821
+ is_inference=is_inference,
822
+ constants=constants,
823
+ )
824
+ else:
825
+ cudagraphify_fn = cudagraphify_impl
826
+
827
+ # if using fake tensors, defer cudagraphs until we get real inputs at runtime
828
+ if not any(isinstance(inp, FakeTensor) for inp in inputs):
829
+ return cudagraphify_fn(model, inputs, static_input_idxs)
830
+
831
+ compiled_fn = None
832
+
833
+ def run(new_inputs):
834
+ nonlocal compiled_fn
835
+ if compiled_fn is None:
836
+ with dynamo_utils.preserve_rng_state():
837
+ compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs)
838
+ return compiled_fn(new_inputs)
839
+
840
+ return run
841
+
842
+
843
+ def remove_unaligned_input_idxs(
844
+ inputs: Union[List[torch.Tensor], Sequence[int]],
845
+ static_input_idxs: Sequence[int],
846
+ ):
847
+ """
848
+ Drop from static_input_idxs any index whose input is not actually aligned;
849
+ such inputs are then treated as non-static and copied to aligned storage.
850
+ """
851
+ aligned_static_input_idxs = []
852
+ for idx, input in zip(static_input_idxs, inputs):
853
+ if isinstance(input, torch.Tensor) and (input.data_ptr() % ALIGNMENT) == 0:
854
+ aligned_static_input_idxs.append(idx)
855
+ if len(aligned_static_input_idxs) != len(static_input_idxs):
856
+ return aligned_static_input_idxs
857
+ return static_input_idxs
858
+
859
+
860
+ def static_input(x: torch.Tensor):
861
+ """
862
+ Copy an input while preserving strides
863
+ """
864
+ # TODO(jansel): figure out why this version doesn't work:
865
+ # return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)
866
+ needed_size = (
867
+ sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1
868
+ )
869
+ buffer = torch.empty(needed_size, dtype=x.dtype, device=x.device)
870
+ return torch.as_strided(buffer, x.size(), x.stride())
871
+
872
+
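
The needed_size expression in static_input (and in clone_preserve_strides above) is the flat index of the last addressable element plus one, so the backing buffer is exactly large enough for the strided view. A worked check with a small contiguous tensor:

    import torch

    x = torch.empty(2, 3)                    # sizes (2, 3), strides (3, 1)
    needed_size = (
        sum((size - 1) * stride for size, stride in zip(x.size(), x.stride())) + 1
    )
    print(needed_size)                       # (2-1)*3 + (3-1)*1 + 1 == 6

    buffer = torch.empty(needed_size, dtype=x.dtype, device=x.device)
    y = torch.as_strided(buffer, x.size(), x.stride())
    print(y.shape, y.stride())               # torch.Size([2, 3]) (3, 1)
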
873
+ def index_expanded_dims_and_copy_(
874
+ dst: torch.Tensor,
875
+ src: torch.Tensor,
876
+ expanded_dims: List[int],
877
+ ):
878
+ "Index into expanded dimensions of both dst and src then copy_"
879
+ dst = index_expanded_dims(dst, expanded_dims)
880
+ src = index_expanded_dims(src, expanded_dims)
881
+ dst.copy_(src)
882
+
883
+
884
+ def cudagraphify_impl(
885
+ model: torch.fx.GraphModule,
886
+ inputs: List[torch.Tensor],
887
+ static_input_idxs: Sequence[int] = (),
888
+ ):
889
+ """
890
+ Assumes inputs[static_input_idxs[i]] are always the same memory address
891
+ """
892
+ check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
893
+ static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
894
+ copy_misaligned_inputs(inputs, check_input_idxs)
895
+
896
+ assert isinstance(inputs, list)
897
+
898
+ inps_expanded_dims = [
899
+ get_expanded_dims(x) if idx not in static_input_idxs else []
900
+ for idx, x in enumerate(inputs)
901
+ ]
902
+
903
+ # allocate static tensor inputs
904
+ static_inputs = [
905
+ x
906
+ if not isinstance(x, torch.Tensor)
907
+ else static_input(x)
908
+ if idx not in static_input_idxs
909
+ else x.detach()
910
+ for idx, x in enumerate(inputs)
911
+ ]
912
+
913
+ # copy over input values for fresh allocations
914
+ for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)):
915
+ if isinstance(x, torch.Tensor) and idx not in static_input_idxs:
916
+ index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims)
917
+
918
+ # warmup
919
+ torch.cuda.synchronize()
920
+ stream = torch.cuda.Stream()
921
+ stream.wait_stream(torch.cuda.current_stream())
922
+ # copy static_inputs because it will be cleared in model
923
+ with torch.cuda.stream(stream):
924
+ model(list(static_inputs))
925
+ stream.synchronize()
926
+ torch.cuda.current_stream().wait_stream(stream)
927
+ torch.cuda.synchronize()
928
+
929
+ # record
930
+ graph = torch.cuda.CUDAGraph()
931
+ with torch.cuda.graph(graph, stream=stream, capture_error_mode="thread_local"):
932
+ static_outputs = model(list(static_inputs))
933
+ if not isinstance(static_outputs, (list, tuple)):
934
+ static_outputs = (static_outputs,)
935
+
936
+ if config.size_asserts:
937
+
938
+ def run(new_inputs):
939
+ assert len(static_inputs) == len(new_inputs)
940
+ for idx, (dst, src, expanded_dims) in enumerate(
941
+ zip(static_inputs, new_inputs, inps_expanded_dims)
942
+ ):
943
+ if not isinstance(dst, torch.Tensor):
944
+ pass
945
+ elif idx in static_input_idxs:
946
+ assert dst.data_ptr() == src.data_ptr()
947
+ else:
948
+ # TODO - could make one single op of multiple slices
949
+ # and avoid dispatch.
950
+ # Could also pre-index the `dst` tensors
951
+ index_expanded_dims_and_copy_(dst, src, expanded_dims)
952
+ new_inputs.clear()
953
+ graph.replay()
954
+ return static_outputs
955
+
956
+ else:
957
+ copy_indices = [
958
+ idx for idx in range(len(static_inputs)) if idx not in static_input_idxs
959
+ ]
960
+
961
+ def run(new_inputs):
962
+ for idx in copy_indices:
963
+ expanded_dims = inps_expanded_dims[idx]
964
+ index_expanded_dims_and_copy_(
965
+ static_inputs[idx], new_inputs[idx], expanded_dims
966
+ )
967
+ new_inputs.clear()
968
+ graph.replay()
969
+ return static_outputs
970
+
971
+ return align_inputs_from_check_idxs(run, check_input_idxs)
972
+
973
+
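
cudagraphify_impl follows the usual capture/replay recipe: warm up on a side stream, capture into a torch.cuda.CUDAGraph, then copy fresh inputs into the captured static buffers and replay. A stripped-down, standalone sketch of that pattern (requires a CUDA device; the lambda is a stand-in for the compiled callable, not the code generated above):

    import torch

    assert torch.cuda.is_available()

    model = lambda inp: inp * 2 + 1          # stand-in for the compiled callable
    static_input = torch.zeros(8, device="cuda")

    # Warm up on a side stream before capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        model(static_input)
    torch.cuda.current_stream().wait_stream(s)

    # Capture the kernels launched by the model.
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_output = model(static_input)

    # Replay with new data: copy into the static buffer, then replay.
    static_input.copy_(torch.arange(8.0, device="cuda"))
    graph.replay()
    print(static_output)                     # 2 * arange(8) + 1
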
974
+ def compile_fx_aot(
975
+ model_: torch.fx.GraphModule,
976
+ example_inputs_: List[torch.Tensor],
977
+ inner_compile: Callable[..., Any] = compile_fx_inner,
978
+ config_patches: Optional[Dict[str, Any]] = None,
979
+ ):
980
+ config_patches: Dict[str, Any] = (
981
+ {"cpp_wrapper": True}
982
+ if config_patches is None
983
+ else {**config_patches, "cpp_wrapper": True}
984
+ )
985
+ if (
986
+ "aot_inductor.output_path" not in config_patches
987
+ and not config.aot_inductor.output_path
988
+ ):
989
+ config_patches = {
990
+ **config_patches,
991
+ "aot_inductor.output_path": code_hash(model_.code),
992
+ }
993
+
994
+ extern_node_serializer = config_patches.pop("extern_node_serializer", None)
995
+ with V.set_aot_compilation(True):
996
+ compiled_lib_path = compile_fx(
997
+ model_,
998
+ example_inputs_,
999
+ inner_compile=functools.partial(
1000
+ inner_compile,
1001
+ aot_mode=True,
1002
+ extern_node_serializer=extern_node_serializer,
1003
+ ),
1004
+ config_patches=config_patches,
1005
+ )
1006
+ assert os.path.exists(
1007
+ compiled_lib_path
1008
+ ), f"AOTInductor compiled library does not exist at {compiled_lib_path}"
1009
+ return compiled_lib_path
1010
+
1011
+
1012
+ _graph_counter = count(0)
1013
+
1014
+
1015
+ def fw_compiler_freezing(
1016
+ aot_autograd_model: torch.fx.GraphModule,
1017
+ aot_example_inputs: List[torch.Tensor],
1018
+ dynamo_model: torch.fx.GraphModule,
1019
+ num_example_inputs: int,
1020
+ inner_compile: Callable[..., Any],
1021
+ cudagraphs: BoxedBool,
1022
+ graph_id: int,
1023
+ forward_device: BoxedDeviceIndex,
1024
+ ):
1025
+ from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze
1026
+
1027
+ # partition_fn won't be called
1028
+ _recursive_joint_graph_passes(aot_autograd_model)
1029
+
1030
+ layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model, is_inference=True)
1031
+ if layout_opt:
1032
+ # make sure meta['val'] is properly set up
1033
+ fake_tensor_prop(aot_autograd_model, aot_example_inputs, True)
1034
+ convert_conv_weights_to_channels_last(aot_autograd_model)
1035
+
1036
+ opt_model, preserved_arg_indices = freeze(
1037
+ dynamo_model,
1038
+ aot_autograd_model,
1039
+ aot_example_inputs, # type: ignore[arg-type]
1040
+ )
1041
+
1042
+ aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices]
1043
+ num_fixed = len(preserved_arg_indices) - num_example_inputs
1044
+
1045
+ fake_mode = detect_fake_mode(aot_example_inputs)
1046
+
1047
+ # for freezing, all graph outputs should be user visible
1048
+ *_, model_outputs_node = opt_model.graph.nodes
1049
+ model_outputs = model_outputs_node.args[0]
1050
+ user_visible_outputs = [
1051
+ n.name for n in model_outputs if isinstance(n, torch.fx.Node)
1052
+ ]
1053
+
1054
+ # constant params will be real tensors, not fake
1055
+ tracing_context = torch._guards.TracingContext.try_get()
1056
+ if tracing_context is not None:
1057
+ params_flat = tracing_context.params_flat
1058
+ assert params_flat is not None
1059
+ for i in range(len(params_flat)):
1060
+ if i not in preserved_arg_indices:
1061
+ params_flat[i] = None
1062
+
1063
+ with mock.patch.object(fake_mode, "allow_non_fake_inputs", True):
1064
+ optimized_function = inner_compile(
1065
+ opt_model,
1066
+ aot_example_inputs,
1067
+ num_fixed=num_fixed,
1068
+ cudagraphs=cudagraphs,
1069
+ graph_id=graph_id,
1070
+ is_inference=True,
1071
+ boxed_forward_device_index=forward_device,
1072
+ layout_opt=layout_opt,
1073
+ user_visible_outputs=user_visible_outputs,
1074
+ )
1075
+
1076
+ # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper
1077
+ # that drops constant-ified params
1078
+ if V.aot_compilation is True:
1079
+ return optimized_function
1080
+
1081
+ def wrapper(args):
1082
+ args_new = [args[i] for i in preserved_arg_indices]
1083
+ args.clear()
1084
+ return optimized_function(args_new)
1085
+
1086
+ wrapper._boxed_call = True # type: ignore[attr-defined]
1087
+
1088
+ return wrapper
1089
+
1090
+
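
This freezing compiler is only reached when config.freezing is enabled and gradients are globally disabled (see the `config.freezing and not torch.is_grad_enabled()` branch in compile_fx below). A minimal sketch of triggering it from user code; the toy model is an arbitrary example:

    import torch

    torch._inductor.config.freezing = True

    model = torch.nn.Linear(8, 8).eval()
    compiled = torch.compile(model)

    with torch.no_grad():
        out = compiled(torch.randn(4, 8))    # routed through fw_compiler_freezing
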
1091
+ @_use_lazy_graph_module(dynamo_config.use_lazy_graph_module)
1092
+ def compile_fx(
1093
+ model_: torch.fx.GraphModule,
1094
+ example_inputs_: List[torch.Tensor],
1095
+ inner_compile: Callable[..., Any] = compile_fx_inner,
1096
+ config_patches: Optional[Dict[str, Any]] = None,
1097
+ decompositions: Optional[Dict[OpOverload, Callable[..., Any]]] = None,
1098
+ ):
1099
+ """Main entrypoint to a compile given FX graph"""
1100
+ if config_patches:
1101
+ with config.patch(config_patches):
1102
+ return compile_fx(
1103
+ model_,
1104
+ example_inputs_,
1105
+ # need extra layer of patching as backwards is compiled out of scope
1106
+ inner_compile=config.patch(config_patches)(inner_compile),
1107
+ decompositions=decompositions,
1108
+ )
1109
+
1110
+ if config.cpp_wrapper:
1111
+ with config.patch(
1112
+ {
1113
+ "cpp_wrapper": False,
1114
+ "triton.autotune_cublasLt": False,
1115
+ "triton.cudagraphs": False,
1116
+ "triton.store_cubin": True,
1117
+ }
1118
+ ), V.set_real_inputs(example_inputs_):
1119
+ inputs_ = example_inputs_
1120
+ if isinstance(model_, torch.fx.GraphModule):
1121
+ fake_inputs = [
1122
+ node.meta.get("val")
1123
+ for node in model_.graph.nodes
1124
+ if node.op == "placeholder"
1125
+ ]
1126
+ if all(v is not None for v in fake_inputs):
1127
+ # Validate devices before switching to fake tensors.
1128
+ for idx, fi, i in zip(count(), fake_inputs, inputs_):
1129
+ if fi.device != i.device:
1130
+ raise ValueError(
1131
+ f"Device mismatch between fake input and example input at position #{idx}: "
1132
+ f"{fi.device} vs {i.device}. If the model was exported via torch.export(), "
1133
+ "make sure torch.export() and torch.aot_compile() run on the same device."
1134
+ )
1135
+ inputs_ = fake_inputs
1136
+ return compile_fx(
1137
+ model_,
1138
+ inputs_,
1139
+ inner_compile=functools.partial(inner_compile, cpp_wrapper=True),
1140
+ decompositions=decompositions,
1141
+ )
1142
+
1143
+ recursive_compile_fx = functools.partial(
1144
+ compile_fx,
1145
+ inner_compile=inner_compile,
1146
+ decompositions=decompositions,
1147
+ )
1148
+
1149
+ if not graph_returns_tuple(model_):
1150
+ return make_graph_return_tuple(
1151
+ model_,
1152
+ example_inputs_,
1153
+ recursive_compile_fx,
1154
+ )
1155
+
1156
+ if isinstance(model_, torch.fx.GraphModule):
1157
+ if isinstance(model_.graph._codegen, _PyTreeCodeGen):
1158
+ # this graph is the result of dynamo.export()
1159
+ return handle_dynamo_export_graph(
1160
+ model_,
1161
+ example_inputs_,
1162
+ recursive_compile_fx,
1163
+ )
1164
+
1165
+ model_ = _recursive_pre_grad_passes(model_, example_inputs_)
1166
+ optimus_scuba_log["inductor_pre_grad"] = counters["inductor"]
1167
+ signpost_event(
1168
+ "optimus",
1169
+ "compile_fx.pre_grad_passes",
1170
+ optimus_scuba_log,
1171
+ )
1172
+
1173
+ if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):
1174
+ return flatten_graph_inputs(
1175
+ model_,
1176
+ example_inputs_,
1177
+ recursive_compile_fx,
1178
+ )
1179
+
1180
+ assert not config._raise_error_for_testing
1181
+ num_example_inputs = len(example_inputs_)
1182
+ cudagraphs = BoxedBool(config.triton.cudagraphs)
1183
+ forward_device = BoxedDeviceIndex(None)
1184
+
1185
+ graph_id = next(_graph_counter)
1186
+
1187
+ decompositions = (
1188
+ decompositions if decompositions is not None else select_decomp_table()
1189
+ )
1190
+
1191
+ @dynamo_utils.dynamo_timed
1192
+ def fw_compiler_base(
1193
+ model: torch.fx.GraphModule,
1194
+ example_inputs: List[torch.Tensor],
1195
+ is_inference: bool,
1196
+ ):
1197
+ if is_inference:
1198
+ # partition_fn won't be called
1199
+ _recursive_joint_graph_passes(model)
1200
+
1201
+ fixed = torch._inductor.utils.num_fw_fixed_arguments(
1202
+ num_example_inputs, len(example_inputs)
1203
+ )
1204
+ user_visible_outputs = set()
1205
+
1206
+ if config.keep_output_stride:
1207
+ *_, model_outputs_node = model.graph.nodes
1208
+ assert model_outputs_node.op == "output"
1209
+ model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
1210
+ num_model_outputs = len(model_outputs)
1211
+
1212
+ context = torch._guards.TracingContext.try_get()
1213
+ # See Note [User Outputs in the inductor graph]
1214
+ if context is not None and context.fw_metadata and not is_inference:
1215
+ original_output_start_index = (
1216
+ context.fw_metadata.num_mutated_inp_runtime_indices
1217
+ )
1218
+ else:
1219
+ original_output_start_index = 0
1220
+
1221
+ if isinstance(model_, torch.fx.GraphModule):
1222
+ *_, orig_model_outputs_node = model_.graph.nodes
1223
+ assert orig_model_outputs_node.op == "output"
1224
+ orig_model_outputs, _ = pytree.tree_flatten(
1225
+ orig_model_outputs_node.args
1226
+ )
1227
+ num_orig_model_outputs = len(orig_model_outputs)
1228
+ else:
1229
+ num_orig_model_outputs = num_model_outputs
1230
+
1231
+ assert num_orig_model_outputs <= num_model_outputs
1232
+
1233
+ # Note [User Outputs in the inductor graph]
1234
+ # We make the following assumptions:
1235
+ # For inference
1236
+ # len(orig_model_outputs) == len(model_outputs)
1237
+ # For training
1238
+ # len(orig_model_outputs) <= len(model_outputs)
1239
+ # During training, most of the time model_outputs starts with the
1240
+ # original module's outputs followed by saved activations.
1241
+ # But this may not be true if the model has in-place updated tensors.
1242
+ # AOTAutograd will return those tensors before the original
1243
+ # module's outputs.
1244
+ # To make things safe, we'll use original_output_start_index field
1245
+ # set by AOTAutograd to decide where the original module outputs start.
1246
+ orig_output_end_idx = original_output_start_index + num_orig_model_outputs
1247
+ # Sanity check: we are about to splice out the "user" outputs from the full set
1248
+ # of "graph" outputs. Make sure we're within bounds.
1249
+ assert orig_output_end_idx <= num_model_outputs
1250
+
1251
+ user_visible_outputs = {
1252
+ n.name
1253
+ for n in model_outputs[original_output_start_index:orig_output_end_idx]
1254
+ if isinstance(n, torch.fx.Node)
1255
+ }
1256
+
1257
+ return inner_compile(
1258
+ model,
1259
+ example_inputs,
1260
+ num_fixed=fixed,
1261
+ cudagraphs=cudagraphs,
1262
+ graph_id=graph_id,
1263
+ is_inference=is_inference,
1264
+ boxed_forward_device_index=forward_device,
1265
+ user_visible_outputs=user_visible_outputs,
1266
+ )
1267
+
1268
+ fw_compiler = functools.partial(fw_compiler_base, is_inference=False)
1269
+
1270
+ if config.freezing and not torch.is_grad_enabled():
1271
+ inference_compiler = functools.partial(
1272
+ fw_compiler_freezing,
1273
+ dynamo_model=model_,
1274
+ num_example_inputs=num_example_inputs,
1275
+ inner_compile=inner_compile,
1276
+ cudagraphs=cudagraphs,
1277
+ graph_id=graph_id,
1278
+ forward_device=forward_device,
1279
+ )
1280
+ else:
1281
+ inference_compiler = functools.partial(fw_compiler_base, is_inference=True)
1282
+
1283
+ def partition_fn(graph, joint_inputs, **kwargs):
1284
+ _recursive_joint_graph_passes(graph)
1285
+ return min_cut_rematerialization_partition(
1286
+ graph, joint_inputs, **kwargs, compiler="inductor"
1287
+ )
1288
+
1289
+ @dynamo_utils.dynamo_timed
1290
+ @dynamo_utils.maybe_cprofile
1291
+ def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
1292
+ fixed = count_tangents(model)
1293
+ return inner_compile(
1294
+ model,
1295
+ example_inputs,
1296
+ num_fixed=fixed,
1297
+ cudagraphs=cudagraphs,
1298
+ is_backward=True,
1299
+ graph_id=graph_id,
1300
+ boxed_forward_device_index=forward_device,
1301
+ )
1302
+
1303
+ # TODO: can add logging before/after the call to create_aot_dispatcher_function
1304
+ # in torch._functorch/aot_autograd.py::aot_module_simplified::aot_function_simplified::new_func
1305
+ # once torchdynamo is merged into pytorch
1306
+
1307
+ fake_mode = detect_fake_mode(example_inputs_) or torch._subclasses.FakeTensorMode(
1308
+ allow_non_fake_inputs=True
1309
+ )
1310
+ tracing_context = (
1311
+ torch._guards.TracingContext.try_get()
1312
+ or torch._guards.TracingContext(fake_mode)
1313
+ )
1314
+
1315
+ if V.aot_compilation is True:
1316
+ gm, graph_signature = aot_export_module(
1317
+ model_, example_inputs_, trace_joint=False, decompositions=decompositions
1318
+ )
1319
+ unlifted_gm = _unlift_graph(model_, gm, graph_signature)
1320
+ if "dynamo_flat_name_to_original_fqn" in model_.meta:
1321
+ unlifted_gm.meta["dynamo_flat_name_to_original_fqn"] = model_.meta[
1322
+ "dynamo_flat_name_to_original_fqn"
1323
+ ]
1324
+ with V.set_fake_mode(fake_mode), compiled_autograd.disable():
1325
+ return inference_compiler(unlifted_gm, example_inputs_)
1326
+
1327
+ with V.set_fake_mode(fake_mode), torch._guards.tracing(
1328
+ tracing_context
1329
+ ), compiled_autograd.disable():
1330
+ return aot_autograd(
1331
+ fw_compiler=fw_compiler,
1332
+ bw_compiler=bw_compiler,
1333
+ inference_compiler=inference_compiler,
1334
+ decompositions=decompositions,
1335
+ partition_fn=partition_fn,
1336
+ keep_inference_input_mutations=True,
1337
+ )(model_, example_inputs_)
1338
+
1339
+
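
compile_fx is what torch.compile invokes when the "inductor" backend is selected, so the typical way to exercise the whole pipeline above is simply via torch.compile. A minimal sketch; the toy function is an arbitrary example:

    import torch

    def f(a, b):
        return (a @ b).relu()

    compiled_f = torch.compile(f, backend="inductor")   # ends up in compile_fx
    out = compiled_f(torch.randn(16, 16), torch.randn(16, 16))
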
1340
+ def _shape_env_from_inputs(inputs: List[torch.Tensor]):
1341
+ shape_env = None
1342
+ fake_mode = detect_fake_mode(inputs)
1343
+
1344
+ # TODO(voz): It would be nice to enable this assert, but there are lots of tests that
1345
+ # pass in real inputs for now.
1346
+ # if len(inputs) > 0:
1347
+ # assert fake_mode is not None, breakpoint()
1348
+
1349
+ if fake_mode is not None:
1350
+ return fake_mode.shape_env
1351
+
1352
+ # When there are no tensor inputs, get shape_env from the first SymInt.
1353
+ for input in inputs:
1354
+ if isinstance(input, torch.SymInt):
1355
+ return input.node.shape_env
1356
+
1357
+ # TODO(voz): Should we always have one anyway?
1358
+ return None
1359
+
1360
+
1361
+ def graph_returns_tuple(gm: torch.fx.GraphModule):
1362
+ """True if a FX graph returns a tuple"""
1363
+ if not isinstance(gm, torch.fx.GraphModule):
1364
+ return True # can't check this, assume true
1365
+ (rv,) = output_node(gm).args
1366
+ if isinstance(rv, (list, tuple)):
1367
+ return True
1368
+ if (
1369
+ isinstance(rv, torch.fx.node.Node)
1370
+ and hasattr(rv.target, "_schema")
1371
+ and len(rv.target._schema.returns) > 1
1372
+ and all(str(ret.type) == "Tensor" for ret in rv.target._schema.returns)
1373
+ ):
1374
+ # for graphs whose result is one node with multiple outputs
1375
+ return True
1376
+ return False
1377
+
1378
+
1379
+ def make_graph_return_tuple(
1380
+ gm: torch.fx.GraphModule,
1381
+ inputs: List[torch.Tensor],
1382
+ compile_gm: Callable[..., Any],
1383
+ ):
1384
+ """
1385
+ Mutate gm so it returns a tuple. This is only needed for graphs
1386
+ not created by torchdynamo that return non-tuples.
1387
+ """
1388
+ node = output_node(gm)
1389
+ (rv,) = node.args
1390
+ rv, spec = pytree.tree_flatten(rv)
1391
+ with gm.graph.inserting_before(node):
1392
+ gm.graph.output(rv)
1393
+ gm.graph.erase_node(node)
1394
+ assert graph_returns_tuple(gm)
1395
+
1396
+ compiled_fn = compile_gm(gm, inputs)
1397
+
1398
+ @functools.wraps(compiled_fn)
1399
+ def wrapper(*args, **kwargs):
1400
+ return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)
1401
+
1402
+ return wrapper
1403
+
1404
+
1405
+ def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
1406
+ """
1407
+ Mutate inputs so that they are flat and wrap gm such that it
1408
+ accepts those inputs. This is only needed for graphs not created
1409
+ by torchdynamo that take bumpy inputs.
1410
+ """
1411
+ inputs, spec = pytree.tree_flatten(inputs)
1412
+
1413
+ class GmWrapper(torch.nn.Module):
1414
+ def __init__(self):
1415
+ super().__init__()
1416
+ self.gm = gm
1417
+
1418
+ def forward(self, *args):
1419
+ args: List[Any] = list(args)
1420
+ return self.gm(*pytree.tree_unflatten(args, spec))
1421
+
1422
+ compiled_fn = compile_gm(GmWrapper(), inputs)
1423
+
1424
+ @functools.wraps(compiled_fn)
1425
+ def wrapper(*args):
1426
+ # note this doesn't check the spec, assuming it is the same
1427
+ return compiled_fn(*pytree.arg_tree_leaves(*args))
1428
+
1429
+ return wrapper
1430
+
1431
+
1432
+ def handle_dynamo_export_graph(
1433
+ gm: torch.fx.GraphModule,
1434
+ inputs: List[torch.Tensor],
1435
+ compile_gm: Callable[..., Any],
1436
+ ):
1437
+ """
1438
+ `torch._dynamo.export` embeds pytrees in the FX graph codegen object,
1439
+ convert that to a normal FX graph so inductor can compile it.
1440
+ """
1441
+ codegen = gm.graph._codegen
1442
+ gm.graph._codegen = torch.fx.graph.CodeGen()
1443
+ gm.recompile()
1444
+
1445
+ compiled_fn = compile_gm(gm, codegen.process_inputs(*inputs))
1446
+
1447
+ @functools.wraps(compiled_fn)
1448
+ def wrapper(*args):
1449
+ return codegen.process_outputs(compiled_fn(*codegen.process_inputs(*args)))
1450
+
1451
+ return wrapper
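The three wrappers above lean entirely on pytree flattening. A minimal sketch of that mechanism outside of Inductor, using only torch.utils._pytree (the toy module and inputs are illustrative, not taken from this file):

import torch
import torch.utils._pytree as pytree

class Toy(torch.nn.Module):
    def forward(self, inputs):              # "bumpy" input: a dict of tensors
        return inputs["a"] + inputs["b"]

nested = {"a": torch.ones(2), "b": torch.full((2,), 2.0)}
flat, spec = pytree.tree_flatten(nested)    # flat list of tensors plus a TreeSpec
rebuilt = pytree.tree_unflatten(flat, spec) # round-trips back to the original dict
print(Toy()(rebuilt))                       # tensor([3., 3.])

flatten_graph_inputs performs the same flatten before compiling and the same unflatten inside GmWrapper.forward; make_graph_return_tuple applies the inverse trick to the outputs.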
venv/lib/python3.10/site-packages/torch/_inductor/config.py ADDED
@@ -0,0 +1,752 @@
1
+ import os # noqa: C101
2
+ import sys
3
+ from typing import Any, Callable, Dict, Optional, TYPE_CHECKING
4
+
5
+ import torch
6
+
7
+
8
+ def is_fbcode():
9
+ return not hasattr(torch.version, "git_version")
10
+
11
+
12
+ # add some debug printouts
13
+ debug = False
14
+
15
+ # add inf and NaN checkers
16
+ debug_check_inf_and_nan = False
17
+
18
+ # Whether to disable a progress bar for autotuning
19
+ disable_progress = True
20
+
21
+ # Whether to enable printing the source code for each future
22
+ verbose_progress = False
23
+
24
+ # use fx aot graph codegen cache
25
+ fx_graph_cache = os.environ.get("TORCHINDUCTOR_FX_GRAPH_CACHE") == "1"
26
+
27
+ # use cpp wrapper instead of python wrapper
28
+ cpp_wrapper = os.environ.get("TORCHINDUCTOR_CPP_WRAPPER", "0") == "1"
29
+
30
+ # codegen cpp wrapper code in an ABI compatible mode
31
+ abi_compatible = (
32
+ os.environ.get("TORCHINDUCTOR_ABI_COMPATIBLE", "1" if is_fbcode() else "0") == "1"
33
+ )
34
+
35
+ c_shim_version = os.environ.get(
36
+ "TORCHINDUCTOR_C_SHIM_VERSION", "1" if is_fbcode() else "2"
37
+ )
38
+
39
+ # dead code elimination
40
+ dce = False
41
+
42
+ # assume weight tensors are fixed size
43
+ static_weight_shapes = True
44
+
45
+ # put correctness assertions in generated code
46
+ size_asserts = os.environ.get("TORCHINDUCTOR_SIZE_ASSERTS", "1") == "1"
47
+ nan_asserts = os.environ.get("TORCHINDUCTOR_NAN_ASSERTS") == "1"
48
+
49
+ # enable loop reordering based on input orders
50
+ pick_loop_orders = True
51
+
52
+ # reuse a kernel input as the output
53
+ inplace_buffers = True
54
+
55
+ # reuse a buffer for an unrelated purpose
56
+ allow_buffer_reuse = True
57
+
58
+ # Enable pooled allocations for non-output tensors
59
+ memory_planning = os.environ.get("TORCHINDUCTOR_MEMORY_PLANNING", "0") == "1"
60
+
61
+ # How to organize memory under memory_planning=True:
62
+ # - "none": do not try to pool storage, just reuse
63
+ # - "intermediates": all non-outputs share storage, outputs each get unique storage
64
+ # - "outputs": two pools, one for intermediates (freed on return) and one for outputs
65
+ # - "combined": a single pool for both intermediates and outputs
66
+ memory_pool = os.environ.get("TORCHINDUCTOR_MEMORY_POOL", "intermediates")
67
+
68
+ # codegen benchmark harness
69
+ benchmark_harness = True
70
+
71
+ # fuse pointwise into templates
72
+ epilogue_fusion = True
73
+
74
+ # do epilogue fusions before other fusions
75
+ epilogue_fusion_first = False
76
+
77
+ # enable pattern match+replace optimizations
78
+ pattern_matcher = True
79
+
80
+ # register custom graph optimization pass hook. so far, pre/post passes are
81
+ # only applied before/after pattern_matcher in post_grad_passes.
82
+ #
83
+ # def my_custom_pre_pass(graph: torch.fx.graph.Graph):
84
+ # # my custom graph optimization pass
85
+ # ...
86
+ #
87
+ # def my_custom_post_pass(graph: torch.fx.graph.Graph):
88
+ # # my custom graph optimization pass
89
+ # ...
90
+ #
91
+ # torch._inductor.config.post_grad_custom_pre_pass = my_custom_pre_pass
92
+ # torch._inductor.config.post_grad_custom_post_pass = my_custom_post_pass
93
+ post_grad_custom_pre_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None
94
+ post_grad_custom_post_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None
95
+
96
+ # Registers a custom pregrad pass. Note that the pre-grad IR is 1.
97
+ # non-functional, 2. non-normalized, and 3. prone to change. Ideally we should
98
+ # use post-grad passes.
99
+ pre_grad_custom_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None
100
+
101
+ # Optimize away split cat patterns (Experimental)
102
+ split_cat_fx_passes = True
103
+
104
+ # Optimize conv-batchnorm if batchnorm is in eval mode. Slightly reduces numerical stability.
105
+ efficient_conv_bn_eval_fx_passes = False
106
+
107
+ # Enable predispatch aten IR for export
108
+ is_predispatch = False
109
+
110
+ # Deprecated
111
+ group_fusion = False
112
+
113
+ # Deprecated
114
+ batch_fusion = True
115
+
116
+ # Pre grad group/batch fusion and options in order, set to empty dict to disable fusion.
117
+ # Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions()` to see available fusions.
118
+ pre_grad_fusion_options: Dict[str, Dict[str, Any]] = {
119
+ "batch_linear": {},
120
+ "batch_linear_lhs": {},
121
+ "batch_layernorm": {},
122
+ "batch_tanh": {},
123
+ "batch_relu": {},
124
+ "batch_sigmoid": {},
125
+ }
126
+
127
+ # Post grad group/batch fusion and options, set to empty dict to disable fusion.
128
+ # Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions(False)` to see available fusions.
129
+ post_grad_fusion_options: Dict[str, Dict[str, Any]] = {}
130
+
131
+ # enable reordering pass for improving memory locality
132
+ reorder_for_locality = True
133
+
134
+ # Scale down RBLOCK for better occupancy
135
+ dynamic_scale_rblock = os.environ.get("TORCHINDUCTOR_DYNAMIC_SCALE_RBLOCK", "1") == "1"
136
+
137
+ # this forces fusion for int_mm with mul. Needed when you want to avoid realizing the int32
138
+ # but the mul gets fused with other pointwise ops instead.
139
+ force_fuse_int_mm_with_mul = False
140
+
141
+ # for pattern torch.mm(a, b.to(dtype)) with cuda tensors,
142
+ # enable torch._inductor.kernel.mm.tuned_mixed_mm fused kernel.
143
+ # Autotune will compare perf with normal cast->then->mm option
144
+ use_mixed_mm = False
145
+
146
+ # enable runtime numeric check for pre/post grad fx passes
147
+ # floating point provides limited accuracy (about 7 decimal digits for single precision
148
+ # floating point numbers, about 16 decimal digits for double precision floating point numbers)
149
+ # according to PyTorch documentation.
150
+ # https://pytorch.org/docs/stable/notes/numerical_accuracy.html#batched-computations-or-slice-computations
151
+ fx_passes_numeric_check: Dict[str, Any] = {
152
+ "pre_grad": False,
153
+ "precision": 1e-4,
154
+ "num_iterations": 1,
155
+ "requires_optimizer": True,
156
+ }
157
+
158
+ # for pattern torch.mm(a, b.to(dtype)) with cuda tensors, always use
159
+ # torch._inductor.kernel.mm.tuned_mixed_mm's fused kernel.
160
+ # Autotune will not compare with normal cast->then->mm option.
161
+ # (if force_mixed_mm is true, the use_mixed_mm flag will be ignored)
162
+ force_mixed_mm = False
163
+
164
+ # enable reordering pass for increasing overlap between compute and communication
165
+ reorder_for_compute_comm_overlap = False
166
+
167
+ # passes (in execution order) for increasing overlap between compute and communication
168
+ # for built-in passes, use string name; for user-defined passes, pass in the function handle
169
+ reorder_for_compute_comm_overlap_passes = [
170
+ "reorder_compute_for_overlap",
171
+ "sink_waits",
172
+ "raise_comms",
173
+ ]
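For illustration, a user-defined pass can sit next to the built-in names above; a minimal sketch, assuming such a pass receives and returns the list of scheduler nodes (the exact signature is not shown in this file, and my_noop_reorder is a made-up name):

import torch._inductor.config as inductor_config

def my_noop_reorder(snodes):
    # placeholder user pass: keep the schedule unchanged
    return snodes

inductor_config.reorder_for_compute_comm_overlap = True
inductor_config.reorder_for_compute_comm_overlap_passes.append(my_noop_reorder)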
174
+
175
+ # runtime estimation function for ops
176
+ # for built-in estimation function, pass in "default"; for user-defined estimation function, pass in the function handle
177
+ estimate_op_runtime = "default"
178
+
179
+ # unit: GB/s, uni-directional P2P bandwidth per card
180
+ # default value is NVLink
181
+ intra_node_bw = 300
182
+
183
+ # unit: GB/s, uni-directional P2P bandwidth per node
184
+ # default value is InfiniBand
185
+ inter_node_bw = 25
186
+
187
+ # enable slow autotuning passes to select algorithms
188
+ max_autotune = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE") == "1"
189
+
190
+ # enable slow autotuning passes to select pointwise/reductions algorithms
191
+ max_autotune_pointwise = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE") == "1"
192
+
193
+ # enable slow autotuning passes to select gemm algorithms
194
+ max_autotune_gemm = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_GEMM") == "1"
195
+
196
+ # enable autotune local cache
197
+ use_autotune_local_cache = True
198
+
199
+ # enable autotune remote cache
200
+ use_autotune_remote_cache = (
201
+ os.environ.get("TORCH_INDUCTOR_AUTOTUNE_REMOTE_CACHE") == "1"
202
+ )
203
+
204
+ # force cublas and triton to use the same precision; cublas supports TF32 for matmul operations
205
+ # when m, n, k are multiples of 16, 16, 8, whereas triton supports TF32 for matmul operations
206
+ # for any combinations of m, n, k, regardless of their alignment. setting this flag will ensure
207
+ # that triton does not use TF32 wherever cublas would not use TF32
208
+ force_same_precision = (
209
+ True if is_fbcode() else os.environ.get("TORCHINDUCTOR_FORCE_SAME_PRECISION") == "1"
210
+ )
211
+ # Specify candidate backends for gemm autotune.
212
+ # Possible choices are combinations of: ATen, Triton, CUTLASS.
213
+ # ATen: default Pytorch ATen kernels.
214
+ # Triton: Triton templates defined in torch inductor.
215
+ # CUTLASS: Cutlass templates and kernels.
216
+ max_autotune_gemm_backends = os.environ.get(
217
+ "TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS", "ATEN,TRITON"
218
+ ).upper()
219
+
220
+ # the value used as a fallback for the unbacked SymInts
221
+ # that can appear in the input shapes (e.g., in autotuning)
222
+ unbacked_symint_fallback = 8192
223
+
224
+ # enable searching global and local cache regardless of `max_autotune`
225
+ search_autotune_cache = os.environ.get("TORCHINDUCTOR_SEARCH_AUTOTUNE_CACHE") == "1"
226
+
227
+ save_args = os.environ.get("TORCHINDUCTOR_SAVE_ARGS") == "1"
228
+
229
+ # We will disable creating subprocess for autotuning if this is False
230
+ autotune_in_subproc = os.environ.get("TORCHINDUCTOR_AUTOTUNE_IN_SUBPROC") == "1"
231
+
232
+ # If autotuning in subprocess, whether to use multiple devices
233
+ autotune_multi_device = os.environ.get("TORCHINDUCTOR_AUTOTUNE_MULTI_DEVICE") == "1"
234
+
235
+ coordinate_descent_tuning = (
236
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_TUNING") == "1"
237
+ )
238
+ coordinate_descent_check_all_directions = (
239
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_CHECK_ALL_DIRECTIONS") == "1"
240
+ )
241
+ coordinate_descent_search_radius = int(
242
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_RADIUS", "1")
243
+ )
244
+
245
+ # Disabled by default on ROCm, opt-in if model utilises NHWC convolutions
246
+ layout_opt_default = "1" if not torch.version.hip else "0"
247
+ layout_optimization = (
248
+ os.environ.get("TORCHINDUCTOR_LAYOUT_OPTIMIZATION", layout_opt_default) == "1"
249
+ )
250
+
251
+ force_layout_optimization = os.environ.get("TORCHINDUCTOR_FORCE_LAYOUT_OPT", "0") == "1"
252
+
253
+
254
+ # Whether to keep the output strides the same as eager after layout optimization.
255
+ keep_output_stride = os.environ.get("TORCHINDUCTOR_KEEP_OUTPUT_STRIDE", "1") == "1"
256
+
257
+ # Enabling this will let the compiler print warning messages if a generated triton
258
+ # kernel has inputs with mixed layouts. This is helpful for perf debugging
259
+ # since a kernel with mixed layout inputs may run much slower than one whose inputs
260
+ # have uniform layouts.
261
+ warn_mix_layout = os.environ.get("TORCHINDUCTOR_WARN_MIX_LAYOUT") == "1"
262
+
263
+ # control store vs recompute heuristic
264
+ # For fanouts, rematerialization can lead to exponential blowup. So, have
265
+ # a smaller threshold
266
+ realize_reads_threshold = 4
267
+ realize_opcount_threshold = 30
268
+
269
+ # Threshold to prevent excessive accumulation of ops in one buffer during lowering
270
+ realize_acc_reads_threshold = 8
271
+
272
+ # fallback to eager for random/dropout, this is slow but useful for debugging
273
+ fallback_random = False
274
+
275
+ # automatically create fallbacks when encountering an unhandled op
276
+ implicit_fallbacks = True
277
+
278
+ # fuse even in cases without common reads
279
+ aggressive_fusion = False
280
+
281
+ # For each fused kernel in the wrapper, comment with the nodes that get fused.
282
+ # Useful for debugging fusion.
283
+ debug_fusion = os.environ.get("TORCHINDUCTOR_DEBUG_FUSION") == "1"
284
+ benchmark_fusion = os.environ.get("TORCHINDUCTOR_BENCHMARK_FUSION") == "1"
285
+ enabled_metric_tables = os.environ.get("TORCHINDUCTOR_ENABLED_METRIC_TABLES", "")
286
+
287
+ # how many nodes to allow into a single fusion
288
+ max_fusion_size = 64
289
+
290
+ # max number of inputs to generate cat as a pointwise op with masked loads
291
+ max_pointwise_cat_inputs = 8
292
+
293
+ # replace small reductions with pointwise, disable with `= 1`
294
+ unroll_reductions_threshold = 8
295
+
296
+ # Add extra comments to output code (causes compile cache misses)
297
+ comment_origin = False
298
+
299
+ # Convert 1x1 convs into matmuls
300
+ conv_1x1_as_mm = False
301
+
302
+ # Enable split reductions for better utilization when the dimension
303
+ # being reduced over is large (by splitting it)
304
+ split_reductions = True
305
+
306
+ benchmark_kernel = os.environ.get("TORCHINDUCTOR_BENCHMARK_KERNEL", "0") == "1"
307
+
308
+ # Enable constant and index_expr folding
309
+ constant_and_index_propagation = True
310
+
311
+ # we always add constants into graph.constants without
312
+ # performing any constant-inlining optimization
313
+ always_keep_tensor_constants = False
314
+
315
+ # assert that indirect indexing does not read / write out of bounds
316
+ assert_indirect_indexing = True
317
+
318
+ # constant folding on the joint graph
319
+ joint_graph_constant_folding = True
320
+
321
+ # Enable indirect_indexing asserts for decompositions and lowerings
322
+ debug_index_asserts = False
323
+
324
+ # warnings intended for PyTorch developers, disable for point releases
325
+ is_nightly_or_source = "dev" in torch.__version__ or "git" in torch.__version__
326
+ developer_warnings = is_fbcode() or is_nightly_or_source
327
+
328
+ # The multiprocessing start method to use for inductor workers in the codecache.
329
+ # TODO: fork is not safe in a multithreaded environment, we should evaluate changing
330
+ # the default to spawn.
331
+ worker_start_method = "fork"
332
+
333
+
334
+ def decide_compile_threads():
335
+ """
336
+ Here is the precedence for deciding compile_threads:
337
+ 1. User can override it by TORCHINDUCTOR_COMPILE_THREADS. One may want to disable async compiling by
338
+ setting this to 1 to make pdb happy.
339
+ 2. Set to 1 if it's the win32 platform or a fbcode build
340
+ 3. decide by the number of CPU cores
341
+ """
342
+ if "TORCHINDUCTOR_COMPILE_THREADS" in os.environ:
343
+ return int(os.environ["TORCHINDUCTOR_COMPILE_THREADS"])
344
+ elif sys.platform == "win32" or is_fbcode():
345
+ return 1
346
+ else:
347
+ cpu_count = (
348
+ len(os.sched_getaffinity(0))
349
+ if hasattr(os, "sched_getaffinity")
350
+ else os.cpu_count()
351
+ )
352
+ assert cpu_count
353
+ return min(32, cpu_count)
354
+
355
+
356
+ compile_threads = decide_compile_threads()
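A tiny illustration of rule 1 above: because compile_threads is computed at import time from the environment, setting the variable before anything imports this module forces serial compilation (handy under pdb). The script name in the comment is hypothetical:

# TORCHINDUCTOR_COMPILE_THREADS=1 python train.py
import os
os.environ.setdefault("TORCHINDUCTOR_COMPILE_THREADS", "1")  # must run before the import below
import torch._inductor.config as inductor_config
print(inductor_config.compile_threads)  # 1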
357
+
358
+ # gemm autotuning global cache dir
359
+ if is_fbcode():
360
+ from libfb.py import parutil
361
+
362
+ try:
363
+ if __package__:
364
+ global_cache_dir = parutil.get_dir_path(
365
+ os.path.join(__package__.replace(".", os.sep), "fb/cache")
366
+ )
367
+ else:
368
+ global_cache_dir = parutil.get_dir_path("fb/cache")
369
+ except ValueError:
370
+ global_cache_dir = None
371
+ else:
372
+ global_cache_dir = None
373
+
374
+ # If kernel is fused, the name is generated from the origin node op names
375
+ # for larger kernels limit this
376
+ kernel_name_max_ops = 10
377
+
378
+ # Pad input tensors of matmul/bmm/addmm to leverage Tensor Cores in NVIDIA GPUs
379
+ shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"
380
+
381
+ # Fx-based linear/matmul/bmm + permute/transpose vertical fusion
382
+ permute_fusion = os.environ.get("TORCHINDUCTOR_PERMUTE_FUSION", "0") == "1"
383
+
384
+ # Mark the wrapper call in PyTorch profiler
385
+ profiler_mark_wrapper_call = False
386
+
387
+ # Generate hook calls to torch._inductor.hooks.run_intermediate_hooks for
388
+ # every intermediate for which we can correlate it with an intermediate
389
+ # from the original FX graph
390
+ generate_intermediate_hooks = False
391
+
392
+ # Populate traceback field on IRNode; good for debugging why origin_node is
393
+ # not populated, or finding out where an IRNode was constructed
394
+ debug_ir_traceback = False
395
+
396
+ # used for debugging to make sure config is properly set
397
+ _raise_error_for_testing = False
398
+
399
+ _profile_var = os.environ.get("TORCHINDUCTOR_PROFILE", "")
400
+ profile_bandwidth = _profile_var != ""
401
+ profile_bandwidth_regex = "" if _profile_var == "1" else _profile_var
402
+ # Specify a file where we print out the profiling results.
403
+ # None means we do not dump results to a file.
404
+ profile_bandwidth_output = os.environ.get("TORCHINDUCTOR_PROFILE_OUTPUT", None)
405
+
406
+ # TODO: remove later
407
+ disable_cpp_codegen = False
408
+
409
+
410
+ # Freezing will attempt to inline weights as constants in optimization
411
+ # and run constant folding and other optimizations on them. After freezing, weights
412
+ # can no longer be updated.
413
+ freezing: bool = os.environ.get("TORCHINDUCTOR_FREEZING", "0") == "1"
414
+
415
+ # Make freezing invalidate the eager Parameters of nn modules, to avoid memory overhead
416
+ # of potentially keeping multiple copies of weights.
417
+ freezing_discard_parameters: bool = False
418
+
419
+ # Kill switch for allowing temporary tensors to be allocated as stack arrays. Tests
420
+ # should be run with this flag both on and off to make sure we have coverage.
421
+ allow_stack_allocation: bool = (
422
+ os.environ.get("TORCHINDUCTOR_STACK_ALLOCATION", "1") == "1"
423
+ )
424
+
425
+ # Enables an alternate DSO interface (the "minimal ArrayRef interface") intended
426
+ # to maximize performance for use cases that it can accommodate at the expense of
427
+ # generality. In brief:
428
+ # - inputs and outputs are ArrayRefTensor<T> (note that strides are required, but the
429
+ # tensor must be contiguous)
430
+ # - constant handling is unchanged because it is not a per-inference-iteration bottleneck
431
+ #
432
+ # When the DSO is generated in this mode, the usual interface will also be supported,
433
+ # but performance for that interface may be degraded.
434
+ use_minimal_arrayref_interface: bool = False
435
+
436
+ # decompose some memory bound matmul/bmm to mul
437
+ decompose_mem_bound_mm: bool = False
438
+
439
+
440
+ # config specific to codegen/cpp.py
441
+ class cpp:
442
+ # set to torch.get_num_threads()
443
+ threads = -1
444
+
445
+ # Do not generate loops when the condition doesn't hold, like:
446
+ # for(long i0=4096; i0<4096; i0+=1)
447
+ no_redundant_loops = True
448
+
449
+ # Assume number of threads is dynamic, don't specialize thread number.
450
+ # Kernels don't recompile on thread number changes with this flag on.
451
+ # For single-threaded workload, turning it on would incur a slight
452
+ # performance degradation.
453
+ dynamic_threads = False
454
+
455
+ simdlen: Optional[int] = None
456
+ min_chunk_size = 4096
457
+ cxx = (
458
+ None, # download gcc12 from conda-forge if conda is installed
459
+ # "g++-12",
460
+ # "g++-11",
461
+ # "g++-10",
462
+ # "clang++",
463
+ os.environ.get("CXX", "clang++" if sys.platform == "darwin" else "g++"),
464
+ # "g++.par",
465
+ )
466
+ # Allow kernel performance profiling via PyTorch profiler
467
+ enable_kernel_profile = False
468
+
469
+ # enable weight prepacking to get a better performance; may lead to large memory footprint
470
+ weight_prepack = True
471
+
472
+ # Inject a bug into our relu implementation; useful for testing our repro
473
+ # extraction and minification functionality.
474
+ # Valid values: "compile_error", "runtime_error", "accuracy"
475
+ inject_relu_bug_TESTING_ONLY: Optional[str] = None
476
+ inject_log1p_bug_TESTING_ONLY: Optional[str] = None
477
+
478
+ # If None, autodetect whether or not AVX512/AVX2 can be used. Otherwise,
479
+ # force usage as specified, without testing.
480
+ vec_isa_ok: Optional[bool] = None
481
+
482
+ # similar to config.triton.descriptive_names
483
+ descriptive_names = "original_aten"
484
+
485
+ # how many nodes to allow into a single horizontal fusion
486
+ max_horizontal_fusion_size = 16
487
+
488
+ # Make scatter_reduce fallback when reduce is sum to avoid performance regression
489
+ # using atomic_add.
490
+ fallback_scatter_reduce_sum = True
491
+
492
+ # Use -funsafe-math-optimizations when compiling
493
+ enable_unsafe_math_opt_flag = False
494
+
495
+ # Use -ffp-contract when compiling
496
+ enable_floating_point_contract_flag = False
497
+
498
+
499
+ # config specific to codegen/triton.py
500
+ class triton:
501
+ # Use cudagraphs on output code
502
+ cudagraphs = False
503
+
504
+ # Use cudagraph trees for memory pooling if `cudagraphs` is True
505
+ cudagraph_trees = True
506
+
507
+ # assertions not on the fast path, steady state
508
+ slow_path_cudagraph_asserts = True
509
+
510
+ # TODO - need to debug why this prevents cleanup
511
+ cudagraph_trees_history_recording = False
512
+
513
+ # assertions on the fast path
514
+ fast_path_cudagraph_asserts = False
515
+
516
+ # skip warmup for cudagraph trees
517
+ skip_cudagraph_warmup = False
518
+
519
+ # Synchronize before and after every compiled graph.
520
+ debug_sync_graph = False
521
+
522
+ # Synchronize after every kernel launch, to help pinpoint bugs
523
+ debug_sync_kernel = False
524
+
525
+ # Always load full blocks (rather than broadcasting inside the block)
526
+ dense_indexing = False
527
+
528
+ # limit tiling dimensions
529
+ max_tiles = 2
530
+
531
+ # use triton.autotune for pointwise ops with complex layouts
532
+ # this should only be disabled for debugging/testing
533
+ autotune_pointwise = True
534
+
535
+ # max autotune gemm with cublasLt
536
+ autotune_cublasLt = True
537
+
538
+ # should we stop a fusion to allow better tiling?
539
+ tiling_prevents_pointwise_fusion = True
540
+ tiling_prevents_reduction_fusion = True
541
+
542
+ # should we give different names to kernels
543
+ # Note: This is orthogonal to descriptive_names - this is deciding whether
544
+ # our triton kernel names should all be `triton_` (to maximize caching) or
545
+ # whether they should be unique.
546
+ unique_kernel_names = os.environ.get("TORCHINDUCTOR_UNIQUE_KERNEL_NAMES") == "1"
547
+
548
+ # should we put op names in kernel names
549
+ # False: No special names (just triton__1, triton__2, etc.)
550
+ # "torch": Maps to the fx op in the Dynamo graph (module name, method name, etc.)
551
+ # "original_aten": Maps to the highest-level aten op (i.e. pre-decompositions)
552
+ # "inductor_node": Maps to the node name in the FX graph passed to Inductor
553
+ descriptive_names = "original_aten"
554
+
555
+ # use alternate codegen for smaller reductions
556
+ persistent_reductions = (
557
+ os.environ.get("TORCHINDUCTOR_PERSISTENT_REDUCTIONS", "1") == "1"
558
+ )
559
+
560
+ # 0/False: disable
561
+ # 1/True: enable, use tuning to pick between different subkernels
562
+ # 2: enable, force using persistent reduction (for debugging)
563
+ # 3: enable, force using non-persistent reduction (for debugging)
564
+ multi_kernel = int(os.environ.get("TORCHINDUCTOR_MULTI_KERNEL", "0"))
565
+
566
+ # hint to Triton when arguments are divisible by 16
567
+ divisible_by_16 = True
568
+
569
+ # these are not enforced, but they are used by asserts in triton_heuristics.py
570
+ # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048
571
+
572
+ # Max RBLOCK will be large for multi-kernel since we do more aggressive
573
+ # persistent reduction.
574
+ max_block = {
575
+ "X": 2048,
576
+ "Y": 1024,
577
+ "Z": 1024,
578
+ "R": 4096 * (16 if multi_kernel else 1),
579
+ }
580
+
581
+ # Minimum RBLOCK to be used for a TritonSplitScanKernel
582
+ # NOTE: This also indirectly controls the size of workspace buffer required
583
+ min_split_scan_rblock = 256
584
+
585
+ # Store the generated cubin files for cpp wrapper code to load
586
+ store_cubin = False
587
+
588
+ # the max number of spills we allow for the configs we benchmark.
589
+ # Setting this to 0 means we skip a config if it spills even a single
590
+ # register.
591
+ # Setting it to a larger value allows a config that spills a small amount
592
+ # of registers to still be benchmarked.
593
+ #
594
+ # NOTE: triton will always report >0 register spills for kernels using sin/cos.
595
+ # (check this issue https://github.com/openai/triton/issues/1756 )
596
+ # So far we see a fixed 8 spilled registers for kernels using sin/cos.
597
+ # Raise the threshold to 16 to be safe.
598
+ # We should revisit this once we understand more of the source of register spills.
599
+ spill_threshold: int = 16
600
+
601
+ # Generate code containing the newer tl.make_block_ptr() API for loads/stores
602
+ use_block_ptr = False
603
+
604
+ # Inject a bug into our relu implementation; useful for testing our repro
605
+ # extraction and minification functionality.
606
+ # Valid values: "compile_error", "runtime_error", "accuracy"
607
+ inject_relu_bug_TESTING_ONLY: Optional[str] = None
608
+
609
+
610
+ class aot_inductor:
611
+ # AOTInductor output path
612
+ # If an absolute path is specified, the generated lib files will be stored under the directory;
613
+ # If a relative path is specified, it will be used as a subdirectory under the default caching path;
614
+ # If not specified, a temp directory will be created under the default caching path.
615
+ # If the specified path contains something like "model.so", the sub-string will be used
616
+ # to name the generated library.
617
+ output_path = ""
618
+
619
+ debug_compile = os.environ.get("AOT_INDUCTOR_DEBUG_COMPILE", "0") == "1"
620
+
621
+ # Serialized tree spec for flattening inputs
622
+ serialized_in_spec = ""
623
+
624
+ # Serialized tree spec for flattening outputs
625
+ serialized_out_spec = ""
626
+
627
+ # flag to decide whether to create a submodule for constant graph.
628
+ use_runtime_constant_folding: bool = False
629
+
630
+
631
+ class cuda:
632
+ # CUDA arch to use for CUDA template kernel compilation.
633
+ # e.g. "70", "75", "80", "90", etc.
634
+ # When arch is None, Inductor uses torch.cuda.get_device_capability(0).
635
+ arch: Optional[str] = None
636
+
637
+ # CUDA version to use for CUDA template kernel compilation.
638
+ # e.g. "11.4", "12.1", etc.
639
+ # When version is None, Inductor uses torch.version.cuda.
640
+ version: Optional[str] = None
641
+
642
+ # Optimization level for the host compiler.
643
+ compile_opt_level = "-O1"
644
+
645
+ # Whether to enable device LTO (link-time-optimization).
646
+ enable_cuda_lto = False
647
+
648
+ # Whether to keep intermediate files during compilation.
649
+ enable_ptxas_info = False
650
+
651
+ # Whether to enable debug info, e.g. line number, cutlass debug info.
652
+ enable_debug_info = False
653
+
654
+ # Whether to use fast math.
655
+ use_fast_math = False
656
+
657
+ # Path to the CUTLASS repo root directory.
658
+ # The default path only works under PyTorch local development environment.
659
+ cutlass_dir = os.environ.get(
660
+ "TORCHINDUCTOR_CUTLASS_DIR",
661
+ os.path.abspath(
662
+ os.path.join(os.path.dirname(torch.__file__), "../third_party/cutlass/")
663
+ ),
664
+ )
665
+
666
+ # Configures the maximum number of CUTLASS configs to profile in max_autotune.
667
+ # By default it's None, so that all CUTLASS configs are tuned.
668
+ # This is mainly used to reduce test time in CI.
669
+ cutlass_max_profiling_configs: Optional[int] = None
670
+
671
+ # Path to CUDA NVCC.
672
+ # NVCC search order:
673
+ # 1) cuda_cxx set in this config
674
+ # 2) CUDACXX environment variable
675
+ # 3) CUDA_HOME environment variable
676
+ # 4) default system search PATH.
677
+ cuda_cxx: Optional[str] = None
678
+
679
+ # If set to True, it will ensure that only GEMM ops capable of
680
+ # epilogue fusion via CUTLASS Epilogue Visitor Trees ( EVT )
681
+ # are enabled for the CUTLASS backend.
682
+ cutlass_only_evt_capable_ops: bool = False
683
+
684
+
685
+ # create a directory containing lots of debug information
686
+ class trace:
687
+ # master switch for all debugging flags below
688
+ enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
689
+
690
+ # Save debug information to a temporary directory
691
+ # If not specified, a temp directory will be created by system
692
+ debug_dir: Optional[str] = None
693
+
694
+ # Save python logger call >=logging.DEBUG
695
+ debug_log = False
696
+
697
+ # Save python logger call >=logging.INFO
698
+ info_log = False
699
+
700
+ # Save input FX graph (post decomps, pre optimization)
701
+ fx_graph = True
702
+
703
+ # Save FX graph after transformations
704
+ fx_graph_transformed = True
705
+
706
+ # Save TorchInductor IR before fusion pass
707
+ ir_pre_fusion = True
708
+
709
+ # Save TorchInductor IR after fusion pass
710
+ ir_post_fusion = True
711
+
712
+ # Copy generated code to trace dir
713
+ output_code = True
714
+
715
+ # SVG figure showing post-fusion graph
716
+ graph_diagram = os.environ.get("INDUCTOR_POST_FUSION_SVG", "0") == "1"
717
+
718
+ # SVG figure showing fx with fusion
719
+ draw_orig_fx_graph = os.environ.get("INDUCTOR_ORIG_FX_SVG", "0") == "1"
720
+
721
+ # We draw our fx graphs with the "record" shape attribute by default.
722
+ # Sometimes, when the graph is very complex, we may hit dot errors like below:
723
+ # "flat edge between adjacent nodes one of which has a record shape -
724
+ # replace records with HTML-like labels"
725
+ # and thus fail to generate a graph. So, let's give the user an option
726
+ # to specify the shape attribute for the dot graph. For example, passing
727
+ # INDUCTOR_DOT_GRAPH_SHAPE_SVG = "none" would let us generate HTML-like labels
728
+ # to work around the above failure.
729
+ dot_graph_shape = os.environ.get("INDUCTOR_DOT_GRAPH_SHAPE_SVG", None)
730
+
731
+ # Store cProfile (see snakeviz to view)
732
+ compile_profile = False
733
+
734
+ # Upload the .tar.gz file
735
+ # Needs to be overridden based on specific environment needs
736
+ upload_tar: Optional[Callable[[str], None]] = None
737
+
738
+ log_autotuning_results: bool = False
739
+
740
+
741
+ _save_config_ignore = {
742
+ # workaround: "Can't pickle <function ...>"
743
+ "trace.upload_tar",
744
+ }
745
+
746
+ if TYPE_CHECKING:
747
+ from torch.utils._config_typing import * # noqa: F401, F403
748
+
749
+ from torch.utils._config_module import install_config_module
750
+
751
+ # adds patch, save_config, etc
752
+ install_config_module(sys.modules[__name__])
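A short usage sketch of the installed config module, relying only on what the comment above states (install_config_module adds patch and save_config); the values are illustrative:

import torch
import torch._inductor.config as inductor_config

inductor_config.max_autotune = True                  # plain attribute assignment sticks globally
with inductor_config.patch(max_fusion_size=32):      # temporary override, restored on exit
    fn = torch.compile(lambda x: x.sin() + 1)
    fn(torch.randn(16))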
venv/lib/python3.10/site-packages/torch/_inductor/constant_folding.py ADDED
@@ -0,0 +1,264 @@
1
+ import collections
2
+ from typing import Any, Callable, Dict, Optional
3
+
4
+ import torch
5
+ import torch.utils._pytree as pytree
6
+
7
+ aten = torch.ops.aten
8
+
9
+ # We would like to split modules into two subgraphs for runtime weight updates to work correctly.
10
+ # The use case and more information could be found at:
11
+ # https://docs.google.com/document/d/1inZC-8KarJ6gKB7G9egmYLx1V_dKX_apxon0w4zPC0Q/edit?usp=sharing
12
+ META_TAG = "MODULE_TYPE"
13
+ MODULE_TAG = "_MAIN_MODULE"
14
+ CONST_MODULE_TAG = "_CONST_MODULE"
15
+
16
+
17
+ def replace_node_with_constant(gm, node, constant, name=None):
18
+ g = gm.graph
19
+
20
+ if name:
21
+ qualname = name
22
+ else:
23
+ if not hasattr(gm, "_frozen_param_count"):
24
+ gm._frozen_param_count = 0
25
+ i = gm._frozen_param_count
26
+
27
+ while True:
28
+ qualname = f"_frozen_param{i}"
29
+ if not hasattr(gm, qualname):
30
+ break
31
+ i += 1
32
+
33
+ gm._frozen_param_count = i + 1
34
+
35
+ with g.inserting_before(node):
36
+ new_input_node = g.create_node("get_attr", qualname, (), {})
37
+ node.replace_all_uses_with(new_input_node)
38
+ new_input_node.meta.update(node.meta)
39
+ g.erase_node(node)
40
+
41
+ # needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
42
+ gm.register_buffer(qualname, constant)
43
+ setattr(gm, qualname, constant)
44
+
45
+
46
+ class ConstantFolder(torch.fx.Interpreter):
47
+ def __init__(
48
+ self,
49
+ gm,
50
+ skip_constructors=False,
51
+ ):
52
+ super().__init__(gm)
53
+ self.node_replacements: Dict[torch.fx.Node, Any] = {}
54
+ self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
55
+ self.unknown_value = object()
56
+ self.skip_constructors: bool = skip_constructors
57
+
58
+ # overwrite this to deallocate env values if their only remaining use
59
+ # is the output
60
+ self.user_to_last_uses = self.node_to_last_non_output_use()
61
+
62
+ def is_impure(self, node: torch.fx.node.Node):
63
+ if node.target in [
64
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
65
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
66
+ torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
67
+ ]:
68
+ # For the pattern fp32_weight -> q -> dq
69
+ # We only folding fp32_weight -> q
70
+ # int8_weight and leave dq in graph to be fused
71
+ return True
72
+ return False
73
+
74
+ def node_to_last_non_output_use(self):
75
+ last_non_output_use = collections.defaultdict(list)
76
+ seen_uses = set()
77
+ output_node = next(iter(reversed(self.module.graph.nodes)))
78
+
79
+ for node in reversed(self.module.graph.nodes):
80
+ if node.target == "output":
81
+ continue
82
+
83
+ def add_use(inp):
84
+ if inp in seen_uses:
85
+ return
86
+
87
+ seen_uses.add(inp)
88
+ last_non_output_use[node].append(inp)
89
+
90
+ pytree.tree_map_only(torch.fx.Node, add_use, (node.args, node.kwargs))
91
+
92
+ # if this node is only used in output, we want to gc it right away
93
+ if len(node.users) == 1 and output_node in node.users:
94
+ last_non_output_use[node].append(node)
95
+
96
+ return last_non_output_use
97
+
98
+ def run_node(self, node):
99
+ if node.target == "output":
100
+ # because we remove nodes from env on last non output use,
101
+ # re-define them now or we'll get error in interpreter
102
+ def set_env(arg):
103
+ self.env[arg] = self.unknown_value
104
+
105
+ pytree.tree_map_only(torch.fx.Node, set_env, node.args)
106
+ return super().run_node(node)
107
+
108
+ args, kwargs = self.fetch_args_kwargs_from_env(node)
109
+ flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)
110
+
111
+ if self.unknown_value in flattened_inputs:
112
+ return self.unknown_value
113
+
114
+ # TODO - fix errors with this
115
+ if (
116
+ node.op == "call_function"
117
+ and node.target == aten._efficientzerotensor.default
118
+ ):
119
+ return self.unknown_value
120
+
121
+ # TODO - constant folding triton kernel returns the inputs -- fix this
122
+ if (
123
+ node.op == "call_function"
124
+ and node.name == "triton_kernel_wrapper_functional_proxy"
125
+ ):
126
+ return self.unknown_value
127
+
128
+ # skip constructors, since inductor generates optimal code for them already
129
+ # and turning into tensor would result in an additional global memory read
130
+ # TODO - more complicated strategy
131
+ if (
132
+ self.skip_constructors
133
+ and node.op != "get_attr"
134
+ and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
135
+ ):
136
+ return self.unknown_value
137
+
138
+ # All mutations should either be removed or on inputs which we did not make constant
139
+ if (
140
+ isinstance(node.target, torch._ops.OpOverload)
141
+ and torch.Tag.nondeterministic_seeded in node.target.tags
142
+ ):
143
+ return self.unknown_value
144
+
145
+ out = super().run_node(node)
146
+
147
+ if node.op != "get_attr" and isinstance(out, torch.Tensor):
148
+ if not self.insertable_tensor_check(out):
149
+ return out
150
+
151
+ if self.is_impure(node):
152
+ return self.unknown_value
153
+
154
+ self.add_node_replacement(node, out)
155
+
156
+ flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
157
+
158
+ for n in flattened_node_inps:
159
+ if not isinstance(n, torch.fx.Node):
160
+ continue
161
+
162
+ self.replaced_uses[n] += 1
163
+
164
+ for to_delete in self.user_to_last_uses.get(node, []):
165
+ if self.replaced_uses[to_delete] == len(to_delete.users):
166
+ self.node_replacements.pop(to_delete, None)
167
+
168
+ return out
169
+
170
+ def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
171
+ return True
172
+
173
+ def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
174
+ self.node_replacements[node] = tensor
175
+
176
+ def run(self):
177
+ env = {}
178
+ for n in self.module.graph.nodes:
179
+ if n.op == "placeholder":
180
+ env[n] = self.unknown_value
181
+ return super().run(initial_env=env)
182
+
183
+
184
+ @torch.utils._python_dispatch._disable_current_modes()
185
+ def constant_fold(gm, constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None):
186
+ cf = ConstantFolder(gm, skip_constructors=True)
187
+ cf.run()
188
+
189
+ for node, constant in cf.node_replacements.items():
190
+ if constraint_fn is not None and not constraint_fn(node):
191
+ continue
192
+ replace_node_with_constant(gm, node, constant)
193
+
194
+ erased_params = []
195
+ for node in gm.graph.nodes:
196
+ if node.op == "get_attr" and len(node.users) == 0:
197
+ if hasattr(gm, node.target):
198
+ delattr(gm, node.target)
199
+ erased_params.append(node)
200
+
201
+ for node in erased_params:
202
+ gm.graph.erase_node(node)
203
+
204
+ gm.graph.eliminate_dead_code()
205
+ gm.graph.lint()
206
+ gm.recompile()
207
+
208
+
209
+ @torch.utils._python_dispatch._disable_current_modes()
210
+ def constant_graph_tag(gm: torch.fx.GraphModule):
211
+ cf = ConstantFolder(gm, skip_constructors=True)
212
+ cf.run()
213
+
214
+ for node in gm.graph.nodes:
215
+ if (
216
+ node.op == "get_attr"
217
+ or node in cf.node_replacements
218
+ or node in cf.replaced_uses
219
+ ):
220
+ node.meta[META_TAG] = CONST_MODULE_TAG
221
+ else:
222
+ node.meta[META_TAG] = MODULE_TAG
223
+
224
+
225
+ def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
226
+ """
227
+ Construct a GraphModule which corresponds to the part which could be
228
+ constant folded in provided gm.
229
+ """
230
+
231
+ constant_graph_tag(gm)
232
+ # We rewrite the tags, if it's a constant being directly consumed, without
233
+ # any folding opportunity, we keep it in main gm.
234
+ for node in gm.graph.nodes:
235
+ if node.op == "get_attr":
236
+ used_to_fold = False
237
+ for u in node.users:
238
+ if u.meta[META_TAG] == CONST_MODULE_TAG:
239
+ used_to_fold = True
240
+ break
241
+ if not used_to_fold:
242
+ node.meta[META_TAG] = MODULE_TAG
243
+
244
+ new_graph = torch.fx.Graph()
245
+
246
+ node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
247
+ output_nodes = []
248
+ for node in gm.graph.nodes:
249
+ if node.meta[META_TAG] == MODULE_TAG:
250
+ continue
251
+
252
+ new_node = new_graph.node_copy(node, lambda x: node_remapping[x])
253
+ node_remapping[node] = new_node
254
+
255
+ for user in node.users:
256
+ if user.meta[META_TAG] == MODULE_TAG:
257
+ output_nodes.append(new_node)
258
+ break
259
+
260
+ new_graph.output(tuple(output_nodes))
261
+ new_graph.lint()
262
+ new_gm = torch.fx.GraphModule(gm, new_graph)
263
+
264
+ return new_gm
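A minimal, illustrative driver for the folder above (the toy module is made up; inside Inductor this path is normally reached through freezing rather than called directly):

import torch
from torch._inductor.constant_folding import constant_fold

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("w", torch.ones(4, 4))

    def forward(self, x):
        return x @ (self.w * 2.0)        # `self.w * 2.0` has only constant inputs

gm = torch.fx.symbolic_trace(M())
constant_fold(gm)                         # the mul is folded into a _frozen_param get_attr
print(gm.graph)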
venv/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py ADDED
@@ -0,0 +1,315 @@
1
+ import copy
2
+ import itertools
3
+ import logging
4
+ from typing import Callable, Optional
5
+
6
+ from torch.utils._triton import has_triton
7
+ from .utils import red_text, triton_config_to_hashable
8
+
9
+ if has_triton():
10
+ import triton
11
+ else:
12
+ triton = None
13
+
14
+ from . import config as inductor_config
15
+
16
+ log = logging.getLogger(__name__)
17
+
18
+
19
+ def get_field(config, name):
20
+ if name == "num_warps":
21
+ return config.num_warps
22
+ elif name == "num_stages":
23
+ return config.num_stages
24
+ else:
25
+ return config.kwargs.get(name, None)
26
+
27
+
28
+ def set_field(config, name, value):
29
+ if name == "num_warps":
30
+ config.num_warps = value
31
+ elif name == "num_stages":
32
+ config.num_stages = value
33
+ else:
34
+ config.kwargs[name] = value
35
+
36
+
37
+ class CoordescTuner:
38
+ """
39
+ The coordinate descent tuner. Tune one field/coordinate at a time.
40
+
41
+ TODO will it be necessary to tune multiple fields simultaneously.
42
+
43
+
44
+ TODO: what if both increasing and decreasing a field can improve perf.
45
+ i.e., there are multiple local optima..
46
+ """
47
+
48
+ def __init__(self, is_mm=False, name="unknown", size_hints=None):
49
+ self.is_mm = is_mm # we will tune num_stages for mm
50
+ self.cached_benchmark_results = {}
51
+ self.name = name
52
+ self.size_hints = size_hints
53
+
54
+ def get_xmax(self):
55
+ xmax = inductor_config.triton.max_block["X"]
56
+ if self.size_hints and len(self.size_hints) > 0:
57
+ xmax = min(xmax, self.size_hints[0])
58
+ return xmax
59
+
60
+ def get_ymax(self):
61
+ ymax = inductor_config.triton.max_block["Y"]
62
+ if self.size_hints and len(self.size_hints) > 1:
63
+ ymax = min(ymax, self.size_hints[1])
64
+ return ymax
65
+
66
+ def get_zmax(self):
67
+ zmax = inductor_config.triton.max_block["Z"]
68
+ if self.size_hints and len(self.size_hints) > 2:
69
+ zmax = min(zmax, self.size_hints[2])
70
+ return zmax
71
+
72
+ def get_rmax(self):
73
+ if self.size_hints and len(self.size_hints) > 0:
74
+ return self.size_hints[-1] # the last one is for reduction
75
+ else:
76
+ # large enough. We should not pick this large RBLOCK anyway
77
+ return 2**30
78
+
79
+ def get_warpsmax(self):
80
+ # Currently, CUDA has a maximum of 1024 threads, so 32 is the max
81
+ # number of warps.
82
+ return 1024 // 32
83
+
84
+ def cache_benchmark_result(self, config, timing):
85
+ self.cached_benchmark_results[triton_config_to_hashable(config)] = timing
86
+
87
+ def lookup_in_cache(self, config):
88
+ return self.cached_benchmark_results.get(triton_config_to_hashable(config))
89
+
90
+ def call_func(self, func, config):
91
+ found = self.lookup_in_cache(config)
92
+ if found is not None:
93
+ log.debug(" CACHED")
94
+ return found
95
+ timing = func(config)
96
+ self.cache_benchmark_result(config, timing)
97
+ return timing
98
+
99
+ @property
100
+ def tunable_fields(self):
101
+ out = [
102
+ "XBLOCK",
103
+ "YBLOCK",
104
+ "ZBLOCK",
105
+ # NOTE: we should not tune RBLOCK for persistent reduction.
106
+ # We rely on the fact that persistent reduction's triton.Config
107
+ # does not have the RBLOCK field to guarantee that.
108
+ "RBLOCK",
109
+ # the following 3 are for mm
110
+ "BLOCK_M",
111
+ "BLOCK_N",
112
+ "BLOCK_K",
113
+ "num_warps",
114
+ ]
115
+ if self.is_mm:
116
+ out.append("num_stages")
117
+
118
+ return out
119
+
120
+ def value_too_large(self, name, val):
121
+ if name == "XBLOCK":
122
+ return val > self.get_xmax()
123
+ if name == "YBLOCK":
124
+ return val > self.get_ymax()
125
+ if name == "ZBLOCK":
126
+ return val > self.get_zmax()
127
+ if name == "RBLOCK":
128
+ return val > self.get_rmax()
129
+ if name == "num_warps":
130
+ return val > self.get_warpsmax()
131
+
132
+ return False
133
+
134
+ def get_neighbour_values(self, name, orig_val, radius=1, include_self=False):
135
+ """
136
+ Get neighbour values in 'radius' steps. The original value is not
137
+ returned as it's own neighbour.
138
+ """
139
+ assert radius >= 1
140
+
141
+ def update(cur_val, inc=True):
142
+ if name == "num_stages":
143
+ if inc:
144
+ return cur_val + 1
145
+ else:
146
+ return cur_val - 1
147
+ else:
148
+ if inc:
149
+ return cur_val * 2
150
+ else:
151
+ return cur_val // 2
152
+
153
+ out = []
154
+ # increment loop
155
+ cur_val = orig_val
156
+ for _ in range(radius):
157
+ cur_val = update(cur_val, True)
158
+ if self.value_too_large(name, cur_val):
159
+ break
160
+ out.append(cur_val)
161
+
162
+ # decrement loop
163
+ cur_val = orig_val
164
+ for _ in range(radius):
165
+ cur_val = update(cur_val, False)
166
+ if cur_val <= 0:
167
+ break
168
+ out.append(cur_val)
169
+
170
+ if include_self:
171
+ out.append(orig_val)
172
+ return out
173
+
174
+ @staticmethod
175
+ def has_improvement(baseline, test):
176
+ threshold = 0.001 # 0.1%
177
+ return test is not None and test < baseline * (1 - threshold)
178
+
179
+ def check_all_tuning_directions(
180
+ self,
181
+ func: Callable[["triton.Config"], float],
182
+ best_config,
183
+ best_timing,
184
+ ):
185
+ """
186
+ Check all directions. We only do this once the regular coordinate
187
+ descent tuning find no better choices any more.
188
+ We only have a few tunable fields, so this should be fine.
189
+ """
190
+ candidate_values_list = []
191
+ effective_fields = []
192
+ for field in self.tunable_fields:
193
+ old_value = get_field(best_config, field)
194
+ if old_value is None:
195
+ continue
196
+ candidate_values = self.get_neighbour_values(
197
+ field,
198
+ old_value,
199
+ radius=inductor_config.coordinate_descent_search_radius,
200
+ include_self=True,
201
+ )
202
+ candidate_values_list.append(candidate_values)
203
+ effective_fields.append(field)
204
+
205
+ choices = itertools.product(*candidate_values_list)
206
+ improved = False
207
+ for choice in choices:
208
+ assert len(choice) == len(effective_fields)
209
+ candidate_config = copy.deepcopy(best_config)
210
+ for new_val, field in zip(choice, effective_fields):
211
+ set_field(candidate_config, field, new_val)
212
+ cmp_res, candidate_timing = self.compare_config(
213
+ func, candidate_config, best_config, best_timing
214
+ )
215
+ if cmp_res:
216
+ improved = True
217
+ best_config = candidate_config
218
+ best_timing = candidate_timing
219
+
220
+ return improved, best_config, best_timing
221
+
222
+ def compare_config(self, func, candidate_config, best_config, best_timing):
223
+ """
224
+ Check if candidate_config is better than best_config.
225
+
226
+ Return a touple of (compare_result, candidate_timing).
227
+ compare_result is true iff candidate_config is better.
228
+ """
229
+ log.debug("Try config %s", candidate_config)
230
+ try:
231
+ candidate_timing = self.call_func(func, candidate_config)
232
+ except Exception as e:
233
+ log.debug("Got exception %s", e)
234
+ return False, float("inf")
235
+
236
+ if self.has_improvement(best_timing, candidate_timing):
237
+ log.debug(
238
+ "Tune from %s %f -> %s %f",
239
+ best_config,
240
+ best_timing,
241
+ candidate_config,
242
+ candidate_timing,
243
+ )
244
+
245
+ return True, candidate_timing
246
+ return False, candidate_timing
247
+
248
+ def autotune(
249
+ self,
250
+ func: Callable[["triton.Config"], float],
251
+ baseline_config: "triton.Config",
252
+ baseline_timing: Optional[float] = None,
253
+ ) -> "triton.Config":
254
+ if baseline_timing is None:
255
+ baseline_timing = self.call_func(func, baseline_config)
256
+
257
+ log.debug("= Do coordinate descent tuning for %s =", self.name)
258
+ log.debug(
259
+ "Baseline Config %s, baseline timing %f", baseline_config, baseline_timing
260
+ )
261
+ improved = True
262
+ best_config = baseline_config
263
+ best_timing = baseline_timing
264
+ tunable_fields = self.tunable_fields
265
+
266
+ while improved:
267
+ improved = False
268
+
269
+ for name in tunable_fields:
270
+ cur_val = get_field(best_config, name)
271
+ # some kernel don't have RBLOCK/YBLOCK/ZBLOCK. So cur_val may be None
272
+ if cur_val is None:
273
+ continue
274
+
275
+ # It's possible that candidate_values is empty.
276
+ # E.g., if XBLOCK is 1 initially and size_hint for x is also 1.
277
+ # We would not try either larger or smaller XBLOCK in this case.
278
+ candidate_values = self.get_neighbour_values(name, cur_val)
279
+
280
+ for next_val in candidate_values:
281
+ candidate_config = copy.deepcopy(best_config)
282
+ set_field(candidate_config, name, next_val)
283
+
284
+ cmp_res, candidate_timing = self.compare_config(
285
+ func, candidate_config, best_config, best_timing
286
+ )
287
+ if cmp_res:
288
+ improved = True
289
+ best_config, best_timing = candidate_config, candidate_timing
290
+
291
+ if not improved and inductor_config.coordinate_descent_check_all_directions:
292
+ old_best_timing = best_timing
293
+ improved, best_config, best_timing = self.check_all_tuning_directions(
294
+ func, best_config, best_timing
295
+ )
296
+
297
+ if improved:
298
+ msg = red_text(
299
+ "Coordinate descend tuning found improvement of %.3fx by looking in all directions."
300
+ )
301
+ log.debug(
302
+ msg,
303
+ old_best_timing / best_timing,
304
+ )
305
+
306
+ log.debug(
307
+ "Improve from %s %f -> %s %f, %.3fx",
308
+ baseline_config,
309
+ baseline_timing,
310
+ best_config,
311
+ best_timing,
312
+ baseline_timing / best_timing,
313
+ )
314
+
315
+ return best_config
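A small sketch of the neighbour-generation logic in isolation, assuming these internal modules import cleanly in your build (no Triton or GPU is needed for this part; the printed results assume the default search radius of 1):

from torch._inductor.coordinate_descent_tuner import CoordescTuner

tuner = CoordescTuner(size_hints=[1024, 64])
# XBLOCK is doubled/halved one step in each direction, capped by max_block and size_hints
print(tuner.get_neighbour_values("XBLOCK", 8))   # [16, 4]
print(tuner.value_too_large("XBLOCK", 2048))     # True, since size_hints[0] == 1024

autotune() itself wraps this in the coordinate-descent loop, benchmarking one triton.Config mutation at a time via compare_config.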
venv/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py ADDED
@@ -0,0 +1,2159 @@
1
+ """
2
+ CUDA graph trees are a safety abstraction over CUDAGraphs, similar to make_graph_callables,
3
+ which share the same memory pool. Sharing a memory pool is an extremely
4
+ important optimization when chaining multiple CUDA graphs together, as it
5
+ prevents you from needing to copy intermediate tensors from one graph to the
6
+ next, and reduces overall memory usage by allowing dead memory from the first
7
+ pool to be reused in the second.
8
+
9
+ The standard graph/make_graph_callables support sharing a memory pool, but
10
+ with a lot of caveats. CUDA graph trees remove these restrictions:
11
+
12
+ * Previously, if you recorded graphs A, B, you had to replay A, B in that
13
+ order. With CUDA graph trees, after replaying A, you can change your
14
+ mind and record/replay a different graph B'; we will support efficient
15
+ execution of both A, B and A, B', using only max(mem(A, B), mem(A, B')). In
16
+ other words: we support arbitrary trees of CUDA graph operations, not just
17
+ sequences (this is why this feature is called CUDA graph trees.)
18
+
19
+ * Previously, if you executed graph A, some non-CUDA graph code, and then
20
+ graph B, after executing graph B, it was not safe to retain any references
21
+ to intermediates produced by A. With CUDA graph trees, we track if any
22
+ outputs of graph A are still live by the time graph B is run, and make
23
+ sure graph B doesn't clobber their memory when reusing the CUDA graphs
24
+ pool. You'll get a separate recording of B depending on what tensors
25
+ stay live or dead.
26
+
27
+ CUDA graph trees are flexible enough to be used in Dynamo across graph breaks,
28
+ which is their primary use case.
29
+
30
+ The ability to switch from replay to record is fairly nontrivial: remember that
31
+ when you replay a CUDA graph, you only replay CUDA operations; no CPU side state
32
+ is updated. In particular, the CPU-side book-keeping for the allocator is not
33
+ reconstructed. However, to record a new child CUDA graph, we must restore this
34
+ book-keeping. This is what checkpoint pool state is used for.
35
+ """
36
+
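+ # Illustrative usage sketch (not part of this module's API; entry-point names are
+ # assumptions about the public torch.compile surface and may differ by version).
+ # CUDA graph trees are normally reached indirectly through Inductor, e.g.:
+ #
+ #     import torch
+ #
+ #     @torch.compile(mode="reduce-overhead")  # assumed to route through cudagraph trees
+ #     def step(x):
+ #         return x.sin() + x.cos()
+ #
+ #     x = torch.randn(8, device="cuda")
+ #     for _ in range(3):
+ #         torch.compiler.cudagraph_mark_step_begin()  # mark a new iteration boundary
+ #         out = step(x)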
37
+ from __future__ import annotations
38
+
39
+ import contextlib
40
+ import dataclasses
41
+ import functools
42
+ import gc
43
+ import itertools
44
+ import operator
45
+ import sys
46
+ import threading
47
+ import traceback
48
+ import warnings
49
+ import weakref
50
+ from collections import defaultdict
51
+
52
+ from enum import auto, Enum
53
+ from typing import (
54
+ Any,
55
+ Callable,
56
+ cast,
57
+ Dict,
58
+ Iterator,
59
+ List,
60
+ Optional,
61
+ Sequence,
62
+ Set,
63
+ Tuple,
64
+ Union,
65
+ )
66
+
67
+ import torch.fx
68
+ from torch import Tensor
69
+ from torch._dynamo.mutation_guard import GenerationTracker
70
+ from torch._dynamo.utils import preserve_rng_state
71
+ from torch._inductor.compile_fx import (
72
+ align_inputs_from_check_idxs,
73
+ copy_misaligned_inputs,
74
+ get_expanded_dims,
75
+ get_input_idxs_to_check,
76
+ index_expanded_dims,
77
+ remove_unaligned_input_idxs,
78
+ static_input,
79
+ )
80
+ from torch.multiprocessing.reductions import StorageWeakRef
81
+ from torch.storage import UntypedStorage
82
+ from torch.types import _bool
83
+ from torch.utils import _pytree as pytree
84
+ from torch.utils.weak import TensorWeakRef
85
+
86
+ StorageWeakRefPointer = int
87
+ StorageDataPtr = int
88
+ NBytes = int
89
+
90
+ if torch.backends.cuda.is_built():
91
+ from torch._C import (
92
+ _cuda_CUDAAllocator_AllocatorState as AllocatorState,
93
+ _set_cached_tensors_enabled as _set_cached_tensors_enabled,
94
+ )
95
+ else:
96
+
97
+ class AllocatorState: # type: ignore[no-redef]
98
+ pass
99
+
100
+ def _set_cached_tensors_enabled(enabled: _bool) -> None:
101
+ pass
102
+
103
+
104
+ log = torch._logging.getArtifactLogger(__name__, "cudagraphs")
105
+
106
+
107
+ from . import config
108
+
109
+
110
+ @dataclasses.dataclass(frozen=True)
111
+ class GraphID:
112
+ "Unique counter of a cuda graph recording"
113
+ id: int
114
+
115
+
116
+ @dataclasses.dataclass(frozen=True)
117
+ class FunctionID:
118
+ "Unique counter of a function wrapped in cudagraphify_impl"
119
+ id: int
120
+
121
+
122
+ @dataclasses.dataclass(frozen=True)
123
+ class WrappedFunction:
124
+ """
125
+ Represents a function that you want to record for CUDA graph replay,
126
+ with a little more metadata so we can identify if we have an applicable
127
+ CUDA graph in our CUDA graph tree for it.
128
+ """
129
+
130
+ model: Callable[..., Any]
131
+ static_input_idxs: Sequence[int]
132
+ id: FunctionID
133
+ constants: Tuple[torch.Tensor, ...]
134
+
135
+
136
+ def clear_cublass_cache():
137
+ """
138
+ Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem for
139
+ doing warmup within a CUDAGraph private pool because we do not want persistent allocations from
140
+ one run to the next. When we begin a new run of a cudagraphs path (generation), all tensors
141
+ from the previous generation are freed. This frees them from the memory pool, but not elsewhere.
142
+ A tensor in the cublas workspace would continue to be in use in the workspace but would also get allocated
143
+ in the next run. The memory would be in use in two places.
144
+
145
+ To solve this, we clear cublas caches before and after warming up or recording. If a workspace is required
146
+ it will be allocated to the cudagraph private pool and accounted for in the allocator for the duration of the
147
+ program. There is no overhead to this on replay since cudagraphs removes allocation overhead.
148
+ """
149
+ torch._C._cuda_clearCublasWorkspaces()
150
+
151
+
152
+ @contextlib.contextmanager
153
+ def clear_cublas_manager():
154
+ "Context manager around clearing cublas caches that will clear on enter and exit"
155
+ clear_cublass_cache()
156
+ try:
157
+ yield
158
+ finally:
159
+ clear_cublass_cache()
160
+
161
+
162
+ @contextlib.contextmanager
163
+ def disable_conv_cache_emptying():
164
+ prev = torch._C._cuda_get_conv_benchmark_empty_cache()
165
+ torch._C._cudnn_set_conv_benchmark_empty_cache(False)
166
+ try:
167
+ yield
168
+ finally:
169
+ torch._C._cudnn_set_conv_benchmark_empty_cache(prev)
170
+
171
+
172
+ @contextlib.contextmanager
173
+ def enable_history_recording():
174
+ "Turns on history recording in the CUDA Caching Allocator"
175
+ enabled = torch._C._cuda_isHistoryEnabled()
176
+ try:
177
+ if not enabled:
178
+ torch.cuda.memory._record_memory_history()
179
+ yield
180
+ finally:
181
+ if not enabled:
182
+ torch.cuda.memory._record_memory_history(None)
183
+
184
+
185
+ def get_history_recording():
186
+ # TODO - remove, prevents cleanup
187
+ if not config.triton.cudagraph_trees_history_recording:
188
+ return contextlib.nullcontext()
189
+ return enable_history_recording()
190
+
191
+
192
+ class TreeManagerContainer:
193
+ """
194
+ Manages the lifetime of the tree manager. Like `PrivatePool` in cuda caching allocator,
195
+ the tree and its corresponding memory pool should be kept alive as long as any outstanding
196
+ graph or tensor which is an output of a graph remains alive.
197
+
198
+ There is a single tree manager container per device.
199
+
200
+ The lifecycle of a tree_manager is:
201
+ - Is constructed, no graph, no fns, no tensors
202
+ - Tree manager is fetched, resulting in tree manager being allocated
203
+ - We generate a bunch of functions, calling add_strong_reference
204
+ - These functions die, each triggering finalize_cudagraphify_fn
205
+ - When all the functions die, we finalize_tree_manager.
206
+
207
+ TODO: in the future, we would like to do the following once storage weak refs land
208
+ - We look for all the live storages and add references to THOSE
209
+ - We count as storages die
210
+ - All the storages are dead, we deallocate the tree manager
211
+ """
212
+
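+ # Minimal sketch of the finalizer-based refcounting this container relies on
+ # (hypothetical names, shown only to clarify the lifecycle described above):
+ #
+ #     import weakref
+ #
+ #     class _Container:
+ #         def __init__(self):
+ #             self.live_fns = 0
+ #             self.manager = object()        # stand-in for CUDAGraphTreeManager
+ #
+ #         def add(self, fn):
+ #             self.live_fns += 1
+ #             weakref.finalize(fn, self._on_fn_dead)   # fires when fn is garbage collected
+ #
+ #         def _on_fn_dead(self):
+ #             self.live_fns -= 1
+ #             if self.live_fns == 0:
+ #                 self.manager = None        # drop the tree and let its pool be freed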
213
+ def __init__(self, device_index):
214
+ # This class keeps a strong reference to tree_manager,
215
+ # but once all other strong references to the tree_manager are released, it resets it to None.
216
+ # We need a strong reference so that we can still access its attributes upon cleanup.
217
+ self.tree_manager: Optional[CUDAGraphTreeManager] = None
218
+
219
+ # Number of outstanding references to the current tree manager
220
+ self.live_cudagraphify_fns = 0
221
+
222
+ self.device_index = device_index
223
+
224
+ # Following two objects are only set in the case that Tensor outputs outlive
225
+ # the cudagraphify_fns. Reference to the Graph is needed to keep the private pool from
226
+ # deallocation.
227
+ self.live_storages_count = 0
228
+ self.graph: Optional[torch.cuda.CUDAGraph] = None
229
+
230
+ self.lock = threading.Lock()
231
+
232
+ def _finalize_tensor(self):
233
+ with self.lock:
234
+ self.live_storages_count -= 1
235
+ if self.live_storages_count == 0:
236
+ self.graph = None
237
+
238
+ # manager was used again after existing cleanup,
239
+ # we shouldn't set it to None
240
+ if self.live_cudagraphify_fns == 0:
241
+ self.tree_manager = None
242
+
243
+ def finalize_cudagraphify_fn(self):
244
+ with self.lock:
245
+ self.live_cudagraphify_fns -= 1
246
+ if self.live_cudagraphify_fns == 0:
247
+ self._finalize_tree_manager()
248
+
249
+ def _finalize_tree_manager(self):
250
+ assert self.lock.locked()
251
+ self.tree_manager = None
252
+
253
+ # TODO - when issue #91395 is landed, we can set a weakref on
254
+ # storages and trigger a deallocation when all outputs of the
255
+ # cudagraph are dead.
256
+
257
+ # live_storages = list(
258
+ # tree_manager.live_cudagraph_pool_storages_in_curr_execution()
259
+ # )
260
+
261
+ # # Maintain reference to graph to keep tensors alive
262
+ # assert len(tree_manager.roots) > 0, "expected at least one use"
263
+ # root = next(tree_manager.get_roots())
264
+ # self.graph = root.graph
265
+ # seen_storages = set()
266
+ # for stor in live_storages:
267
+ # if stor in seen_storages:
268
+ # continue
269
+ # seen_storages.add(stor)
270
+ # self.live_storages_count += 1
271
+ # . weakref.finalize(stor, self._finalize_tensor)
272
+
273
+ def add_strong_reference(self, fn: Callable[..., Any]):
274
+ with self.lock:
275
+ self.live_cudagraphify_fns += 1
276
+
277
+ weakref.finalize(fn, self.finalize_cudagraphify_fn)
278
+
279
+ def get_tree_manager(self) -> CUDAGraphTreeManager:
280
+ with self.lock:
281
+ if self.tree_manager is None:
282
+ self.tree_manager = CUDAGraphTreeManager(self.device_index)
283
+ return self.tree_manager
284
+
285
+
286
+ local = threading.local()
287
+
288
+ # one tree manager per device
289
+ local.tree_manager_containers = {}
290
+ local.tree_manager_locks = defaultdict(threading.Lock)
291
+
292
+
293
+ # only incremented by user call of mark_step_begin
294
+ class MarkStepBox:
295
+ mark_step_counter = 0
296
+
297
+
298
+ # We need to register this as an object that will be copied over as TLS when new
299
+ # threads are created in autograd
300
+ torch._C._stash_obj_in_tls("tree_manager_containers", local.tree_manager_containers)
301
+ torch._C._stash_obj_in_tls("tree_manager_locks", local.tree_manager_locks)
302
+
303
+
304
+ def mark_step_begin():
305
+ "Indicates that a new iteration of inference or training is about to begin."
306
+
307
+ # iterate down to distinguish from GenerationTracking counter
308
+ MarkStepBox.mark_step_counter -= 1
309
+
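+ # Hedged usage note: user code typically reaches this via the public wrapper
+ # torch.compiler.cudagraph_mark_step_begin() (name assumed from the public API).
+ # Calling it between iterations tells the tree manager that outputs of the prior
+ # iteration may now be overwritten, e.g.:
+ #
+ #     y1 = compiled_model(inp)                     # outputs live in the cudagraph pool
+ #     torch.compiler.cudagraph_mark_step_begin()   # begin a new generation
+ #     y2 = compiled_model(inp)                     # prior generation's memory may be reused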
310
+
311
+ def reset_cudagraph_trees():
312
+ "Clear all cudagraph trees"
313
+ # see shutdown below for why this is necessary
314
+ container_dict = get_obj(local, "tree_manager_containers")
315
+ locks_dict = get_obj(local, "tree_manager_locks")
316
+ for device, lock in locks_dict.items():
317
+ with lock:
318
+ container = container_dict.get(device)
319
+ if not container or not container.tree_manager:
320
+ continue
321
+
322
+ container.tree_manager.shutdown()
323
+
324
+ _set_cached_tensors_enabled(False)
325
+ container_dict.clear()
326
+
327
+ MarkStepBox.mark_step_counter = 0
328
+
329
+
330
+ def get_obj(local, attr_name):
331
+ if hasattr(local, attr_name):
332
+ return getattr(local, attr_name)
333
+ else:
334
+ assert torch._C._is_key_in_tls(attr_name)
335
+ return torch._C._get_obj_in_tls(attr_name)
336
+
337
+
338
+ def get_container(device_index: int):
339
+ container_dict = get_obj(local, "tree_manager_containers")
340
+ lock = get_obj(local, "tree_manager_locks")[device_index]
341
+
342
+ with lock:
343
+ if device_index not in container_dict:
344
+ container_dict[device_index] = TreeManagerContainer(device_index)
345
+
346
+ return container_dict[device_index]
347
+
348
+
349
+ def get_manager(
350
+ device_index: int, create_if_none_exists=True
351
+ ) -> Optional[CUDAGraphTreeManager]:
352
+ if create_if_none_exists:
353
+ return get_container(device_index).get_tree_manager()
354
+ return get_container(device_index).tree_manager
355
+
356
+
357
+ def cudagraphify_impl(model, inputs, static_input_idxs, *args, **kwargs):
358
+ fn_cache: Dict[Tuple[int, ...], Callable[..., Any]] = {}
359
+
360
+ # Detect int inputs: we need to index on these
361
+ int_key = [i for i, v in enumerate(inputs) if isinstance(v, int)]
362
+ get_ints: Any = operator.itemgetter(*int_key) if int_key else lambda _: None
363
+
364
+ del inputs
365
+
366
+ def deferred_cudagraphify(inputs):
367
+ int_key = get_ints(inputs)
368
+ fn = fn_cache.get(int_key)
369
+ if fn is not None:
370
+ return fn(inputs)
371
+
372
+ if int_key is None:
373
+ log.info("recording cudagraph tree for graph without symints")
374
+ else:
375
+ log.info("recording cudagraph tree for symint key %s", int_key)
376
+
377
+ # first get indices we need to check to align, then update our static inputs,
378
+ # and finally copy
379
+ check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
380
+ new_static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
381
+ copy_misaligned_inputs(inputs, check_input_idxs)
382
+
383
+ fn, out = cudagraphify(model, inputs, new_static_input_idxs, *args, **kwargs)
384
+ fn = align_inputs_from_check_idxs(fn, inputs_to_check=check_input_idxs)
385
+ fn_cache[int_key] = fn
386
+
387
+ return out
388
+
389
+ return deferred_cudagraphify
390
+
391
+
392
+ def cudagraphify(
393
+ model,
394
+ inputs,
395
+ static_input_idxs=(),
396
+ *,
397
+ device_index: int,
398
+ is_backward: bool,
399
+ is_inference: bool,
400
+ stack_traces: Optional[StackTraces] = None,
401
+ constants: Tuple[torch.Tensor, ...] = (),
402
+ ):
403
+ manager = get_container(device_index).get_tree_manager()
404
+ assert not (is_backward and is_inference)
405
+ mode = (
406
+ CompilationMode.BACKWARD
407
+ if is_backward
408
+ else (CompilationMode.INFERENCE if is_inference else CompilationMode.FORWARD)
409
+ )
410
+
411
+ return manager.add_function(
412
+ model,
413
+ inputs,
414
+ static_input_idxs,
415
+ stack_traces,
416
+ mode,
417
+ constants,
418
+ )
419
+
420
+
421
+ class StorageWeakRefWrapper:
422
+ """
423
+ Wrapper around a storage weak ref. Will deallocate it upon expiration if invoked.
424
+ """
425
+
426
+ __slots__ = ["ref", "_data_ptr", "extra_ref_check"]
427
+
428
+ storage_ref: Optional[StorageWeakRef]
429
+
430
+ def __init__(
431
+ self,
432
+ inp: Union[Tensor, UntypedStorage],
433
+ extra_ref_check: Optional[Callable[[], None]] = None,
434
+ ):
435
+ """
436
+ extra_ref_check is an additional check we need to run to check if the
437
+ weak ref has expired. in checking storage use count we assume extra_ref_check
438
+ will hold an additional reference to the storage.
439
+ """
440
+ if isinstance(inp, Tensor):
441
+ stor = inp.untyped_storage()
442
+ else:
443
+ assert isinstance(inp, UntypedStorage)
444
+ stor = inp
445
+ self.ref = StorageWeakRef(stor)
446
+ self._data_ptr = stor.data_ptr()
447
+ self.extra_ref_check = extra_ref_check
448
+
449
+ @classmethod
450
+ def from_weakref_and_data_ptr(cls, cdata, data_ptr, extra_ref_check=None):
451
+ instance = cls.__new__(cls)
452
+ instance._data_ptr = data_ptr
453
+ instance.ref = StorageWeakRef.from_weakref(cdata)
454
+ instance.extra_ref_check = extra_ref_check
455
+ return instance
456
+
457
+ def __call__(self) -> Optional[StorageWeakRefPointer]:
458
+ if self.expired():
459
+ return None
460
+
461
+ return self.ref.cdata
462
+
463
+ def swap_weakref(self, cdata):
464
+ self.ref.__del__()
465
+ self.ref.cdata = cdata
466
+
467
+ def data_ptr(self) -> int:
468
+ "NB: returns the data ptr even if the storage has expired"
469
+ return self._data_ptr
470
+
471
+ def remove_extra_reference(self):
472
+ self.extra_ref_check = None
473
+
474
+ def expired(self):
475
+ if self.extra_ref_check is not None and not self.extra_ref_check():
476
+ return False
477
+
478
+ # if extra_ref_check is not None we expect an additional reference
479
+ stor_count = torch._C._storage_Use_Count(self.ref.cdata)
480
+ return (stor_count - (self.extra_ref_check is not None)) == 0
481
+
482
+ def __repr__(self):
483
+ if self.ref is None or self.ref.expired():
484
+ return f"StorageWeakRefWrapper to {self.data_ptr()}; dead"
485
+ else:
486
+ return f"StorageWeakRefWrapper to {self.data_ptr()}; alive"
487
+
488
+
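+ # Rough illustration of the wrapper's lifecycle (a sketch, assuming a CUDA build):
+ #
+ #     t = torch.empty(4, device="cuda")
+ #     ref = StorageWeakRefWrapper(t)
+ #     assert not ref.expired()        # storage still has a live owner
+ #     ptr = ref.data_ptr()            # data_ptr stays readable even after expiry
+ #     del t                           # once no tensor/storage owns the memory...
+ #     # ...ref.expired() becomes True and ref() returns None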
489
+ def is_live(weak_ref: Optional[StorageWeakRefWrapper]) -> bool:
490
+ return maybe_deref(weak_ref) is not None
491
+
492
+
493
+ def maybe_deref(
494
+ weak_ref: Optional[StorageWeakRefWrapper],
495
+ ) -> Optional[Tuple[StorageWeakRefPointer, int]]:
496
+ if weak_ref is None:
497
+ return None
498
+ r = weak_ref()
499
+ if r is None:
500
+ return None
501
+ # NB: r.data_ptr() does not necessarily equal weak_ref.data_ptr()
502
+ return r, weak_ref.data_ptr()
503
+
504
+
505
+ @contextlib.contextmanager
506
+ def _use_cuda_memory_pool_manager(device, mem_pool, stream):
507
+ """
508
+ Context manager to use cuda graph pool for new allocations. If you use this manager
509
+ all cudagraph tensors in use should be reflected in the allocator or they will be overwritten.
510
+ existing_graph should already have been used in a capture, and the mem_pool must already exist,
511
+ because this manager will not preserve a reference to the pool which keeps it alive.
512
+ """
513
+ torch.cuda.synchronize()
514
+ stream.wait_stream(torch.cuda.current_stream())
515
+
516
+ with torch.cuda.stream(stream), torch.device(device):
517
+ torch._C._cuda_beginAllocateCurrentStreamToPool(device, mem_pool)
518
+ try:
519
+ yield
520
+ finally:
521
+ torch._C._cuda_endAllocateCurrentStreamToPool(device, mem_pool)
522
+ torch._C._cuda_releasePool(device, mem_pool)
523
+
524
+ torch.cuda.current_stream().wait_stream(stream)
525
+
526
+
527
+ def map_to_ref(t: Optional[Tensor]) -> Optional[StorageWeakRefWrapper]:
528
+ if not isinstance(t, torch.Tensor):
529
+ assert t is None
530
+ return None
531
+ return StorageWeakRefWrapper(t)
532
+
533
+
534
+ # A path index of (depth, offset) indices into a graph that is `depth` nodes from the root
535
+ # at graph output offset
536
+ PathOutputIndex = Tuple[int, int]
537
+
538
+ # For each node in the path, for each output, is the output alive
539
+ PathLiveness = List[List[bool]]
540
+
541
+ StackTraces = List[Optional[str]]
542
+
543
+
544
+ class CUDAWarmupNode:
545
+ """
546
+ Simplified wrapper around a CUDA model that wraps outputs in storage refs and exposes
547
+ APIs to get the live storages in the current chain of warmup.
548
+
549
+ A CUDAWarmupNode may have either CUDAGraphNode or CUDAWarmupNode as a parent, but may only have
550
+ CUDAWarmupNode as children, because we cannot record or execute with tensors which do not have stable
551
+ memory addresses.
552
+
553
+ CUDAWarmupNode and CUDAGraphNode have a number of differences that make it easier to use separate classes.
554
+ - Much of the CUDAGraphNode logic & initialization is based on the tensor properties of first recording. In the
555
+ first instance of warmup, these are not finalized yet.
556
+ - All inputs to the RecordedFunction must be copied over to the cuda graph memory pool; this is unnecessary in warmup.
557
+ - CUDAWarmup is only used once and so does not need to optimize as much bookkeeping. It is much simpler.
558
+
559
+ NB: this class and CUDAGraphNode need to expose `path_live_weakrefs`, `all_outputs_are_dead`, and
560
+ `self.outputs_weakrefs`, `stack_traces`, and `tensor_weakrefs` for compatibility.
561
+ """
562
+
563
+ def __init__(
564
+ self,
565
+ wrapped_function: WrappedFunction,
566
+ parent,
567
+ cuda_graphs_pool: Tuple[int, int],
568
+ existing_cuda_graph: Optional[torch.cuda.CUDAGraph],
569
+ device_index: int,
570
+ stack_traces: Optional[StackTraces],
571
+ stream: torch.cuda.Stream,
572
+ already_warm: bool,
573
+ ):
574
+ self.wrapped_function = wrapped_function
575
+ self.parent = parent
576
+ self.cuda_graphs_pool = cuda_graphs_pool
577
+ self.outputs_weakrefs: List[Optional[StorageWeakRefWrapper]] = []
578
+ self.tensor_weakrefs: List[Optional[TensorWeakRef]] = []
579
+ self.existing_cuda_graph = existing_cuda_graph
580
+ self.has_run = False
581
+ self.device_index = device_index
582
+ self.stack_traces = stack_traces
583
+ self.stream = stream
584
+ self.already_warm = already_warm
585
+
586
+ def run(self, new_inputs):
587
+ assert not self.has_run, "Wrapped function should never be run twice"
588
+
589
+ # See: output_is_alias_of_persistent_static_inputs below. We should only be returning freshly created
590
+ # storages in path_live_weakrefs.
591
+ existing_path_data_ptrs = {
592
+ t.data_ptr() for t in self.path_live_weakrefs() if t()
593
+ }
594
+
595
+ def get_non_cudagraph_inps():
596
+ non_cudagraph_inps = set()
597
+ for t in itertools.chain(new_inputs, self.wrapped_function.constants):
598
+ if (
599
+ isinstance(t, torch.Tensor)
600
+ and t.untyped_storage().data_ptr() not in existing_path_data_ptrs
601
+ ):
602
+ non_cudagraph_inps.add(t.untyped_storage().data_ptr())
603
+ return non_cudagraph_inps
604
+
605
+ non_cudagraph_inps = get_non_cudagraph_inps()
606
+
607
+ if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
608
+ refs = list(self.path_live_weakrefs())
609
+ check_memory_pool(self.device_index, self.cuda_graphs_pool, refs)
610
+
611
+ with torch.cuda.device(
612
+ self.device_index
613
+ ), disable_conv_cache_emptying(), clear_cublas_manager(), _use_cuda_memory_pool_manager(
614
+ self.device_index, self.cuda_graphs_pool, self.stream
615
+ ), get_history_recording():
616
+ out = self.wrapped_function.model(new_inputs)
617
+
618
+ assert len(new_inputs) == 0
619
+
620
+ # sdpa returns cpu tensors when not recording cuda graph
621
+ def add_ref(o):
622
+ return (
623
+ o is not None
624
+ and isinstance(o, torch.Tensor)
625
+ and o.is_cuda
626
+ and o.untyped_storage().data_ptr() not in non_cudagraph_inps
627
+ and o.untyped_storage().data_ptr() != 0
628
+ )
629
+
630
+ self.outputs_weakrefs.extend(
631
+ [map_to_ref(o) if add_ref(o) else None for o in out]
632
+ )
633
+ self.tensor_weakrefs.extend(
634
+ [TensorWeakRef(o) if add_ref(o) else None for o in out]
635
+ )
636
+
637
+ if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
638
+ out_refs = self.path_live_weakrefs()
639
+ new_storages = [
640
+ t for t in out_refs if t.data_ptr() not in non_cudagraph_inps
641
+ ]
642
+ check_memory_pool(self.device_index, self.cuda_graphs_pool, new_storages)
643
+
644
+ return out
645
+
646
+ @property
647
+ def _path_from_root(self):
648
+ nodes = []
649
+ node = self
650
+ while node:
651
+ nodes.append(node)
652
+ node = node.parent
653
+
654
+ yield from reversed(nodes)
655
+
656
+ def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
657
+ "Returns all live storages weakrefs that created by nodes in this path"
658
+ for node in self._path_from_root:
659
+ for output in node.outputs_weakrefs:
660
+ if is_live(output):
661
+ yield output
662
+
663
+ def all_outputs_are_dead(self):
664
+ return not list(self.path_live_weakrefs())
665
+
666
+
667
+ # Aliases for List that say what the indices denote
668
+ InputList = List # input indexes
669
+ OutputList = List # output indexes
670
+ LevelList = List # levels (distance from root of tree)
671
+
672
+
673
+ class OutputAliasInfo:
674
+ pass
675
+
676
+
677
+ class _UnaliasedStorage(OutputAliasInfo):
678
+ "Singleton to mark that the graph output constructs a new alias or is None"
679
+ pass
680
+
681
+
682
+ UnaliasedStorage = _UnaliasedStorage()
683
+
684
+
685
+ class AliasesPriorGraphOutput(OutputAliasInfo):
686
+ "Marks that the graph output aliases an output of a prior graph"
687
+ __slots__ = ["index"]
688
+
689
+ index: PathOutputIndex
690
+
691
+ def __init__(self, index: PathOutputIndex):
692
+ assert isinstance(index, tuple)
693
+ self.index = index
694
+
695
+
696
+ class AliasesNewOutput(OutputAliasInfo):
697
+ "Marks that the graph output aliases an index in the new, returned outputs"
698
+
699
+ __slots__ = ["index"]
700
+
701
+ index: int
702
+
703
+ def __init__(self, index):
704
+ assert isinstance(index, int)
705
+ self.index = index
706
+
707
+
708
+ class CUDAGraphNode:
709
+ """
710
+ A single recording of a function into a CUDA Graph. Recordings of CUDA Graphs share a single memory pool
711
+ and are structured into a tree, where there is a single recording that can precede it (parent) and multiple
712
+ subsequent recordings that may follow (children). A node will have no parent if it is the first recording
713
+ in a tree; i.e., when it is first recorded, there are no live tensors from a previous recording which
714
+ would force a dependency.
715
+
716
+ On first recording, all of the live tensors in the current CUDA Graph Node path will be
717
+ reflected in the corresponding private pool. On subsequent executions, the caching allocator
718
+ is unaffected when the graph is replayed.
719
+
720
+ In order to support recording a subsequent cuda graph recording after execution of this graph,
721
+ we checkpoint the state of the memory pool so that it may later be resumed.
722
+
723
+ WrappedFunction should have already been warmed up prior to invocation.
724
+
725
+ See [setCheckpointPoolState] for further explanation, as well as
726
+ https://user-images.githubusercontent.com/13564/222815509-374f3400-f83d-4f7d-8fa6-4a092b3250bb.png
727
+ """
728
+
729
+ def __init__(
730
+ self,
731
+ wrapped_function: WrappedFunction,
732
+ id: GraphID,
733
+ parent: Optional[CUDAGraphNode],
734
+ inputs: List[Tensor],
735
+ cuda_graphs_pool: Tuple[int, int],
736
+ device_index: int,
737
+ stack_traces: Optional[StackTraces],
738
+ stream: torch.cuda.Stream,
739
+ ):
740
+ assert isinstance(inputs, (list, tuple))
741
+
742
+ self.wrapped_function = wrapped_function
743
+ self.id = id
744
+ self.device = device_index
745
+ self.stack_traces = stack_traces
746
+ self.stream = stream
747
+
748
+ # if this is a root parent will be None. use weakref to prevent reference cycle
749
+ self._parent = weakref.ref(parent) if parent is not None else None
750
+ # reference to the shared memory pool for the entire cuda graphs tree
751
+ self.cuda_graphs_pool = cuda_graphs_pool
752
+
753
+ # A single wrapped function may be recorded multiple times if memory patterns or
754
+ # invariants change from one execution to the next
755
+ self.children: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
756
+
757
+ # StorageWeakRef maintains whether the Storage C++ object remains allocated,
758
+ # not whether the corresponding memory has been deallocated. In order
759
+ # to use them to track memory deallocations we must maintain a single StorageWeakRef
760
+ # for all Storages that reference that memory (even if we are constructing Storages
761
+ # that do not have a deallocator function). We maintain one single storage_cache
762
+ # as we execute any tree path. When we retrieve a storage from the cache we
763
+ # check that it is still alive, and we hash based on observed recording data ptr
764
+ # and storage cdata.
765
+
766
+ # we preserve a single reference to executed outputs that is then referenced
767
+ # in children to avoid children having to chase parent pointers in the hot path
768
+ # DO NOT reassign output_weakrefs, only call `clear()`
769
+ # Path is a series of nodes from root to the current node
770
+ self.outputs_weakrefs: OutputList[Optional[StorageWeakRefWrapper]] = []
771
+ self.path_weakrefs: LevelList[OutputList[Optional[StorageWeakRefWrapper]]] = [
772
+ node.outputs_weakrefs for node in self._path_from_root
773
+ ]
774
+ self.path_stacktraces: LevelList[StackTraces] = [
775
+ node.stack_traces for node in self._path_from_root
776
+ ]
777
+ self.tensor_weakrefs: OutputList[Optional[TensorWeakRef]] = []
778
+
779
+ # tensors which are outputs of previous graphs in the tree
780
+ self.cudagraph_managed_idxs: List[int] = [
781
+ idx
782
+ for idx, t in enumerate(inputs)
783
+ if isinstance(t, torch.Tensor) and self._is_cuda_graph_recorded_tensor(t)
784
+ ]
785
+
786
+ self.static_input_idxs: List[int] = list(
787
+ set(wrapped_function.static_input_idxs) | set(self.cudagraph_managed_idxs)
788
+ )
789
+
790
+ self.static_input_data_ptrs: InputList[Optional[int]] = [
791
+ (
792
+ inputs[i].data_ptr()
793
+ if isinstance(inputs[i], torch.Tensor) and i in self.static_input_idxs
794
+ else None
795
+ )
796
+ for i in range(len(inputs))
797
+ ]
798
+
799
+ # When we checkpoint, and free generations, we will be manually freeing the outputs
800
+ # of CUDAGraphNodes. We should not be freeing parameters, nor do we need to account for
801
+ # their liveness (they are static), so we need to compute which outputs are aliases of
802
+ # parameters. Some static inputs are saved tensors from the forward that die in the backward.
803
+ # Their locations are static but lifetimes are not. We only include the persistent static
804
+ # data ptrs below because the non persistent data ptrs may be outputs of this record and
805
+ # fresh allocations.
806
+
807
+ # precompute expanded dims to avoid computing in the hot path
808
+ self.expanded_dims: List[List[int]] = [
809
+ get_expanded_dims(x)
810
+ if isinstance(x, torch.Tensor) and idx not in self.static_input_idxs
811
+ else []
812
+ for idx, x in enumerate(inputs)
813
+ ]
814
+
815
+ # For each node in path, which outputs were observed to be live
816
+ # before invoking graph recording, and after graph recording
817
+ self.recorded_liveness_before_graph: LevelList[OutputList[bool]] = []
818
+ self.recorded_liveness_after_graph: LevelList[OutputList[bool]] = []
819
+
820
+ # List of Tuples of (depth, output_index) that index into node at depth
821
+ # number of nodes from root and output_index of outputs. Will index into
822
+ # path_weakrefs.
823
+ self.expected_dead_indices_before_graph: List[PathOutputIndex] = []
824
+ self.expected_dead_indices_after_graph: List[PathOutputIndex] = []
825
+
826
+ # all live indices after graph recording
827
+ self.live_indices_after_graph: List[PathOutputIndex] = []
828
+
829
+ if self.parent is not None:
830
+ previous_liveness = self.parent.recorded_liveness_after_graph
831
+ curr_liveness = self._get_liveness(self.path_weakrefs)
832
+
833
+ different_indices = self._get_different_indices(
834
+ previous_liveness, curr_liveness
835
+ )
836
+
837
+ self.recorded_liveness_before_graph = curr_liveness
838
+ self.expected_dead_indices_before_graph = different_indices
839
+
840
+ recording_inputs = self._allocate_and_copy_recording_inputs(inputs)
841
+ # recording inputs will copy over memory, so we can free non recording inputs
842
+ inputs.clear()
843
+ del inputs
844
+
845
+ # graph used for recording model invocation
846
+ self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
847
+
848
+ # we allocate non-static inputs within the same memory pool as the CUDAGraph
849
+ # which we will record the model with. For memory efficiency, it is important
850
+ # to reclaim the input memory when the inputs are no longer live. To accomplish this,
851
+ # we reconstruct tensors at the correct data pointers of our inputs which are
852
+ # non owning and do not prevent deallocation. On subsequent executions, input values
853
+ # will be copied over to these tensors.
854
+ self.reconstructed_inputs: InputList[Union[Tensor, int]] = [
855
+ self._reconstruct_from_tensor_metadata(self._tensor_metadata(x))
856
+ if isinstance(x, torch.Tensor)
857
+ else x
858
+ for x in recording_inputs
859
+ ]
860
+
861
+ # DO THE RECORDING!!!
862
+ # We record the CUDA graph in the constructor of CUDAGraphNode, which
863
+ # gives you what the CPU side compute of the function would do. We
864
+ # don't throw the recording outputs away: their memory is
865
+ # correctly accounted for in the CUDAGraphs caching allocator. This
866
+ # means on the very FIRST run of the CUDA graph node, we can directly
867
+ # do more recording, because we have a valid caching allocator state.
868
+ # NB: This relies on run() being called immediately after the
869
+ # constructor, otherwise this optimization would not be valid.
870
+
871
+ # initialized below in _record
872
+
873
+ self.checkpointed_caching_state: Optional[AllocatorState] = None
874
+
875
+ # Output Storage Alias information, can be:
876
+ # - A new, unaliased storage, or the output is None
877
+ # - An alias of an output of a prior graph
878
+ # - An alias of an output already created in the reconstructed outputs
879
+ # This is None if the output in question is an int
880
+ self.output_storage_alias: OutputList[Optional[OutputAliasInfo]] = []
881
+
882
+ # is the output Storage unaliased in subsequent outputs, of all subsequent paths
883
+ # if it is, we cached the output tensor and adjust storage liveness tracking to also
884
+ # check if the output tensor does not have an additional python reference.
885
+ # If a descendent node discovers it has an alias of a prior output, then the output
886
+ # will no longer be cached in the ancestor.
887
+ # The large majority of tensors are unaliased, and preserving aliased output tensors would add
888
+ # significant additional complexity with marginal gains
889
+ # The cached tensor outputs are added on the first execution, and cleared whenever we need
890
+ # to do subsequent recording
891
+ self.unaliased_in_all_paths: OutputList[bool] = []
892
+ self.cached_tensor_outputs: OutputList[Optional[Tensor]] = []
893
+
894
+ # if an output aliases a static, persistent input then the corresponding Tensor will
895
+ # be set here. These are different than cached tensors, because they are tensors that
896
+ # are aliases of parameters that are always live.
897
+ self.static_output_tensors: OutputList[Optional[Tensor]] = []
898
+
899
+ # Cleared after recording
900
+ self.recording_outputs: Optional[
901
+ OutputList[Union[torch.Tensor, int]]
902
+ ] = self._record(wrapped_function.model, recording_inputs)
903
+ self.outputs_metadata: OutputList[Union[Dict[str, Any], int, None]] = []
904
+
905
+ # As with inputs, we do not want to keep the outputs permanently alive because that would prevent
906
+ # their memory being reclaimed in subsequent cuda graph recordings. We record the tensor metadata
907
+ # needed to reconstruct instead.
908
+ assert self.recording_outputs is not None
909
+ for out in self.recording_outputs:
910
+ if isinstance(out, torch.Tensor):
911
+ self.outputs_metadata.append(
912
+ self._tensor_metadata(out, ignore_storage_offset=False)
913
+ )
914
+ else:
915
+ assert isinstance(out, (int, type(None))), type(out)
916
+ self.outputs_metadata.append(out)
917
+
918
+ self.graph.replay()
919
+
920
+ def _copy_input(self, idx, dst, src):
921
+ expanded_dims = self.expanded_dims[idx]
922
+ dst = index_expanded_dims(dst, expanded_dims)
923
+ src = index_expanded_dims(src, expanded_dims)
924
+ # TODO - one jit kernel across multiple inputs
925
+ dst.copy_(src)
926
+
927
+ def run_first_inputs(self, new_inputs):
928
+ if config.triton.fast_path_cudagraph_asserts:
929
+ self.debug_check_invariants_before_invocation()
930
+
931
+ # graph is already invoked in the __init__
932
+ # inputs are copied over in _allocate_and_copy_recording_inputs and subsequently cleared
933
+ assert len(new_inputs) == 0
934
+ outputs = self.recording_outputs
935
+ self.recording_outputs = None
936
+ return outputs
937
+
938
+ def run(self, new_inputs):
939
+ if config.triton.fast_path_cudagraph_asserts:
940
+ self.debug_check_invariants_before_invocation()
941
+
942
+ assert len(self.static_input_data_ptrs) == len(new_inputs)
943
+ # NB: this ranges over non-static inputs too
944
+ for idx, data_ptr in enumerate(self.static_input_data_ptrs):
945
+ if idx in self.cudagraph_managed_idxs:
946
+ continue
947
+ if not isinstance(new_inputs[idx], torch.Tensor):
948
+ pass
949
+ elif data_ptr is not None:
950
+ # static input, e.g., parameter
951
+ assert data_ptr == new_inputs[idx].data_ptr()
952
+ else:
953
+ # non-static input, need to copy it into CUDA graph
954
+ dst = self.reconstructed_inputs[idx]
955
+ src = new_inputs[idx]
956
+ self._copy_input(idx, dst, src)
957
+
958
+ new_inputs.clear()
959
+ self.run_graph()
960
+
961
+ outputs = self.reconstruct_outputs()
962
+ self.debug_check_invariants_after_invocation()
963
+
964
+ return outputs
965
+
966
+ def reconstruct_outputs(self):
967
+ "Reconstruct output tensors according to their saved metadata and alias information"
968
+
969
+ # Cached tensors will not yet be set on the first execution
970
+ # They are also cleared in checkpointing, so if we checkpoint this node
971
+ # and then execute it again we will need to repopulate cached tensors
972
+ if not self.cached_tensor_outputs:
973
+ self._initialize_cached_tensors()
974
+
975
+ outputs: List[Optional[Union[int, torch.Tensor]]] = []
976
+
977
+ for i, (storage_info, metadata) in enumerate(
978
+ zip(self.output_storage_alias, self.outputs_metadata)
979
+ ):
980
+ if not isinstance(metadata, dict): # tensor metadata
981
+ assert isinstance(metadata, (int, type(None)))
982
+ outputs.append(metadata)
983
+ continue
984
+
985
+ cached_t = self.cached_tensor_outputs[i]
986
+ if cached_t is not None:
987
+ # No need to update weakrefs, already correctly initialized
988
+ outputs.append(cached_t)
989
+ continue
990
+
991
+ static_t = self.static_output_tensors[i]
992
+ if static_t is not None:
993
+ assert self.outputs_weakrefs[i] is None
994
+ outputs.append(static_t)
995
+ continue
996
+
997
+ storage = self.prepare_alias_info_for_tensor_construction(
998
+ storage_info, metadata
999
+ )
1000
+
1001
+ if isinstance(storage, UntypedStorage) or storage is None:
1002
+ out = self._reconstruct_from_tensor_metadata(metadata, storage)
1003
+ else:
1004
+ assert isinstance(storage, int)
1005
+ out = self._reconstruct_from_tensor_metadata(
1006
+ metadata, cast(torch.Tensor, outputs[storage]).untyped_storage()
1007
+ )
1008
+
1009
+ outputs.append(out)
1010
+ w = self.outputs_weakrefs[i]
1011
+ assert w is not None
1012
+ w.swap_weakref(out.untyped_storage()._weak_ref())
1013
+
1014
+ return outputs
1015
+
1016
+ def prepare_alias_info_for_tensor_construction(
1017
+ self,
1018
+ out_alias_info: Optional[OutputAliasInfo],
1019
+ metadata: Union[Dict[str, Any], int, None],
1020
+ ) -> Union[UntypedStorage, None, int]:
1021
+ if (
1022
+ isinstance(metadata, (int, type(None)))
1023
+ or out_alias_info is UnaliasedStorage
1024
+ ):
1025
+ return None
1026
+
1027
+ if isinstance(out_alias_info, AliasesPriorGraphOutput):
1028
+ depth, existing_output_index = out_alias_info.index
1029
+ ref = self.path_weakrefs[depth][existing_output_index]
1030
+ assert ref is not None
1031
+ return torch.UntypedStorage._new_with_weak_ptr(ref())
1032
+
1033
+ assert isinstance(out_alias_info, AliasesNewOutput)
1034
+ return out_alias_info.index
1035
+
1036
+ def prepare_storages_for_construction(
1037
+ self,
1038
+ ) -> List[Union[UntypedStorage, None, int]]:
1039
+ output_storages = []
1040
+ for output_storage_alias, metadata in zip(
1041
+ self.output_storage_alias, self.outputs_metadata
1042
+ ):
1043
+ output_storages.append(
1044
+ self.prepare_alias_info_for_tensor_construction(
1045
+ output_storage_alias, metadata
1046
+ )
1047
+ )
1048
+
1049
+ return output_storages
1050
+
1051
+ def run_graph(self):
1052
+ assert self.graph is not None
1053
+ self.graph.replay()
1054
+
1055
+ def all_outputs_are_dead(self):
1056
+ "All outputs of the path from this node to its root are dead"
1057
+ for depth, output_index in self.live_indices_after_graph:
1058
+ if is_live(self.path_weakrefs[depth][output_index]):
1059
+ return False
1060
+ return True
1061
+
1062
+ def _record(self, model, inputs):
1063
+ "Record the model"
1064
+
1065
+ def static_input_iter():
1066
+ for i in self.wrapped_function.static_input_idxs:
1067
+ if isinstance(
1068
+ inputs[i], torch.Tensor
1069
+ ) and not self._is_cuda_graph_recorded_tensor(inputs[i]):
1070
+ yield inputs[i]
1071
+
1072
+ # see: output_is_alias_of_persistent_static_inputs above
1073
+ static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper] = {
1074
+ inp.untyped_storage().data_ptr(): StorageWeakRefWrapper(inp)
1075
+ for inp in itertools.chain(
1076
+ static_input_iter(), self.wrapped_function.constants
1077
+ )
1078
+ }
1079
+
1080
+ if config.triton.slow_path_cudagraph_asserts:
1081
+ # need to use parent live weakrefs because live_indices isn't set yet
1082
+ memory = (
1083
+ [] if self.parent is None else list(self.parent.path_live_weakrefs())
1084
+ )
1085
+ memory += [
1086
+ StorageWeakRefWrapper(elem)
1087
+ for i, elem in enumerate(inputs)
1088
+ if isinstance(elem, torch.Tensor)
1089
+ and i not in self.wrapped_function.static_input_idxs
1090
+ and elem.untyped_storage().data_ptr() != 0
1091
+ ]
1092
+ check_memory_pool(self.device, self.cuda_graphs_pool, memory)
1093
+
1094
+ with preserve_rng_state(), torch.cuda.device(
1095
+ self.device
1096
+ ), clear_cublas_manager(), torch.cuda.graph(
1097
+ self.graph,
1098
+ stream=self.stream,
1099
+ pool=self.cuda_graphs_pool,
1100
+ capture_error_mode="thread_local",
1101
+ ), get_history_recording():
1102
+ static_outputs = model(inputs)
1103
+
1104
+ # running model should reclaim memory
1105
+ assert len(inputs) == 0
1106
+
1107
+ if not isinstance(static_outputs, (list, tuple)):
1108
+ static_outputs = (static_outputs,)
1109
+
1110
+ self._add_first_outputs(static_outputs, static_input_persistent_storage_ptrs)
1111
+
1112
+ return static_outputs
1113
+
1114
+ def _add_first_outputs(
1115
+ self,
1116
+ outputs,
1117
+ static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper],
1118
+ ):
1119
+ "Add the outputs from the first invocation of the node and set up metadata"
1120
+
1121
+ # getting liveness before we have added the outputs to path, so the length
1122
+ # of the two lists is equal
1123
+ prev_liveness = self.recorded_liveness_before_graph
1124
+ curr_liveness = self._get_liveness(self.path_weakrefs)
1125
+
1126
+ delta = self._get_different_indices(prev_liveness, curr_liveness)
1127
+ self.expected_dead_indices_after_graph = delta
1128
+
1129
+ assert len(self.outputs_weakrefs) == 0
1130
+ # index from data pointer to index in outputs
1131
+ output_new_storages_index: Dict[StorageDataPtr, int] = {}
1132
+
1133
+ self.unaliased_in_all_paths = [False for _ in range(len(outputs))]
1134
+ self.static_output_tensors = [None for _ in range(len(outputs))]
1135
+
1136
+ for i, o in enumerate(outputs):
1137
+ if o is None or not isinstance(o, torch.Tensor):
1138
+ self.output_storage_alias.append(UnaliasedStorage)
1139
+ continue
1140
+
1141
+ torch._check(
1142
+ o.is_cuda or o.untyped_storage().data_ptr() == 0,
1143
+ lambda: (
1144
+ "Expected all cuda outputs in cuda graph recording. Non cuda output "
1145
+ f"from {self.stack_traces[i] if self.stack_traces else '(unknown)'}"
1146
+ ),
1147
+ ),
1148
+
1149
+ ref = static_input_persistent_storage_ptrs.get(
1150
+ o.untyped_storage().data_ptr(), None
1151
+ )
1152
+ # also treat empty storages as static outputs because we do not need to manage their lifetime
1153
+ # and they should not participate in checkpointing
1154
+ is_empty_storage = o.untyped_storage().data_ptr() == 0
1155
+ if (ref and ref() is not None) or is_empty_storage:
1156
+ self.output_storage_alias.append(None)
1157
+ self.static_output_tensors[i] = o
1158
+ continue
1159
+
1160
+ path_ref = self._is_alias_of_live_recorded_tensor(o)
1161
+ if path_ref is not None:
1162
+ self._mark_prior_graph_output_as_aliased(path_ref)
1163
+ self.output_storage_alias.append(AliasesPriorGraphOutput(path_ref))
1164
+ continue
1165
+
1166
+ if o.untyped_storage().data_ptr() in output_new_storages_index:
1167
+ index = output_new_storages_index[o.untyped_storage().data_ptr()]
1168
+ self.unaliased_in_all_paths[index] = False
1169
+ self.output_storage_alias.append(AliasesNewOutput(index))
1170
+ continue
1171
+
1172
+ output_new_storages_index[o.untyped_storage().data_ptr()] = i
1173
+ self.output_storage_alias.append(UnaliasedStorage)
1174
+ self.unaliased_in_all_paths[i] = True
1175
+
1176
+ if self.stack_traces is None:
1177
+ self.stack_traces = [None for _ in range(len(outputs))]
1178
+ else:
1179
+ assert len(self.stack_traces) == len(
1180
+ outputs
1181
+ ), "Wrong number of stack traces passed in"
1182
+
1183
+ assert not self.outputs_weakrefs
1184
+ for out, static_output_tensor in zip(outputs, self.static_output_tensors):
1185
+ if not isinstance(out, torch.Tensor) or static_output_tensor is not None:
1186
+ self.outputs_weakrefs.append(None)
1187
+ self.tensor_weakrefs.append(None)
1188
+ else:
1189
+ self.outputs_weakrefs.append(StorageWeakRefWrapper(out))
1190
+ self.tensor_weakrefs.append(TensorWeakRef(out))
1191
+
1192
+ self.recorded_liveness_after_graph = self._get_liveness(self.path_weakrefs)
1193
+ self.checkpointed_caching_state = torch._C._cuda_getCheckpointState(
1194
+ self.device, self.cuda_graphs_pool
1195
+ )
1196
+
1197
+ # now, get liveness with outputs added
1198
+ for depth in range(len(self.path_weakrefs)):
1199
+ for output_index in range(len(self.path_weakrefs[depth])):
1200
+ if is_live(self.path_weakrefs[depth][output_index]):
1201
+ self.live_indices_after_graph.append((depth, output_index))
1202
+
1203
+ self.debug_check_invariants_after_invocation()
1204
+ if config.triton.slow_path_cudagraph_asserts:
1205
+ check_memory_pool(
1206
+ self.device, self.cuda_graphs_pool, list(self.path_live_weakrefs())
1207
+ )
1208
+
1209
+ def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex):
1210
+ "Remove a graph output from the unaliased, cached tensors in an ancestor node"
1211
+ depth, output_index = index
1212
+ node = list(self._path_from_root)[depth]
1213
+ node.unaliased_in_all_paths[output_index] = False
1214
+ x = self.path_weakrefs[depth][output_index]
1215
+ assert x is not None
1216
+ x.remove_extra_reference()
1217
+
1218
+ def _initialize_cached_tensors(self):
1219
+ # we should not be clearing output_weakrefs, and they should be set in the first
1220
+ # record run
1221
+ assert len(self.outputs_weakrefs) == len(self.outputs_metadata)
1222
+
1223
+ for i, (storage_info, metadata, make_cached) in enumerate(
1224
+ zip(
1225
+ self.output_storage_alias,
1226
+ self.outputs_metadata,
1227
+ self.unaliased_in_all_paths,
1228
+ )
1229
+ ):
1230
+ if not make_cached:
1231
+ self.cached_tensor_outputs.append(None)
1232
+ continue
1233
+
1234
+ assert storage_info is UnaliasedStorage
1235
+ assert isinstance(metadata, dict)
1236
+ s = self.create_storage(metadata)
1237
+ out = self._reconstruct_from_tensor_metadata(metadata, storage=s)
1238
+
1239
+ # XXX: let autograd know that there will be an additional reference to the tensor
1240
+ # that can be ignored when deciding whether to do gradient buffer inplacing.
1241
+ # Otherwise, inplacing could differ between tracing and subsequent execution.
1242
+ # For some models we tested this led to inputs no longer being in cudagraph pools,
1243
+ # leading to spurious re-recordings.
1244
+ # It also tells the AMP cache that these tensor impls cannot be cached
1245
+ # in dtype conversions.
1246
+
1247
+ torch._C._add_cached_tensor(out)
1248
+
1249
+ self_ref = weakref.ref(self)
1250
+
1251
+ # one reference in our array, and calling sys.getrefcount bumps the refcount by one
1252
+ def check_refcount(i):
1253
+ self_loc = self_ref()
1254
+ if self_loc is None:
1255
+ return False
1256
+ return self_loc.get_output_refcount(i) == 2
1257
+
1258
+ check = functools.partial(check_refcount, i=i)
1259
+
1260
+ self.outputs_weakrefs[i] = StorageWeakRefWrapper(out, extra_ref_check=check)
1261
+ self.cached_tensor_outputs.append(out)
1262
+
1263
+ def get_output_refcount(self, index):
1264
+ return sys.getrefcount(self.cached_tensor_outputs[index])
1265
+
1266
+ @property
1267
+ def parent(self):
1268
+ "unwraps the weakref to _parent"
1269
+ return self._parent() if self._parent is not None else None
1270
+
1271
+ @property
1272
+ def _path_to_root(self):
1273
+ "Returns all nodes in the path starting at self and ending at root"
1274
+ node = self
1275
+ while node:
1276
+ yield node
1277
+ node = node.parent
1278
+
1279
+ @property
1280
+ def _path_from_root(self):
1281
+ "Returns all nodes in the path starting at the root and ending at self"
1282
+ nodes = reversed(list(self._path_to_root))
1283
+ yield from nodes
1284
+
1285
+ def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor):
1286
+ "Is this tensor an output of a node in this path"
1287
+ for output_refs in self.path_weakrefs:
1288
+ for storage_weak_ref in output_refs:
1289
+ if storage_weak_ref is None:
1290
+ continue
1291
+ # don't need to check liveness of storage since the cuda graph managed
1292
+ # memory is never released.
1293
+ data_ptr = storage_weak_ref.data_ptr()
1294
+ if t.untyped_storage().data_ptr() == data_ptr:
1295
+ return True
1296
+
1297
+ return False
1298
+
1299
+ def _is_alias_of_live_recorded_tensor(
1300
+ self, t: torch.Tensor
1301
+ ) -> Optional[PathOutputIndex]:
1302
+ for depth, output_refs in enumerate(self.path_weakrefs):
1303
+ for output_index, storage_ref in enumerate(output_refs):
1304
+ if (storage_and_ptr := maybe_deref(storage_ref)) is not None:
1305
+ storage, ptr = storage_and_ptr
1306
+ if ptr == t.untyped_storage().data_ptr():
1307
+ return (depth, output_index)
1308
+
1309
+ return None
1310
+
1311
+ @staticmethod
1312
+ def _check_liveness(
1313
+ indices: List[PathOutputIndex],
1314
+ output_refs: List[List[Optional[StorageWeakRefWrapper]]],
1315
+ ):
1316
+ "Check that all of the indices specified are dead references"
1317
+ for depth, output_index in indices:
1318
+ w = output_refs[depth][output_index]
1319
+ assert w is not None
1320
+ if w() is not None:
1321
+ return False
1322
+ return True
1323
+
1324
+ def add_child(self, function_id: FunctionID, node: CUDAGraphNode):
1325
+ "Adds node as a a child of self"
1326
+ self.children[function_id].append(node)
1327
+
1328
+ @staticmethod
1329
+ def _get_different_indices(
1330
+ prev: List[List[bool]], curr: List[List[bool]]
1331
+ ) -> List[PathOutputIndex]:
1332
+ "Find indices where the two lists differ."
1333
+ dead_indices = []
1334
+ assert len(prev) <= len(curr)
1335
+ for i, (outputs1, outputs2) in enumerate(zip(prev, curr)):
1336
+ assert len(outputs1) == len(outputs2)
1337
+ for j, (output1, output2) in enumerate(zip(outputs1, outputs2)):
1338
+ if output1 != output2:
1339
+ dead_indices.append((i, j))
1340
+
1341
+ return dead_indices
1342
+
1343
+ @staticmethod
1344
+ def _get_liveness(
1345
+ weakrefs: List[List[Optional[StorageWeakRefWrapper]]],
1346
+ ) -> List[List[bool]]:
1347
+ "Maps weakrefs to true if the reference is alive and false otherwise"
1348
+ if len(weakrefs) == 0:
1349
+ return []
1350
+
1351
+ return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]
1352
+
1353
+ def debug_assert_invariants(
1354
+ self, expected_liveness: List[List[bool]], newly_dead: List[PathOutputIndex]
1355
+ ):
1356
+ if not config.triton.fast_path_cudagraph_asserts:
1357
+ return
1358
+
1359
+ for i, node in enumerate(self._path_from_root):
1360
+ assert self.path_weakrefs[i] is node.outputs_weakrefs
1361
+
1362
+ nodes = list(self._path_from_root)
1363
+
1364
+ live_blocks = get_block_addrs(self.cuda_graphs_pool)
1365
+
1366
+ live_storage_data_ptrs = set()
1367
+ live_storage_weak_ptrs = set()
1368
+
1369
+ for depth, outputs_liveness in enumerate(expected_liveness):
1370
+ for output_idx, output_liveness in enumerate(outputs_liveness):
1371
+ # tensor can die early, but it can't be alive when it should be dead
1372
+ w = self.path_weakrefs[depth][output_idx]
1373
+ if (stor_weak_ptr_and_data_ptr := maybe_deref(w)) is not None:
1374
+ assert output_liveness
1375
+ stor_weak_ptr, stor_data_ptr = stor_weak_ptr_and_data_ptr
1376
+ assert (stor_data_ptr in live_storage_data_ptrs) == (
1377
+ stor_weak_ptr in live_storage_weak_ptrs
1378
+ )
1379
+ live_storage_data_ptrs.add(stor_data_ptr)
1380
+ live_storage_weak_ptrs.add(stor_weak_ptr)
1381
+
1382
+ is_persistent_alias = (
1383
+ nodes[depth].static_output_tensors[output_idx] is not None
1384
+ )
1385
+
1386
+ if is_persistent_alias:
1387
+ assert stor_data_ptr not in live_blocks
1388
+
1389
+ for depth, output_index in newly_dead:
1390
+ assert not is_live(self.path_weakrefs[depth][output_index])
1391
+
1392
+ def debug_check_invariants_before_invocation(self):
1393
+ self.debug_assert_invariants(
1394
+ self.recorded_liveness_before_graph, self.expected_dead_indices_before_graph
1395
+ )
1396
+
1397
+ def debug_check_invariants_after_invocation(self):
1398
+ self.debug_assert_invariants(
1399
+ self.recorded_liveness_before_graph, self.expected_dead_indices_after_graph
1400
+ )
1401
+
1402
+ def data_ptrs_dead_since_invocation(self) -> List[int]:
1403
+ """
1404
+ Since this node was invoked, return data ptrs of all tensor outputs that have died
1405
+ in the current executing tree path.
1406
+ """
1407
+ curr_liveness = self._get_liveness(self.path_weakrefs)
1408
+ _get_different_indices = self._get_different_indices(
1409
+ self.recorded_liveness_after_graph, curr_liveness
1410
+ )
1411
+
1412
+ path = list(self._path_from_root)
1413
+ ptrs_to_deallocate = []
1414
+ for depth, output_index in _get_different_indices:
1415
+ ptrs_to_deallocate.append(
1416
+ path[depth].outputs_metadata[output_index]["data_ptr"]
1417
+ )
1418
+
1419
+ return ptrs_to_deallocate
1420
+
1421
+ def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
1422
+ for i, j in self.live_indices_after_graph:
1423
+ out = self.path_weakrefs[i][j]
1424
+ if out is not None and is_live(out):
1425
+ yield out
1426
+
1427
+ def remove_node_cached_tensors(self):
1428
+ for t in self.cached_tensor_outputs:
1429
+ if t is not None:
1430
+ torch._C._remove_cached_tensor(t)
1431
+ self.cached_tensor_outputs.clear()
1432
+
1433
+ for i, unaliased in enumerate(self.unaliased_in_all_paths):
1434
+ if unaliased:
1435
+ n = self.outputs_weakrefs[i]
1436
+ assert n is not None
1437
+ n.remove_extra_reference()
1438
+
1439
+ def remove_path_cached_tensors(self):
1440
+ for node in self._path_from_root:
1441
+ node.remove_node_cached_tensors()
1442
+
1443
+ def clear_path_state(self):
1444
+ "Clear the path state in this current executing node"
1445
+ # this doesn't actually do anything right now, leaving it as a placeholder
1446
+ pass
1447
+
1448
+ @staticmethod
1449
+ def _tensor_metadata(x, ignore_storage_offset=True):
1450
+ assert isinstance(x, torch.Tensor)
1451
+ # We ignore the storage offset for inputs, but not for outputs
1452
+ # TODO: - should we make the storage resizable ?
1453
+ return {
1454
+ "nbytes": x.untyped_storage().nbytes(),
1455
+ "data_ptr": x.untyped_storage().data_ptr(),
1456
+ "size": x.shape,
1457
+ "stride": x.stride(),
1458
+ "dtype": x.dtype,
1459
+ "device": x.device,
1460
+ "storage_offset": x.storage_offset() if not ignore_storage_offset else 0,
1461
+ }
1462
+
1463
+ def _reconstruct_from_tensor_metadata(
1464
+ self, metadata: Dict[str, Any], storage=None
1465
+ ) -> Tensor:
1466
+ s = self.create_storage(metadata) if storage is None else storage
1467
+ return torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata(metadata, s)
1468
+
1469
+ def create_storage(self, metadata):
1470
+ return torch._C._construct_storage_from_data_pointer(
1471
+ metadata["data_ptr"], metadata["device"], metadata["nbytes"]
1472
+ )
1473
+
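The metadata dict produced by `_tensor_metadata` records the geometry needed to re-view a tensor over an existing allocation; the reconstruction above goes through private `torch._C` hooks. A hedged public-API analogue of the same idea, using `as_strided` over a base tensor's storage:

import torch

def capture_metadata(x: torch.Tensor) -> dict:
    return {
        "size": tuple(x.shape),
        "stride": x.stride(),
        "dtype": x.dtype,
        "device": x.device,
        "storage_offset": x.storage_offset(),
    }

def reconstruct_alias(base: torch.Tensor, meta: dict) -> torch.Tensor:
    # re-create a view with the recorded geometry over base's storage
    return base.as_strided(meta["size"], meta["stride"], meta["storage_offset"])

t = torch.arange(12.0).reshape(3, 4)
view = t[:, 1:3]
alias = reconstruct_alias(t, capture_metadata(view))
assert alias.data_ptr() == view.data_ptr() and torch.equal(alias, view)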
1474
+ def _allocate_and_copy_recording_inputs(
1475
+ self, inputs
1476
+ ) -> List[Union[torch.Tensor, int]]:
1477
+ """
1478
+ Allocate inputs for non-static, non-cudagraph-managed tensors in the memory pool
1479
+ and copy over the tensor values.
1480
+ """
1481
+
1482
+ torch.cuda.synchronize()
1483
+ self.stream.wait_stream(torch.cuda.current_stream())
1484
+ recording_inputs: List[Union[Tensor, int]] = []
1485
+
1486
+ with warnings.catch_warnings(record=True), torch.cuda.device(
1487
+ self.device
1488
+ ), _use_cuda_memory_pool_manager(
1489
+ self.device,
1490
+ mem_pool=self.cuda_graphs_pool,
1491
+ stream=self.stream,
1492
+ ):
1493
+ for i, inp in enumerate(inputs):
1494
+ if not isinstance(inp, torch.Tensor):
1495
+ assert isinstance(inp, int)
1496
+ recording_inputs.append(inp)
1497
+ elif i not in self.static_input_idxs:
1498
+ # static_input does an allocation!
1499
+ recording_inputs.append(static_input(inp))
1500
+ # copy over and clear non recording input
1501
+ self._copy_input(i, recording_inputs[-1], inp)
1502
+ inputs[i] = None
1503
+ del inp
1504
+ else:
1505
+ recording_inputs.append(inp)
1506
+
1507
+ return recording_inputs
1508
+
1509
+ def check_invariants(self, inputs: List[Tensor]) -> bool:
1510
+ """
1511
+ Checks if this node can be run. The same pattern of tensor liveness and tensors
1512
+ managed in the cudagraph private pool must remain stable.
1513
+ """
1514
+
1515
+ # previously managed data pointers remain stable
1516
+ for idx in self.cudagraph_managed_idxs:
1517
+ if inputs[idx].data_ptr() != self.static_input_data_ptrs[idx]:
1518
+ return False
1519
+
1520
+ if not self._check_liveness(
1521
+ self.expected_dead_indices_before_graph, self.path_weakrefs
1522
+ ):
1523
+ return False
1524
+
1525
+ # the cudagraph managed tensors which died upon recording must also die upon
1526
+ # this invocation. it is too late to check after we've replayed the graph,
1527
+ # because we would have already written over their memory.
1528
+ for idx in self.cudagraph_managed_idxs:
1529
+ inputs[idx] = None # type: ignore[call-overload]
1530
+
1531
+ torch._check(
1532
+ self._check_liveness(
1533
+ self.expected_dead_indices_after_graph, self.path_weakrefs
1534
+ ),
1535
+ lambda: "TODO: graph recording observed an input tensor deallocate during graph "
1536
+ " recording that did not occur during replay. Please file an issue.",
1537
+ )
1538
+ return True
1539
+
1540
+ def num_descendants(self) -> int:
1541
+ "Total number of descendents of this node"
1542
+ num_desc = 0
1543
+ for children in self.children.values():
1544
+ for child in children:
1545
+ num_desc += 1
1546
+ num_desc += child.num_descendants()
1547
+ return num_desc
1548
+
1549
+
1550
+ def get_cudagraph_segments(pool_id):
1551
+ segments = torch.cuda.memory_snapshot()
1552
+ return [segment for segment in segments if segment["segment_pool_id"] == pool_id]
1553
+
1554
+
1555
+ def get_block_addrs(pool_id, live_only=True):
1556
+ blocks = []
1557
+
1558
+ for segment in get_cudagraph_segments(pool_id):
1559
+ addr = segment["address"]
1560
+ for block in segment["blocks"]:
1561
+ if block["state"] == "active_allocated" or not live_only:
1562
+ blocks.append(addr)
1563
+
1564
+ addr += block["size"]
1565
+
1566
+ return blocks
1567
+
1568
+
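`get_cudagraph_segments` and `get_block_addrs` walk the structure returned by `torch.cuda.memory_snapshot()`: a list of segments, each carrying an `address` and a list of `blocks` with a `size` and `state`. A hedged sketch that aggregates active bytes per pool, guarded so it is a no-op without CUDA:

import collections

import torch

def active_bytes_per_pool():
    totals = collections.Counter()
    if not torch.cuda.is_available():
        return totals
    for segment in torch.cuda.memory_snapshot():
        for block in segment["blocks"]:
            if block["state"] == "active_allocated":
                totals[segment["segment_pool_id"]] += block["size"]
    return totals

print(active_bytes_per_pool())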
1569
+ def format_tb(frames):
1570
+ formatted_traceback = []
1571
+
1572
+ for entry in frames:
1573
+ formatted_traceback.append(
1574
+ traceback.FrameSummary(entry["filename"], entry["line"], entry["name"])
1575
+ )
1576
+
1577
+ return "".join(traceback.format_list(formatted_traceback))
1578
+
1579
+
1580
+ def check_memory_pool(device, pool_id, live_storages_ptrs: List[StorageWeakRefWrapper]):
1581
+ assert all(
1582
+ isinstance(elem, StorageWeakRefWrapper) for elem in live_storages_ptrs
1583
+ ) # noqa: C419
1584
+ unique_storages = {stor.data_ptr() for stor in live_storages_ptrs if stor()}
1585
+
1586
+ # check if there is a divergence first, then do the expensive snapshot call after
1587
+ # we know it will error
1588
+ if torch._C._cuda_checkPoolLiveAllocations(device, pool_id, unique_storages):
1589
+ return
1590
+
1591
+ # at this point we are past the fast-path. we have seen rare cases where a dead tensor is dead,
1592
+ # but hasn't been gc'd yet, and gives false positive for allocated_not_in_live_storages
1593
+ gc.collect()
1594
+
1595
+ segments = get_cudagraph_segments(pool_id)
1596
+
1597
+ allocated_not_in_live_storages = {}
1598
+
1599
+ for segment in segments:
1600
+ addr = segment["address"]
1601
+ for block in segment["blocks"]:
1602
+ if block["state"] == "active_allocated":
1603
+ if addr not in unique_storages:
1604
+ allocated_not_in_live_storages[addr] = block
1605
+ else:
1606
+ unique_storages.remove(addr)
1607
+
1608
+ addr += block["size"]
1609
+
1610
+ torch._check(
1611
+ len(unique_storages) == 0,
1612
+ lambda: f"These storage data ptrs are not allocated in pool {pool_id} but should be {unique_storages}",
1613
+ )
1614
+
1615
+ if len(allocated_not_in_live_storages) != 0:
1616
+ formatted = []
1617
+ for dp, block in allocated_not_in_live_storages.items():
1618
+ trace = format_tb(block.get("frames", []))
1619
+ formatted.append(f"Data Pointer: {dp}, history: \n{trace}")
1620
+ formatted_s = "\n".join(formatted)
1621
+ msg = (
1622
+ f"These live storage data ptrs are in the cudagraph pool but not "
1623
+ f"accounted for as an output of cudagraph trees: \n\n{formatted_s}"
1624
+ )
1625
+ raise RuntimeError(msg)
1626
+
1627
+
1628
+ class ExecutionState(Enum):
1629
+ """
1630
+ Represents the state of the CUDAGraph Tree. Will be None if there is no live current memory allocated
1631
+ in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.
1632
+ """
1633
+
1634
+ NONE = auto()
1635
+ WARMUP = auto()
1636
+ RECORDING = auto()
1637
+ EXECUTION = auto()
1638
+
1639
+
1640
+ class CompilationMode(Enum):
1641
+ FORWARD = auto()
1642
+ BACKWARD = auto()
1643
+ INFERENCE = auto()
1644
+
1645
+
1646
+ class CUDAGraphTreeManager:
1647
+ """
1648
+ Groups individual recordings or executions of cuda graphs into a tree of recordings,
1649
+ checks required invariants, and manages warmups of graphs.
1650
+
1651
+ When graphs are recorded in the same tree, it enforces subsequent execution
1652
+ to follow the same order and have the same output tensor livespans. To remove
1653
+ unnecessary coupling of cuda graphs (and additional imposed invariants),
1654
+ the tree manager will end a currently recording tree whenever it is valid - when
1655
+ the memory pool no longer has any live allocations.
1656
+
1657
+ We ignore outputs from a previous generation that correspond to prior model outputs.
1658
+ Currently this is hardcoded to `GenerationTracker.generation` tracked in torch dynamo.
1659
+ # TODO: make generation increment configurable, warn on overwrite.
1660
+
1661
+ We run graph warmups in the cudagraph memory pool and return the result on the first invocation
1662
+ of a function. For many models it is important to reclaim activations as you run the backward.
1663
+ If we were to warm up the model and keep an extra copy of the inputs around to subsequently
1664
+ use for recording, we would incur a memory penalty. Additionally, if we are part way through training
1665
+ your model and need to recompile, memory will be allocated to the cuda graph pool, so we run this
1666
+ warmup run in the cuda graph memory pool. As for recording, warm up needs the state of live tensors
1667
+ to be accurately reflected so we checkpoint the allocator state if we need to warm up following graph
1668
+ replay.
1669
+ """
1670
+
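In user code this machinery is reached through `torch.compile`'s "reduce-overhead" mode; `torch.compiler.cudagraph_mark_step_begin()` (referenced in the warnings below) starts a new generation so earlier outputs may be overwritten. A hedged usage sketch, assuming a CUDA build with Triton available:

import torch

if torch.cuda.is_available():
    model = torch.nn.Linear(16, 16).cuda()
    compiled = torch.compile(model, mode="reduce-overhead")
    x = torch.randn(8, 16, device="cuda")
    for _ in range(3):
        # mark a step boundary so the tree manager can begin a new generation
        torch.compiler.cudagraph_mark_step_begin()
        out = compiled(x)
    print(out.shape)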
1671
+ def __init__(self, device_index: int):
1672
+ # roots are functions which have no dependencies on another node. I.e.,
1673
+ # when they are first invoked, none of their inputs are outputs
1674
+ # of another node, nor are there any live outputs of another node whose
1675
+ # liveness would create a dependency.
1676
+ self.roots: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
1677
+
1678
+ # mapping from function id to wrapped function
1679
+ self.ids_to_funcs: Dict[FunctionID, WrappedFunction] = {}
1680
+
1681
+ self.ids_to_stack_traces: Dict[FunctionID, StackTraces] = {}
1682
+
1683
+ self.warmed_up_functions: Set[FunctionID] = set()
1684
+ # if we fail to increment generation, and are stuck warming up,
1685
+ # only warn on each function once
1686
+ self.warned_functions: Set[FunctionID] = set()
1687
+ torch._C._set_cached_tensors_enabled(True)
1688
+
1689
+ # NB: cuda caching allocator will remember the stream a segment is allocated to
1690
+ # and only allocate that segment to the same stream. we need to use a single stream
1691
+ # for all allocations to the memory pool, otherwise the allocations to separate streams
1692
+ # will not be reused; separate recordings would have used the same memory pool, but not
1693
+ # the same memory.
1694
+
1695
+ with torch.cuda.device(device_index):
1696
+ torch.cuda.synchronize()
1697
+ self.stream = torch.cuda.Stream()
1698
+ self.stream.wait_stream(torch.cuda.current_stream())
1699
+
1700
+ # Keeps Memory Pool Alive
1701
+ self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
1702
+ self.cuda_graphs_thread_pool = torch.cuda.graph_pool_handle()
1703
+
1704
+ with warnings.catch_warnings(record=True), torch.cuda.graph(
1705
+ self.graph,
1706
+ pool=self.cuda_graphs_thread_pool,
1707
+ stream=self.stream,
1708
+ capture_error_mode="thread_local",
1709
+ ):
1710
+ pass
1711
+
1712
+ self.graph_counter = itertools.count(0)
1713
+ self.func_counter = itertools.count(0)
1714
+
1715
+ # whether the current node is in a state of warmup, recording, or execution. If
1716
+ # there is no current node the state will be ExecutionState.NONE.
1717
+ self.path_state = ExecutionState.NONE
1718
+ self.device_index = device_index
1719
+
1720
+ # the most recently invoked cudagraph wrapping of a function. Will be None
1721
+ # when there is no output from a previous recording or execution whose memory
1722
+ # we need to respect in the cuda caching allocator. If you incremented the generation,
1723
+ # this will also be None, as we ignore those allocations.
1724
+ self.current_node: Optional[CUDAGraphNode] = None
1725
+
1726
+ # current generation of cudagraph invocations. when torch.compile is run
1727
+ # we increment the current generation. We are willing to ignore live outputs
1728
+ # of a previous generation in checking liveness.
1729
+ self.current_gen: int = -1
1730
+
1731
+ # number of instances we are in execution and failed to match to an
1732
+ # existing child
1733
+ self.debug_fail_counter = 0
1734
+ # number of instances we had to checkpoint the function
1735
+ self.debug_checkpointing_counter = 0
1736
+
1737
+ self.id_to_mode: Dict[FunctionID, CompilationMode] = {}
1738
+
1739
+ # Note: [Backward Generation Handling]
1740
+ # We generally perform a sequence of forward executions followed by backward executions.
1741
+ # If multiple torch.compile wrapped forwards are executed with their backwards pending,
1742
+ # we should not disregard the outputs from a prior torch.compile since the entire training
1743
+ # loop hasn't completed. Occasionally, a backward pass corresponding to a forward pass may
1744
+ # not be executed, so we cannot wait for all pending backwards to have been
1745
+ # invoked before starting a new generation. Instead we wait for a single backward
1746
+ # invocation. Triggering a backward pass typically doesn't lead to another torch.compile
1747
+ # invocation, making it less likely for the generation to increase between multiple
1748
+ # backward calls. The following use case is covered by this approach:
1749
+ # mod1 = torch.compile(...)
1750
+ # mod2 = torch.compile(...)
1751
+ # mod2(mod1(x)).sum().backward()
1752
+
1753
+ self.running_forwards_with_pending_backwards = False
1754
+
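A runnable, hedged version of the use case sketched in the note above (two compiled modules whose backwards are still pending when the second forward runs); it assumes a CUDA build with Triton available:

import torch

if torch.cuda.is_available():
    mod1 = torch.compile(torch.nn.Linear(8, 8).cuda(), mode="reduce-overhead")
    mod2 = torch.compile(torch.nn.Linear(8, 8).cuda(), mode="reduce-overhead")
    x = torch.randn(4, 8, device="cuda", requires_grad=True)
    # both forwards run before either backward; the generation must not advance in between
    mod2(mod1(x)).sum().backward()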
1755
+ def run(self, new_inputs: List[Tensor], function_id: FunctionID):
1756
+ assert self.graph is not None, "Running CUDAGraph after shutdown"
1757
+ out = self._run(new_inputs, function_id)
1758
+
1759
+ # The forwards are only pending following invocation, not before
1760
+ mode = self.id_to_mode[function_id]
1761
+ if mode == CompilationMode.FORWARD:
1762
+ self.running_forwards_with_pending_backwards = True
1763
+ elif mode == CompilationMode.BACKWARD:
1764
+ self.running_forwards_with_pending_backwards = False
1765
+
1766
+ return out
1767
+
1768
+ def set_to_running_backward(self):
1769
+ self.running_forwards_with_pending_backwards = False
1770
+
1771
+ def _run(self, new_inputs: List[Tensor], function_id: FunctionID):
1772
+ # we will try to end the current execution lazily, since
1773
+ # we don't want to do unnecessary checking of the existing outputs
1774
+ # on the hot path, but both recording and warmup only happen once
1775
+ # so we check up front
1776
+ if self.in_recording:
1777
+ self.try_end_curr_recording(function_id)
1778
+
1779
+ if self.in_warmup:
1780
+ self.try_end_curr_warmup(function_id)
1781
+
1782
+ # warming up a function and subsequently recording may use different memory addresses
1783
+ # because both depend on the state of the caching allocator. if we warm up graph A,
1784
+ # then warm up graph B and make more allocations, the subsequent recording of A will not
1785
+ # necessarily use the same addresses as in the warm up. Thus any warm up of a node can only
1786
+ # be followed by warm up runs.
1787
+ if (
1788
+ not (
1789
+ function_id in self.warmed_up_functions
1790
+ or config.triton.skip_cudagraph_warmup
1791
+ )
1792
+ ) or self.in_warmup:
1793
+ # If we are in the middle of executing cuda graphs, then we need to checkpoint memory state.
1794
+ # Both Recording and Warmup will be reflected in the allocator and don't need changes
1795
+ if self.path_state == ExecutionState.EXECUTION:
1796
+ self.apply_checkpoint_execution_state_in_allocator()
1797
+
1798
+ return self.run_eager(new_inputs, function_id)
1799
+
1800
+ child_nodes = (
1801
+ self.roots if self.current_node is None else self.current_node.children
1802
+ )
1803
+
1804
+ if not self.in_recording:
1805
+ for child in child_nodes[function_id]:
1806
+ # here we are checking memory consistency between recording and execution,
1807
+ # as well as things like stability of tensor locations, etc
1808
+ # and other
1809
+ if child.check_invariants(new_inputs):
1810
+ return self.execute_node(child, new_inputs)
1811
+
1812
+ # now that we know the new function can't be run as a child of the
1813
+ # current node, if it is a root, try to end the current execution.
1814
+ # as noted above, we want to do this lazily to avoid having to
1815
+ # check all existing outputs
1816
+ if self.current_node is not None and function_id in self.roots:
1817
+ self.try_end_curr_execution()
1818
+
1819
+ # run again to hit the root matching case which must succeed
1820
+ if self.current_node is None:
1821
+ return self.run(new_inputs, function_id)
1822
+
1823
+ # at this point, we necessarily will do a new recording
1824
+ self.debug_fail_counter += 1
1825
+
1826
+ self.try_end_curr_execution()
1827
+ if self.current_node is not None:
1828
+ self.apply_checkpoint_execution_state_in_allocator()
1829
+
1830
+ # now, we are in a recording state !
1831
+ return self.record_function(new_inputs, function_id)
1832
+
1833
+ def shutdown(self):
1834
+ """
1835
+ Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn
1836
+ might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown
1837
+ to avoid a reference cycle.
1838
+ """
1839
+ nodes = []
1840
+ for roots in self.roots.values():
1841
+ nodes.extend(roots)
1842
+
1843
+ while nodes:
1844
+ node = nodes.pop()
1845
+ for children in node.children.values():
1846
+ nodes.extend(children)
1847
+ node.remove_node_cached_tensors()
1848
+ node.graph = None
1849
+
1850
+ self.graph = None
1851
+ self.roots = None # type: ignore[assignment]
1852
+ self.current_node = None
1853
+
1854
+ def record_function(self, new_inputs, function_id) -> List[Optional[Tensor]]:
1855
+ graph_id = self.new_graph_id()
1856
+ log.debug(
1857
+ "Recording function %d of graph recording id %d",
1858
+ function_id.id,
1859
+ graph_id.id,
1860
+ )
1861
+ torch.cuda.synchronize()
1862
+ node = CUDAGraphNode(
1863
+ self.ids_to_funcs[function_id],
1864
+ graph_id,
1865
+ self.current_node,
1866
+ new_inputs,
1867
+ self.cuda_graphs_thread_pool,
1868
+ self.device_index,
1869
+ self.ids_to_stack_traces[function_id],
1870
+ self.stream,
1871
+ )
1872
+ if self.current_node is None:
1873
+ self.roots[function_id].append(node)
1874
+ else:
1875
+ self.current_node.add_child(function_id, node)
1876
+ self.current_node = node
1877
+ self.path_state = ExecutionState.RECORDING
1878
+ self.update_generation()
1879
+ torch.cuda.synchronize()
1880
+ return node.run_first_inputs(new_inputs)
1881
+
1882
+ def execute_node(self, node: CUDAGraphNode, new_inputs) -> List[Optional[Tensor]]:
1883
+ self.current_node = node
1884
+ self.path_state = ExecutionState.EXECUTION
1885
+ self.update_generation()
1886
+ return node.run(new_inputs)
1887
+
1888
+ def run_eager(self, new_inputs, function_id: FunctionID):
1889
+ # this is only stored on current node, because when we start a new path,
1890
+ # we will deallocate it
1891
+ already_warm = function_id in self.warmed_up_functions
1892
+ if not already_warm:
1893
+ log.debug("Running warmup of function %d", function_id.id)
1894
+ else:
1895
+ log.debug(
1896
+ "Running eager of function %d because ancestor needed to warm up",
1897
+ function_id.id,
1898
+ )
1899
+ self.warmed_up_functions.add(function_id)
1900
+ node = CUDAWarmupNode(
1901
+ self.ids_to_funcs[function_id],
1902
+ self.current_node,
1903
+ self.cuda_graphs_thread_pool,
1904
+ self.graph,
1905
+ self.device_index,
1906
+ self.ids_to_stack_traces[function_id],
1907
+ self.stream,
1908
+ already_warm,
1909
+ )
1910
+ self.current_node = node
1911
+ self.path_state = ExecutionState.WARMUP
1912
+ self.update_generation()
1913
+ return node.run(new_inputs)
1914
+
1915
+ def new_graph_id(self) -> GraphID:
1916
+ return GraphID(next(self.graph_counter))
1917
+
1918
+ def new_func_id(self) -> FunctionID:
1919
+ return FunctionID(next(self.func_counter))
1920
+
1921
+ def add_function(
1922
+ self,
1923
+ model,
1924
+ inputs,
1925
+ static_input_idxs,
1926
+ stack_traces,
1927
+ mode,
1928
+ constants,
1929
+ ) -> Tuple[Callable[..., Any], List[Optional[Tensor]]]:
1930
+ id = self.new_func_id()
1931
+ self.ids_to_stack_traces[id] = stack_traces
1932
+ self.ids_to_funcs[id] = WrappedFunction(
1933
+ model,
1934
+ static_input_idxs,
1935
+ id,
1936
+ tuple(t for t in constants if isinstance(t, torch.Tensor) and t.is_cuda),
1937
+ )
1938
+ self.id_to_mode[id] = mode
1939
+ fn = functools.partial(self.run, function_id=id)
1940
+
1941
+ # container needs to set up cleanup for when fn dies
1942
+ get_container(self.device_index).add_strong_reference(fn)
1943
+ return fn, fn(inputs)
1944
+
1945
+ @property
1946
+ def in_recording(self):
1947
+ return self.path_state == ExecutionState.RECORDING
1948
+
1949
+ @property
1950
+ def in_warmup(self):
1951
+ return self.path_state == ExecutionState.WARMUP
1952
+
1953
+ def get_roots(self) -> Iterator[CUDAGraphNode]:
1954
+ for nodes in self.roots.values():
1955
+ yield from nodes
1956
+
1957
+ @property
1958
+ def current_node(self):
1959
+ return self._current_node
1960
+
1961
+ @current_node.setter
1962
+ def current_node(self, value):
1963
+ self._current_node = value
1964
+ if value is None:
1965
+ self.path_state = ExecutionState.NONE
1966
+
1967
+ def update_generation(self):
1968
+ self.current_gen = self.get_curr_generation()
1969
+
1970
+ @staticmethod
1971
+ def get_curr_generation() -> int:
1972
+ if MarkStepBox.mark_step_counter != 0:
1973
+ return MarkStepBox.mark_step_counter
1974
+
1975
+ return GenerationTracker.generation
1976
+
1977
+ @staticmethod
1978
+ def user_invoked_mark_step():
1979
+ return MarkStepBox.mark_step_counter != 0
1980
+
1981
+ def can_start_new_generation(self) -> bool:
1982
+ if not self.in_new_torch_compile_invocation():
1983
+ return False
1984
+
1985
+ if self.user_invoked_mark_step():
1986
+ return True
1987
+
1988
+ return not self.running_forwards_with_pending_backwards
1989
+
1990
+ def in_new_torch_compile_invocation(self):
1991
+ return self.current_gen != self.get_curr_generation()
1992
+
1993
+ def try_end_curr_recording(self, function_id: FunctionID) -> None:
1994
+ """
1995
+ Check if the current recording can be terminated, either because all outputs of the
1996
+ previously recorded node are dead or because it was executed in a different
1997
+ generation. Will set current_node to None and in_recording to False if successful.
1998
+ """
1999
+ assert self.in_recording
2000
+ assert self.current_node is not None
2001
+
2002
+ # multiple invocations, allow overwriting the previous generation
2003
+ if self.can_start_new_generation():
2004
+ self.dealloc_current_path_weakrefs()
2005
+ self.clear_current_path_state_and_set_to_none()
2006
+ return
2007
+
2008
+ if self.current_node.all_outputs_are_dead():
2009
+ self.clear_current_path_state_and_set_to_none()
2010
+ return
2011
+
2012
+ self.check_warn_on_unable_to_start_executing(function_id)
2013
+
2014
+ def try_end_curr_execution(self) -> None:
2015
+ """
2016
+ Check if the current executing node can be terminated, either because all outputs of the
2017
+ previously executed node are dead or because it was executed in a different generation.
2018
+ Will set current_node to None if successful.
2019
+ """
2020
+
2021
+ assert not self.in_recording
2022
+ if self.current_node is None:
2023
+ return
2024
+
2025
+ if self.can_start_new_generation():
2026
+ self.clear_current_path_state_and_set_to_none()
2027
+ return
2028
+
2029
+ if self.current_node.all_outputs_are_dead():
2030
+ self.clear_current_path_state_and_set_to_none()
2031
+
2032
+ def try_end_curr_warmup(self, function_id: FunctionID):
2033
+ if self.can_start_new_generation():
2034
+ self.dealloc_current_path_weakrefs()
2035
+ self.current_node = None
2036
+ return
2037
+
2038
+ if self.current_node.all_outputs_are_dead():
2039
+ self.current_node = None
2040
+ return
2041
+
2042
+ self.check_warn_on_unable_to_start_executing(function_id)
2043
+
2044
+ def check_warn_on_unable_to_start_executing(self, function_id: FunctionID):
2045
+ "Warn if we in a potential loop where we are unable to hit fast path"
2046
+ if (
2047
+ function_id in self.warned_functions
2048
+ or not self.in_new_torch_compile_invocation()
2049
+ ):
2050
+ return
2051
+
2052
+ existing_nodes = [
2053
+ node
2054
+ for node in self.current_node._path_from_root
2055
+ if node.wrapped_function.id == function_id
2056
+ ]
2057
+
2058
+ if len(existing_nodes) <= 1:
2059
+ return
2060
+
2061
+ # repeated same pattern
2062
+ parents = {
2063
+ n.parent.wrapped_function.id
2064
+ for n in itertools.chain(existing_nodes, (self.current_node,))
2065
+ if n.parent is not None
2066
+ }
2067
+ if len(parents) == len(existing_nodes):
2068
+ return
2069
+
2070
+ self.warned_functions.add(function_id)
2071
+ warnings.warn(
2072
+ "Unable to hit fast path of CUDAGraphs because of pending, uninvoked backwards. "
2073
+ "Consider running with torch.no_grad() or using torch.compiler.cudagraph_mark_step_begin() "
2074
+ "before each model invocation"
2075
+ )
2076
+
2077
+ def dealloc_current_path_weakrefs(self):
2078
+ # TODO: we could also allow these weak refs to continue to be allocated,
2079
+ # but that adds some complications.
2080
+ for node in self.current_node._path_from_root:
2081
+ assert len(node.tensor_weakrefs) == len(node.stack_traces)
2082
+ for t, stack_trace in zip(node.tensor_weakrefs, node.stack_traces):
2083
+ ten = None if t is None else t()
2084
+ if ten is None:
2085
+ continue
2086
+
2087
+ stack_trace = (
2088
+ stack_trace.strip()
2089
+ if stack_trace
2090
+ else "[Could not find stack trace]"
2091
+ )
2092
+ msg = (
2093
+ "Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run. "
2094
+ f"Stack trace: {stack_trace}. "
2095
+ "To prevent overwriting, clone the tensor outside of torch.compile() "
2096
+ "or call torch.compiler.cudagraph_mark_step_begin() before each model invocation."
2097
+ )
2098
+ torch._C._set_storage_access_error_msg(ten, msg)
2099
+
2100
+ deleted = set()
2101
+ for storage_ref in self.current_node.path_live_weakrefs():
2102
+ if storage_ref() and storage_ref.data_ptr() not in deleted:
2103
+ deleted.add(storage_ref.data_ptr())
2104
+ torch._C._free_And_Remove_DeleterFn(storage_ref())
2105
+
2106
+ def clear_current_path_state_and_set_to_none(self):
2107
+ self.current_node.clear_path_state()
2108
+ self.current_node = None
2109
+
2110
+ def apply_checkpoint_execution_state_in_allocator(self):
2111
+ """
2112
+ Checkpoint the current execution state in the caching allocator so that
2113
+ additional cudagraph recordings can be made respecting existent live storages.
2114
+ """
2115
+ self.debug_checkpointing_counter += 1
2116
+ log.debug(
2117
+ "Checkpointing cuda caching allocator state. Number of checkpoints %d",
2118
+ self.debug_checkpointing_counter,
2119
+ )
2120
+
2121
+ state = self.current_node.checkpointed_caching_state
2122
+ device = self.current_node.device
2123
+ assert state is not None and device is not None
2124
+
2125
+ # currently we deallocate instead of allowing stale recordings
2126
+ stale_storages: List[int] = []
2127
+
2128
+ # remove cached tensors, otherwise they would prevent memory from being
2129
+ # reclaimed in subsequent recordings
2130
+ self.current_node.remove_path_cached_tensors()
2131
+ live_storages_wrappers = list(self.current_node.path_live_weakrefs())
2132
+
2133
+ live_storages_weak_refs = [t() for t in live_storages_wrappers]
2134
+ ptrs_to_deallocate = self.current_node.data_ptrs_dead_since_invocation()
2135
+ torch._C._cuda_setCheckpointPoolState(
2136
+ device, state, stale_storages, live_storages_weak_refs
2137
+ )
2138
+
2139
+ # NB: deduplicate aliased outputs
2140
+ for ptr in set(ptrs_to_deallocate):
2141
+ torch._C._cuda_cudaCachingAllocator_raw_delete(ptr)
2142
+
2143
+ # Now the live blocks should be exactly equal to the live storages in private pool
2144
+ if config.triton.slow_path_cudagraph_asserts:
2145
+ check_memory_pool(
2146
+ self.device_index, self.cuda_graphs_thread_pool, live_storages_wrappers
2147
+ )
2148
+ for wrapper in live_storages_wrappers:
2149
+ assert wrapper()
2150
+ assert torch._C._has_Standard_Deleter(wrapper())
2151
+ assert wrapper.data_ptr() not in ptrs_to_deallocate
2152
+
2153
+ def live_cudagraph_pool_storages_in_curr_execution(
2154
+ self,
2155
+ ) -> List[StorageWeakRefPointer]:
2156
+ if self.current_node is None:
2157
+ return []
2158
+ # explicitly ignoring previous recorded outputs from past path
2159
+ return [t() for t in self.current_node.path_live_weakrefs()]
venv/lib/python3.10/site-packages/torch/_inductor/cudagraph_utils.py ADDED
@@ -0,0 +1,105 @@
1
+ import dataclasses
2
+ from typing import Dict, Iterable, Optional
3
+
4
+ import torch
5
+ from torch._inductor.codecache import CompiledFxGraph
6
+
7
+
8
+ def get_mutating_use_stack_trace(placeholder_node: torch.fx.Node) -> Optional[str]:
9
+ # reinplaced uses might have a single, non-copy_ use
10
+ if len(placeholder_node.users) == 1:
11
+ return next(iter(placeholder_node.users)).meta.get("stack_trace", None)
12
+
13
+ for use in placeholder_node.users:
14
+ if use.target == torch.ops.aten.copy_.default:
15
+ if stack_trace := use.meta.get("stack_trace", None):
16
+ return stack_trace
17
+
18
+ return None
19
+
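`get_mutating_use_stack_trace` inspects the users of a placeholder to find the mutating op. A hedged illustration of that user-inspection pattern on a plain `symbolic_trace` graph (aot-traced graphs would instead show `aten.copy_.default` users):

import torch
import torch.fx as fx

class M(torch.nn.Module):
    def forward(self, x):
        x.add_(1)   # in-place mutation of the input
        return x * 2

gm = fx.symbolic_trace(M())
placeholder = next(n for n in gm.graph.nodes if n.op == "placeholder")
for use in placeholder.users:
    # each user is an FX node; its .meta may carry a "stack_trace" entry
    print(use.op, use.target, use.meta.get("stack_trace"))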
20
+
21
+ def format_default_skip_message(reason: str) -> str:
22
+ return f"skipping cudagraphs due to {reason}"
23
+
24
+
25
+ def get_mutation_stack_trace(
26
+ gm: torch.fx.GraphModule, mutation_indices: Iterable[int]
27
+ ) -> str:
28
+ stack_trace: Optional[str] = ""
29
+ placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"]
30
+
31
+ for idx in mutation_indices:
32
+ placeholder = placeholders[idx]
33
+ if stack_trace := get_mutating_use_stack_trace(placeholder):
34
+ break
35
+
36
+ if stack_trace:
37
+ msg = f"skipping cudagraphs due to mutation on input. Found from : \n {stack_trace}"
38
+ return msg
39
+
40
+ return format_default_skip_message("mutated inputs")
41
+
42
+
43
+ def check_for_mutation(
44
+ gm: torch.fx.GraphModule, compiled_graph: CompiledFxGraph, num_fixed: int
45
+ ) -> Optional[str]:
46
+ default_msg = format_default_skip_message("mutated inputs")
47
+
48
+ # doesn't work for non-trees because the warmup run would apply mutation twice
49
+ if torch._inductor.config.triton.cudagraph_trees:
50
+ # checking if mutation is only on parameters/static inputs
51
+ mutation_indices = [
52
+ idx for idx in compiled_graph.mutated_input_idxs if idx >= num_fixed
53
+ ]
54
+ has_mutation = len(mutation_indices) != 0
55
+ if not has_mutation:
56
+ return None
57
+
58
+ return get_mutation_stack_trace(gm, mutation_indices)
59
+
60
+ else:
61
+ has_mutation = len(compiled_graph.mutated_inputs) != 0
62
+ return None if not has_mutation else default_msg
63
+
64
+
65
+ def get_use_stack_trace(node) -> Optional[str]:
66
+ for use in node.users:
67
+ if stack_trace := use.meta.get("stack_trace", None):
68
+ return stack_trace
69
+ return None
70
+
71
+
72
+ def check_multiple_devices_or_any_cpu_nodes(
73
+ device_node_mapping: Dict[torch.device, torch.fx.Node]
74
+ ) -> Optional[str]:
75
+ if cpu_node := device_node_mapping.get(torch.device("cpu")):
76
+ if stack_trace := get_use_stack_trace(cpu_node):
77
+ return format_default_skip_message(
78
+ f"cpu device. Found from : \n {stack_trace}"
79
+ )
80
+
81
+ return format_default_skip_message("cpu device")
82
+
83
+ if (
84
+ len(device_node_mapping) == 1
85
+ and next(iter(device_node_mapping.keys())).type == "cuda"
86
+ ):
87
+ return None
88
+
89
+ keys_repr = (repr(key) for key in device_node_mapping.keys())
90
+ return format_default_skip_message(f"multiple devices: {', '.join(keys_repr)}")
91
+
92
+
93
+ def check_lowering_disable_cudagraph(
94
+ device_node_mapping: Dict[torch.device, torch.fx.Node]
95
+ ):
96
+ return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
97
+
98
+
99
+ @dataclasses.dataclass
100
+ class BoxedDeviceIndex:
101
+ value: Optional[int]
102
+
103
+ def set(self, device_idx: Optional[int]):
104
+ assert device_idx is None or isinstance(device_idx, int)
105
+ self.value = device_idx
venv/lib/python3.10/site-packages/torch/_inductor/debug.py ADDED
1
+ import collections
2
+ import contextlib
3
+ import cProfile
4
+ import dataclasses
5
+ import functools
6
+ import itertools
7
+ import logging
8
+ import os
9
+ import os.path
10
+ import pickle
11
+ import pstats
12
+ import shutil
13
+ import subprocess
14
+ from typing import Any, Dict, List, Optional
15
+ from unittest.mock import patch
16
+
17
+ from functorch.compile import draw_graph, get_aot_graph_name, get_graph_being_compiled
18
+
19
+ import torch
20
+ from torch import fx as fx
21
+
22
+ from torch._dynamo.repro.after_aot import save_graph_repro, wrap_compiler_debug
23
+ from torch._dynamo.utils import get_debug_dir
24
+ from torch.fx.graph_module import GraphModule
25
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
26
+ from torch.fx.passes.tools_common import legalize_graph
27
+ from torch.utils._pytree import tree_map
28
+
29
+ from . import config, ir # noqa: F811, this is needed
30
+ from .scheduler import (
31
+ BaseSchedulerNode,
32
+ FusedSchedulerNode,
33
+ NopKernelSchedulerNode,
34
+ OutputNode,
35
+ SchedulerNode,
36
+ )
37
+ from .virtualized import V
38
+
39
+ log = logging.getLogger(__name__)
40
+
41
+ SchedulerNodeList = List[Any]
42
+ BufMeta = collections.namedtuple("BufMeta", ["name", "n_origin"])
43
+ GRAPHVIZ_COMMAND_SCALABLE = ["dot", "-Gnslimit=2", "-Gnslimit1=2", "-Gmaxiter=5000"]
44
+
45
+
46
+ @functools.lru_cache(None)
47
+ def has_dot() -> bool:
48
+ try:
49
+ subprocess.check_output(["which", "dot"], stderr=subprocess.PIPE)
50
+ return True
51
+ except subprocess.SubprocessError:
52
+ return False
53
+
54
+
55
+ def draw_buffers(nodes: List[BaseSchedulerNode], print_graph=False, fname=None):
56
+ """
57
+ Draw a graph in fname.svg.
58
+ """
59
+ if not has_dot():
60
+ log.warning("draw_buffers() requires `graphviz` package")
61
+ return
62
+
63
+ if fname is None:
64
+ fname = get_graph_being_compiled()
65
+
66
+ graph = create_fx_from_snodes(nodes)
67
+
68
+ for node in graph.nodes:
69
+ if "fusion_meta" not in node.meta:
70
+ continue
71
+ group = node.meta["fusion_meta"].group
72
+ if isinstance(group, tuple):
73
+ if isinstance(group[1], int):
74
+ group = (group[1],)
75
+ else:
76
+ group = group[1]
77
+
78
+ # gather meta data
79
+ dtype = None
80
+ if isinstance(node, ir.ComputedBuffer):
81
+ dtype = node.data.dtype
82
+
83
+ metadata = TensorMetadata(group, dtype, None, None, None, None, None) # type: ignore[arg-type]
84
+ node.meta["tensor_meta"] = metadata
85
+
86
+ if print_graph:
87
+ print(graph)
88
+
89
+ gm = GraphModule({}, graph)
90
+ legalize_graph(gm)
91
+ gm.graph.lint()
92
+ draw_graph(
93
+ gm, fname, clear_meta=False, dot_graph_shape=config.trace.dot_graph_shape
94
+ )
95
+
96
+
97
+ def create_fx_from_snodes(snodes: List[BaseSchedulerNode]) -> fx.Graph:
98
+ """
99
+ Creates a FX Graph from a list of SchedulerNode objects.
100
+ """
101
+
102
+ def get_fake_func(name):
103
+ def func1(*args):
104
+ return 0
105
+
106
+ func1.__name__ = name
107
+ return func1
108
+
109
+ FusionMeta = collections.namedtuple("FusionMeta", ["group", "snode", "type"])
110
+
111
+ buf_to_fx_node = {}
112
+ graph = torch.fx.Graph()
113
+ first_node = None
114
+
115
+ outputs = []
116
+ group: Any = None
117
+ # create call_function node for each Buffer and Kernel
118
+ for snode in snodes:
119
+ if snode.is_extern():
120
+ node_type = "extern"
121
+ group = node_type
122
+ elif snode.is_template():
123
+ node_type = "template"
124
+ group = node_type
125
+ elif isinstance(snode, NopKernelSchedulerNode):
126
+ node_type = "nop"
127
+ group = node_type
128
+ elif isinstance(snode, SchedulerNode):
129
+ node_type = "compute"
130
+ group = snode.group
131
+ elif isinstance(snode, FusedSchedulerNode):
132
+ node_type = "fused"
133
+ group = snode.group
134
+ else:
135
+ raise RuntimeError("Unknown node type")
136
+
137
+ fused_name = torch._inductor.utils.get_fused_kernel_name(
138
+ snode.get_nodes(), "original_aten"
139
+ )
140
+ func_name = f"{node_type}: {fused_name}"
141
+ node_func = get_fake_func(func_name)
142
+ kwargs = {}
143
+ if hasattr(snode, "get_device"):
144
+ kwargs = {"device": snode.get_device()}
145
+ fx_node = graph.call_function(node_func, args=(), kwargs=kwargs)
146
+
147
+ def in_output(snode):
148
+ if isinstance(snode, FusedSchedulerNode):
149
+ return any(in_output(x) for x in snode.snodes)
150
+ return any(isinstance(user.node, OutputNode) for user in snode.users)
151
+
152
+ if in_output(snode):
153
+ outputs.append(fx_node)
154
+ name = snode.get_name()
155
+ fx_node.name = name
156
+
157
+ fx_node.meta["fusion_meta"] = FusionMeta(group, snode, node_type)
158
+
159
+ if isinstance(snode, FusedSchedulerNode):
160
+ for x in snode.snodes:
161
+ buf_to_fx_node[x.get_name()] = fx_node
162
+ buf_to_fx_node[name] = fx_node
163
+
164
+ if first_node is None:
165
+ first_node = fx_node
166
+
167
+ # create edges between nodes
168
+ for snode in snodes:
169
+ name = snode.get_name()
170
+ deps = snode.read_writes.reads
171
+
172
+ fx_node = buf_to_fx_node[name]
173
+ new_args = []
174
+ for dep in deps:
175
+ if dep.name in buf_to_fx_node:
176
+ dep_node = buf_to_fx_node[dep.name]
177
+ else:
178
+ with graph.inserting_before(first_node):
179
+ dep_node = graph.placeholder(dep.name)
180
+ buf_to_fx_node[dep.name] = dep_node
181
+ new_args.append(dep_node)
182
+
183
+ fx_node.args = tuple(new_args)
184
+
185
+ graph.output(outputs[0] if len(outputs) == 1 else tuple(outputs))
186
+ return graph
187
+
188
+
189
+ def update_orig_fx_node_name_to_buf_name(
190
+ nodes: SchedulerNodeList,
191
+ node_name_to_buf_name: Dict[str, str],
192
+ parent_buf_name: Optional[str] = None,
193
+ n_origins: int = 0,
194
+ ):
195
+ if nodes is None:
196
+ return
197
+ for node in nodes:
198
+ # for FusedSchedulerNode, traverse recursively into get_nodes()
199
+ buf_name = node.get_name()
200
+ children_nodes = node.get_nodes()
201
+ if children_nodes is not None and len(children_nodes) > 1:
202
+ update_orig_fx_node_name_to_buf_name(
203
+ children_nodes,
204
+ node_name_to_buf_name,
205
+ buf_name if parent_buf_name is None else parent_buf_name,
206
+ )
207
+ continue
208
+ else:
209
+ assert len(children_nodes) == 1 and children_nodes[0] == node
210
+
211
+ ir_node = node.node
212
+ if ir_node is None or ir_node.origins is None:
213
+ continue
214
+ for origin in ir_node.origins:
215
+ node_name = origin.name
216
+ # when buf1 and buf2 both have origin=node1
217
+ # we draw node1 according to buf1
218
+ if node_name not in node_name_to_buf_name:
219
+ node_name_to_buf_name[node_name] = (
220
+ buf_name if parent_buf_name is None else parent_buf_name
221
+ )
222
+
223
+
224
+ def get_node_name_to_buf_meta(node_name_to_buf_name: Dict[str, str]):
225
+ buf_name_to_n_node = {}
226
+ for node_name, buf_name in node_name_to_buf_name.items():
227
+ if buf_name not in buf_name_to_n_node:
228
+ buf_name_to_n_node[buf_name] = {node_name}
229
+ else:
230
+ buf_name_to_n_node[buf_name].add(node_name)
231
+
232
+ node_name_to_buf_meta = {}
233
+ for node_name, buf_name in node_name_to_buf_name.items():
234
+ n_node = len(buf_name_to_n_node[buf_name])
235
+ node_name_to_buf_meta[node_name] = BufMeta(buf_name, n_node)
236
+ return node_name_to_buf_meta
237
+
238
+
239
+ def annotate_orig_fx_with_snodes(
240
+ gm: torch.fx.GraphModule, snodes: SchedulerNodeList
241
+ ) -> None:
242
+ """
243
+ Annotates the nodes of the original FX graph with buffer metadata derived from the given SchedulerNode list.
244
+ """
245
+ node_name_to_buf_name: Dict[str, str] = {}
246
+ update_orig_fx_node_name_to_buf_name(snodes, node_name_to_buf_name)
247
+ if node_name_to_buf_name is None:
248
+ return
249
+ node_name_to_buf_meta = get_node_name_to_buf_meta(node_name_to_buf_name)
250
+ for node in gm.graph.nodes:
251
+ if node.name in node_name_to_buf_meta:
252
+ node.meta["buf_meta"] = node_name_to_buf_meta.get(node.name)
253
+
254
+
255
+ @contextlib.contextmanager
256
+ def enable_aot_logging():
257
+ compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
258
+
259
+ import torch._functorch.aot_autograd
260
+
261
+ log = logging.getLogger(torch._functorch.aot_autograd.__name__)
262
+
263
+ stack = contextlib.ExitStack()
264
+ if not compile_debug:
265
+ try:
266
+ yield
267
+ finally:
268
+ stack.close()
269
+ return
270
+
271
+ # Enable all graphs to be logged to a file by setting the flags to True
272
+ # and the log level of the file logger to DEBUG
273
+ stack.enter_context(patch("functorch.compile.config.debug_partitioner", True))
274
+
275
+ path = os.path.join(get_debug_dir(), "torchinductor")
276
+ os.makedirs(path, exist_ok=True)
277
+
278
+ fh = logging.FileHandler(
279
+ os.path.join(
280
+ path,
281
+ f"aot_{get_aot_graph_name()}_debug.log",
282
+ )
283
+ )
284
+ fh.setLevel(logging.DEBUG)
285
+ fh.setFormatter(
286
+ logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
287
+ )
288
+ log.addHandler(fh)
289
+ try:
290
+ yield
291
+ finally:
292
+ log.removeHandler(fh)
293
+ stack.close()
294
+
295
+
296
+ class DebugContext:
297
+ _counter = itertools.count()
298
+
299
+ @staticmethod
300
+ def wrap(fn):
301
+ @functools.wraps(fn)
302
+ def inner(*args, **kwargs):
303
+ with DebugContext():
304
+ return fn(*args, **kwargs)
305
+
306
+ return wrap_compiler_debug(inner, compiler_name="inductor")
307
+
308
+ @staticmethod
309
+ def create_debug_dir(folder_name: str) -> Optional[str]:
310
+ debug_dir = config.trace.debug_dir or get_debug_dir()
311
+ for n in DebugContext._counter:
312
+ dirname = os.path.join(
313
+ debug_dir,
314
+ "torchinductor",
315
+ f"{folder_name}.{n}",
316
+ )
317
+ if not os.path.exists(dirname):
318
+ os.makedirs(dirname)
319
+ return dirname
320
+ return None
321
+
322
+ def __init__(self):
323
+ self._prof = None
324
+ self._path = None
325
+ self._stack = contextlib.ExitStack()
326
+
327
+ def copy(self, new_path: str):
328
+ if not self._path:
329
+ return
330
+ assert new_path.endswith(".debug"), new_path
331
+ if os.path.exists(new_path):
332
+ shutil.rmtree(new_path)
333
+ try:
334
+ shutil.copytree(self._path, new_path)
335
+ self._path = new_path
336
+ except OSError:
337
+ log.warning(
338
+ "Failed to copy debug files from %s to %s", self._path, new_path
339
+ )
340
+ pass
341
+
342
+ def fopen(self, filename: str, write_mode: str = "w", *args, **kwargs):
343
+ assert self._path
344
+ return open(os.path.join(self._path, filename), write_mode, *args, **kwargs)
345
+
346
+ @contextlib.contextmanager
347
+ def fopen_context(self, filename: str, write_mode: str = "w", *args, **kwargs):
348
+ assert self._path
349
+ with open(os.path.join(self._path, filename), write_mode, *args, **kwargs) as f:
350
+ yield f
351
+
352
+ def filename(self, suffix: str):
353
+ assert self._path
354
+ return os.path.join(self._path, suffix)
355
+
356
+ def upload_tar(self):
357
+ if config.trace.upload_tar is not None:
358
+ import tarfile
359
+
360
+ assert self._path
361
+ tar_file = os.path.join(
362
+ self._path, f"{os.path.basename(self._path)}.tar.gz"
363
+ )
364
+ with tarfile.open(tar_file, "w:gz") as tar:
365
+ tar.add(self._path, arcname=os.path.basename(self._path))
366
+ config.trace.upload_tar(tar_file)
367
+
368
+ def __enter__(self):
369
+ if config.debug:
370
+ log = logging.getLogger("torch._dynamo")
371
+ prev_level = log.level
372
+ log.setLevel(logging.DEBUG)
373
+
374
+ def reset_log_level(level):
375
+ log.setLevel(level)
376
+
377
+ self._stack.callback(reset_log_level, prev_level)
378
+
379
+ self._stack.enter_context(V.set_debug_handler(self))
380
+
381
+ if not config.trace.enabled:
382
+ return
383
+
384
+ self._path = self.create_debug_dir(get_aot_graph_name())
385
+
386
+ if config.trace.debug_log:
387
+ self._setup_log_capture("debug.log", logging.DEBUG)
388
+ if config.trace.info_log:
389
+ self._setup_log_capture("info.log", logging.INFO)
390
+ if config.trace.compile_profile:
391
+ self._prof = cProfile.Profile()
392
+ self._prof.enable()
393
+
394
+ def _setup_log_capture(self, filename: str, level: int):
395
+ log = logging.getLogger("torch._inductor")
396
+ fd = self._stack.enter_context(self.fopen(filename))
397
+ ch = logging.StreamHandler(fd)
398
+ ch.setLevel(level)
399
+ ch.setFormatter(
400
+ logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
401
+ )
402
+ log.addHandler(ch)
403
+ log.setLevel(min(log.level, level))
404
+ self._stack.callback(log.removeHandler, ch)
405
+
406
+ def __exit__(self, exc_type, exc_val, exc_tb):
407
+ if self._prof:
408
+ self._prof.disable()
409
+ self._save_profile_data()
410
+
411
+ if self._path:
412
+ self.upload_tar()
413
+ log.warning("%s debug trace: %s", get_graph_being_compiled(), self._path)
414
+ self._stack.close()
415
+
416
+ def _save_profile_data(self):
417
+ assert self._prof
418
+ self._prof.dump_stats(self.filename("compile.prof"))
419
+ with self.fopen("compile.stats") as fd:
420
+ stats = pstats.Stats(self._prof, stream=fd)
421
+ stats.strip_dirs()
422
+ stats.sort_stats("cumtime")
423
+ stats.print_stats(100)
424
+ stats.sort_stats("tottime")
425
+ stats.print_stats(100)
426
+
427
+ def __getattr__(self, name):
428
+ if config.trace.enabled and getattr(config.trace, name):
429
+ try:
430
+ return getattr(DebugFormatter(self), name)
431
+ except Exception:
432
+ log.warning("Ignoring exception in debug code", exc_info=True)
433
+ else:
434
+
435
+ def ignored(*args, **kwargs):
436
+ pass
437
+
438
+ return ignored
439
+
440
+
441
+ class DebugFormatter:
442
+ def __init__(self, handler):
443
+ self.fopen = handler.fopen
444
+ self.fopen_context = handler.fopen_context
445
+ self.filename = handler.filename
446
+ self.handler = handler
447
+
448
+ def fx_graph(self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]):
449
+ with self.fopen("fx_graph_runnable.py") as fd:
450
+ save_graph_repro(fd, gm, inputs, "inductor")
451
+
452
+ with self.fopen("fx_graph_readable.py") as fd:
453
+ fd.write(gm.print_readable(print_output=False))
454
+
455
+ def fx_graph_transformed(
456
+ self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]
457
+ ):
458
+ with self.fopen("fx_graph_transformed.py") as fd:
459
+ fd.write(gm.print_readable(print_output=False))
460
+
461
+ def ir_pre_fusion(self, nodes: SchedulerNodeList):
462
+ self._write_ir("ir_pre_fusion.txt", nodes)
463
+
464
+ def ir_post_fusion(self, nodes: SchedulerNodeList):
465
+ self._write_ir("ir_post_fusion.txt", nodes)
466
+
467
+ def _write_ir(self, filename: str, nodes: SchedulerNodeList):
468
+ with self.fopen(filename) as fd:
469
+ log.info("Writing debug ir to %s", fd.name)
470
+ for node in nodes:
471
+ fd.write(node.debug_str())
472
+ fd.write("\n\n\n")
473
+
474
+ def graph_diagram(self, nodes: SchedulerNodeList):
475
+ draw_buffers(nodes, fname=self.filename("graph_diagram.svg"))
476
+
477
+ def draw_orig_fx_graph(self, gm: torch.fx.GraphModule, nodes: SchedulerNodeList):
478
+ annotate_orig_fx_with_snodes(gm, nodes)
479
+ draw_graph(
480
+ gm,
481
+ fname=self.filename("orig_fx_graph_diagram.svg"),
482
+ clear_meta=False,
483
+ prog=GRAPHVIZ_COMMAND_SCALABLE,
484
+ parse_stack_trace=True,
485
+ dot_graph_shape=config.trace.dot_graph_shape,
486
+ )
487
+
488
+ def output_code(self, filename):
489
+ shutil.copy(filename, self.filename("output_code.py"))
490
+
491
+ def log_autotuning_results(
492
+ self,
493
+ name: str,
494
+ input_nodes: List[ir.IRNode],
495
+ timings: Dict["ChoiceCaller", float], # type: ignore[name-defined] # noqa: F821
496
+ elapse: float,
497
+ ):
498
+ import json
499
+
500
+ from .ir import FixedLayout
501
+
502
+ def build_node_info(node: ir.IRNode):
503
+ if hasattr(node, "name"):
504
+ node_name = node.name
505
+ else:
506
+ node_name = ""
507
+ node_info = {
508
+ "name": node_name,
509
+ "type": type(node).__name__,
510
+ }
511
+ try:
512
+ layout = node.get_layout()
513
+ if isinstance(layout, FixedLayout):
514
+ offset = 0
515
+ try:
516
+ offset = int(layout.offset)
517
+ except Exception:
518
+ try:
519
+ offset = V.graph.sizevars.size_hint(
520
+ layout.offset, fallback=0
521
+ )
522
+ except Exception:
523
+ pass
524
+ static_layout = FixedLayout(
525
+ layout.device,
526
+ dtype=layout.dtype,
527
+ size=list(V.graph.sizevars.size_hints(layout.size)),
528
+ stride=list(V.graph.sizevars.size_hints(layout.stride)),
529
+ offset=offset,
530
+ )
531
+ node_info["layout"] = str(static_layout)
532
+ else:
533
+ node_info["layout"] = str(node.get_layout())
534
+ except Exception as e:
535
+ pass
536
+ try:
537
+ node_info["dtype"] = str(node.get_dtype())
538
+ except Exception as e:
539
+ pass
540
+ try:
541
+ node_info["device"] = str(node.get_device())
542
+ except Exception as e:
543
+ pass
544
+ try:
545
+ node_info["stride"] = str(
546
+ V.graph.sizevars.size_hints(node.get_stride())
547
+ )
548
+ except Exception as e:
549
+ pass
550
+ try:
551
+ node_info["size"] = str(V.graph.sizevars.size_hints(node.get_size()))
552
+ except Exception as e:
553
+ pass
554
+ try:
555
+ node_info["numel"] = str(V.graph.sizevars.size_hint(node.get_numel()))
556
+ except Exception as e:
557
+ pass
558
+ if hasattr(node, "data") and isinstance(node.data, ir.IRNode):
559
+ node_info["data"] = build_node_info(node.data)
560
+ return node_info
561
+
562
+ general_properties = {
563
+ "op_name": name,
564
+ "cuda_device_name": torch.cuda.get_device_name(),
565
+ "cuda_device_count": torch.cuda.device_count(),
566
+ "input_nodes": [build_node_info(node) for node in input_nodes],
567
+ "autotuning_time": elapse,
568
+ }
569
+ with self.fopen_context(
570
+ "autotuning_result_json_list.txt", "at", encoding="utf-8"
571
+ ) as fd:
572
+ for caller, time in timings.items():
573
+ info_dict = dict(caller.info_dict())
574
+ info_dict.update(general_properties)
575
+ info_dict["benchmark_result"] = time
576
+ json.dump(info_dict, fd)
577
+ fd.write("\n")
578
+
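The file written above contains one JSON object per line. A hedged sketch for reading it back (the path is relative to the debug trace directory that `DebugContext` creates):

import json

def read_autotuning_results(path="autotuning_result_json_list.txt"):
    results = []
    with open(path, encoding="utf-8") as fd:
        for line in fd:
            if line.strip():
                results.append(json.loads(line))
    return results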
579
+
580
+ @dataclasses.dataclass
581
+ class TensorMetadataHolder:
582
+ tensor_metadata: TensorMetadata
583
+ device: torch.device
584
+
585
+
586
+ save_args_cnt = itertools.count()
587
+
588
+
589
+ def save_args_for_compile_fx_inner(*args, **kwargs):
590
+ """
591
+ This function is used to save arguments for a compile_fx_inner function call
592
+ to the file system. Later on one can replay the compile_fx_inner call
593
+ with the saved arguments using load_args_and_run_compile_fx_inner.
594
+ """
595
+
596
+ folder = "/tmp/inductor_saved_args"
597
+ if not os.path.exists(folder):
598
+ os.mkdir(folder)
599
+
600
+ def handle_tensor(x):
601
+ """
602
+ Pickling a FakeTensor will result in the error:
603
+ AttributeError: Can't pickle local object 'WeakValueDictionary.__init__.<locals>.remove'
604
+
605
+ Convert all Tensors to metadata. This may also make pickling faster.
606
+ """
607
+ if isinstance(x, torch.Tensor):
608
+ return TensorMetadataHolder(_extract_tensor_metadata(x), x.device)
609
+ else:
610
+ return x
611
+
612
+ args_to_save, kwargs_to_save = tree_map(handle_tensor, (args, kwargs))
613
+
614
+ fn_name = "compile_fx_inner"
615
+ path = f"{folder}/{fn_name}_{next(save_args_cnt)}.pkl"
616
+ with open(path, "wb") as f:
617
+ pickle.dump((args_to_save, kwargs_to_save), f)
618
+
619
+ if log.isEnabledFor(logging.DEBUG):
620
+ message = f"""
621
+ Arguments for a compile_fx_inner call are saved to {path}. To replay the call,
622
+ run the following:
623
+
624
+ from torch._inductor.debug import load_args_and_run_compile_fx_inner
625
+ load_args_and_run_compile_fx_inner({path!r})
626
+ """
627
+ # call print rather than log.debug. log.debug will print message
628
+ # prefix for each line which makes the code snippet harder to
629
+ # copy.
630
+ # Not a big deal since the code is already guarded by checking
631
+ # the log level.
632
+ print(message)
633
+
634
+
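The `tree_map` trick above swaps every tensor in a nested structure for a picklable stand-in. A minimal, hedged standalone example of the same pattern (the placeholder dict here is illustrative, not the `TensorMetadataHolder` used above):

import pickle

import torch
from torch.utils._pytree import tree_map

def to_placeholder(x):
    if isinstance(x, torch.Tensor):
        return {"shape": tuple(x.shape), "dtype": str(x.dtype), "device": str(x.device)}
    return x

args = (torch.randn(2, 3), [torch.zeros(4)], {"k": 1})
payload = tree_map(to_placeholder, args)
blob = pickle.dumps(payload)  # only lightweight metadata is serialized
print(pickle.loads(blob))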
635
+ def load_args_and_run_compile_fx_inner(path: str):
636
+ from torch._inductor.compile_fx import compile_fx_inner
637
+
638
+ with open(path, "rb") as f:
639
+ args, kwargs = pickle.load(f)
640
+
641
+ def handle_tensor(x):
642
+ if isinstance(x, TensorMetadataHolder):
643
+ return torch._dynamo.testing.rand_strided(
644
+ x.tensor_metadata.shape,
645
+ x.tensor_metadata.stride,
646
+ x.tensor_metadata.dtype,
647
+ x.device,
648
+ )
649
+ else:
650
+ return x
651
+
652
+ fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
653
+ with fake_mode, config.patch("save_args", False):
654
+ args, kwargs = tree_map(handle_tensor, (args, kwargs))
655
+ return compile_fx_inner(*args, **kwargs)
venv/lib/python3.10/site-packages/torch/_inductor/decomposition.py ADDED
@@ -0,0 +1,678 @@
1
+ import functools
2
+ import logging
3
+ import math
4
+ import sys
5
+ import typing
6
+ from typing import Optional
7
+
8
+ import torch
9
+ import torch._decomp as decomp
10
+ import torch._prims_common as utils
11
+ import torch.ao.quantization.fx._decomposed
12
+ from torch._decomp import (
13
+ core_aten_decompositions,
14
+ get_decompositions,
15
+ remove_decompositions,
16
+ )
17
+ from torch._decomp.decompositions import (
18
+ _grid_sampler_2d as decomp_grid_sampler_2d,
19
+ pw_cast_for_opmath,
20
+ )
21
+ from torch._decomp.decompositions_for_rng import extra_random_decomps
22
+ from torch._higher_order_ops.out_dtype import out_dtype
23
+ from torch._prims_common import (
24
+ elementwise_dtypes,
25
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
26
+ type_to_dtype,
27
+ )
28
+
29
+ from . import config, inductor_prims
30
+
31
+ log = logging.getLogger(__name__)
32
+ aten = torch.ops.aten
33
+ prims = torch.ops.prims
34
+ quantized_decomposed = torch.ops.quantized_decomposed
35
+
36
+ inductor_decompositions = get_decompositions(
37
+ [
38
+ aten._adaptive_avg_pool2d_backward,
39
+ aten.arange,
40
+ aten.bitwise_and_,
41
+ aten.bitwise_or_,
42
+ aten.clamp_min_,
43
+ aten.dist,
44
+ aten.empty_like,
45
+ aten.flip,
46
+ aten.gelu,
47
+ aten.hardtanh,
48
+ aten.index_select,
49
+ aten.lcm,
50
+ aten.leaky_relu,
51
+ aten.linalg_vector_norm,
52
+ aten._log_softmax,
53
+ aten.max_pool2d_with_indices_backward,
54
+ aten._native_batch_norm_legit,
55
+ aten._native_batch_norm_legit_functional,
56
+ aten._native_batch_norm_legit_no_training,
57
+ aten.native_batch_norm,
58
+ aten.native_group_norm,
59
+ aten.native_layer_norm,
60
+ aten.nll_loss2d_backward,
61
+ aten._softmax,
62
+ aten.sin_,
63
+ aten.sqrt_,
64
+ out_dtype,
65
+ aten._to_copy,
66
+ aten.tril_indices,
67
+ aten.triu_indices,
68
+ aten.upsample_bilinear2d.vec,
69
+ ]
70
+ )
71
+ decompositions = {**core_aten_decompositions(), **inductor_decompositions}
72
+
73
+ # Remove unwanted decompositions included via the core ATen decompositions from
74
+ # the Inductor decomp table.
75
+ decomps_to_exclude = [
76
+ aten._unsafe_index,
77
+ aten._scaled_dot_product_flash_attention_for_cpu.default, # See comments in torch/_decomp/decompositions.py
78
+ aten.clamp_max,
79
+ aten.clamp_min,
80
+ aten.glu, # inductor lowers this directly
81
+ aten.split.Tensor, # inductor lowers this directly
82
+ aten.squeeze, # inductor lowers this directly
83
+ aten.sum, # inductor lowers this directly
84
+ aten.unbind, # inductor lowers this directly
85
+ ]
86
+
87
+ remove_decompositions(decompositions, decomps_to_exclude)
88
+
89
+
90
+ def register_decomposition(ops):
91
+ for op in [ops] if callable(ops) else ops:
92
+ if op in decompositions:
93
+ log.warning("duplicate decomp: %s", ops)
94
+ return decomp.register_decomposition(ops, decompositions)
95
+
96
+
97
+ # TODO: for now, inductor doesn't handle asserts
98
+ # because the condition is symbool -> tensor in the graph.
99
+ @register_decomposition([aten._assert_async.msg])
100
+ def assert_async_msg_decomp(tensor, msg):
101
+ return
102
+
103
+
104
+ # Following `assert_async_msg_decomp` and implement as non-op.
105
+ @register_decomposition([aten._functional_assert_async.msg])
106
+ def functional_assert_async_msg_decomp(tensor, msg):
107
+ return
108
+
109
+
110
+ @register_decomposition([aten.sym_constrain_range_for_size.default])
111
+ def sym_constrain_range_for_size(symbol, *, min=None, max=None):
112
+ return
113
+
114
+
115
+ @register_decomposition([aten.clamp])
116
+ @pw_cast_for_opmath
117
+ def clamp(x, min=None, max=None):
118
+ if min is not None:
119
+ x = x.clamp_min(min)
120
+ if max is not None:
121
+ x = x.clamp_max(max)
122
+ return x
123
+
124
+
125
+ @register_decomposition([aten.full])
126
+ def full(size, fill_value, **kwargs):
127
+ dtype = kwargs.get("dtype")
128
+ if dtype is None:
129
+ kwargs["dtype"] = type_to_dtype(type(fill_value))
130
+ return aten.full(size, fill_value, **kwargs)
131
+ return NotImplemented
132
+
133
+
134
+ # Not really sure how to put this into the main library. PrimTorch wants
135
+ # empty_permuted to go to the prim, and typically users don't really want
136
+ # to decompose to empty_strided (but inductor is OK with it, because we are
137
+ # cool with strides and everything goes to empty_strided)
138
+ @register_decomposition([aten.empty_permuted.default])
139
+ def empty_permuted(size, physical_layout, **kwargs):
140
+ perm = [0] * len(size)
141
+ for p, l in enumerate(physical_layout):
142
+ perm[l] = p
143
+ return torch.empty([size[l] for l in physical_layout], **kwargs).permute(perm)
144
+
145
+
146
+ @register_decomposition([aten.convolution_backward])
147
+ def convolution_backward(
148
+ grad_output,
149
+ input,
150
+ weight,
151
+ bias_sizes,
152
+ stride,
153
+ padding,
154
+ dilation,
155
+ transposed,
156
+ output_padding,
157
+ groups,
158
+ output_mask,
159
+ ):
160
+ if not output_mask[2] or grad_output.device.type != "cuda":
161
+ return NotImplemented
162
+ grad_bias = aten.sum(grad_output, [0] + list(range(2, grad_output.dim())))
163
+ grad_inp, grad_weight, _ = aten.convolution_backward(
164
+ grad_output,
165
+ input,
166
+ weight,
167
+ bias_sizes,
168
+ stride,
169
+ padding,
170
+ dilation,
171
+ transposed,
172
+ output_padding,
173
+ groups,
174
+ [output_mask[0], output_mask[1], False],
175
+ )
176
+ return (grad_inp, grad_weight, grad_bias)
177
+
178
+
179
+ @register_decomposition([aten.log2])
180
+ def log2(x):
181
+ return torch.log(x) * (1.0 / math.log(2.0))
182
+
183
+
184
+ @register_decomposition([aten.round.decimals])
185
+ def round_dec(x, decimals=0):
186
+ ten_pow_decimals = 10.0**decimals
187
+ return aten.round(x * ten_pow_decimals) * (1.0 / ten_pow_decimals)
188
+
189
+
190
+ @register_decomposition([aten.bmm])
191
+ @pw_cast_for_opmath
192
+ def bmm(self, batch2):
193
+ if config.coordinate_descent_tuning:
194
+ if self.shape[1] == 1 or batch2.shape[2] == 1:
195
+ out = (self.unsqueeze(-1) * batch2.unsqueeze(1)).sum(dim=2)
196
+ return out
197
+ if self.device.type == "cpu":
198
+ if self.size(1) == 1 and batch2.size(-1) == 1:
199
+ return torch.sum(
200
+ self.squeeze(1) * batch2.squeeze(-1), dim=1, keepdim=True
201
+ ).unsqueeze(1)
202
+ return NotImplemented
203
+
204
+
205
+ @register_decomposition([aten.addmm])
206
+ @pw_cast_for_opmath
207
+ def addmm(self, mat1, mat2, beta=1, alpha=1):
208
+ if self.device.type == "cpu":
209
+ if mat1.size(0) == 1 and mat2.size(-1) == 1:
210
+ out = torch.sum(
211
+ mat1.squeeze(0) * mat2.squeeze(-1), dim=0, keepdim=True
212
+ ).unsqueeze(0)
213
+ return alpha * out + beta * self
214
+ if mat1.size(0) == 1 and mat2.size(0) <= 16 and mat2.size(1) <= 16:
215
+ out = (mat1.T * mat2).sum(dim=0, keepdim=True)
216
+ return alpha * out + beta * self
217
+ return NotImplemented
218
+
219
+
220
+ @register_decomposition([aten.mm])
221
+ @pw_cast_for_opmath
222
+ def mm(self, input2):
223
+ from torch.fx.experimental.symbolic_shapes import (
224
+ definitely_true,
225
+ guard_size_oblivious,
226
+ )
227
+
228
+ # Our matrix vector multiplies only achieve peak bandwidth with coordinate descent tuning.
229
+ # todo: Look into why and fix it (hopefully)
230
+ if config.coordinate_descent_tuning:
231
+ if self.shape[0] == 1 or input2.shape[1] == 1:
232
+ return (self.unsqueeze(2) * input2.unsqueeze(0)).sum(dim=1)
233
+ if self.device.type == "cpu":
234
+ if (
235
+ guard_size_oblivious(self.size(-1) == 1)
236
+ and guard_size_oblivious(self.size(0) > 0)
237
+ and guard_size_oblivious(input2.size(0) == 1)
238
+ and (self.dtype == input2.dtype)
239
+ and definitely_true((torch.numel(self) + torch.numel(input2)) <= 32)
240
+ ):
241
+ return torch.cat([self[i, :] * input2 for i in range(self.size(0))])
242
+ if guard_size_oblivious(self.size(0) == 1) and guard_size_oblivious(
243
+ input2.size(-1) == 1
244
+ ):
245
+ return torch.sum(
246
+ self.squeeze(0) * input2.squeeze(-1), dim=0, keepdim=True
247
+ ).unsqueeze(0)
248
+ return NotImplemented
249
+
250
+
251
+ # This pass does two things:
252
+ # - Eliminate cat when there is only one tensor input
253
+ # - Normalize cat calls, so that legacy empty 1-D tensors are removed (NB: we
254
+ # don't remove ALL empty tensors, only the naughty ones)
255
+ @register_decomposition([aten.cat.default])
256
+ def cat(tensors, dim=0):
257
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
258
+
259
+ def non_empty_tensor(x):
260
+ # For better or worse, this is a valid cat:
261
+ #
262
+ # torch.cat([torch.randn(2, 2, 4), torch.randn(0), torch.randn(3, 2, 4)])
263
+ #
264
+ # We'd like to eliminate naughtiness like this for downstream passes
265
+ # like split_cat. The easiest way is to just drop such inputs
266
+ # (guarding that they are non-zero).
267
+ #
268
+ # Is it permissible for this filtering to be size-oblivious? A case
269
+ # where this could matter is cat([(2, 2), (u0,)], dim=0); if u0
270
+ # happened to be zero, we would have liked to have filtered it out.
271
+ # But actually, the ONLY way this could have passed is if u0 == 0,
272
+ # so by the time we get here we have already installed a deferred
273
+ # runtime assert forcing u0 to be zero. So if this hasn't happened,
274
+ # we know that the unbacked SymInt has appropriate size and there are
275
+ # no problems.
276
+ return len(x.shape) != 1 or guard_size_oblivious(x.shape[0] > 0)
277
+
278
+ filtered_tensors = list(filter(non_empty_tensor, tensors))
279
+
280
+ if len(filtered_tensors) == 1:
281
+ return filtered_tensors[0].clone()
282
+ elif 1 < len(filtered_tensors) < len(tensors):
283
+ # on the first call, when we remove empty tensors, we redispatch recursively
284
+ return aten.cat.default(filtered_tensors, dim)
285
+ # when no 'filtering' has occurred, we raise to prevent infinite recursion (no more decomposition needed)
286
+ return NotImplemented
287
+
288
+
289
+ @register_decomposition([aten.angle])
290
+ def angle(x):
291
+ if x.is_complex():
292
+ return torch.where(
293
+ torch.isnan(x.real), float("nan"), torch.atan2(x.imag, x.real)
294
+ )
295
+
296
+ # when x is real number
297
+ # if x >= 0, return 0
298
+ # if x < 0, return pi
299
+ # if x is nan, return nan
300
+ _, dtype = elementwise_dtypes(
301
+ x,
302
+ type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
303
+ )
304
+ pi = torch.scalar_tensor(math.pi, dtype=dtype, device=x.device)
305
+ ret = torch.where(x < 0, pi, 0.0)
306
+ return torch.where(torch.isnan(x), float("nan"), ret)
307
+
308
+
309
+ @register_decomposition([aten.add])
310
+ def add(x, y, *, alpha=None):
311
+ x_is_complex_tensor = torch.is_tensor(x) and x.is_complex()
312
+ y_is_complex_tensor = torch.is_tensor(y) and y.is_complex()
313
+ if not x_is_complex_tensor or not y_is_complex_tensor:
314
+ return NotImplemented
315
+ z = y
316
+ if alpha is not None:
317
+ z = alpha * y
318
+ complex_type = torch.promote_types(x.dtype, y.dtype)
319
+ return (x.view(x.real.dtype) + z.view(y.real.dtype)).view(complex_type)
320
+
321
+
322
+ @register_decomposition([aten.conj_physical])
323
+ def conj_physical(self):
324
+ assert not self.is_complex(), "TODO: implement this"
325
+ return self
326
+
327
+
328
+ @register_decomposition([aten.lift, aten.detach_])
329
+ def lift(self):
330
+ return self
331
+
332
+
333
+ @register_decomposition([aten.bernoulli.default])
334
+ def bernoulli(self, *, generator=None):
335
+ assert generator is None
336
+ return (torch.rand_like(self, dtype=torch.float32) < self).to(self.dtype)
337
+
338
+
339
+ @register_decomposition([aten.fmin, prims.fmin])
340
+ def fmin(self, other):
341
+ return torch.where(torch.isnan(other) | (other > self), self, other)
342
+
343
+
344
+ @register_decomposition([aten.fmax, prims.fmax])
345
+ def fmax(self, other):
346
+ return torch.where(torch.isnan(other) | (other < self), self, other)
347
+
348
+
349
+ @register_decomposition(aten.amax)
350
+ def amax(self, dim=None, keepdim=False):
351
+ if self.dtype == torch.bool:
352
+ return torch.any(self, dim=dim, keepdim=keepdim)
353
+ return NotImplemented
354
+
355
+
356
+ @register_decomposition(aten.amin)
357
+ def amin(self, dim=None, keepdim=False):
358
+ if self.dtype == torch.bool:
359
+ return torch.all(self, dim=dim, keepdim=keepdim)
360
+ return NotImplemented
361
+
362
+
363
+ @register_decomposition([aten.narrow_copy])
364
+ def narrow_copy(self, dim, start, length):
365
+ return torch.narrow(self, dim, start, length).clone()
366
+
367
+
368
+ @register_decomposition([aten.expand_copy])
369
+ def expand_copy(self, size, *, implicit=False):
370
+ return aten.expand(self, size, implicit=implicit).clone()
371
+
372
+
373
+ @register_decomposition([aten.view_copy.default])
374
+ def view_copy_default(self, size):
375
+ return aten.view(self, size).clone()
376
+
377
+
378
+ @register_decomposition([aten.view_copy.dtype])
379
+ def view_copy_dtype(self, dtype):
380
+ return self.to(dtype).clone()
381
+
382
+
383
+ def get_like_layout(
384
+ tensor: torch.Tensor, memory_format: Optional[torch.memory_format]
385
+ ) -> torch.memory_format:
386
+ # TODO: _to_copy tensor to stride permutation
387
+ if memory_format is torch.preserve_format or memory_format is None:
388
+ return utils.suggest_memory_format(tensor)
389
+ else:
390
+ return memory_format
391
+
392
+
393
+ @register_decomposition(aten.rand_like)
394
+ def rand_like(self, *, dtype=None, device=None, memory_format=None, **kwargs):
395
+ return torch.rand(
396
+ [*self.size()],
397
+ dtype=dtype or self.dtype,
398
+ device=device or self.device,
399
+ **kwargs,
400
+ ).to(memory_format=get_like_layout(self, memory_format))
401
+
402
+
403
+ @register_decomposition(aten.randn_like)
404
+ def randn_like(self, *, dtype=None, device=None, memory_format=None, **kwargs):
405
+ return torch.randn(
406
+ [*self.size()],
407
+ dtype=dtype or self.dtype,
408
+ device=device or self.device,
409
+ **kwargs,
410
+ ).to(memory_format=get_like_layout(self, memory_format))
411
+
412
+
413
+ @register_decomposition(aten.full_like)
414
+ def full_like(
415
+ self,
416
+ fill_value,
417
+ *,
418
+ dtype=None,
419
+ layout=None,
420
+ device=None,
421
+ pin_memory=False,
422
+ requires_grad=False,
423
+ memory_format=torch.preserve_format,
424
+ ):
425
+ return torch.full(
426
+ [*self.size()],
427
+ fill_value,
428
+ dtype=dtype or self.dtype,
429
+ layout=layout or self.layout,
430
+ device=device or self.device,
431
+ requires_grad=requires_grad,
432
+ ).to(memory_format=get_like_layout(self, memory_format))
433
+
434
+
435
+ @register_decomposition(aten.randint_like.default)
436
+ def randint_like(self, high, *, dtype=None, device=None, memory_format=None, **kwargs):
437
+ return aten.randint.low(
438
+ 0,
439
+ high,
440
+ [*self.size()],
441
+ dtype=dtype or self.dtype,
442
+ device=device or self.device,
443
+ **kwargs,
444
+ ).to(memory_format=get_like_layout(self, memory_format))
445
+
446
+
447
+ @register_decomposition(aten.randint_like.low_dtype)
448
+ def randint_like_low(
449
+ self, low, high, *, dtype=None, device=None, memory_format=None, **kwargs
450
+ ):
451
+ return aten.randint.low(
452
+ low,
453
+ high,
454
+ [*self.size()],
455
+ dtype=dtype or self.dtype,
456
+ device=device or self.device,
457
+ **kwargs,
458
+ ).to(memory_format=get_like_layout(self, memory_format))
459
+
460
+
461
+ @register_decomposition(aten.randint.default)
462
+ def randint(high, size, **kwargs):
463
+ return aten.randint.low(0, high, size, **kwargs)
464
+
465
+
466
+ # The difference between quantize_per_tensor.default and quantize_per_tensor.tensor is
467
+ # scale and zero_point is scalar or scalar tensor
468
+ @register_decomposition(quantized_decomposed.quantize_per_tensor.default)
469
+ def quantize_per_tensor_default_decomp_impl(
470
+ input: torch.Tensor,
471
+ scale: float,
472
+ zero_point: int,
473
+ quant_min: int,
474
+ quant_max: int,
475
+ dtype: torch.dtype,
476
+ ) -> torch.Tensor:
477
+ if input.dtype == torch.bfloat16:
478
+ input = input.to(torch.float32)
479
+ inv_scale = 1.0 / scale
480
+ return torch.clamp(
481
+ torch.round(input * inv_scale) + zero_point, quant_min, quant_max
482
+ ).to(dtype)
483
+
484
+
485
+ # The difference between dequantize_per_tensor.default and dequantize_per_tensor.tensor is
486
+ # scale and zero_point is scalar or scalar tensor
487
+ @register_decomposition(quantized_decomposed.dequantize_per_tensor.default)
488
+ def dequantize_per_tensor_default_decomp_impl(
489
+ input: torch.Tensor,
490
+ scale: float,
491
+ zero_point: int,
492
+ quant_min: int,
493
+ quant_max: int,
494
+ dtype: torch.dtype,
495
+ ) -> torch.Tensor:
496
+ return (input.to(torch.float32) - zero_point) * scale
497
+
498
+
499
+ @register_decomposition(quantized_decomposed.quantize_per_tensor.tensor)
500
+ def quantize_per_tensor_tensor_decomp_impl(
501
+ input: torch.Tensor,
502
+ scale: torch.Tensor,
503
+ zero_point: torch.Tensor,
504
+ quant_min: int,
505
+ quant_max: int,
506
+ dtype: torch.dtype,
507
+ ) -> torch.Tensor:
508
+ if input.dtype == torch.bfloat16:
509
+ input = input.to(torch.float32)
510
+ inv_scale = 1.0 / scale
511
+ return torch.clamp(
512
+ torch.round(input * inv_scale) + zero_point, quant_min, quant_max
513
+ ).to(dtype)
514
+
515
+
516
+ @register_decomposition(quantized_decomposed.dequantize_per_tensor.tensor)
517
+ def dequantize_per_tensor_tensor_decomp_impl(
518
+ input: torch.Tensor,
519
+ scale: torch.Tensor,
520
+ zero_point: torch.Tensor,
521
+ quant_min: int,
522
+ quant_max: int,
523
+ dtype: torch.dtype,
524
+ ) -> torch.Tensor:
525
+ return (input.to(torch.float32) - zero_point.to(torch.int32)) * scale.to(
526
+ torch.float32
527
+ )
528
+
529
+
530
+ @register_decomposition(torch.ops.quantized.embedding_bag_byte_unpack)
531
+ def q_embedding_bag_byte_unpack_decomp(packed):
532
+ def bitcast_u8_to_f32(u8):
533
+ x, y, z, w = (u8[..., n].to(torch.int32) for n in (0, 1, 2, 3))
534
+ if sys.byteorder == "little":
535
+ return (x + (y << 8) + (z << 16) + (w << 24)).view(torch.float32)[..., None]
536
+ else:
537
+ return ((x << 24) + (y << 16) + (z << 8) + w).view(torch.float32)[..., None]
538
+
539
+ scales = bitcast_u8_to_f32(packed[..., -8:-4])
540
+ offsets = bitcast_u8_to_f32(packed[..., -4:])
541
+ return packed[..., :-8].to(torch.float32) * scales + offsets
542
+
543
+
544
+ @register_decomposition([aten.grid_sampler_2d])
545
+ @pw_cast_for_opmath
546
+ def grid_sampler_2d(
547
+ a: torch.Tensor,
548
+ grid: torch.Tensor,
549
+ interpolation_mode: int = 0,
550
+ padding_mode: int = 0,
551
+ align_corners: bool = False,
552
+ ) -> torch.Tensor:
553
+ # We do not expand the grid (_expand_grid=False) on cpu for performance reasons
554
+ # Experimenting locally it was found that compiled CUDA code is accelerated by ~5x
555
+ # and CPU code by ~2x on bicubic mode, if we expand the grid from (N, H, W, 2) into (N, C, H, W, 2)
556
+ # However, this leads to a slowdown around ~0.8x on CPU bilinear mode, channels first.
557
+ # Thus we apply this hack to not expand the grid for this case.
558
+ _expand_grid = not (
559
+ a.device == torch.device("cpu")
560
+ and interpolation_mode == 0
561
+ and a.is_contiguous(memory_format=torch.contiguous_format)
562
+ )
563
+
564
+ output = decomp_grid_sampler_2d(
565
+ a,
566
+ grid=grid,
567
+ interpolation_mode=interpolation_mode,
568
+ padding_mode=padding_mode,
569
+ align_corners=align_corners,
570
+ _expand_grid=_expand_grid,
571
+ )
572
+ return output
573
+
574
+
575
+ @register_decomposition(aten._foreach_addcmul.Scalar)
576
+ def _foreach_addcmul_scalar(self, left_tensors, right_tensors, scalar=1):
577
+ return aten._foreach_add.List(
578
+ self, aten._foreach_mul.List(left_tensors, right_tensors), alpha=scalar
579
+ )
580
+
581
+
582
+ @register_decomposition(aten._foreach_addcdiv.Scalar)
583
+ def _foreach_addcdiv_scalar(self, left_tensors, right_tensors, scalar=1):
584
+ return aten._foreach_add.List(
585
+ self, aten._foreach_div.List(left_tensors, right_tensors), alpha=scalar
586
+ )
587
+
588
+
589
+ @register_decomposition(aten._foreach_lerp.Scalar)
590
+ def _foreach_lerp_scalar(start_tensors, end_tensors, weight):
591
+ return aten._foreach_add.List(
592
+ start_tensors,
593
+ aten._foreach_mul.Scalar(
594
+ aten._foreach_sub.List(end_tensors, start_tensors), weight
595
+ ),
596
+ )
597
+
598
+
599
+ @aten.miopen_batch_norm.default.py_impl(torch._C.DispatchKey.Autograd)
600
+ @register_decomposition(aten.miopen_batch_norm)
601
+ def miopen_batch_norm(
602
+ input: torch.Tensor,
603
+ weight: torch.Tensor,
604
+ bias: typing.Optional[torch.Tensor],
605
+ running_mean: typing.Optional[torch.Tensor],
606
+ running_var: typing.Optional[torch.Tensor],
607
+ training: bool,
608
+ exponential_average_factor: float,
609
+ epsilon: float,
610
+ ):
611
+ a, b, c = aten.native_batch_norm(
612
+ input,
613
+ weight,
614
+ bias,
615
+ running_mean,
616
+ running_var,
617
+ training,
618
+ exponential_average_factor,
619
+ epsilon,
620
+ )
621
+
622
+ if training:
623
+ return (a, b, c)
624
+ return (
625
+ a,
626
+ weight.new_zeros((0,)),
627
+ weight.new_zeros((0,)),
628
+ )
629
+
630
+
631
+ @functools.lru_cache(None)
632
+ def fast_random_decomps():
633
+ return {**decompositions, **extra_random_decomps}
634
+
635
+
636
+ def select_decomp_table():
637
+ """decomps can change based on config"""
638
+ if config.fallback_random:
639
+ return decompositions
640
+ return fast_random_decomps()
641
+
642
+
643
+ @register_decomposition(aten.masked_scatter)
644
+ def masked_scatter(self, mask, source):
645
+ if self.device.type == "cuda":
646
+ # This two-step algorithm is the same as eager CUDA, for eager CPU we
647
+ # use a 1-shot serial iteration.
648
+ self, mask = aten.broadcast_tensors([self, mask])
649
+ source_idx = mask.reshape(-1).cumsum(0) - 1
650
+ return inductor_prims.masked_scatter_with_index(self, mask, source_idx, source)
651
+ return NotImplemented
652
+
653
+
654
+ @register_decomposition(quantized_decomposed.choose_qparams.tensor)
655
+ def choose_qparams_tensor(
656
+ input: torch.Tensor, quant_min: int, quant_max: int, eps: float, dtype: torch.dtype
657
+ ):
658
+ min_val, max_val = torch.aminmax(input)
659
+ scale = (max_val - min_val) / float(quant_max - quant_min)
660
+ scale = torch.max(scale, torch.Tensor([eps]))
661
+ zero_point = quant_min - torch.round(min_val / scale).to(torch.int)
662
+ zero_point = torch.clamp(zero_point, quant_min, quant_max)
663
+ return scale.to(torch.float64), zero_point.to(torch.int64)
664
+
665
+
666
+ @register_decomposition(aten.put)
667
+ def put(self, index, source, accumulate=False):
668
+ flattened = self.flatten()
669
+ flattened = torch.index_put(
670
+ flattened, [index], source.reshape(index.shape), accumulate
671
+ )
672
+ return flattened.reshape(self.shape)
673
+
674
+
675
+ @register_decomposition(aten.put_)
676
+ def put_(self, index, source, accumulate=False):
677
+ out = aten.put(self, index, source, accumulate=accumulate)
678
+ return self.copy_(out)
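A minimal usage sketch (illustrative, not part of the file above): the table assembled here is typically consumed by a tracer such as make_fx, which rewrites registered ops into simpler aten ops during tracing. The function f and the input shape below are arbitrary; aten.gelu is used because it is registered in inductor_decompositions above.

import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._inductor.decompositions import select_decomp_table

def f(x):
    # gelu is in the decomp table built above, so it should not survive tracing
    return torch.nn.functional.gelu(x) + 1

gm = make_fx(f, decomposition_table=select_decomp_table())(torch.randn(8))
print(gm.graph)  # gelu appears decomposed into primitive aten ops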
venv/lib/python3.10/site-packages/torch/_inductor/dependencies.py ADDED
@@ -0,0 +1,506 @@
1
+ import collections
2
+ import dataclasses
3
+ import itertools
4
+ import logging
5
+ import re
6
+ import typing
7
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
8
+ from unittest.mock import patch
9
+
10
+ import sympy
11
+
12
+ import torch
13
+ from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
14
+
15
+ from .codegen.common import index_prevent_reordering
16
+ from .utils import (
17
+ get_dtype_size,
18
+ reduction_num_outputs,
19
+ sympy_index_symbol,
20
+ sympy_str,
21
+ sympy_subs,
22
+ VarRanges,
23
+ )
24
+ from .virtualized import OpsHandler, ReductionType, V
25
+
26
+ log = logging.getLogger(__name__)
27
+ is_indirect = re.compile(r"indirect|tmp").search
28
+ Dep = Union["MemoryDep", "StarDep", "WeakDep"]
29
+
30
+
31
+ class MemoryDep(typing.NamedTuple):
32
+ name: str
33
+ index: sympy.Expr # type: ignore[assignment]
34
+ var_names: Tuple[sympy.Symbol, ...]
35
+ size: Tuple[sympy.Expr, ...]
36
+
37
+ def __repr__(self):
38
+ return f"MemoryDep({self.name!r}, {self.index}, {self.ranges})"
39
+
40
+ @property
41
+ def ranges(self) -> Dict[sympy.Symbol, sympy.Expr]:
42
+ """{c0: 128, c1: 512, ...}"""
43
+ return dict(zip(self.var_names, self.size))
44
+
45
+ def get_numel(self) -> sympy.Expr:
46
+ if self.is_indirect():
47
+ numel = V.graph.get_numel(self.name)
48
+ else:
49
+ vars = set(self.index.free_symbols)
50
+ numel = sympy.Integer(1)
51
+ for var, size in zip(self.var_names, self.size):
52
+ if var in vars:
53
+ numel = numel * size
54
+ return numel
55
+
56
+ def rename(self, renames: Dict[str, str]) -> "MemoryDep":
57
+ if self.name in renames:
58
+ return MemoryDep(
59
+ renames[self.name], self.index, var_names=self.var_names, size=self.size
60
+ )
61
+ return self
62
+
63
+ def numbytes_hint(self):
64
+ return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
65
+ V.graph.get_dtype(self.name)
66
+ )
67
+
68
+ def has_unbacked_symbols(self):
69
+ return len(free_unbacked_symbols(self.get_numel())) > 0
70
+
71
+ def is_contiguous(self) -> bool:
72
+ return isinstance(self.index, sympy.Symbol) and self.index in self.var_names
73
+
74
+ def is_scalar(self) -> bool:
75
+ if isinstance(self.index, sympy.Symbol):
76
+ return self.index not in self.var_names and not self.is_indirect()
77
+ return isinstance(self.index, (int, sympy.Integer))
78
+
79
+ def is_indirect(self) -> bool:
80
+ return any(is_indirect(v.name) for v in self.index.free_symbols) # type: ignore[attr-defined]
81
+
82
+
83
+ class StarDep(typing.NamedTuple):
84
+ # depends on the entire buffer
85
+ name: str
86
+
87
+ @property
88
+ def index(self):
89
+ raise NotImplementedError("StarDep does not have an index")
90
+
91
+ def get_numel(self) -> sympy.Expr:
92
+ return V.graph.get_numel(self.name)
93
+
94
+ def rename(self, renames: Dict[str, str]) -> "StarDep":
95
+ if self.name in renames:
96
+ return StarDep(renames[self.name])
97
+ return self
98
+
99
+ def numbytes_hint(self):
100
+ return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
101
+ V.graph.get_dtype(self.name)
102
+ )
103
+
104
+ def has_unbacked_symbols(self):
105
+ return len(free_unbacked_symbols(self.get_numel())) > 0
106
+
107
+ def is_contiguous(self) -> bool:
108
+ return False
109
+
110
+ def is_scalar(self) -> bool:
111
+ return False
112
+
113
+ def is_indirect(self) -> bool:
114
+ return False
115
+
116
+
117
+ # Used for tracking mutation ordering
118
+ # if A reads a buffer and B mutates it
119
+ # B must be ordered after A
120
+ #
121
+ # It is weak because if it turns out A's read is never used, we can still
122
+ # eliminate it
123
+ class WeakDep(typing.NamedTuple):
124
+ name: str
125
+
126
+ @property
127
+ def index(self):
128
+ raise NotImplementedError("WeakDep does not have an index")
129
+
130
+ def get_numel(self) -> sympy.Expr:
131
+ return sympy.Integer(1)
132
+
133
+ def rename(self, renames: Dict[str, str]) -> "WeakDep":
134
+ if self.name in renames:
135
+ return WeakDep(renames[self.name])
136
+ return self
137
+
138
+ def numbytes_hint(self):
139
+ return 1 # Purely inserted for ordering, not an actual dep
140
+
141
+ def has_unbacked_symbols(self):
142
+ return False
143
+
144
+ def is_contiguous(self) -> bool:
145
+ return False
146
+
147
+
148
+ class IndexExprDep(typing.NamedTuple):
149
+ index: sympy.Expr # type: ignore[assignment]
150
+ var_names: Tuple[sympy.Symbol, ...]
151
+ size: Tuple[sympy.Expr, ...]
152
+
153
+
154
+ @dataclasses.dataclass
155
+ class ReadWrites:
156
+ reads: Set[Dep]
157
+ writes: Set[Dep]
158
+ index_exprs: Set[IndexExprDep]
159
+ range_vars: Optional[List[sympy.Expr]] = None
160
+ var_ranges: Optional[VarRanges] = None
161
+ op_counts: typing.Counter[str] = dataclasses.field(
162
+ default_factory=collections.Counter
163
+ )
164
+
165
+ def rename(self, renames: typing.Dict[str, str]) -> "ReadWrites":
166
+ return ReadWrites(
167
+ {dep.rename(renames) for dep in self.reads},
168
+ {dep.rename(renames) for dep in self.writes},
169
+ self.index_exprs,
170
+ self.range_vars,
171
+ self.var_ranges,
172
+ op_counts=self.op_counts,
173
+ )
174
+
175
+ def with_read(self, dep: Dep) -> "ReadWrites":
176
+ assert isinstance(dep, (WeakDep, StarDep))
177
+ return ReadWrites(
178
+ set.union(self.reads, {dep}),
179
+ self.writes,
180
+ self.index_exprs,
181
+ self.range_vars,
182
+ self.var_ranges,
183
+ op_counts=self.op_counts,
184
+ )
185
+
186
+ def merge(self, other: "ReadWrites"):
187
+ reads = set.union(self.reads, other.reads)
188
+ writes = set.union(self.writes, other.writes)
189
+ index_exprs = set.union(self.index_exprs, other.index_exprs)
190
+ op_counts = collections.Counter(self.op_counts)
191
+ op_counts.update(other.op_counts)
192
+ return ReadWrites(reads - writes, writes, index_exprs, op_counts=op_counts)
193
+
194
+ @staticmethod
195
+ def merge_list(read_writes: List["ReadWrites"]):
196
+ all_writes = set.union(*[rw.writes for rw in read_writes])
197
+ all_reads = set.union(*[rw.reads for rw in read_writes]) - all_writes
198
+ all_index_exprs = set.union(*[rw.index_exprs for rw in read_writes])
199
+
200
+ op_counts: typing.Counter[Any] = collections.Counter()
201
+ for rw in read_writes:
202
+ op_counts.update(rw.op_counts)
203
+
204
+ return ReadWrites(all_reads, all_writes, all_index_exprs, op_counts=op_counts)
205
+
206
+ def remove_reads(self, rem_reads):
207
+ return ReadWrites(
208
+ self.reads - rem_reads,
209
+ self.writes,
210
+ self.index_exprs,
211
+ self.range_vars,
212
+ self.var_ranges,
213
+ op_counts=self.op_counts,
214
+ )
215
+
216
+ def reads_and_writes(self):
217
+ return itertools.chain(self.reads, self.writes)
218
+
219
+
220
+ class _RecordLoadStoreInner(V.MockHandler): # type: ignore[name-defined]
221
+ def __init__(self, var_ranges: VarRanges, normalize: bool):
222
+ super().__init__()
223
+ self._reads: Set[Dep] = set()
224
+ self._writes: Set[MemoryDep] = set()
225
+ self._index_exprs: Set[IndexExprDep] = set()
226
+ self._var_ranges: VarRanges = var_ranges
227
+ self._normalize: bool = normalize
228
+
229
+ def canonicalize(
230
+ self, index: sympy.Expr
231
+ ) -> Tuple[sympy.Expr, Tuple[sympy.Symbol, ...], Tuple[sympy.Expr, ...]]:
232
+ if not self._normalize:
233
+ sizes = [V.graph.sizevars.simplify(x) for x in self._var_ranges.values()]
234
+ var_names = tuple(
235
+ k for k, v in zip(self._var_ranges.keys(), sizes) if v != 1
236
+ )
237
+ sizes = tuple(v for v in sizes if v != 1)
238
+ return index, var_names, sizes # type: ignore[return-value]
239
+
240
+ # Try to further simplify the indexes even if simplify_loops didn't
241
+ # convert it to the simplest form because of the interference from
242
+ # different indexing formulas.
243
+ free_symbols = index.free_symbols
244
+ var_ranges = {
245
+ k: V.graph.sizevars.simplify(v)
246
+ for k, v in self._var_ranges.items()
247
+ # TODO(jansel): explore this further normalization
248
+ # if k in free_symbols
249
+ }
250
+ index_vars = [*var_ranges.keys()]
251
+ sizes = tuple(var_ranges.values())
252
+ new_sizes, reindex, prune = V.graph.sizevars._simplify_loops(
253
+ index_vars,
254
+ sizes,
255
+ index_prevent_reordering([index], index_vars, sizes),
256
+ )
257
+
258
+ # assign new variables each dimension to deal with numbering mismatches
259
+ # d0, d1, d2 could become d0, d2 -- which won't match d0, d1
260
+ new_vars, add_var = var_builder(canonicalization_prefix())
261
+ replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))
262
+ index = sympy_subs(sympy.expand(index), replacement)
263
+
264
+ new_vars = [*new_vars.keys()]
265
+ new_sizes = [*new_sizes]
266
+ free_symbols = index.free_symbols
267
+ while new_vars and new_vars[-1] not in free_symbols:
268
+ # Reduction has last (reduced) dim in its sizes, but
269
+ # downstream users won't. Normalize this away.
270
+ new_vars.pop()
271
+ new_sizes.pop()
272
+ return index, tuple(new_vars), tuple(new_sizes) # type: ignore[arg-type]
273
+
274
+ def load(self, name: str, index: sympy.Expr) -> str:
275
+ self._reads.add(MemoryDep(name, *self.canonicalize(index)))
276
+ return f"load({name}, {sympy_str(index)})"
277
+
278
+ def load_seed(self, name: str, index: int):
279
+ assert isinstance(index, int)
280
+ return self.load(name, sympy.Integer(index))
281
+
282
+ def store(self, name: str, index: sympy.Expr, value: str, mode=None) -> str:
283
+ self._writes.add(MemoryDep(name, *self.canonicalize(index)))
284
+ return f"store({name}, {sympy_str(index)}, {value}, {mode})"
285
+
286
+ def store_reduction(self, name: str, index, value) -> str:
287
+ return self.store(name, index, f"store_reduction({value})")
288
+
289
+ def index_expr(self, index: sympy.Expr, dtype) -> str:
290
+ self._index_exprs.add(IndexExprDep(*self.canonicalize(index)))
291
+ return f"index_expr({sympy_str(index)}, {dtype})"
292
+
293
+ def bucketize(
294
+ self,
295
+ values,
296
+ offsets_name: str,
297
+ offsets_size: sympy.Expr,
298
+ indexing_dtype: torch.dtype,
299
+ right: bool,
300
+ ):
301
+ self._reads.add(StarDep(offsets_name))
302
+ return f"bucketize({values}, {offsets_name}, {sympy_str(offsets_size)}, {indexing_dtype}, {right})"
303
+
304
+
305
+ class _OpCounter:
306
+ """Shim to count how many times each op is used"""
307
+
308
+ def __init__(self, inner):
309
+ super().__init__()
310
+ self.parent_handler = inner
311
+ self._op_counts: typing.Counter[Any] = collections.Counter()
312
+
313
+ def __getattr__(self, name):
314
+ self._op_counts[name] += 1
315
+ return getattr(self.parent_handler, name)
316
+
317
+
318
+ class RecordLoadStore(V.KernelFormatterHandler): # type: ignore[name-defined]
319
+ def __init__(self, var_ranges: VarRanges, normalize: bool):
320
+ parent_handler = _RecordLoadStoreInner(
321
+ var_ranges=var_ranges, normalize=normalize
322
+ )
323
+ parent_handler = _OpCounter(parent_handler)
324
+ super().__init__(parent_handler=parent_handler)
325
+
326
+
327
+ def var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
328
+ cnt = itertools.count()
329
+ var_ranges: VarRanges = dict()
330
+
331
+ def add_var(length: sympy.Expr) -> sympy.Symbol:
332
+ v = sympy_index_symbol(f"{prefix}{next(cnt)}")
333
+ var_ranges[v] = length
334
+ return v
335
+
336
+ return var_ranges, add_var
337
+
338
+
339
+ def index_vars_no_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str):
340
+ var_ranges, add_var = var_builder(prefix)
341
+ args: List[List[sympy.Symbol]] = []
342
+ for size in argsizes:
343
+ args.append(list(map(add_var, size)))
344
+ return args, var_ranges
345
+
346
+
347
+ def index_vars_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str = "d"):
348
+ from .ir import SqueezeView
349
+
350
+ var_ranges, add_var = var_builder(prefix)
351
+ args: List[List[sympy.Expr]] = []
352
+ new_sizes: List[List[sympy.Expr]] = []
353
+ for size in argsizes:
354
+ new_size, reindex = SqueezeView.squeezer(size)
355
+ new_sizes.append(new_size)
356
+ args.append(reindex(list(map(add_var, new_size))))
357
+ return args, var_ranges
358
+
359
+
360
+ def extract_read_writes(
361
+ fn: Callable[..., Any],
362
+ *argsizes: Tuple[sympy.Expr, ...],
363
+ normalize: bool = False,
364
+ prefix: str = "d",
365
+ ):
366
+ args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)
367
+ rw = RecordLoadStore(var_ranges, normalize=normalize)
368
+ with V.set_ops_handler(rw):
369
+ fn(*args)
370
+
371
+ if normalize:
372
+ range_vars = [] # Number of vars could differ due to normalization
373
+ else:
374
+ range_vars = list(itertools.chain.from_iterable(args))
375
+
376
+ inner = rw.parent_handler.parent_handler
377
+ return ReadWrites(
378
+ set(inner._reads),
379
+ set(inner._writes),
380
+ inner._index_exprs,
381
+ range_vars,
382
+ var_ranges,
383
+ rw.parent_handler._op_counts,
384
+ )
385
+
386
+
387
+ def extract_input_node_reduction_ranges(
388
+ input_node: "torch._inductor.ir.TensorBox",
389
+ ) -> Tuple[Optional[List[sympy.Expr]], Optional[List[sympy.Expr]]]:
390
+ """
391
+ Returns the size and reduction size of all inputs, if the sizes and reduction_sizes (if exist) are all the same.
392
+ It's possible that a node has multiple inputs, some are Reduction nodes and others are Pointwise nodes.
393
+ In this case, reduction_sizes of the Reduction nodes need to be the same.
394
+ Otherwise returns (None, None).
395
+ """
396
+
397
+ from .ir import ComputedBuffer, Loops
398
+
399
+ if isinstance(input_node.data, ComputedBuffer):
400
+ # Input node has already been realized. Return its size and reduction_size.
401
+ size = input_node.get_size()
402
+ reduction_size = input_node.get_reduction_size()
403
+ if len(reduction_size) > 0:
404
+ return (size, reduction_size)
405
+ else:
406
+ return (None, None)
407
+
408
+ if not isinstance(input_node.data.data, Loops): # type: ignore[attr-defined]
409
+ # Other IRNodes do not have reduction_ranges.
410
+ return (None, None)
411
+
412
+ # There is one issue: what if there are views / permutations between the input node and its dependent realized nodes?
413
+ # The current method still uses reduction ranges from the dependent realized node, which is not ideal.
414
+ # Is there a way to check whether there are permutations inbetween?
415
+ reads = input_node.get_reads()
416
+ reduction_size = None
417
+ size = None
418
+ while reduction_size is None and len(reads) > 0:
419
+ seen = set()
420
+ new_reads = []
421
+ for read in reads:
422
+ if not isinstance(read, MemoryDep):
423
+ continue
424
+ if read.name in seen:
425
+ continue
426
+ seen.add(read.name)
427
+ buffer = V.graph.get_buffer(read.name)
428
+ if buffer is None:
429
+ continue
430
+ if (
431
+ isinstance(buffer, ComputedBuffer)
432
+ and len(buffer.get_reduction_size()) > 0
433
+ ):
434
+ if reduction_size is None:
435
+ reduction_size = buffer.get_reduction_size()
436
+ size = buffer.get_size()
437
+ elif (
438
+ reduction_size != buffer.get_reduction_size()
439
+ or size != buffer.get_size()
440
+ ):
441
+ return (None, None)
442
+ else:
443
+ new_reads.extend(buffer.get_reads())
444
+ if reads == new_reads:
445
+ return (size, reduction_size)
446
+ else:
447
+ reads = new_reads
448
+ return (size, reduction_size)
449
+
450
+
451
+ def canonicalization_prefix():
452
+ return "c"
453
+
454
+
455
+ # ops handler which computes all the free unbacked symbols for an IR
456
+ class FreeUnbackedSymbolsOpsHandler:
457
+ symbols: Set[sympy.Symbol]
458
+
459
+ def __init__(self):
460
+ self.symbols = set()
461
+
462
+ def __getattr__(self, name: str) -> Callable[..., Any]:
463
+ def inner(*args, **kwargs):
464
+ for a in itertools.chain(args, kwargs.values()):
465
+ if isinstance(a, (sympy.Expr, sympy.logic.boolalg.Boolean)):
466
+ self.symbols |= free_unbacked_symbols(a)
467
+
468
+ return inner
469
+
470
+ def indirect_indexing(self, index_var, size, check=True) -> sympy.Symbol:
471
+ assert not isinstance(index_var, (sympy.Expr, sympy.logic.boolalg.Boolean))
472
+ self.symbols |= free_unbacked_symbols(size)
473
+ return sympy_index_symbol(f"({str(index_var)})")
474
+
475
+ def frexp(self, x):
476
+ return (None,) * 2
477
+
478
+ def reduction(
479
+ self,
480
+ dtype: torch.dtype,
481
+ src_dtype: torch.dtype,
482
+ reduction_type: ReductionType,
483
+ value: Union[None, Tuple[None, ...]],
484
+ ) -> Union[None, Tuple[None, ...]]:
485
+ num_values = reduction_num_outputs(reduction_type)
486
+ return (None,) * num_values if num_values > 1 else None
487
+
488
+
489
+ def _typecheck_FreeUnbackedSymbolsOpsHandler(
490
+ h: FreeUnbackedSymbolsOpsHandler,
491
+ ) -> OpsHandler[None]:
492
+ return h
493
+
494
+
495
+ def extract_free_unbacked_symbols(fn: Callable[..., Any], index, rindex=None):
496
+ from .ir import FlexibleLayout
497
+
498
+ args = [index, rindex] if rindex is not None else [index]
499
+ handler = FreeUnbackedSymbolsOpsHandler()
500
+ # NB: I cargo culted the allow_indexing patch here, I don't understand why
501
+ # people do this all over
502
+ with V.set_ops_handler(handler), patch.object(
503
+ FlexibleLayout, "allow_indexing", True
504
+ ):
505
+ fn(*args)
506
+ return handler.symbols
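For orientation, a small sketch of the MemoryDep record defined above, built directly from sympy symbols (the buffer name buf0 and the symbols d0/d1 are made up for the example). ranges pairs each iteration variable with its extent, and get_numel multiplies the extents of the variables that actually occur in the index expression.

import sympy
from torch._inductor.dependencies import MemoryDep

d0, d1 = sympy.Symbol("d0"), sympy.Symbol("d1")
dep = MemoryDep(
    "buf0",                       # hypothetical buffer name
    d0 + 16 * d1,                 # flattened index expression
    var_names=(d0, d1),
    size=(sympy.Integer(16), sympy.Integer(8)),
)
print(dep.ranges)           # {d0: 16, d1: 8}
print(dep.get_numel())      # 128
print(dep.is_contiguous())  # False: the index is not a bare iteration variable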
venv/lib/python3.10/site-packages/torch/_inductor/exc.py ADDED
@@ -0,0 +1,98 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import tempfile
5
+ import textwrap
6
+ from functools import lru_cache
7
+
8
+ if os.environ.get("TORCHINDUCTOR_WRITE_MISSING_OPS") == "1":
9
+
10
+ @lru_cache(None)
11
+ def _record_missing_op(target):
12
+ with open(f"{tempfile.gettempdir()}/missing_ops.txt", "a") as fd:
13
+ fd.write(str(target) + "\n")
14
+
15
+ else:
16
+
17
+ def _record_missing_op(target): # type: ignore[misc]
18
+ pass
19
+
20
+
21
+ class OperatorIssue(RuntimeError):
22
+ @staticmethod
23
+ def operator_str(target, args, kwargs):
24
+ lines = [f"target: {target}"] + [
25
+ f"args[{i}]: {arg}" for i, arg in enumerate(args)
26
+ ]
27
+ if kwargs:
28
+ lines.append(f"kwargs: {kwargs}")
29
+ return textwrap.indent("\n".join(lines), " ")
30
+
31
+
32
+ class MissingOperatorWithoutDecomp(OperatorIssue):
33
+ def __init__(self, target, args, kwargs):
34
+ _record_missing_op(target)
35
+ super().__init__(f"missing lowering\n{self.operator_str(target, args, kwargs)}")
36
+
37
+
38
+ class MissingOperatorWithDecomp(OperatorIssue):
39
+ def __init__(self, target, args, kwargs):
40
+ _record_missing_op(target)
41
+ super().__init__(
42
+ f"missing decomposition\n{self.operator_str(target, args, kwargs)}"
43
+ + textwrap.dedent(
44
+ f"""
45
+
46
+ There is a decomposition available for {target} in
47
+ torch._decomp.get_decompositions(). Please add this operator to the
48
+ `decompositions` list in torch._inductor.decompositions
49
+ """
50
+ )
51
+ )
52
+
53
+
54
+ class LoweringException(OperatorIssue):
55
+ def __init__(self, exc: Exception, target, args, kwargs):
56
+ super().__init__(
57
+ f"{type(exc).__name__}: {exc}\n{self.operator_str(target, args, kwargs)}"
58
+ )
59
+
60
+
61
+ class InvalidCxxCompiler(RuntimeError):
62
+ def __init__(self):
63
+ from . import config
64
+
65
+ super().__init__(
66
+ f"No working C++ compiler found in {config.__name__}.cpp.cxx: {config.cpp.cxx}"
67
+ )
68
+
69
+
70
+ class CppWrapperCodeGenError(RuntimeError):
71
+ def __init__(self, msg: str):
72
+ super().__init__(f"C++ wrapper codegen error: {msg}")
73
+
74
+
75
+ class CppCompileError(RuntimeError):
76
+ def __init__(self, cmd: list[str], output: str):
77
+ if isinstance(output, bytes):
78
+ output = output.decode("utf-8")
79
+
80
+ super().__init__(
81
+ textwrap.dedent(
82
+ """
83
+ C++ compile error
84
+
85
+ Command:
86
+ {cmd}
87
+
88
+ Output:
89
+ {output}
90
+ """
91
+ )
92
+ .strip()
93
+ .format(cmd=" ".join(cmd), output=output)
94
+ )
95
+
96
+
97
+ class CUDACompileError(CppCompileError):
98
+ pass
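As a quick illustration of the message formatting that operator_str provides for all of these exceptions (the target and arguments below are arbitrary):

import torch
from torch._inductor.exc import MissingOperatorWithoutDecomp

try:
    raise MissingOperatorWithoutDecomp(
        torch.ops.aten.nonzero.default,  # example target
        (torch.randn(3),),               # example args
        {},
    )
except MissingOperatorWithoutDecomp as exc:
    print(exc)  # "missing lowering" followed by the indented target/args listing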
venv/lib/python3.10/site-packages/torch/_inductor/freezing.py ADDED
@@ -0,0 +1,266 @@
1
+ from __future__ import annotations
2
+
3
+ import itertools
4
+ import logging
5
+
6
+ import weakref
7
+ from typing import Any, List, Optional, Tuple
8
+
9
+ import torch
10
+ import torch.utils._pytree as pytree
11
+ from torch._dynamo.utils import dynamo_timed, lazy_format_graph_code
12
+ from torch._functorch.aot_autograd import MutationType
13
+ from torch._functorch.compile_utils import fx_graph_cse
14
+ from torch._inductor.constant_folding import constant_fold, replace_node_with_constant
15
+
16
+ from torch._inductor.fx_passes.freezing_patterns import freezing_passes
17
+ from torch._inductor.fx_passes.post_grad import view_to_reshape
18
+
19
+ from . import config
20
+
21
+ aten = torch.ops.aten
22
+ prims = torch.ops.prims
23
+
24
+ log = logging.getLogger(__name__)
25
+
26
+
27
+ def replace_params_with_constants(
28
+ gm: torch.fx.GraphModule,
29
+ flat_params: list[Any],
30
+ fw_metadata: torch._functorch.aot_autograd.ViewAndMutationMeta,
31
+ ) -> List[int]:
32
+ """
33
+ Replaces the parameters of a PyTorch GraphModule with constants wherever possible.
34
+ Returns a list of indices representing the input parameters that were not converted to constants.
35
+ """
36
+ params = [node for node in gm.graph.nodes if node.op == "placeholder"]
37
+ fake_inp_nodes = params[: len(params)]
38
+ preserved_arg_indices = []
39
+ aliased_input_args = [
40
+ out_info.base_idx
41
+ for out_info in fw_metadata.output_info
42
+ if out_info.base_idx is not None
43
+ ]
44
+
45
+ # TODO (tmanlaibaatar) figure out why this is different
46
+ # from mutated_inp_runtime_indices
47
+ mutated_inps = [
48
+ i
49
+ for i, m in enumerate(fw_metadata.input_info)
50
+ if m.mutation_type
51
+ in (MutationType.MUTATED_IN_GRAPH, MutationType.MUTATED_OUT_GRAPH)
52
+ ]
53
+
54
+ for i, (real_input, node) in enumerate(zip(flat_params, fake_inp_nodes)):
55
+ if i in mutated_inps or i in aliased_input_args:
56
+ preserved_arg_indices.append(i)
57
+ continue
58
+ replace_node_with_constant(gm, node, real_input)
59
+ # add on non param inputs
60
+ preserved_arg_indices.extend(range(len(flat_params), len(params)))
61
+ # is this necessary ?
62
+ gm.recompile()
63
+ return preserved_arg_indices
64
+
65
+
66
+ def freeze(
67
+ dynamo_gm: torch.fx.GraphModule,
68
+ aot_autograd_gm: torch.fx.GraphModule,
69
+ example_inputs: List[torch._subclasses.FakeTensor],
70
+ ) -> Tuple[torch.fx.GraphModule, List[int]]:
71
+ """
72
+ Inlines parameters that are not mutated into constants and optimizes the graph through constant propagation
73
+ and other techniques. If enabled, the function also discards the original parameters of the module for memory efficiency.
74
+
75
+ Assumes that this function is run in dynamo tracing post aot_autograd.
76
+
77
+ Args:
78
+ dynamo_gm (torch.fx.GraphModule): The Dynamo constructed GraphModule.
79
+ aot_autograd_gm (torch.fx.GraphModule): The aot_autograd constructed GraphModule to be frozen.
80
+ example_inputs (List[torch.Tensor]): A list of example input tensors to be used in the freezing process.
81
+
82
+ Returns:
83
+ Tuple[torch.fx.GraphModule, List[int]]: A tuple containing the frozen GraphModule and a list of indices
84
+ of the inputs that were preserved (not turned into constants).
85
+ """
86
+ # We have convert conv's weight to channels last which may meet error for .view
87
+ # when doing fake_tensor_prop. So we need to convert view to reshape first.
88
+ # See the details in fx_codegen_and_compile of compile_fx.py.
89
+ view_to_reshape(aot_autograd_gm)
90
+
91
+ if tracing_context := torch._guards.TracingContext.try_get():
92
+ fw_metadata = tracing_context.fw_metadata
93
+ params_flat = tracing_context.params_flat
94
+ assert fw_metadata is not None and params_flat is not None
95
+
96
+ preserved_arg_indices = replace_params_with_constants(
97
+ aot_autograd_gm, params_flat, fw_metadata
98
+ )
99
+ else:
100
+ inputs = [
101
+ node for node in aot_autograd_gm.graph.nodes if node.op == "placeholder"
102
+ ]
103
+ preserved_arg_indices = list(range(len(inputs)))
104
+
105
+ # TODO - further restrict cse ? right now needed to dedup aliasing ops
106
+ cse_graph = fx_graph_cse(aot_autograd_gm.graph)
107
+ aot_autograd_gm.graph = cse_graph
108
+ aot_autograd_gm.recompile()
109
+
110
+ aot_example_inputs = [example_inputs[ind] for ind in preserved_arg_indices]
111
+ freezing_passes(aot_autograd_gm, aot_example_inputs)
112
+
113
+ constant_fold(aot_autograd_gm)
114
+ # invalidate nn Modules
115
+ if config.freezing_discard_parameters:
116
+ invalidate_eager_modules()
117
+ discard_traced_gm_params(dynamo_gm)
118
+
119
+ log.debug("%s", lazy_format_graph_code("FROZEN GRAPH", aot_autograd_gm))
120
+
121
+ return aot_autograd_gm, preserved_arg_indices
122
+
123
+
124
+ class ErasedTensor(torch.Tensor):
125
+ @staticmethod
126
+ def __new__(cls, elem, name, owning_mod):
127
+ return super().__new__(cls, elem.to(device="meta"))
128
+
129
+ def __init__(self, elem, name: Optional[str], mod):
130
+ self.erased_name = name
131
+ self.owning_mod_ref = weakref.ref(mod)
132
+
133
+ @classmethod
134
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
135
+ erased_tensors = [
136
+ e
137
+ for e in pytree.arg_tree_leaves(*args, **kwargs)
138
+ if isinstance(e, ErasedTensor)
139
+ ]
140
+ assert len(erased_tensors) > 0
141
+ e = erased_tensors[0]
142
+
143
+ raise RuntimeError(
144
+ f"Trying to run Pytorch Eager Module after Dynamo Freezing. "
145
+ "The original parameters have been discarded for memory efficiency. "
146
+ f"Found in op {func} for erased parameter {e.erased_name} of {e.owning_mod_ref()}"
147
+ )
148
+
149
+
150
+ @torch.utils._python_dispatch._disable_current_modes()
151
+ def invalidate_eager_modules():
152
+ for mod in torch._guards.TracingContext.get().module_context.nn_modules.values():
153
+ if not isinstance(mod, torch.nn.Module):
154
+ continue
155
+
156
+ for attr_name, tensor in list(
157
+ itertools.chain(
158
+ mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
159
+ )
160
+ ):
161
+ with torch._dispatch.python.no_python_dispatcher():
162
+ e_t = ErasedTensor(tensor, attr_name, mod)
163
+ if isinstance(tensor, torch.nn.Parameter):
164
+ e_t.requires_grad_(True)
165
+ e_t._is_param = True # type: ignore[attr-defined]
166
+ setattr(mod, attr_name, e_t)
167
+
168
+
169
+ @torch.utils._python_dispatch._disable_current_modes()
170
+ def discard_traced_gm_params(mod: torch.fx.GraphModule):
171
+ for attr_name, tensor in list(
172
+ itertools.chain(
173
+ mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
174
+ )
175
+ ):
176
+ with torch._dispatch.python.no_python_dispatcher():
177
+ e_t = ErasedTensor(tensor, attr_name, mod)
178
+ if isinstance(tensor, torch.nn.Parameter):
179
+ e_t.requires_grad_(True)
180
+ e_t._is_param = True # type: ignore[attr-defined]
181
+ setattr(mod, attr_name, e_t)
182
+
183
+
184
+ def enforce_output_layout(gm: torch.fx.GraphModule):
185
+ """
186
+ Make sure the output node's layout does not change due to compiler optimizations
187
+ by adding aten.as_strided nodes with the expected strides.
188
+
189
+ Only used for inference so we can assume all graph outputs are model outputs.
190
+ """
191
+ *_, output_node = gm.graph.nodes
192
+ out_list = output_node.args[0]
193
+ with gm.graph.inserting_before(output_node):
194
+ for n in out_list:
195
+ if not isinstance(
196
+ n.meta["val"], torch.Tensor
197
+ ) or not torch._prims_common.is_non_overlapping_and_dense(n.meta["val"]):
198
+ continue
199
+
200
+ # add a node to enforce eager layout
201
+ ft = n.meta["val"]
202
+ new_node = gm.graph.call_function(
203
+ prims.inductor_force_stride_order.default, (n, ft.stride())
204
+ )
205
+
206
+ # can not call
207
+ # n.replace_all_uses_with(new_node)
208
+ # since it will replace the usage of n in new_node itself.
209
+ output_node.replace_input_with(n, new_node)
210
+
211
+ gm.graph.lint()
212
+ gm.recompile()
213
+
214
+
215
+ def enforce_as_strided_input_layout(gm: torch.fx.GraphModule):
216
+ """
217
+ Make sure the as_strided node's input's layout does not change due to compiler
218
+ optimizations, because the as_strided strides info depends on input tensor stride info.
219
+ """
220
+
221
+ as_strided_ops = [
222
+ torch.ops.aten.as_strided.default,
223
+ torch.ops.aten.as_strided_.default,
224
+ torch.ops.aten.as_strided_scatter.default,
225
+ ]
226
+ strided_nodes = [n for n in gm.graph.nodes if n.target in as_strided_ops]
227
+ for n in strided_nodes:
228
+ with gm.graph.inserting_before(n):
229
+ # add a node to enforce eager layout
230
+ ft = n.args[0].meta["val"]
231
+ new_node = gm.graph.call_function(
232
+ prims.inductor_force_stride_order.default, (n.args[0], ft.stride())
233
+ )
234
+ n.replace_input_with(n.args[0], new_node)
235
+
236
+ gm.graph.lint()
237
+ gm.recompile()
238
+
239
+
240
+ @dynamo_timed
241
+ def convert_conv_weights_to_channels_last(gm: torch.fx.GraphModule):
242
+ """
243
+ Convert 4d convolution weight tensor to channels last format.
244
+
245
+ This pass is performed before freezing so the added nodes can be constant
246
+ folded by freezing.
247
+ """
248
+ convs = [n for n in gm.graph.nodes if n.target == aten.convolution.default]
249
+ for conv in convs:
250
+ weight_node = conv.args[1]
251
+ if len(weight_node.meta["val"].size()) != 4 or weight_node.meta[
252
+ "val"
253
+ ].is_contiguous(memory_format=torch.channels_last):
254
+ # not a 4d tensor or already channels last, skip
255
+ continue
256
+
257
+ with gm.graph.inserting_before(conv):
258
+ new_node = gm.graph.call_function(
259
+ aten.clone.default,
260
+ (weight_node,),
261
+ {"memory_format": torch.channels_last},
262
+ )
263
+ conv.replace_input_with(weight_node, new_node)
264
+
265
+ enforce_as_strided_input_layout(gm)
266
+ enforce_output_layout(gm)
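In normal use freeze() is not called directly; it runs inside the inductor compile path when freezing is enabled in the config. A minimal sketch, assuming an inference-only module and that torch._inductor.config.freezing gates this pass as in current releases:

import torch
import torch._inductor.config as inductor_config

inductor_config.freezing = True  # turn on the freeze() pass during compilation

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
compiled = torch.compile(model)

with torch.no_grad():
    out = compiled(torch.randn(1, 3, 32, 32))  # parameters are folded into constants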
venv/lib/python3.10/site-packages/torch/_inductor/fx_utils.py ADDED
@@ -0,0 +1,220 @@
1
+ import operator
2
+ from collections import defaultdict
3
+ from typing import Any, Callable, DefaultDict, Dict, Optional, Tuple, Type
4
+
5
+ import torch
6
+ import torch.fx
7
+ from torch.fx.experimental.symbolic_shapes import statically_known_true, sym_eq
8
+ from torch.utils import _pytree as pytree
9
+ from torch.utils._pytree import tree_map
10
+ from .virtualized import V
11
+
12
+
13
+ # Check the pattern: (nn.module, F.function/torch.Tensor.method) matched.
14
+ # Works for length 2 patterns with 1 module and 1 function/method.
15
+ def matches_module_function_pattern(
16
+ pattern: Tuple[Type[torch.nn.modules.Module], Callable[..., Any]],
17
+ node: torch.fx.node.Node,
18
+ modules: Dict[str, torch.nn.modules.Module],
19
+ ) -> bool:
20
+ if len(node.args) == 0:
21
+ return False
22
+ if not isinstance(node.args[0], torch.fx.Node) or not isinstance(
23
+ node, torch.fx.Node
24
+ ):
25
+ return False
26
+ # the first node is call_module
27
+ if node.args[0].op != "call_module":
28
+ return False
29
+ if not isinstance(node.args[0].target, str):
30
+ return False
31
+ if node.args[0].target not in modules:
32
+ return False
33
+ if type(modules[node.args[0].target]) is not pattern[0]:
34
+ return False
35
+ # the second node is call_function or call_method
36
+ if node.op != "call_function" and node.op != "call_method":
37
+ return False
38
+ if node.target != pattern[1]:
39
+ return False
40
+ # make sure node.args[0] output is only used by current node.
41
+ if len(node.args[0].users) > 1:
42
+ return False
43
+ return True
44
+
45
+
46
+ class FakeTensorUpdater:
47
+ """
48
+ The main idea here is that it's difficult to maintain accurate fake
49
+ tensors (our primary form of metadata) for each node in our graph as we
50
+ transform it.
51
+
52
+ The most reliable way to obtain this information is by rerunning
53
+ faketensor propagation. However, in general, faketensor propagation is
54
+ fairly expensive. So, instead we'd like to only rerun faketensor
55
+ propagation on nodes that have changed.
56
+
57
+ In order to detect which nodes have changed, we first hash its node,
58
+ target, and argument lists (which are immutable in FX).
59
+
60
+ Then, whenever we call incremental_update, we check which FX nodes have a
61
+ new hash, and recompute the faketensor metadata for that node. Then, we
62
+ continue to recursively compute the faketensors for all users until the
63
+ fake tensors stop changing.
64
+ """
65
+
66
+ def __init__(self, graph: torch.fx.Graph):
67
+ self.processed_hashes = set()
68
+ self.graph = graph
69
+
70
+ for node in self.graph.nodes:
71
+ self.processed_hashes.add(self.hash_node(node))
72
+
73
+ def hash_node(self, node: torch.fx.Node):
74
+ # todo(chilli): Not a great hash function
75
+ return (node, node.target, id(node.args), id(node.kwargs))
76
+
77
+ def incremental_update(self):
78
+ processed = set()
79
+ existing_storages: DefaultDict[Optional[int], int] = defaultdict(int)
80
+ for node in self.graph.nodes:
81
+ existing_storages[get_node_storage(node)] += 1
82
+
83
+ def is_intlist_same(new, old):
84
+ return statically_known_true(sym_eq(new, old))
85
+
86
+ def is_fake_tensor_same(new, old):
87
+ if type(new) != type(old):
88
+ return False
89
+ if isinstance(new, (list, tuple)):
90
+ if len(new) != len(old):
91
+ return False
92
+ return all(
93
+ is_fake_tensor_same(new_i, old_i) for new_i, old_i in zip(new, old)
94
+ )
95
+ assert isinstance(new, torch.Tensor)
96
+ if not is_intlist_same(new.shape, old.shape) or new.layout != old.layout:
97
+ return False
98
+ if new.layout == torch.strided and (
99
+ not is_intlist_same(new.stride(), old.stride())
100
+ or not statically_known_true(
101
+ new.storage_offset() == old.storage_offset()
102
+ )
103
+ ):
104
+ return False
105
+
106
+ if get_storage(new) == get_storage(old):
107
+ return True
108
+
109
+ # This is the case where it returns a completely fresh storage that's used nowhere else.
110
+ if (
111
+ existing_storages[get_storage(old)] == 1
112
+ and get_storage(new) not in existing_storages
113
+ ):
114
+ return True
115
+ return False
116
+
117
+ for node in self.graph.nodes:
118
+ if self.hash_node(node) in self.processed_hashes:
119
+ continue
120
+
121
+ def is_aten_node(node):
122
+ return node.op == "call_function" and isinstance(
123
+ node.target, torch._ops.OpOverload
124
+ )
125
+
126
+ if not is_aten_node(node):
127
+ continue
128
+
129
+ processing = [node]
130
+ while len(processing) > 0:
131
+ updating_node = processing.pop()
132
+ if updating_node in processed:
133
+ continue
134
+ if is_aten_node(updating_node):
135
+ continue
136
+
137
+ is_valid, args, kwargs = get_fake_args_kwargs(updating_node)
138
+ if not is_valid:
139
+ continue
140
+ with V.fake_mode:
141
+ new_fake_tensor = updating_node.target(*args, **kwargs)
142
+ if "val" in updating_node.meta and is_fake_tensor_same(
143
+ new_fake_tensor, updating_node.meta["val"]
144
+ ):
145
+ continue
146
+ updating_node.meta["val"] = new_fake_tensor
147
+
148
+ # todo(chilli): This code path is not exercised by our existing
149
+ # tests - add a test
150
+ existing_storages[get_node_storage(new_fake_tensor)] += 1
151
+ processed.add(updating_node)
152
+ processing.extend(updating_node.users)
153
+
154
+ self.processed_hashes.add(self.hash_node(updating_node))
155
+
156
+
157
+ def get_storage(t: torch.Tensor) -> int:
158
+ return t.untyped_storage()._cdata
159
+
160
+
161
+ def get_node_storage(node: torch.fx.Node) -> Optional[int]:
162
+ if "val" not in node.meta:
163
+ return None
164
+ if not isinstance(node.meta["val"], torch.Tensor):
165
+ return None
166
+ if not torch._C._has_storage(node.meta["val"]):
167
+ return None
168
+ return get_storage(node.meta["val"])
169
+
170
+
171
+ def get_fake(x):
172
+ if isinstance(x, torch.fx.Node):
173
+ if "val" not in x.meta:
174
+ return x
175
+ return x.meta["val"]
176
+ return x
177
+
178
+
179
+ def get_fake_args_kwargs(x: torch.fx.Node) -> Tuple[bool, Tuple[Any], Dict[str, Any]]:
180
+ """
181
+ The first return value is False if any of the input nodes is missing a fake tensor.
182
+ """
183
+ args, kwargs = tree_map(get_fake, (x.args, x.kwargs))
184
+ if any(
185
+ isinstance(a, torch.fx.Node) for a in pytree.arg_tree_leaves(*args, **kwargs)
186
+ ):
187
+ return False, args, kwargs
188
+ return True, args, kwargs
189
+
190
+
191
+ def is_node_realized(node: torch.fx.Node) -> bool:
192
+ """Returns true if a node is always realized when lowered to inductor IR.
193
+
194
+ NOTE: This may return some false negatives. e.g. it doesn't
195
+ handle buffers realized heuristically during lowering, or
196
+ buffers realized indirectly through view ops.
197
+ """
198
+ from torch._inductor.lowering import fallbacks, needs_realized_inputs
199
+
200
+ def is_buffer(node: torch.fx.Node) -> bool:
201
+ if node.op == "call_function" and node.target is operator.getitem:
202
+ # For nodes with multiple outputs, we get the fx graph:
203
+ # foo = torch.ops.aten.foo(...)
204
+ # getitem = foo[0]
205
+ # getitem_1 = foo[1]
206
+ # where we need to check if foo is a fallback kernel
207
+ return is_buffer(node.args[0]) # type: ignore[arg-type]
208
+ return node.op in ("placeholder", "output") or node.target in fallbacks
209
+
210
+ if is_buffer(node):
211
+ return True
212
+
213
+ def realizes_inputs(node: torch.fx.Node) -> bool:
214
+ return node.op == "output" or node.target in needs_realized_inputs
215
+
216
+ if any(realizes_inputs(user) for user in node.users):
217
+ return True
218
+
219
+ # Otherwise, assume node isn't realized
220
+ return False
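+ # A minimal sketch of how these helpers might be combined in a pass (`gm` is an
+ # assumed torch.fx.GraphModule, not something defined in this file):
+ #
+ #   for n in gm.graph.nodes:
+ #       ok, args, kwargs = get_fake_args_kwargs(n)
+ #       if ok and is_node_realized(n):
+ #           ...  # safe to reason about n as a realized buffer, e.g. via get_node_storage(n)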
venv/lib/python3.10/site-packages/torch/_inductor/graph.py ADDED
@@ -0,0 +1,1324 @@
1
+ import itertools
2
+ import logging
3
+ import operator
4
+ import os
5
+ import re
6
+ import sys
7
+ import time
8
+ from collections import defaultdict
9
+ from contextlib import contextmanager
10
+ from typing import Any, Callable, DefaultDict, Dict, List, Optional, Set, Tuple
11
+
12
+ import sympy
13
+
14
+ import torch
15
+ import torch._logging
16
+ import torch.fx
17
+ from torch._decomp import get_decompositions
18
+ from torch._dynamo.utils import defake, dynamo_timed
19
+ from torch._logging import LazyString, trace_structured
20
+ from torch._subclasses.fake_tensor import FakeTensor
21
+ from torch.fx.experimental._backward_state import BackwardState
22
+ from torch.fx.experimental.sym_node import magic_methods, method_to_operator
23
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols, ShapeEnv, SymTypes
24
+ from torch.utils._mode_utils import no_dispatch
25
+
26
+ from . import config, ir
27
+ from .codegen.common import (
28
+ DeviceOpOverrides,
29
+ get_device_op_overrides,
30
+ get_scheduling_for_device,
31
+ get_wrapper_codegen_for_device,
32
+ register_backend_for_device,
33
+ )
34
+ from .codegen.cpp_wrapper_cpu import CppWrapperCpu
35
+ from .codegen.cpp_wrapper_cuda import CppWrapperCuda
36
+ from .codegen.wrapper import WrapperCodeGen
37
+ from .exc import (
38
+ CppWrapperCodeGenError,
39
+ LoweringException,
40
+ MissingOperatorWithDecomp,
41
+ MissingOperatorWithoutDecomp,
42
+ )
43
+ from .ir import (
44
+ Constant,
45
+ FixedLayout,
46
+ InputBuffer,
47
+ Pointwise,
48
+ Reduction,
49
+ StorageBox,
50
+ TensorBox,
51
+ )
52
+ from .lowering import (
53
+ constrain_to_fx_strides,
54
+ FALLBACK_ALLOW_LIST,
55
+ fallback_handler,
56
+ fallback_node_due_to_unsupported_type,
57
+ layout_constraints,
58
+ lowerings,
59
+ make_fallback,
60
+ needs_realized_inputs,
61
+ unsupported_output_tensor,
62
+ )
63
+ from .sizevars import SizeVarAllocator
64
+ from .utils import convert_shape_to_inductor, gather_origins, get_sympy_Expr_dtype
65
+ from .virtualized import V
66
+
67
+ log = logging.getLogger(__name__)
68
+ perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
69
+ output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")
70
+
71
+
72
+ if config.is_fbcode():
73
+ from torch._inductor.fb.utils import log_module_code
74
+ else:
75
+
76
+ def log_module_code(*args, **kwargs):
77
+ pass
78
+
79
+
80
+ def supported_dtype_of_cpp_wrapper(dtype, cuda):
81
+ supported_dtype = {
82
+ torch.float32,
83
+ torch.float64,
84
+ torch.int64,
85
+ torch.int32,
86
+ torch.int16,
87
+ torch.int8,
88
+ torch.uint8,
89
+ torch.bool,
90
+ torch.bfloat16,
91
+ torch.complex32,
92
+ torch.complex64,
93
+ torch.complex128,
94
+ torch.float16,
95
+ }
96
+ if cuda:
97
+ supported_dtype.add(torch.float8_e4m3fn)
98
+ supported_dtype.add(torch.float8_e5m2)
99
+ supported_dtype.add(torch.float8_e4m3fnuz)
100
+ supported_dtype.add(torch.float8_e5m2fnuz)
101
+
102
+ return dtype in supported_dtype
103
+
104
+
105
+ def may_get_constant_buffer_dtype(constant_buffer):
106
+ assert isinstance(
107
+ constant_buffer, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
108
+ ), "get_constant_buffer_dtype only supports input of sympy.Symbol, sympy.Expr or sympy.core.numbers.Integer"
109
+ if isinstance(constant_buffer, sympy.core.numbers.Integer):
110
+ return torch.int64
111
+
112
+ if isinstance(constant_buffer, sympy.Expr):
113
+ return get_sympy_Expr_dtype(constant_buffer)
114
+
115
+ if constant_buffer.is_integer:
116
+ return torch.int64
117
+ elif constant_buffer.is_float:
118
+ return torch.float32
119
+ else:
120
+ return None
121
+
122
+
123
+ def is_magic_method(op):
124
+ magic_ops = {method_to_operator(m) for m in magic_methods}
125
+ return op in magic_ops
126
+
127
+
128
+ def getattr_recursive(obj, target):
129
+ target_atoms = target.split(".")
130
+ attr_itr = obj
131
+ for i, atom in enumerate(target_atoms):
132
+ if not hasattr(attr_itr, atom):
133
+ raise RuntimeError(
134
+ f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}"
135
+ )
136
+ attr_itr = getattr(attr_itr, atom)
137
+ return attr_itr
138
+
139
+
140
+ class GraphLowering(torch.fx.Interpreter):
141
+ graph_outputs: List[ir.IRNode]
142
+
143
+ def symbolic_sizes_strides(self, ex: torch.Tensor):
144
+ """
145
+ Support dynamic shapes and dynamic strides by assigning variables
146
+ to each dimension. We duck-shape tensors, so if two tensors
147
+ have the same size they get assigned the same symbolic variable.
148
+ """
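+ # For example, under duck shaping two distinct contiguous (8, 16) inputs would both come
+ # back with sizes like (s0, s1) and strides like (s1, 1), sharing the same symbols.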
149
+ if self.reuse_shape_env:
150
+ return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
151
+ ex.stride()
152
+ )
153
+ else:
154
+ from torch._dynamo.source import ConstantSource
155
+
156
+ # TODO: this should not be needed once #93059 lands
157
+ # https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
158
+ # TODO: make a dedicated UnknownSource for this?
159
+ # NB: This is using the legacy default behavior from
160
+ # create_symbolic_sizes_strides_storage_offset but we hope we can
161
+ # just delete this entirely
162
+ source = ConstantSource(
163
+ f"__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}"
164
+ )
165
+ (
166
+ size,
167
+ stride,
168
+ _,
169
+ ) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
170
+ ex,
171
+ source,
172
+ )
173
+
174
+ size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
175
+ stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
176
+ return size, stride
177
+
178
+ def static_sizes_strides(self, ex: torch.Tensor):
179
+ """
180
+ Primarily used for weights.
181
+ """
182
+ size = [sympy.Integer(i) for i in ex.size()]
183
+ stride = [sympy.Integer(i) for i in ex.stride()]
184
+ return size, stride
185
+
186
+ def init_backend_registration(self):
187
+ if get_scheduling_for_device("cpu") is None:
188
+ from .codegen.cpp import CppScheduling
189
+
190
+ register_backend_for_device("cpu", CppScheduling, WrapperCodeGen)
191
+
192
+ if get_scheduling_for_device("cuda") is None:
193
+ from .codegen.cuda_combined_scheduling import CUDACombinedScheduling
194
+
195
+ # CUDACombinedScheduling combines Triton and CUDA C++ scheduling for CUDA devices via delegation
196
+ register_backend_for_device("cuda", CUDACombinedScheduling, WrapperCodeGen)
197
+
198
+ def __init__(
199
+ self,
200
+ gm: torch.fx.GraphModule,
201
+ example_inputs: Optional[List[torch.Tensor]] = None,
202
+ shape_env=None,
203
+ num_static_inputs=None,
204
+ graph_id=None,
205
+ cpp_wrapper=False,
206
+ aot_mode=False,
207
+ user_visible_outputs=frozenset(),
208
+ layout_opt=None,
209
+ extern_node_serializer=None,
210
+ is_inference=False,
211
+ is_const_graph=False,
212
+ const_output_index=None,
213
+ const_code=None,
214
+ const_module=None,
215
+ name=None,
216
+ ):
217
+ super().__init__(gm)
218
+
219
+ self.example_inputs = example_inputs
220
+ self.layout_opt = (
221
+ layout_opt
222
+ if layout_opt is not None
223
+ else self.decide_layout_opt(gm, is_inference=is_inference)
224
+ )
225
+ self.num_channels_last_conv = 0
226
+ self.is_inference = is_inference
227
+ self.is_const_graph = is_const_graph
228
+ self.const_code = const_code
229
+ self.const_module = const_module
230
+
231
+ self.extra_traceback = False # we do our own error wrapping
232
+ if shape_env is None:
233
+ shape_env = ShapeEnv()
234
+ self.reuse_shape_env = False
235
+ else:
236
+ self._shape_env = shape_env
237
+ self.reuse_shape_env = True
238
+ self._shape_env = shape_env
239
+ self.sizevars = SizeVarAllocator(shape_env)
240
+ self.graph_input_names: List[str] = []
241
+ self.graph_inputs: Dict[str, TensorBox] = {}
242
+ self.graph_inputs_original: Dict[str, InputBuffer] = {}
243
+ self.device_types: Set[str] = (
244
+ const_module.device_types if const_module else set()
245
+ )
246
+ self.device_idxs: Set[int] = const_module.device_idxs if const_module else set()
247
+ self.cuda = False
248
+ self.buffers: List[ir.Buffer] = []
249
+ self.const_output_index: Dict[str, int] = (
250
+ const_output_index if const_output_index else {}
251
+ )
252
+ self.folded_constants: Set[str] = (
253
+ set(const_output_index.keys()) if const_output_index else set()
254
+ )
255
+ self.constants: Dict[str, torch.Tensor] = (
256
+ const_module.constants if const_module else {}
257
+ )
258
+ self.constant_reprs: Dict[str, str] = {}
259
+ self.removed_buffers: Set[str] = set()
260
+ self.removed_inplace_buffers: Set[str] = set()
261
+ self.mutated_buffers: Set[str] = set()
262
+ self.never_reuse_buffers: Set[str] = set()
263
+ self.inplaced_to_remove: Set[str] = set()
264
+ self.device_ops: DeviceOpOverrides = None # type: ignore[assignment]
265
+ self.wrapper_code: WrapperCodeGen = None # type: ignore[assignment]
266
+ # See `ProxyExecutor Design Note` in ir.py for more details
267
+ self.extern_kernel_nodes: List[ir.ExternKernelNode] = []
268
+ self.extern_node_serializer: Optional[
269
+ Callable[[List[ir.ExternKernelNode]], Any]
270
+ ] = extern_node_serializer
271
+ self.current_node: torch.fx.Node = None # type: ignore[assignment]
272
+ self.num_static_inputs = num_static_inputs
273
+ self.lists: Dict[str, List[str]] = {}
274
+ self.mutated_inputs: Set[str] = set()
275
+ self.mutated_input_idxs: List[int] = []
276
+ self.name_to_buffer: Dict[str, ir.Buffer] = {}
277
+ self.name_to_users: DefaultDict[str, List[ir.IRNode]] = defaultdict(list)
278
+ self.creation_time = time.time()
279
+ self.name = name
280
+ self.cpp_wrapper = cpp_wrapper
281
+
282
+ # record multi_kernel choice for cpp_wrapper so the second pass knows
283
+ # which sub-kernel is picked. Copy cpp_wrapper to another variable
284
+ # since cpp_wrapper flag is set to false for the first pass of codegen.
285
+ self.record_multi_kernel_choice = cpp_wrapper
286
+ self.multi_kernel_to_choice: Dict[str, int] = {}
287
+
288
+ self.aot_mode = aot_mode
289
+ self.graph_id = graph_id
290
+ self.scheduler: "torch._inductor.scheduler.Scheduler" = None # type: ignore[assignment]
291
+ self.nodes_prefer_channels_last = (
292
+ self.find_nodes_prefer_channels_last() if self.layout_opt else set()
293
+ )
294
+ self._warned_fallback = {"aten.convolution_backward"}
295
+ self.user_visible_outputs = user_visible_outputs
296
+ self.cache_key: str = "" # This is the cache key for the compiled artifact
297
+ self.cache_path: str = "" # This is the path in the filesystem where the compiled artifact is stored
298
+ self.cache_linemap: List[
299
+ Tuple[int, str]
300
+ ] = (
301
+ []
302
+ ) # This is the linemap used by the profiler to mark custom compiled kernels getting run
303
+ # Used if lowering encounters cases where cudagraphs are not supported
304
+ self.disable_cudagraphs_reason: Optional[str] = None
305
+
306
+ # only keeping one node per device for stack trace purposes
307
+ self.device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
308
+ self.orig_gm: torch.fx.GraphModule = gm.__copy__()
309
+ self.dynamo_flat_name_to_original_fqn = self.module.meta.get(
310
+ "dynamo_flat_name_to_original_fqn", {}
311
+ )
312
+ self.allocated_constant_name = (
313
+ const_module.allocated_constant_name if const_module is not None else {}
314
+ )
315
+ self.init_backend_registration()
316
+
317
+ @staticmethod
318
+ def decide_layout_opt(gm, *, is_inference) -> bool:
319
+ """
320
+ Decide if we should enable layout optimization for this graph based on
321
+ heuristics.
322
+ """
323
+ if not config.layout_optimization:
324
+ return False
325
+
326
+ if config.force_layout_optimization:
327
+ return True
328
+
329
+ conv_nodes = [
330
+ n for n in gm.graph.nodes if n.target == torch.ops.aten.convolution.default
331
+ ]
332
+ nconv = len(conv_nodes)
333
+
334
+ if nconv == 0:
335
+ return False
336
+
337
+ # For cpu backend and mkldnn enabled, we always use channels_last for better performance.
338
+ if (
339
+ torch.backends.mkldnn.enabled
340
+ and torch.backends.mkldnn.is_available()
341
+ and all(
342
+ n.args[idx].meta["val"].device == torch.device("cpu")
343
+ for n in conv_nodes
344
+ for idx in [0, 1]
345
+ )
346
+ ):
347
+ return True
348
+
349
+ # Following models are skipped due to this:
350
+ # jx_nest_base
351
+ # volo_d1_224
352
+ if len(list(gm.graph.nodes)) >= 300 * nconv:
353
+ log.debug("Skipped layout opt because only a few conv")
354
+ return False
355
+
356
+ if any(
357
+ has_free_symbols(n.args[idx].meta["val"])
358
+ for n in conv_nodes
359
+ for idx in [0, 1]
360
+ ):
361
+ log.debug(
362
+ "See perf regression with dynamic shape. Follow up in https://github.com/pytorch/pytorch/issues/102670"
363
+ )
364
+ return False
365
+
366
+ def is_grouped(n):
367
+ return n.args[-1] > 1 and n.args[1].meta["val"].size(1) > 1
368
+
369
+ def is_in_out_channel(n):
370
+ return (
371
+ n.args[1].meta["val"].size(0) * 2 <= n.args[1].meta["val"].size(1)
372
+ and n.args[1].meta["val"].size(2) > 1
373
+ )
374
+
375
+ def is_small_channel(n):
376
+ return (
377
+ n.args[1].meta["val"].size(0) <= 64
378
+ and n.args[1].meta["val"].size(1) <= 64
379
+ )
380
+
381
+ # in the conv sample benchmarks for inference, only grouped convolutions were slower with channels last
382
+ if is_inference:
383
+ from torch.utils.flop_counter import FlopCounterMode
384
+
385
+ flop_counts: Dict[str, float] = defaultdict(float)
386
+ for node in conv_nodes:
387
+ success, args, kwargs = torch._inductor.fx_utils.get_fake_args_kwargs(
388
+ node
389
+ )
390
+
391
+ if success:
392
+ with FlopCounterMode(display=False) as flop_counter_mode:
393
+ with V.fake_mode:
394
+ node.target(*args, **kwargs)
395
+
396
+ counted_flops = flop_counter_mode.get_total_flops()
397
+ if is_grouped(node):
398
+ node_type = "grouped"
399
+ elif is_small_channel(node):
400
+ node_type = "small"
401
+ elif is_in_out_channel(node):
402
+ node_type = "in_out"
403
+ else:
404
+ node_type = "default"
405
+
406
+ flop_counts[node_type] += counted_flops
407
+ else:
408
+ log.debug("Conv inputs meta not found")
409
+
410
+ # average benchmarked channels last speedup / slowdown, < 1 is speedup.
411
+ # taken from the set of convolution inputs in benchmarks/dynamo/microbenchmarks/operator_inp_logs/torchbench_train/
412
+ # To regenerate these numbers follow https://gist.github.com/eellison/55d7a6ed6f39829d68ac56f95f4df5bb
413
+ GROUPED_MULTIPLIER = 1.358
414
+ DEFAULT_MULTIPLIER = 0.823
415
+ IN_OUT_MULTIPLIER = 0.725
416
+ SMALL_MULTIPLIER = 0.783
417
+
418
+ total_flops = sum(flop_counts.values())
419
+ # TODO - get different values per hardware
420
+ weighted_flops = (
421
+ flop_counts["grouped"] * GROUPED_MULTIPLIER
422
+ + flop_counts["small"] * SMALL_MULTIPLIER
423
+ + flop_counts["in_out"] * IN_OUT_MULTIPLIER
424
+ + flop_counts["default"] * DEFAULT_MULTIPLIER
425
+ )
426
+ do_layout_opt = weighted_flops <= total_flops
427
+ if not do_layout_opt:
428
+ log.debug(
429
+ "Skipped layout opt in inference because weighted flops indicate slowdown, default: %d, channels last: %d",
430
+ total_flops,
431
+ weighted_flops,
432
+ )
433
+ return do_layout_opt
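+ # Worked example of the heuristic above (numbers are made up for illustration): with
+ # flop_counts = {"default": 100e9, "grouped": 10e9}, weighted_flops is
+ # 100e9 * 0.823 + 10e9 * 1.358 = 95.88e9 <= 110e9 total_flops, so layout opt stays enabled.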
434
+
435
+ # Channels last layout can dramatically hurt grouped conv perf. E.g.
436
+ # Conv with arguments like
437
+ # {"input_shape": [32, 224, 112, 112], "weight_shape": [224, 112, 3, 3],
438
+ # "stride": [2, 2], "padding": [1, 1], "groups": 2}
439
+ # slows down 31x using channels last..
440
+
441
+ # But a lot of timm models use depthwise separable convolution which will
442
+ # result in grouped convolution with in-channel size == 1.
443
+ # For those grouped convolution, channels last still helps a lot.
444
+ # E.g.
445
+ # Conv with arguments
446
+ # {"input_shape": [128, 58, 56, 56], "weight_shape": [58, 1, 3, 3],
447
+ # "stride": [2, 2], "padding": [1, 1], "groups": 58}
448
+ # get 1.86x speedup with channels last layout.
449
+ #
450
+ # The following heuristics skip using channels-last if the model contains
451
+ # grouped convolution with in-channels > 1.
452
+ if any(map(is_grouped, conv_nodes)):
453
+ log.debug(
454
+ "Skip layout opt because found grouped convolution with >1 in_channels!"
455
+ )
456
+ return False
457
+
458
+ # For some models that contain convolution with larger in-channel than out-channel, applying
459
+ # channels last hurts performance.
460
+ # Following models are skipped due to this:
461
+ # - pytorch_unet
462
+ # - phlippe_densenet (slightly worse)
463
+ # - Background_Matting (1.22x -> 0.821x)
464
+ # - pytorch_CycleGAN_and_pix2pix (1.597x -> 1.294x)
465
+ if any(map(is_in_out_channel, conv_nodes)):
466
+ log.debug(
467
+ "Skip layout opt because some convolutions have smaller out_channel"
468
+ )
469
+ return False
470
+
471
+ # Following models are skipped due to this:
472
+ # - functorch_maml_omniglot
473
+ if all(map(is_small_channel, conv_nodes)):
474
+ log.debug("Skip layout opt because all convolution channels are too small")
475
+ return False
476
+
477
+ return True
478
+
479
+ def qualify_name(self, name: str) -> str:
480
+ """Prepend the given name with the graph name if any."""
481
+ if self.name is not None:
482
+ return f"{self.name}_{name}"
483
+ return name
484
+
485
+ def make_subgraph(
486
+ self,
487
+ gm: torch.fx.GraphModule,
488
+ example_inputs: List[torch.Tensor],
489
+ subgraph_name: str,
490
+ ) -> "GraphLowering":
491
+ """
492
+ Make a subgraph of the current graph with all inherited
493
+ parts, except the graph module (`gm`) and `example_inputs`.
494
+ The subgraphs are lowered separately, but intended to be
495
+ inlined in the parent graph's code generation. Hence the need
496
+ for maintaining the same `shape_env` and other properties.
497
+ The subgraph name is qualified by the parent graph's name.
498
+ """
499
+ return GraphLowering(
500
+ gm=gm,
501
+ example_inputs=example_inputs,
502
+ shape_env=self._shape_env,
503
+ cpp_wrapper=self.cpp_wrapper,
504
+ aot_mode=self.aot_mode,
505
+ extern_node_serializer=self.extern_node_serializer,
506
+ is_inference=self.is_inference,
507
+ name=self.qualify_name(subgraph_name),
508
+ )
509
+
510
+ def find_nodes_prefer_channels_last(self):
511
+ """
512
+ The rule to decide if a node prefers channels last is simple:
513
+ 1. if it's input/output of a convolution
514
+ 2. if one of its users prefers channels last
515
+
516
+ We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs;
517
+ Rule 2 is also important. It makes sure that indirect inputs to convolution also prefer
518
+ channels last.
519
+
520
+ Consider the scenario: conv -> batch-norm -> relu -> conv
521
+ Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies:
522
+ 1. the output of batch-norm should be channels last initially since its input is a conv's output.
523
+ Forcing the batch-norm's output to be contiguous results in the first copy
524
+ 2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output.
525
+ We need to convert it to channels last layout, which results in the second copy.
526
+ With rule 2, we make sure all the tensors in the chain use channels last layout. So both copies
527
+ can be saved.
528
+ """
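+ # Illustrative trace of the reverse pass below on conv -> bn -> relu -> conv2 (node
+ # names assumed): conv2 is added because it is a convolution, then relu (its user conv2
+ # is in the set), then bn, then conv; so the whole chain prefers channels last.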
529
+ output_set = set()
530
+ for n in reversed(self.module.graph.nodes):
531
+ if n.target == torch.ops.aten.convolution.default:
532
+ output_set.add(n)
533
+ continue
534
+
535
+ for user in n.users:
536
+ if user in output_set:
537
+ output_set.add(n)
538
+ break
539
+
540
+ # need a second pass to add downstream nodes of those channels last nodes to the set.
541
+ # This pass is especially needed to avoid mix-layout kernel inputs in backward pass.
542
+ #
543
+ # Let's say a conv-batchnorm's output is passed to relu whose output is in turn returned
544
+ # from the fwd graph. Without this second pass, we will force relu's output to be contiguous.
545
+ # Then in the kernel in the backward pass, the contiguous output of relu may be mixed with other channels last
546
+ # tensors and passed to a kernel.
547
+ #
548
+ # This pass improves yolov3 training speedup from 1.116x (worse than disabling layout optimization speedup 1.196x) to 1.457x.
549
+ # It also improves dla102 training speedup from 1.240x (worse than disabling layout optimization speedup 1.523x) to 1.835x.
550
+ # This also helps the following models:
551
+ # - res2net101_26w_4s
552
+ # - res2net50_14w_8s
553
+ # - sebotnet33ts_256
554
+ for n in self.module.graph.nodes:
555
+ if n in output_set:
556
+ for child in n.users:
557
+ output_set.add(child)
558
+
559
+ return output_set
560
+
561
+ def warn_fallback(self, name):
562
+ if name not in self._warned_fallback:
563
+ self._warned_fallback.add(name)
564
+ perf_hint_log.info("Using FallbackKernel: %s", name)
565
+
566
+ def add_device_info(self, device: torch.device):
567
+ self.device_types.add(device.type)
568
+ if device.index is not None:
569
+ self.device_idxs.add(device.index)
570
+ if V.graph.current_node and device not in self.device_node_mapping:
571
+ self.device_node_mapping[device] = V.graph.current_node
572
+
573
+ @property
574
+ def fake_mode(self):
575
+ return V.fake_mode
576
+
577
+ def get_buffer(self, buffer_name: str):
578
+ if buffer_name in self.name_to_buffer:
579
+ return self.name_to_buffer[buffer_name]
580
+ if buffer_name in self.graph_inputs:
581
+ return self.graph_inputs[buffer_name]
582
+ return None
583
+
584
+ def get_dtype(self, buffer_name: str):
585
+ if buffer_name in self.constants:
586
+ return self.constants[buffer_name].dtype
587
+ if buffer_name in self.name_to_buffer:
588
+ return self.name_to_buffer[buffer_name].get_dtype()
589
+ if buffer_name in self.graph_inputs:
590
+ return self.graph_inputs[buffer_name].get_dtype()
591
+ m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
592
+ if m:
593
+ return self.get_dtype(m.group(1))
594
+ raise KeyError(f"could not find {buffer_name}")
595
+
596
+ def get_numel(self, buffer_name: str):
597
+ from .ir import MultiOutputLayout
598
+
599
+ if buffer_name in self.constants:
600
+ return self.constants[buffer_name].numel()
601
+ if buffer_name in self.name_to_buffer:
602
+ buf = self.name_to_buffer[buffer_name]
603
+ if isinstance(getattr(buf, "layout", None), MultiOutputLayout):
604
+ return 1
605
+ return buf.get_numel()
606
+ if buffer_name in self.graph_inputs:
607
+ return self.graph_inputs[buffer_name].get_numel()
608
+ raise KeyError(f"could not find {buffer_name}")
609
+
610
+ @dynamo_timed
611
+ def run(self, *args):
612
+ return super().run(*args)
613
+
614
+ def register_buffer(self, buffer: ir.Buffer):
615
+ name = self.qualify_name(f"buf{len(self.buffers)}")
616
+ self.buffers.append(buffer)
617
+ self.name_to_buffer[name] = buffer
618
+ # Skip empty CPU tensor so that CUDA graphs can succeed, see https://github.com/pytorch/pytorch/pull/114144
619
+ if not isinstance(buffer, ir.ComputedBuffer) or not buffer.is_zero_elements():
620
+ self.add_device_info(buffer.get_device())
621
+ return name
622
+
623
+ def register_list(self, buffer_names: List[str]):
624
+ name = self.qualify_name("list_" + "_".join(buffer_names))
625
+ self.lists[name] = buffer_names
626
+ return name
627
+
628
+ def register_users_of(self, node_output):
629
+ def register(value):
630
+ if isinstance(value, (list, tuple)):
631
+ for x in value:
632
+ register(x)
633
+ if isinstance(value, ir.IRNode):
634
+ if (
635
+ not hasattr(value, "data")
636
+ or not isinstance(value.data, ir.IRNode)
637
+ or not (
638
+ hasattr(value.data, "data")
639
+ and isinstance(value.data.data, ir.IRNode)
640
+ )
641
+ ):
642
+ return
643
+
644
+ for read_name in value.get_read_names():
645
+ self.name_to_users[read_name].append(value)
646
+
647
+ register(node_output)
648
+
649
+ def mark_buffer_mutated(self, name: str):
650
+ """
651
+ When a buffer is mutated we need to make sure all the reads to
652
+ the old version are realized before the mutation happens.
653
+ """
654
+ assert isinstance(name, str)
655
+ self.mutated_buffers.add(name)
656
+
657
+ if name not in self.name_to_users:
658
+ return
659
+
660
+ for user in self.name_to_users[name]:
661
+ user.realize()
662
+
663
+ def add_tensor_constant(self, data, name=None):
664
+ def allocate(name):
665
+ if not config.aot_inductor.use_runtime_constant_folding:
666
+ for constant_name, value in self.constants.items():
667
+ if (
668
+ not data.is_mkldnn
669
+ and data.size() == value.size()
670
+ and data.stride() == value.stride()
671
+ and data.dtype == value.dtype
672
+ and data.device == value.device
673
+ and torch.eq(data, value).all()
674
+ ):
675
+ return constant_name
676
+
677
+ if name is None:
678
+ name = f"constant{len(self.constants)}"
679
+ if name[0].isdigit():
680
+ name = f"constant_{name}"
681
+ name = self.qualify_name(name)
682
+ # We may generate a var name for each constant in the codegen.
683
+ # Let's only keep sane characters.
684
+ prefix = re.sub(r"[^a-zA-Z0-9_]", "_", name)
685
+ name = prefix
686
+ cnt = 0
687
+ while name in self.constants:
688
+ name = f"{prefix}_{cnt}"
689
+ cnt += 1
690
+ self.constants[name] = data
691
+ self.constant_reprs[name] = (
692
+ f"{data.device!r} {data.dtype!r} "
693
+ f"{tuple(data.size())!r} {tuple(data.stride())!r} "
694
+ f"{hash(data):x}"
695
+ )
696
+ return name
697
+
698
+ new_name = allocate(name)
699
+ self.allocated_constant_name[new_name] = name
700
+
701
+ return TensorBox.create(
702
+ ir.ConstantBuffer(
703
+ new_name,
704
+ FixedLayout(data.device, data.dtype, *self.static_sizes_strides(data)),
705
+ )
706
+ )
707
+
708
+ def constant_name(self, name: str, device_override: Optional[torch.device]):
709
+ """
710
+ We AOT copy constants to the devices they are needed on.
711
+ If device_override doesn't match the constant's device, then
712
+ copy it and return a different name.
713
+ """
714
+ if self.constants[name].device == device_override or device_override is None:
715
+ return name
716
+ alt_name = f"{name}_{device_override.type}{device_override.index or 0}"
717
+ if alt_name not in self.constants:
718
+ self.constants[alt_name] = self.constants[name].to(device_override)
719
+ return alt_name
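+ # For example (names assumed): if self.constants["w0"] lives on CPU, then
+ # constant_name("w0", torch.device("cuda", 0)) copies it once and returns "w0_cuda0".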
720
+
721
+ def placeholder(self, target: str, args, kwargs):
722
+ example = super().placeholder(target, args, kwargs)
723
+ self.graph_input_names.append(target)
724
+ if isinstance(example, SymTypes):
725
+ expr = example.node.expr
726
+ self.graph_inputs[target] = expr
727
+ return expr
728
+ elif isinstance(example, (int, bool, float)):
729
+ expr = sympy.sympify(example)
730
+ self.graph_inputs[target] = expr
731
+ return expr
732
+ if isinstance(example, BackwardState):
733
+ # Ignored arg, must be unused
734
+ # Alternately we could filter this out in AotAutograd
735
+ return None
736
+ assert isinstance(example, torch.Tensor), example
737
+ # todo(chilli): We can remove the last check once we turn buffers into
738
+ # static shape tensors. That's a hack to workaround Inductor believing
739
+ # the buffer should be static but us passing in a fake tensor with
740
+ # symbolic shapes.
741
+ if not example._has_symbolic_sizes_strides:
742
+ # the first N inputs are weights
743
+ sizes, strides = self.static_sizes_strides(example)
744
+ else:
745
+ sizes, strides = self.symbolic_sizes_strides(example)
746
+ # TODO(jansel): handle input aliasing
747
+ target = self.qualify_name(target)
748
+ tensor = TensorBox.create(
749
+ InputBuffer(
750
+ target,
751
+ FixedLayout(example.device, example.dtype, sizes, strides),
752
+ )
753
+ )
754
+ self.graph_inputs[target] = tensor
755
+ self.graph_inputs_original[target] = tensor.data.data
756
+ self.add_device_info(example.device)
757
+ return tensor
758
+
759
+ def call_function(self, target, args, kwargs):
760
+ if target is operator.getitem and isinstance(args[0], (list, tuple, dict)):
761
+ return super().call_function(target, args, kwargs)
762
+
763
+ if hasattr(target, "_inductor_lowering_function"):
764
+ # passthrough lowerings from .pattern_matcher
765
+ return target(*args, **kwargs)
766
+
767
+ def get_custom_op_layout_constraints(target, args, kwargs):
768
+ # Custom operations that require preserving stride order
769
+ # which run through implicit fallback must constrain their
770
+ # arguments' fx strides
771
+ layout_constraint = None
772
+ if torch._C.Tag.needs_fixed_stride_order in target.tags:
773
+ # We have to set the current args because call_function will immediately
774
+ # evaluate this lowering after creating the fallback, without evaluating
775
+ # the layout constraint
776
+ args, kwargs = constrain_to_fx_strides(
777
+ self.current_node, *args, **kwargs
778
+ )
779
+ # Also register the layout constraint so when the fallback
780
+ # is used again, we can constrain the args to the same layout
781
+ layout_constraint = constrain_to_fx_strides
782
+ return layout_constraint, args, kwargs
783
+
784
+ if target not in lowerings:
785
+ assert isinstance(
786
+ target, torch._ops.OpOverload
787
+ ), f"{target} is not an OpOverload"
788
+ base_name = target.name().split(".")[0]
789
+ if base_name in FALLBACK_ALLOW_LIST:
790
+ make_fallback(target)
791
+ elif config.implicit_fallbacks:
792
+ layout_constraint, args, kwargs = get_custom_op_layout_constraints(
793
+ target, args, kwargs
794
+ )
795
+ error = (
796
+ MissingOperatorWithDecomp
797
+ if get_decompositions([target])
798
+ else MissingOperatorWithoutDecomp
799
+ )
800
+ log.info(
801
+ "Creating implicit fallback for:\n%s",
802
+ error.operator_str(target, args, kwargs),
803
+ )
804
+ make_fallback(target, layout_constraint)
805
+
806
+ elif get_decompositions([target]):
807
+ # There isn't a good way to dynamically patch this in
808
+ # since AOT Autograd already ran. The error message tells
809
+ # the user how to fix it.
810
+ raise MissingOperatorWithDecomp(target, args, kwargs)
811
+ else:
812
+ raise MissingOperatorWithoutDecomp(target, args, kwargs)
813
+
814
+ try:
815
+ log.debug(" via %s", lowerings[target])
816
+ out = lowerings[target](*args, **kwargs)
817
+ return out
818
+ except Exception as e:
819
+ raise LoweringException(e, target, args, kwargs).with_traceback(
820
+ e.__traceback__
821
+ ) from None
822
+
823
+ @staticmethod
824
+ def can_inline_constant(t: torch.Tensor) -> bool:
825
+ """
826
+ True if this is a small constant attr that will be inlined.
827
+ """
828
+ return len(t.shape) == 1 and t.shape[0] <= 8
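+ # For example (a sketch): torch.tensor([1, 2, 3]) would be inlined (1-D, <= 8 elements),
+ # while torch.ones(2, 2) would not.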
829
+
830
+ def get_attr(self, target, args, kwargs):
831
+ # this is a constant
832
+ value = getattr_recursive(self.module, target)
833
+
834
+ if isinstance(value, torch.fx.GraphModule):
835
+ return ir.Subgraph(name=target, graph_module=value)
836
+
837
+ if (
838
+ config.aot_inductor.use_runtime_constant_folding
839
+ or config.always_keep_tensor_constants
840
+ or unsupported_output_tensor(value)
841
+ ):
842
+ return self.add_tensor_constant(value, target)
843
+
844
+ with no_dispatch():
845
+ if value.shape == ():
846
+ return Constant(value.item(), value.dtype, value.device)
847
+ if self.can_inline_constant(value):
848
+ # tensor lowering has constant inlining logic
849
+ from .lowering import tensor
850
+
851
+ return tensor(value.tolist(), dtype=value.dtype, device=value.device)
852
+
853
+ return self.add_tensor_constant(value, target)
854
+
855
+ def call_module(self, target, args, kwargs):
856
+ raise AssertionError()
857
+
858
+ def call_method(self, target, args, kwargs):
859
+ raise AssertionError()
860
+
861
+ def output(self, target, args, kwargs):
862
+ result = super().output(target, args, kwargs)
863
+ assert isinstance(result, (tuple, list)), type(result)
864
+ assert all(
865
+ isinstance(
866
+ x,
867
+ (
868
+ TensorBox,
869
+ ir.Constant,
870
+ type(None),
871
+ ir.ConstantBuffer,
872
+ sympy.Expr,
873
+ sympy.logic.boolalg.Boolean,
874
+ int,
875
+ ),
876
+ )
877
+ for x in result
878
+ ), result
879
+ self.graph_outputs = [ir.ExternKernel.realize_input(x) for x in result]
880
+ value: ir.IRNode
881
+ for name, value in self.graph_inputs.items():
882
+ assert isinstance(
883
+ value, (TensorBox, sympy.Expr)
884
+ ), f"Unsupported inductor graph input type: {type(value)}"
885
+ if not isinstance(value, TensorBox):
886
+ continue
887
+ value.realize()
888
+ assert isinstance(value, TensorBox)
889
+ value = value.data
890
+ assert isinstance(value, ir.StorageBox)
891
+ value_storage_box = value
892
+ value = value.data
893
+ if not isinstance(value, InputBuffer) or value.get_name() != name:
894
+ # one of our inputs was mutated, need to turn that into a copy
895
+ ir.MutationLayout.realize_into(value, self.graph_inputs_original[name])
896
+ # replace output with mutated input
897
+ try:
898
+ ind = self.graph_outputs.index(value_storage_box)
899
+ self.graph_outputs[ind] = self.graph_inputs_original[name]
900
+ except ValueError:
901
+ pass
902
+
903
+ self.finalize()
904
+ log.debug(
905
+ "Force channels last inputs for %d conv for the current graph with id %d",
906
+ self.num_channels_last_conv,
907
+ self.graph_id if self.graph_id is not None else -1,
908
+ )
909
+
910
+ def finalize(self):
911
+ for buf in self.buffers:
912
+ buf.decide_layout()
913
+
914
+ @contextmanager
915
+ def set_current_node(self, node: torch.fx.Node):
916
+ old = self.current_node
917
+ try:
918
+ self.current_node = node
919
+ yield
920
+ finally:
921
+ self.current_node = old
922
+
923
+ def run_node(self, n: torch.fx.Node):
924
+ def debug(msg):
925
+ log.debug("lowering %s %s", LazyString(n.format_node), msg)
926
+
927
+ origins = {n}
928
+ if n.op == "call_function":
929
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
930
+ origins |= gather_origins(args, kwargs)
931
+ with ir.IRNode.current_origins(origins), self.set_current_node(
932
+ n
933
+ ), V.set_current_node(n):
934
+ if (
935
+ n.op == "call_function"
936
+ and n.target is not operator.getitem
937
+ and fallback_node_due_to_unsupported_type(n)
938
+ ):
939
+ debug("fallback_handler")
940
+ result = fallback_handler(n.target, add_to_fallback_set=False)(
941
+ *args, **kwargs # type: ignore[possibly-undefined]
942
+ )
943
+ elif n.op == "call_function" and n.target in layout_constraints:
944
+ debug("layout_constraints")
945
+ args, kwargs = layout_constraints[n.target](n, *args, **kwargs) # type: ignore[index]
946
+ result = self.call_function(n.target, args, kwargs)
947
+ elif is_magic_method(n.target):
948
+ # TODO: this is sus, it probably should be handled in the
949
+ # lowerings themselves similarly to sym_size/sym-stride
950
+ debug("is_magic_method")
951
+ if isinstance(n.meta["val"], torch.SymInt):
952
+ result = n.meta["val"].node.expr
953
+ else:
954
+ result = super().run_node(n)
955
+ else:
956
+ debug("")
957
+ result = super().run_node(n)
958
+
959
+ # Require the same stride order for dense outputs, so that:
960
+ # 1. user-land view() will not throw just because inductor
961
+ # produces different strides than eager;
962
+ # long term the solution is to make view() always succeed
963
+ # with infallible strides.
964
+ # 2. for as_strided ops, we need to make sure their inputs have the same size/stride as
965
+ # in eager mode to align with eager behavior.
966
+ as_strided_ops = [
967
+ torch.ops.aten.as_strided.default,
968
+ torch.ops.aten.as_strided_.default,
969
+ torch.ops.aten.as_strided_scatter.default,
970
+ ]
971
+ is_output = any(user.op == "output" for user in n.users)
972
+ is_input_for_as_strided = any(
973
+ user.target in as_strided_ops for user in n.users
974
+ )
975
+ if (
976
+ is_output
977
+ and isinstance(result, TensorBox)
978
+ and isinstance(result.data, ir.BaseView)
979
+ ):
980
+ # Realize so that outputs are correctly aliased
981
+ result.realize()
982
+
983
+ if (is_output or is_input_for_as_strided) and isinstance(
984
+ n.meta["val"], torch.Tensor
985
+ ):
986
+ strides = n.meta["val"].stride()
987
+ dense = torch._prims_common.is_non_overlapping_and_dense(n.meta["val"])
988
+ # requiring a stride order for a non-dense output wouldn't
989
+ # recreate the same strides, and would fail with view, defer for now.
990
+ if dense and len(strides):
991
+ stride_order = ir.get_stride_order(strides)
992
+ if (
993
+ len(result.get_size()) == 4
994
+ and n in self.nodes_prefer_channels_last
995
+ and n.name not in self.user_visible_outputs
996
+ and not is_input_for_as_strided
997
+ ):
998
+ stride_order = ir.NHWC_STRIDE_ORDER
999
+ result = ir.ExternKernel.require_stride_order(result, stride_order)
1000
+
1001
+ # Realize if (1) any user needs inputs realized, or (2) there are
1002
+ # already too many reads and rematerializing can be bad.
1003
+ num_users = len(set(n.users))
1004
+ if num_users > 1 and isinstance(result, TensorBox):
1005
+ for user in n.users:
1006
+ if user.target in needs_realized_inputs:
1007
+ result.realize_hint()
1008
+ # This inclusion is somewhat controversial (from
1009
+ # discussion between Horace, Natalia, and Elias).
1010
+ # Currently, it's not very clear why this is helpful.
1011
+ # The general idea here is that even though a node may
1012
+ # have FlexibleLayout, we still often *treat* it as if
1013
+ # it was contiguous. This appears to sometimes result in
1014
+ # suboptimal behavior.
1015
+ #
1016
+ # When we do a better job selecting layout, we should
1017
+ # revisit this.
1018
+ need_fixed_layout = [
1019
+ torch.ops.aten.convolution_backward.default,
1020
+ torch.ops.aten.mm.default,
1021
+ torch.ops.aten._int_mm.default,
1022
+ ]
1023
+ if not self.layout_opt:
1024
+ need_fixed_layout.append(torch.ops.aten.convolution.default)
1025
+ if torch._C._has_mkldnn:
1026
+ need_fixed_layout += [
1027
+ torch.ops.mkldnn._convolution_pointwise.default,
1028
+ torch.ops.mkldnn._convolution_pointwise.binary,
1029
+ torch.ops.mkldnn._convolution_pointwise_.binary,
1030
+ torch.ops.mkldnn._convolution_transpose_pointwise.default,
1031
+ torch.ops.mkldnn._linear_pointwise.default,
1032
+ torch.ops.mkldnn._linear_pointwise.binary,
1033
+ torch.ops.aten.mkldnn_rnn_layer.default,
1034
+ torch.ops.onednn.qconv2d_pointwise.default,
1035
+ torch.ops.onednn.qconv2d_pointwise.binary,
1036
+ torch.ops.onednn.qlinear_pointwise.default,
1037
+ torch.ops.onednn.qlinear_pointwise.tensor,
1038
+ ]
1039
+ if torch._C.has_mkl:
1040
+ need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
1041
+ if user.target in need_fixed_layout:
1042
+ result = ir.ExternKernel.require_stride_order(
1043
+ result, ir.get_stride_order(n.meta["val"].stride())
1044
+ )
1045
+ if user.op == "output":
1046
+ if isinstance(result.data.data, (Pointwise, Reduction)):
1047
+ result.realize()
1048
+
1049
+ # TODO(jansel): introduce a store vs inline choice
1050
+ result.mark_reuse(len(n.users))
1051
+
1052
+ # Realize if the IRNode already has accumulated lots of reads
1053
+ if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
1054
+ # Prevent excessive accumulation in a computed buffer, when
1055
+ # there are multiple branches each with small number of memory
1056
+ # reads, but they converge to a user.
1057
+ result.realize_hint()
1058
+
1059
+ # Realize if a Pointwise has too much stuff to be inlined,
1060
+ # as this may cause a RecursionError during Inductor's evaluation.
1061
+ if isinstance(result, TensorBox) and isinstance(result.data, StorageBox):
1062
+ curr = result.data.data
1063
+ if isinstance(curr, Pointwise):
1064
+ # Use inner fn as a rough proxy. Good enough.
1065
+ if curr.has_large_inner_fn():
1066
+ result.realize()
1067
+
1068
+ # This is not complete, but it doesn't have to be: origin_node
1069
+ # tracking is best effort. The logic here critically relies on direct
1070
+ # TensorBox -> StorageBox denoting a non-view; we don't bother trying
1071
+ # to get views to work. Feel free to add any extra cases as needed.
1072
+ #
1073
+ # Note: we can't YOLO tree_map over this result, because if there are
1074
+ # buffers or a view involved, we might not be able to validly assign
1075
+ # the origin_node here.
1076
+ if isinstance(result, TensorBox) and isinstance(result.data, ir.StorageBox):
1077
+ if isinstance(result.data.data, ir.Loops):
1078
+ result.data.data.origin_node = n
1079
+ elif isinstance(result.data.data, ir.Buffer):
1080
+ result.data.data.origin_node = n
1081
+ if isinstance(result.data.data, ir.ComputedBuffer) and isinstance(
1082
+ result.data.data.data, ir.Loops
1083
+ ):
1084
+ result.data.data.data.origin_node = n
1085
+ # Not really multi-output, can straightforwardly recurse in
1086
+ elif (
1087
+ isinstance(result.data.data, ir.MultiOutput)
1088
+ and not result.data.data.indices
1089
+ ):
1090
+ if isinstance(result.data.data.inputs[0], ir.Buffer):
1091
+ result.data.data.inputs[0].origin_node = n
1092
+
1093
+ self.register_users_of(result)
1094
+
1095
+ return result
1096
+
1097
+ def validate_can_generate_cpp_wrapper(self):
1098
+ if config.disable_cpp_codegen:
1099
+ raise CppWrapperCodeGenError("C++ codegen is disabled")
1100
+
1101
+ if sys.platform not in ["linux", "darwin"]:
1102
+ raise CppWrapperCodeGenError(f"Unsupported platform {sys.platform}")
1103
+
1104
+ for value in self.graph_inputs.values():
1105
+ dtype = None
1106
+ if isinstance(value, TensorBox):
1107
+ dtype = value.get_dtype()
1108
+ elif isinstance(
1109
+ value, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
1110
+ ):
1111
+ dtype = may_get_constant_buffer_dtype(value)
1112
+
1113
+ if not supported_dtype_of_cpp_wrapper(dtype, self.cuda):
1114
+ raise CppWrapperCodeGenError(f"Unsupported input dtype {dtype}")
1115
+
1116
+ def init_wrapper_code(self):
1117
+ self.cuda = "cuda" in self.device_types
1118
+ if self.cpp_wrapper:
1119
+ self.validate_can_generate_cpp_wrapper()
1120
+ self.wrapper_code = CppWrapperCuda() if self.cuda else CppWrapperCpu()
1121
+ else:
1122
+ device_types = self.device_types.copy()
1123
+ device_types.discard("cpu")
1124
+ # TODO(Eikan): Only support mixing cpu and other device now.
1125
+ assert len(device_types) <= 1, "Does not support mixing {}".format(
1126
+ "+".join(device_types)
1127
+ )
1128
+ only_cpu = len(device_types) == 0
1129
+ device_type = "cpu" if only_cpu else device_types.pop()
1130
+
1131
+ self.device_ops = get_device_op_overrides(device_type)
1132
+ wrapper_code_gen_cls = get_wrapper_codegen_for_device(device_type)
1133
+ assert (
1134
+ wrapper_code_gen_cls is not None
1135
+ ), f"Device {device_type} not supported"
1136
+ self.wrapper_code = wrapper_code_gen_cls()
1137
+
1138
+ if self.const_module:
1139
+ # If we have const module, we could reuse the kernels
1140
+ # This could avoid duplication and save time on doing recompilation (if Triton.)
1141
+ self.wrapper_code._names_iter = self.const_module.wrapper_code._names_iter
1142
+ self.wrapper_code.src_to_kernel = (
1143
+ self.const_module.wrapper_code.src_to_kernel
1144
+ )
1145
+
1146
+ def codegen_with_cpp_wrapper(self):
1147
+ """
1148
+ For CPU, the cpp wrapper codegen is done in one pass.
1149
+ For GPU, the cpp wrapper codegen is done in two steps: JIT-compile the model with python
1150
+ wrapper code and run it to generate autotuned kernel binaries in the first pass; and then
1151
+ generate cpp wrapper code and compile it to a dynamic library in the second pass.
1152
+ """
1153
+ if "cuda" in self.device_types:
1154
+ # first pass
1155
+ self.cpp_wrapper = False
1156
+ compiled = self.compile_to_module().call
1157
+
1158
+ def materialize(x):
1159
+ if isinstance(x, (torch.SymInt, torch.SymFloat)):
1160
+ # Need concrete value to run dynamic shapes and tune the result
1161
+ return x.node.hint
1162
+ elif isinstance(x, FakeTensor):
1163
+ return defake(x)
1164
+ else:
1165
+ assert isinstance(
1166
+ x, torch.Tensor
1167
+ ), "Unknown type when creating real inputs" + str(type(x))
1168
+ return x
1169
+
1170
+ if tracing_context := torch._guards.TracingContext.try_get():
1171
+ if tracing_context.output_strides:
1172
+ tracing_context.output_strides.clear()
1173
+
1174
+ params_flat = [
1175
+ param
1176
+ for param in tracing_context.params_flat # type: ignore[union-attr]
1177
+ if param is not None
1178
+ ]
1179
+ real_inputs = [
1180
+ materialize(x) for x in itertools.chain(params_flat, V.real_inputs)
1181
+ ]
1182
+ else:
1183
+ real_inputs = [materialize(x) for x in V.real_inputs]
1184
+
1185
+ with torch.utils._python_dispatch._disable_current_modes():
1186
+ assert self.example_inputs is not None
1187
+ compiled(real_inputs)
1188
+ del real_inputs
1189
+
1190
+ # second pass
1191
+ # TODO: reuse self.scheduler from the first pass to speed up the second pass
1192
+ self.cpp_wrapper = True
1193
+ self.removed_buffers.clear()
1194
+ self.inplaced_to_remove.clear()
1195
+ return self.codegen()
1196
+ else:
1197
+ # cpu
1198
+ return self.codegen()
1199
+
1200
+ def codegen(self):
1201
+ from .scheduler import Scheduler
1202
+
1203
+ self.init_wrapper_code()
1204
+
1205
+ self.scheduler = Scheduler(self.buffers)
1206
+ V.debug.draw_orig_fx_graph(self.orig_gm, self.scheduler.nodes)
1207
+
1208
+ self.scheduler.codegen()
1209
+ return self.wrapper_code.generate(self.is_inference)
1210
+
1211
+ def codegen_subgraph(self, parent_graph):
1212
+ """
1213
+ This is a more compact version of the `codegen()` above
1214
+ where we codegen this graph as a subgraph of some parent
1215
+ graph. The parent graph is passed as an argument: the
1216
+ intention is to inline code generation for the subgraph in
1217
+ the parent graph's wrapper code (including the generated
1218
+ kernels). The wrapper code is not finalized (via `.generate()`
1219
+ call), as this will be done in the parent graph's `codegen()`.
1220
+ """
1221
+ from .scheduler import Scheduler
1222
+
1223
+ self.wrapper_code = parent_graph.wrapper_code
1224
+ self.device_ops = parent_graph.device_ops
1225
+ self.cpp_wrapper = parent_graph.cpp_wrapper
1226
+
1227
+ self.scheduler = Scheduler(self.buffers)
1228
+ self.scheduler.codegen()
1229
+
1230
+ def count_bytes(self):
1231
+ from .scheduler import Scheduler
1232
+
1233
+ scheduler = Scheduler(self.buffers)
1234
+
1235
+ total_bytes = 0
1236
+ node_counts = []
1237
+ node_runtimes = []
1238
+ for node in scheduler.nodes:
1239
+ num_bytes = node.get_read_write_buffers_sizes()
1240
+ total_bytes += num_bytes
1241
+ node_counts.append((node, num_bytes // 4))
1242
+ node_runtimes.append((node, node.get_estimated_runtime()))
1243
+ return total_bytes, node_counts, node_runtimes
1244
+
1245
+ @dynamo_timed(phase_name="code_gen")
1246
+ def compile_to_module(self):
1247
+ from .codecache import PyCodeCache
1248
+
1249
+ code, linemap = (
1250
+ self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
1251
+ )
1252
+ linemap = [(line_no, node.stack_trace) for line_no, node in linemap]
1253
+ key, path = PyCodeCache.write(code)
1254
+ mod = PyCodeCache.load_by_key_path(
1255
+ key, path, linemap=linemap, attrs=self.constants
1256
+ )
1257
+ self.cache_key = key
1258
+ self.cache_path = path
1259
+ self.cache_linemap = linemap
1260
+
1261
+ # Logged twice as per https://github.com/pytorch/pytorch/pull/99038#discussion_r1167826029
1262
+ # TODO. Revisit this once the logging API is more mature
1263
+ assert mod.__file__ is not None
1264
+
1265
+ log_module_code(mod.__file__)
1266
+ log.debug("Output code written to: %s", mod.__file__)
1267
+ output_code_log.debug("Output code: \n%s", code)
1268
+ trace_structured(
1269
+ "inductor_output_code",
1270
+ lambda: {"filename": mod.__file__},
1271
+ payload_fn=lambda: code,
1272
+ )
1273
+ output_code_log.info("Output code written to: %s", mod.__file__)
1274
+ if config.benchmark_kernel:
1275
+ print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
1276
+ V.debug.output_code(mod.__file__)
1277
+ V.debug.copy(os.path.splitext(mod.__file__)[0] + ".debug")
1278
+ return mod
1279
+
1280
+ def compile_to_fn(self):
1281
+ if self.aot_mode:
1282
+ from .codecache import AotCodeCompiler
1283
+
1284
+ assert self.cpp_wrapper, "AOT mode only supports C++ wrapper"
1285
+ code, linemap = self.codegen_with_cpp_wrapper()
1286
+ output_code_log.debug("Output code: \n%s", code)
1287
+
1288
+ serialized_extern_kernel_nodes = None
1289
+ if (
1290
+ config.is_fbcode()
1291
+ and self.extern_kernel_nodes
1292
+ and self.extern_node_serializer
1293
+ ):
1294
+ serialized_extern_kernel_nodes = self.extern_node_serializer(
1295
+ self.extern_kernel_nodes
1296
+ )
1297
+ output_code_log.debug(
1298
+ "Serialized Extern Kernel Nodes: \n%s",
1299
+ serialized_extern_kernel_nodes,
1300
+ )
1301
+
1302
+ # Directly return the file path with the compiled code
1303
+ return AotCodeCompiler.compile(
1304
+ self, code, serialized_extern_kernel_nodes, cuda=self.cuda
1305
+ )
1306
+ else:
1307
+ return self.compile_to_module().call
1308
+
1309
+ def get_output_names(self):
1310
+ return [
1311
+ node.get_name()
1312
+ for node in self.graph_outputs
1313
+ if not isinstance(node, ir.NoneAsConstantBuffer)
1314
+ and not isinstance(node, ir.ShapeAsConstantBuffer)
1315
+ ]
1316
+
1317
+ def is_unspec_arg(self, name: str):
1318
+ # dynamo wraps unspec variable as 0d CPU tensor,
1319
+ # need to convert to scalar during codegen (triton only)
1320
+ return (
1321
+ name in self.graph_inputs.keys()
1322
+ and self.graph_inputs[name].get_numel() == 1
1323
+ and self.graph_inputs[name].get_device().type == "cpu"
1324
+ )
venv/lib/python3.10/site-packages/torch/_inductor/hooks.py ADDED
@@ -0,0 +1,28 @@
1
+ import contextlib
2
+ from typing import Callable, List, TYPE_CHECKING
3
+
4
+ if TYPE_CHECKING:
5
+ import torch
6
+
7
+ # Executed in the order they're registered
8
+ INTERMEDIATE_HOOKS: List[Callable[[str, "torch.Tensor"], None]] = []
9
+
10
+
11
+ @contextlib.contextmanager
12
+ def intermediate_hook(fn):
13
+ INTERMEDIATE_HOOKS.append(fn)
14
+ try:
15
+ yield
16
+ finally:
17
+ INTERMEDIATE_HOOKS.pop()
18
+
19
+
20
+ def run_intermediate_hooks(name, val):
21
+ global INTERMEDIATE_HOOKS
22
+ hooks = INTERMEDIATE_HOOKS
23
+ INTERMEDIATE_HOOKS = []
24
+ try:
25
+ for hook in hooks:
26
+ hook(name, val)
27
+ finally:
28
+ INTERMEDIATE_HOOKS = hooks
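+ # Usage sketch (illustrative; `compiled_fn` and its inputs are assumed, not defined here).
+ # If the generated code calls run_intermediate_hooks(name, tensor) for intermediate
+ # buffers, then
+ #
+ #   with intermediate_hook(lambda name, val: print(name, tuple(val.shape))):
+ #       compiled_fn(*example_inputs)
+ #
+ # prints each intermediate buffer's name and shape while the compiled code runs.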
venv/lib/python3.10/site-packages/torch/_inductor/index_propagation.py ADDED
@@ -0,0 +1,277 @@
1
+ """This file implements the IndexPropagation ops handler, which wraps an
2
+ underlying handler to add a limited form of constant propagation, as well as
3
+ propagation of sympy expressions downstream of ops.index_expr calls.
4
+
5
+ For example, say we have the IR:
6
+
7
+ tmp0 = ops.index_expr(x, torch.int32)
8
+ tmp1 = ops.constant(2, torch.int32)
9
+ tmp2 = ops.mul(tmp0, tmp1)
10
+ tmp3 = ops.indirect_indexing(tmp2, x_size)
11
+ tmp4 = ops.load("buf0", tmp3)
12
+
13
+ The underlying handler would just see:
14
+
15
+ ops.load("buf0", x * 2)
16
+
17
+ This is limited by the set of operators handled in the sympy expression
18
+ printers. So simple operations like minimum and maximum cannot be translated to
19
+ SymPy expressions yet, despite sympy.Min and sympy.Max existing.
20
+
21
+ """
22
+ import itertools
23
+ from dataclasses import dataclass
24
+ from typing import Any, Callable, Dict, Literal, Optional, overload, Tuple, Union
25
+
26
+ import sympy
27
+
28
+ from typing_extensions import TypeAlias
29
+
30
+ import torch
31
+ from torch._prims_common import is_boolean_dtype, is_integer_dtype
32
+ from torch.utils._sympy.functions import FloorDiv, ModularIndexing, Where
33
+
34
+
35
+ @dataclass
36
+ class TypedExpr:
37
+ """A SymPy expression with associated type"""
38
+
39
+ expr: sympy.Expr
40
+ dtype: torch.dtype
41
+
42
+
43
+ class SymPyOps:
44
+ """An ops handler where all IR values are SymPy expressions
45
+
46
+ When a value cannot be represented as a SymPy expression, the method is
47
+ either not defined, or returns NotImplemented
48
+
49
+ """
50
+
51
+ @staticmethod
52
+ def identity(value: Any) -> Any:
53
+ return value
54
+
55
+ @staticmethod
56
+ def constant(value: Union[int, float, bool], dtype: torch.dtype) -> TypedExpr:
57
+ if is_boolean_dtype(dtype):
58
+ expr = sympy.Integer(bool(value))
59
+ elif is_integer_dtype(dtype):
60
+ expr = sympy.Integer(int(value))
61
+ else:
62
+ expr = sympy.Float(float(value))
63
+ return TypedExpr(expr, dtype)
64
+
65
+ @staticmethod
66
+ def index_expr(value: sympy.Expr, dtype: torch.dtype) -> Union[int, TypedExpr]:
67
+ if isinstance(value, int):
68
+ value = sympy.Integer(value)
69
+ return TypedExpr(value, dtype)
70
+
71
+ @staticmethod
72
+ def to_dtype(
73
+ value: Any, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None
74
+ ) -> Union[int, TypedExpr]:
75
+ if isinstance(value.expr, (sympy.Integer, sympy.Float)):
76
+ return SymPyOps.constant(value.expr, dtype)
77
+ elif is_integer_dtype(dtype) and is_integer_dtype(value.dtype):
78
+ return SymPyOps.index_expr(value.expr, dtype)
79
+ else:
80
+ # TODO: Inductor doesn't handle floating point in sympy expressions well at the moment
81
+ return NotImplemented
82
+
83
+ @staticmethod
84
+ def square(x: TypedExpr) -> TypedExpr:
85
+ return TypedExpr(x.expr * x.expr, x.dtype)
86
+
87
+ @staticmethod
88
+ def add(x: TypedExpr, y: TypedExpr) -> TypedExpr:
89
+ result_type = torch.promote_types(x.dtype, y.dtype)
90
+ return TypedExpr(x.expr + y.expr, result_type)
91
+
92
+ @staticmethod
93
+ def sub(x: TypedExpr, y: TypedExpr) -> TypedExpr:
94
+ result_type = torch.promote_types(x.dtype, y.dtype)
95
+ return TypedExpr(x.expr - y.expr, result_type)
96
+
97
+ @staticmethod
98
+ def mul(x: TypedExpr, y: TypedExpr) -> TypedExpr:
99
+ result_type = torch.promote_types(x.dtype, y.dtype)
100
+ return TypedExpr(x.expr * y.expr, result_type)
101
+
102
+ @staticmethod
103
+ def neg(x: TypedExpr) -> TypedExpr:
104
+ return TypedExpr(-x.expr, x.dtype)
105
+
106
+ @staticmethod
107
+ def floordiv(x: TypedExpr, y: TypedExpr) -> TypedExpr:
108
+ result_type = torch.promote_types(x.dtype, y.dtype)
109
+ if not is_integer_dtype(result_type):
110
+ return NotImplemented
111
+
112
+ return TypedExpr(FloorDiv(x.expr, y.expr), result_type)
113
+
114
+ @staticmethod
115
+ def mod(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
116
+ result_type = torch.promote_types(x.dtype, y.dtype)
117
+ if not is_integer_dtype(result_type):
118
+ return NotImplemented
119
+
120
+ result_expr = ModularIndexing(x.expr, sympy.Integer(1), y.expr)
121
+ return TypedExpr(result_expr, result_type)
122
+
123
+ @staticmethod
124
+ def remainder(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
125
+ result_type = torch.promote_types(x.dtype, y.dtype)
126
+ if not is_integer_dtype(result_type):
127
+ return NotImplemented
128
+ # In these cases, remainder in Python == remainder in C++, so this transformation
129
+ # is sound
130
+ if (
131
+ x.expr.is_nonnegative is not None
132
+ and x.expr.is_nonnegative == y.expr.is_positive
133
+ ):
134
+ result_expr = ModularIndexing(x.expr, sympy.Integer(1), y.expr)
135
+ return TypedExpr(result_expr, result_type)
136
+ return NotImplemented
137
+
138
+ @staticmethod
139
+ def minimum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
140
+ result_type = torch.promote_types(x.dtype, y.dtype)
141
+ return TypedExpr(sympy.Min(x.expr, y.expr), result_type)
142
+
143
+ @staticmethod
144
+ def maximum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
145
+ result_type = torch.promote_types(x.dtype, y.dtype)
146
+ return TypedExpr(sympy.Max(x.expr, y.expr), result_type)
147
+
148
+
149
+ @dataclass
150
+ class IndexPropVar:
151
+ value: Any # Either an IR value, or TypedExpr if is_symbolic is true
152
+ is_symbolic: bool = False
153
+
154
+ @staticmethod
155
+ def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
156
+ return IndexPropVar(expr, is_symbolic=True)
157
+
158
+ def __post_init__(self):
159
+ assert not self.is_symbolic or isinstance(
160
+ self.value, TypedExpr
161
+ ), "Symbolic IndexPropVar must contain a TypedExpr"
162
+
163
+
164
+ IndexPropResult: TypeAlias = Union[IndexPropVar, Tuple["IndexPropResult", ...]]
165
+
166
+
167
+ class IndexPropagation:
168
+ """Ops wrapper that tries to propagate constant and index_expr values through the computation.
169
+
170
+ This aims to maximize the compile time simplification possible, and convert
171
+ indirect indexing from arange into normal static indexing.
172
+
173
+ """
174
+
175
+ def __init__(self, inner: Any):
176
+ self._inner = inner
177
+
178
+ def materialize_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> Any:
179
+ # Construct a new constant/index_expr from the SymPy expression
180
+ if isinstance(expr, sympy.Integer):
181
+ return self._inner.constant(int(expr), dtype)
182
+ elif expr.is_number:
183
+ return self._inner.constant(float(expr), dtype)
184
+ return self._inner.index_expr(expr, dtype)
185
+
186
+ def unwrap(self, a: Union[Any, IndexPropVar]) -> Any:
187
+ if isinstance(a, (list, tuple)):
188
+ return tuple(self.unwrap(v) for v in a)
189
+
190
+ if not isinstance(a, IndexPropVar):
191
+ return a
192
+
193
+ # Prefer the sympy representation if possible
194
+ if a.is_symbolic:
195
+ return self.materialize_expr(a.value.expr, a.value.dtype)
196
+
197
+ return a.value
198
+
199
+ def wrap(self, a) -> IndexPropResult:
200
+ if isinstance(a, (list, tuple)):
201
+ return tuple(self.wrap(v) for v in a)
202
+ return IndexPropVar(a)
203
+
204
+ @overload
205
+ def fallback(
206
+ self,
207
+ name: Literal["indirect_indexing"],
208
+ args: Tuple[Any, ...],
209
+ kwargs: Dict[str, Any],
210
+ ) -> IndexPropVar:
211
+ ...
212
+
213
+ @overload
214
+ def fallback(
215
+ self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
216
+ ) -> IndexPropResult:
217
+ ...
218
+
219
+ def fallback(
220
+ self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
221
+ ) -> IndexPropResult:
222
+ # Fallback to the wrapped handler
223
+ new_args = [self.unwrap(a) for a in args]
224
+ new_kwargs = {k: self.unwrap(v) for k, v in kwargs.items()}
225
+ return self.wrap(getattr(self._inner, name)(*new_args, **new_kwargs))
226
+
227
+ def propagate_sympy(
228
+ self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
229
+ ) -> IndexPropResult:
230
+ # Build a new SymPy expression from this ops call
231
+ def unwrap(a: Union[Any, IndexPropVar]) -> Any:
232
+ if not isinstance(a, IndexPropVar):
233
+ return a
234
+ return a.value
235
+
236
+ new_args = [unwrap(a) for a in args]
237
+ new_kwargs = {k: unwrap(v) for k, v in kwargs.items()}
238
+ new_expr = getattr(SymPyOps, name)(*new_args, **new_kwargs)
239
+ is_valid_expr = new_expr is not NotImplemented and (
240
+ # Inductor doesn't expect floating point in sympy expressions, but
241
+ # allow floating point constants to be propagated
242
+ isinstance(new_expr.expr, sympy.Number)
243
+ or new_expr.expr.is_integer
244
+ )
245
+ if not is_valid_expr:
246
+ return self.fallback(name, args, kwargs)
247
+ return IndexPropVar.new_symbolic(new_expr)
248
+
249
+ def __getattr__(self, name: str) -> Callable[..., IndexPropResult]:
250
+ def inner(*args: Any, **kwargs: Any) -> IndexPropResult:
251
+ if not hasattr(SymPyOps, name):
252
+ return self.fallback(name, args, kwargs)
253
+
254
+ var_arguments = [
255
+ a
256
+ for a in itertools.chain(args, kwargs.values())
257
+ if isinstance(a, IndexPropVar)
258
+ ]
259
+ if not all(v.is_symbolic for v in var_arguments):
260
+ return self.fallback(name, args, kwargs)
261
+
262
+ return self.propagate_sympy(name, args, kwargs)
263
+
264
+ return inner
265
+
266
+ def indirect_indexing(
267
+ self, index: Union[Any, IndexPropVar], size: Any, check: bool = True
268
+ ) -> Any:
269
+ # nb. We do index + Where(...) rather than Where(idx >= 0, idx, idx + sz) because we don't have CSE
270
+ # for SymPy expressions, so we don't want to repeat idx too much
271
+
272
+ # indirect_indexing returns a sympy value, so no need to wrap in IndexPropVar here
273
+ if isinstance(index, IndexPropVar) and index.is_symbolic:
274
+ # If we are turning a indirect indexing into direct, we need to wrap it.
275
+ index = index.value.expr
276
+ return index + Where(index >= 0, 0, size)
277
+ return self.fallback("indirect_indexing", (index, size, check), {}).value
venv/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py ADDED
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import logging
+from typing import Optional, Sequence
+
+import torch
+from torch import _prims, Tensor
+
+log = logging.getLogger(__name__)
+
+
+def make_prim(
+    schema: str,
+    impl_aten,
+    return_type=_prims.RETURN_TYPE.NEW,
+    doc: str = "",
+    tags: Optional[Sequence[torch.Tag]] = None,
+):
+    def meta(*args, **kwargs):
+        return _prims.TensorMeta(impl_aten(*args, **kwargs))
+
+    return _prims._make_prim(
+        schema=schema,
+        return_type=return_type,
+        meta=meta,
+        impl_aten=impl_aten,
+        doc=doc,
+        tags=tags,
+    )
+
+
+def eager_force_stride(input_tensor: Tensor, stride) -> Tensor:
+    if input_tensor.stride() == stride:
+        return input_tensor
+    new_tensor = input_tensor.clone().as_strided(
+        input_tensor.shape,
+        stride,
+    )
+    new_tensor.copy_(input_tensor)
+    return new_tensor
+
+
+# Custom prims used for handling randomness
+seed = make_prim(
+    "inductor_seed(Device device) -> Tensor",
+    lambda device: torch.randint(2**63 - 1, [], device=device),
+    doc="create a fresh seed (one per call) for use with inductor_rand",
+    tags=(torch.Tag.nondeterministic_seeded,),
+)
+seeds = make_prim(
+    "inductor_seeds(int count, Device device) -> Tensor",
+    lambda count, device: torch.randint(2**63 - 1, [count], device=device),
+    doc="Horizontal fusion of many inductor_seed() calls",
+    tags=(torch.Tag.nondeterministic_seeded,),
+)
+lookup_seed = make_prim(
+    # if inductor_lookup_seed changes, update partitioners.py
+    "inductor_lookup_seed(Tensor seeds, int index) -> Tensor",
+    lambda seeds, index: seeds[index],
+    doc="Extract a single seed from the result of inductor_seeds()",
+)
+random = make_prim(
+    "inductor_random(SymInt[] size, Tensor seed, str mode) -> Tensor",
+    lambda size, seed, mode: getattr(torch, mode)(size, device=seed.device),
+    doc="torch.rand()/torch.randn() using backend-specific RNG that can be fused",
+)
+randint = make_prim(
+    "inductor_randint(SymInt low, SymInt high, SymInt[] size, Tensor seed) -> Tensor",
+    lambda low, high, size, seed: torch.randint(low, high, size, device=seed.device),
+    doc="torch.randint() using backend-specific RNG that can be fused",
+)
+force_stride_order = make_prim(
+    "inductor_force_stride_order(Tensor input, SymInt[] stride) -> Tensor",
+    eager_force_stride,
+    doc="Force the stride order for input tensor. No-op if the input tensor already has the stride. Do a copy otherwise",
+)
+masked_scatter_with_index = make_prim(
+    "inductor_masked_scatter_with_index(Tensor input, Tensor mask, Tensor source_idx, Tensor source) -> Tensor",
+    lambda input_tensor, mask, index, source: torch.masked_scatter(
+        input_tensor, mask, source
+    ),
+    doc="masked_scatter with precomputed indices",
+)
+_unsafe_index_put_ = make_prim(
+    "_unsafe_index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)",
+    lambda self, indices, values, accumulate=False: torch.ops.aten.index_put_(
+        self, indices, values, accumulate
+    ),
+    doc="Unsafe index_put_ (doesn't issue device asserts)",
+)
venv/lib/python3.10/site-packages/torch/_inductor/ir.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__init__.py ADDED
@@ -0,0 +1 @@
+from . import mm, mm_common, mm_plus_mm, unpack_mixed_mm
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (284 Bytes).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/bmm.cpython-310.pyc ADDED
Binary file (3.91 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/conv.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm.cpython-310.pyc ADDED
Binary file (7.71 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/mm_common.cpython-310.pyc ADDED
Binary file (5.84 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/__pycache__/unpack_mixed_mm.cpython-310.pyc ADDED
Binary file (3.01 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/kernel/bmm.py ADDED
@@ -0,0 +1,128 @@
+import torch
+
+from ..lowering import register_lowering
+from ..select_algorithm import (
+    autotune_select_algorithm,
+    ExternKernelChoice,
+    TritonTemplate,
+)
+from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
+
+from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
+
+aten = torch.ops.aten
+
+
+def bmm_grid(b, m, n, meta):
+    return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), b, 1)
+
+
+bmm_template = TritonTemplate(
+    name="bmm",
+    grid=bmm_grid,
+    source=r"""
+{{def_kernel("A", "B")}}
+    M = {{size("A", -2)}}
+    N = {{size("B", -1)}}
+    K = {{size("A", -1)}}
+
+    stride_aq = {{stride("A", 0)}}
+    stride_am = {{stride("A", 1)}}
+    stride_ak = {{stride("A", 2)}}
+
+    stride_bq = {{stride("B", 0)}}
+    stride_bk = {{stride("B", 1)}}
+    stride_bn = {{stride("B", 2)}}
+
+    # based on triton.ops.matmul
+    pid = tl.program_id(0)
+    grid_m = (M + BLOCK_M - 1) // BLOCK_M
+    grid_n = (N + BLOCK_N - 1) // BLOCK_N
+
+    # re-order program ID for better L2 performance
+    width = GROUP_M * grid_n
+    group_id = pid // width
+    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
+    pid_m = group_id * GROUP_M + (pid % group_size)
+    pid_n = (pid % width) // (group_size)
+
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
+    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
+    rk = tl.arange(0, BLOCK_K)
+
+    idx_q = tl.program_id(1)  # batch dimension for BMM
+    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
+    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)
+
+    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
+    for k in range(K, 0, -BLOCK_K):
+        if EVEN_K:
+            a = tl.load(A)
+            b = tl.load(B)
+        else:
+            a = tl.load(A, mask=rk[None, :] < k, other=0.)
+            b = tl.load(B, mask=rk[:, None] < k, other=0.)
+        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
+        A += BLOCK_K * stride_ak
+        B += BLOCK_K * stride_bk
+
+    # rematerialize rm and rn to save registers
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    idx_q = tl.program_id(1)  # batch dimension for BMM
+    idx_m = rm[:, None]
+    idx_n = rn[None, :]
+    mask = (idx_m < M) & (idx_n < N)
+
+    # inductor generates a suffix
+    {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
+""",
+)
+
+aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
+aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
+
+
+@register_lowering(aten.bmm)
+def tuned_bmm(mat1, mat2, *, layout=None):
+    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
+
+    # options to tune from
+    choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
+    if use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            bmm_template.maybe_append_choice(
+                choices,
+                input_nodes=(mat1, mat2),
+                layout=layout,
+                **mm_options(config, m, n, k, layout),
+            )
+
+    return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)
+
+
+# Don't register this since it is slower than decomposing it
+# @register_lowering(aten.baddbmm)
+def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
+    m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)
+
+    # options to tune from
+    choices = (
+        [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
+        if use_aten_gemm_kernels()
+        else []
+    )
+    if use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            bmm_template.maybe_append_choice(
+                choices,
+                input_nodes=(inp, mat1, mat2),
+                layout=layout,
+                **mm_options(config, m, n, k, layout),
+                prefix_args=1,
+                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
+            )
+
+    return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)
venv/lib/python3.10/site-packages/torch/_inductor/kernel/conv.py ADDED
@@ -0,0 +1,495 @@
+from __future__ import annotations
+
+import functools
+import logging
+from typing import cast, List, Optional, Sequence, Tuple, TypedDict
+
+import torch
+from .. import config, ir
+from ..ir import TensorBox
+
+from ..lowering import (
+    add_layout_constraint,
+    constrain_to_fx_strides,
+    lowerings as L,
+    register_lowering,
+)
+from ..select_algorithm import (
+    autotune_select_algorithm,
+    ExternKernelChoice,
+    TritonTemplate,
+)
+from ..utils import (
+    ceildiv,
+    is_ones,
+    is_zeros,
+    pad_listlike,
+    sympy_product,
+    use_triton_template,
+)
+from ..virtualized import V
+from .mm_common import filtered_configs
+
+log = logging.getLogger(__name__)
+
+
+aten = torch.ops.aten
+
+
+def conv_grid(n, c, h, w, meta):
+    return (
+        ceildiv(n * h * w, meta["BLOCK_M"]),
+        ceildiv(c, meta["BLOCK_N"]),
+        meta["GROUPS"],
+    )
+
+
+# List of dictionaries to store the kernel configs. Configs that evaluate to true
+# will be utilised on the target platform
+kernel_configs = [
+    # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
+    {"config": (64, 256, 16, 2, 4), "cond": True},
+    {"config": (256, 64, 16, 2, 4), "cond": True},
+    {"config": (1024, 16, 16, 1, 8), "cond": True},
+    {"config": (128, 128, 32, 2, 8), "cond": True},
+    {"config": (64, 64, 32, 2, 4), "cond": True},
+    {"config": (64, 256, 32, 2, 8), "cond": True},
+    {"config": (256, 64, 32, 2, 8), "cond": True},
+]
+
+# Create filtered list of configs based on conv
+platform_configs = tuple(
+    cast(Tuple[int, int, int, int, int], config["config"])
+    for config in kernel_configs
+    if config["cond"]
+)
+
+# On ROCm convert num_stages to 1 as pipelining provides no benefit
+if torch.version.hip:
+    platform_configs = tuple(
+        (config[0], config[1], config[2], 1, config[4]) for config in platform_configs
+    )
+
+conv_configs = functools.partial(
+    filtered_configs,
+    configs=platform_configs,
+)
+
+LOOP_BODY = """
+        idx_x_h = i - PADDING_H + idx_y_h * STRIDE_H
+        idx_x_w = j - PADDING_W + idx_y_w * STRIDE_W
+        idx_x_c = tl.arange(0, BLOCK_K) + k
+
+        x_ptrs = x_base + (
+            (idx_x_h * stride_xh)[:, None]
+            + (idx_x_w * stride_xw)[:, None]
+            + (idx_x_c * stride_xc)[None, :]
+        )
+        mask_x = (
+            (idx_n < BATCH)[:, None]
+            & (idx_x_h >= 0)[:, None]
+            & (idx_x_h < IN_H)[:, None]
+            & (idx_x_w >= 0)[:, None]
+            & (idx_x_w < IN_W)[:, None]
+            & (idx_x_c < GROUP_IN_C)[None, :]
+        )
+        matrix_x = tl.load(x_ptrs, mask=mask_x, other=0.0)
+
+        w_ptrs = w_base + (
+            (idx_x_c * stride_wc_in)[:, None] + (i * stride_wh) + (j * stride_ww)
+        )
+        mask_w = (idx_x_c[:, None] < GROUP_IN_C) & (idx_y_c[None, :] < GROUP_OUT_C)
+        matrix_w = tl.load(w_ptrs, mask=mask_w, other=0.0)
+        acc += tl.dot(matrix_x, matrix_w, allow_tf32=ALLOW_TF32)
+"""
+
+"""
+This is a relatively simple conv implementation that can likely be
+improved.  Many alternate conv versions can be found here:
+https://github.com/pytorch/torchdynamo/pull/971
+"""
+conv2d_template = TritonTemplate(
+    name="convolution",
+    grid=conv_grid,
+    source=r"""
+{{def_kernel("X", "W")}}
+    # Tensor dimensions
+    BATCH = {{size("X", 0)}}
+    IN_C = {{size("X", 1)}}
+    IN_H = {{size("X", 2)}}
+    IN_W = {{size("X", 3)}}
+    OUT_C = {{size(None, 1)}}
+    OUT_H = {{size(None, 2)}}
+    OUT_W = {{size(None, 3)}}
+
+    # Strides:
+    stride_xn = {{stride("X", 0)}}
+    stride_xc = {{stride("X", 1)}}
+    stride_xh = {{stride("X", 2)}}
+    stride_xw = {{stride("X", 3)}}
+    stride_wc_out = {{stride("W", 0)}}
+    stride_wc_in = {{stride("W", 1)}}
+    stride_wh = {{stride("W", 2)}}
+    stride_ww = {{stride("W", 3)}}
+
+    nhw = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
+    idx_y_w = nhw % OUT_W
+    nh = nhw // OUT_W
+    idx_y_h = nh % OUT_H
+    idx_n = nh // OUT_H
+    idx_y_c = tl.program_id(1) * BLOCK_N + tl.arange(0, BLOCK_N)
+
+    {% if GROUPS == 1 %}
+    group = 0
+    GROUP_IN_C = IN_C
+    GROUP_OUT_C = OUT_C
+    {% else %}
+    group = tl.program_id(2)
+    GROUP_IN_C = IN_C // GROUPS
+    GROUP_OUT_C = OUT_C // GROUPS
+    {% endif %}
+
+    x_base = X + (group * stride_xc * GROUP_IN_C + idx_n * stride_xn)[:, None]
+    w_base = (
+        W + (group * stride_wc_out * GROUP_OUT_C + idx_y_c * stride_wc_out)[None, :]
+    )
+
+    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
+
+    {% if UNROLL %}
+    {% for i in range(KERNEL_H) %}
+    {% for j in range(KERNEL_W) %}
+    i = {{i}}
+    j = {{j}}
+    for k in range(0, GROUP_IN_C, BLOCK_K):
+"""
+    + LOOP_BODY
+    + """
+    {% endfor %}
+    {% endfor %}
+    {% else %}
+    # Could be simplified, but slightly slower:
+    # for i in range(KERNEL_H):
+    #     for j in range(KERNEL_W):
+    #         for k in range(0, GROUP_IN_C, BLOCK_K):
+    BLOCK_K_COUNT = (GROUP_IN_C + BLOCK_K - 1) // BLOCK_K
+    for ijk in range(KERNEL_H * KERNEL_W * BLOCK_K_COUNT):
+        k = (ijk % BLOCK_K_COUNT) * BLOCK_K
+        ij = ijk // BLOCK_K_COUNT
+        i = ij // KERNEL_W
+        j = ij % KERNEL_W
+"""
+    + LOOP_BODY
+    + """
+    {% endif %}
+
+    mask = (
+        (idx_n < BATCH)[:, None]
+        & (idx_y_h < OUT_H)[:, None]
+        & (idx_y_w < OUT_W)[:, None]
+        & (idx_y_c < GROUP_OUT_C)[None, :]
+    )
+    idx_n = idx_n[:, None]
+    idx_c = idx_y_c[None, :] + group * GROUP_OUT_C
+    idx_h = idx_y_h[:, None]
+    idx_w = idx_y_w[:, None]
+
+    # inductor generates a suffix
+    {{store_output(("idx_n", "idx_c", "idx_h", "idx_w"), "acc", "mask")}}
+""",
+)
+
+aten_convolution = ExternKernelChoice(
+    torch.convolution,
+    "at::convolution",
+    has_out_variant=False,
+    op_overload=aten.convolution.default,
+)
+
+
+def conv1x1_via_mm(x, w, *, out):
+    w = torch.squeeze(torch.squeeze(w, -1), -1)
+    return torch.matmul(
+        x.permute(0, 2, 3, 1), w.permute(1, 0), out=out.permute(0, 2, 3, 1)
+    )
+
+
+aten_conv1x1_via_mm = ExternKernelChoice(conv1x1_via_mm, None)
+
+
+class ConvLayoutParams(TypedDict):
+    stride: tuple[int, ...]
+    padding: tuple[int, ...]
+    dilation: tuple[int, ...]
+    transposed: bool
+    output_padding: tuple[int, ...]
+    groups: int
+
+
+def conv_layout(
+    x: TensorBox,
+    weight: TensorBox,
+    bias: Optional[TensorBox],
+    stride: Sequence[int],
+    padding: tuple[int, ...],
+    dilation: tuple[int, ...],
+    transposed: bool,
+    output_padding: tuple[int, ...],
+    groups: int,
+) -> ir.Layout:
+    """Determine output layout for a convolution"""
+    with V.graph.fake_mode:
+        output = torch.ops.aten.convolution(
+            ir.ir_node_to_tensor(x, guard_shape=True),
+            ir.ir_node_to_tensor(weight, guard_shape=True),
+            ir.ir_node_to_tensor(bias, guard_shape=True),
+            stride,
+            tuple(V.graph.sizevars.size_hint(p) for p in padding),  # type: ignore[arg-type]
+            dilation,
+            transposed,
+            tuple(V.graph.sizevars.size_hint(p) for p in output_padding),  # type: ignore[arg-type]
+            groups,
+        )
+        sizes = ir.convert_shape_to_inductor(output.size())
+        stride = ir.convert_shape_to_inductor(output.stride())  # type: ignore[assignment]
+
+    return ir.FixedLayout(
+        x.get_device(),
+        x.get_dtype(),
+        sizes,
+        stride,
+    )
+
+
+def channels_last_order(rank):
+    order = list(reversed(range(rank)))
+    order.insert(1, order.pop(-1))
+    return order
+
+
+def convert_1x1_conv_to_mm(x, weight, bias):
+    # special case for 1x1 convolution, which is actually just a matmul
+    rank = len(weight.get_size())
+    for _ in range(rank - 2):
+        weight = L[aten.squeeze](weight, dim=-1)
+    weight = L[aten.permute](weight, [1, 0])
+
+    if x.get_size()[0] != 1:
+        x = ir.ExternKernel.require_stride_order(x, channels_last_order(rank))
+    else:
+        x.realize()
+        x.freeze_layout()
+
+    x_permute = list(range(rank))
+    x_permute.append(x_permute.pop(1))
+    x = L[aten.permute](x, x_permute)
+    *sizes, in_chan = x.get_size()
+    x = L[aten.reshape](x, [sympy_product(sizes), in_chan])
+    if bias is None:
+        result = L[aten.mm](x, weight)
+    else:
+        result = L[aten.addmm](bias, x, weight)
+    result = L[aten.reshape](result, [*sizes, -1])
+    result_permute = list(range(rank))
+    result_permute.insert(1, result_permute.pop(-1))
+    return L[aten.permute](result, result_permute)
+
+
+@register_lowering(aten.convolution)
+def convolution(
+    x: TensorBox,
+    weight: TensorBox,
+    bias: TensorBox,
+    stride: List[int],
+    padding: List[int],
+    dilation: List[int],
+    transposed: bool,
+    output_padding: List[int],
+    groups: int,
+):
+    stride = tuple(stride)
+    padding = tuple(padding)
+    dilation = tuple(dilation)
+    output_padding = tuple(output_padding)
+    if not isinstance(groups, int):
+        groups = V.graph.sizevars.evaluate_static_shape(groups)
+    assert isinstance(groups, int)
+    kwargs: ConvLayoutParams = {
+        "stride": stride,
+        "padding": padding,
+        "dilation": dilation,
+        "transposed": transposed,
+        "output_padding": output_padding,
+        "groups": groups,
+    }
+
+    if len(x.get_size()) == len(weight.get_size()) - 1:
+        # add batch dimension to simplify rest of function
+        return L[aten.squeeze](
+            convolution(L[aten.expand](x, [1, *x.get_size()]), weight, bias, **kwargs),
+            dim=0,
+        )
+
+    out_chan, in_chan, *kernel_shape = V.graph.sizevars.evaluate_static_shapes(
+        weight.get_size()
+    )
+    ndim = len(kernel_shape)
+    stride = pad_listlike(stride, ndim)
+    padding = pad_listlike(padding, ndim)
+    dilation = pad_listlike(dilation, ndim)
+    output_padding = pad_listlike(output_padding, ndim)
+
+    def channels_last_conv():
+        if V.graph.layout_opt and ndim == 2:
+            return True
+
+        layout = conv_layout(x, weight, None, **kwargs)
+        req_stride_order = ir.get_stride_order(
+            V.graph.sizevars.size_hints(layout.stride)
+        )
+        return req_stride_order == ir.NHWC_STRIDE_ORDER
+
+    autotuning_gemm = config.max_autotune or config.max_autotune_gemm
+
+    if (
+        (config.conv_1x1_as_mm or (autotuning_gemm and channels_last_conv()))
+        and is_ones(kernel_shape)
+        and is_ones(stride)
+        and is_zeros(padding)
+        and is_ones(dilation)
+        and not transposed
+        and is_zeros(output_padding)
+        and groups == 1
+    ):
+        return convert_1x1_conv_to_mm(x, weight, bias)
+
+    if bias is not None and ir.get_device_type(x) != "cpu":
+        # peel off the bias, cudnn is slower with it
+        result = convolution(x, weight, None, **kwargs)
+        return L[aten.add](
+            result, L[aten.view](bias, [result.get_size()[1]] + ndim * [1])
+        )
+
+    x.realize()
+    weight.realize()
+
+    # ndim can be 1 for convolution in models such as demucs
+    # TODO: check if it's beneficial to convert Conv1d to Conv2d and then
+    # apply channels last.
+    if V.graph.layout_opt and ndim == 2:
+        V.graph.num_channels_last_conv += 1
+        x = ir.ExternKernel.require_channels_last(x)
+        # TODO maybe we can convert weights to channels last just once before
+        # running the model.
+        weight = ir.ExternKernel.require_channels_last(weight)
+        layout = conv_layout(x, weight, None, **kwargs)
+    else:
+        layout = conv_layout(x, weight, None, **kwargs)
+        req_stride_order = ir.get_stride_order(
+            V.graph.sizevars.size_hints(layout.stride)
+        )
+        x = ir.ExternKernel.require_stride_order(x, req_stride_order)
+        weight = ir.ExternKernel.require_stride_order(weight, req_stride_order)
+
+    ordered_kwargs_for_cpp_kernel = [
+        "stride",
+        "padding",
+        "dilation",
+        "transposed",
+        "output_padding",
+        "groups",
+    ]
+    if bias is None:
+        args = [x, weight]
+        kwargs["bias"] = None  # type: ignore[typeddict-unknown-key]
+        ordered_kwargs_for_cpp_kernel.insert(0, "bias")
+    else:
+        args = [x, weight, bias]
+        bias.realize()
+        bias.freeze_layout()
+        V.graph.sizevars.evaluate_static_shapes(bias.get_size())
+    choices = [
+        aten_convolution.bind(
+            args,
+            layout,
+            ordered_kwargs_for_cpp_kernel,
+            **kwargs,
+        )
+    ]
+
+    if (
+        use_triton_template(layout)
+        # templates only support these:
+        and ndim == 2
+        and is_ones(dilation)
+        and not transposed
+        and is_zeros(output_padding)
+        # there are some odd models where this check fails (e.g. shufflenet_v2_x1_0)
+        and V.graph.sizevars.statically_known_equals(in_chan, x.get_size()[1])  # type: ignore[arg-type]
+    ):
+        if (
+            is_ones(kernel_shape)
+            and is_ones(stride)
+            and is_zeros(padding)
+            and groups == 1
+        ):
+            choices.append(aten_conv1x1_via_mm.bind(args, layout))
+
+        for cfg in conv_configs(
+            sympy_product([x.get_size()[0], *x.get_size()[2:]]),
+            out_chan,
+            in_chan,
+        ):
+            conv2d_template.maybe_append_choice(
+                choices,
+                input_nodes=(x, weight),
+                layout=layout,
+                KERNEL_H=kernel_shape[0],
+                KERNEL_W=kernel_shape[1],
+                STRIDE_H=stride[0],
+                STRIDE_W=stride[1],
+                PADDING_H=padding[0],
+                PADDING_W=padding[1],
+                GROUPS=groups,
+                # TODO(jansel): try unroll for bigger kernels once fixed:
+                # https://github.com/openai/triton/issues/1254
+                UNROLL=is_ones(kernel_shape),
+                ALLOW_TF32=torch.backends.cudnn.allow_tf32,
+                num_stages=cfg.num_stages,
+                num_warps=cfg.num_warps,
+                **cfg.kwargs,
+            )
+
+    return autotune_select_algorithm("convolution", choices, args, layout)
+
+
+@register_lowering(aten._convolution)
+def _convolution(
+    x,
+    weight,
+    bias,
+    stride,
+    padding,
+    dilation,
+    transposed,
+    output_padding,
+    groups,
+    benchmark,
+    deterministic,
+    cudnn_enabled,
+    allow_tf32,
+):
+    return convolution(
+        x, weight, bias, stride, padding, dilation, transposed, output_padding, groups
+    )
+
+
+def constrain_conv_to_fx_strides(fx_node, *args, **kwargs):
+    assert fx_node.target == torch.ops.aten.convolution.default
+    if V.graph.layout_opt:
+        return args, kwargs
+    else:
+        return constrain_to_fx_strides(fx_node, *args, **kwargs)
+
+
+add_layout_constraint(aten.convolution, constrain_conv_to_fx_strides)
venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm.py ADDED
@@ -0,0 +1,312 @@
+import functools
+import logging
+from typing import Any, Dict, List, Optional
+
+import torch
+from torch._inductor.virtualized import V
+from .. import config as inductor_config
+from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate
+from ..lowering import register_lowering
+from ..select_algorithm import (
+    autotune_select_algorithm,
+    ExternKernelChoice,
+    TritonTemplate,
+)
+from ..utils import (
+    use_aten_gemm_kernels,
+    use_cutlass_template,
+    use_max_autotune,
+    use_triton_template,
+)
+from .mm_common import (
+    addmm_epilogue,
+    int8_mm_configs,
+    mm_args,
+    mm_configs,
+    mm_grid,
+    mm_options,
+)
+
+log = logging.getLogger(__name__)
+aten = torch.ops.aten
+
+mm_template = TritonTemplate(
+    name="mm",
+    grid=mm_grid,
+    source=r"""
+{{def_kernel("A", "B")}}
+    M = {{size("A", 0)}}
+    N = {{size("B", 1)}}
+    K = {{size("A", 1)}}
+    if M * N == 0:
+        # early exit due to zero-size input(s)
+        return
+    stride_am = {{stride("A", 0)}}
+    stride_ak = {{stride("A", 1)}}
+    stride_bk = {{stride("B", 0)}}
+    stride_bn = {{stride("B", 1)}}
+
+    # based on triton.ops.matmul
+    pid = tl.program_id(0)
+    grid_m = (M + BLOCK_M - 1) // BLOCK_M
+    grid_n = (N + BLOCK_N - 1) // BLOCK_N
+
+    # re-order program ID for better L2 performance
+    width = GROUP_M * grid_n
+    group_id = pid // width
+    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
+    pid_m = group_id * GROUP_M + (pid % group_size)
+    pid_n = (pid % width) // (group_size)
+
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
+    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
+    rk = tl.arange(0, BLOCK_K)
+    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
+    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
+
+    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
+    for k in range(K, 0, -BLOCK_K):
+        if EVEN_K:
+            a = tl.load(A)
+            b = tl.load(B)
+        else:
+            a = tl.load(A, mask=rk[None, :] < k, other=0.)
+            b = tl.load(B, mask=rk[:, None] < k, other=0.)
+        if B_PROLOGUE_CAST_TYPE is not None:
+            b = b.to(B_PROLOGUE_CAST_TYPE)
+        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
+        A += BLOCK_K * stride_ak
+        B += BLOCK_K * stride_bk
+
+    # rematerialize rm and rn to save registers
+    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
+    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
+    idx_m = rm[:, None]
+    idx_n = rn[None, :]
+    mask = (idx_m < M) & (idx_n < N)
+
+    # inductor generates a suffix
+    {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
+""",
+)
+
+aten_mm = ExternKernelChoice(torch.mm, "at::mm_out")
+
+
+aten_addmm = ExternKernelChoice(
+    torch.addmm, "at::addmm_out", op_overload=aten.addmm.default
+)
+
+aten__int_mm = ExternKernelChoice(torch._int_mm, "at::_int_mm")
+
+
+def _is_int8_mat(mat):
+    return mat.get_dtype() in (torch.int8, torch.uint8)
+
+
+def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):
+    """
+    Giving torch.addmm a 1D tensor calls a different (faster) cublasLt
+    kernel under the hood.  There are a few shapes where this is slower,
+    but they are rare.
+    """
+    if inp.stride(0) == 0 or inp.size(0) == 1:
+        return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)
+    return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)
+
+
+aten_bias_addmm = ExternKernelChoice(bias_addmm, None)
+
+
+@register_lowering(aten.mm, type_promotion_kind=None)
+def tuned_mm(mat1, mat2, *, layout=None):
+    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
+
+    # options to tune from
+    choices = [aten_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
+
+    if m * n != 0 and use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            mm_template.maybe_append_choice(
+                choices,
+                input_nodes=(mat1, mat2),
+                layout=layout,
+                **mm_options(config, m, n, k, layout),
+            )
+
+    if m * n != 0 and use_cutlass_template(layout):
+        CUTLASSGemmTemplate.add_cutlass_gemm_choices(
+            choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True
+        )
+
+    from torch._inductor.ir import FixedLayout, FlexibleLayout
+
+    if (
+        len(choices) == 1
+        and use_aten_gemm_kernels()
+        and isinstance(layout, FixedLayout)
+    ):
+        # If we are not autotuning, we can swap to a FlexibleLayout
+        # in order to get fusion optimizations to kick in, e.g. ConcatFusion
+        layout = FlexibleLayout(
+            device=layout.device, dtype=layout.dtype, size=layout.size
+        )
+        choices = [aten_mm.bind((mat1, mat2), layout)]
+
+    return autotune_select_algorithm("mm", choices, [mat1, mat2], layout)
+
+
+@register_lowering(aten._int_mm, type_promotion_kind=None)
+def tuned_int_mm(mat1, mat2, *, layout=None):
+    m, n, k, layout, mat1, mat2 = mm_args(
+        mat1, mat2, layout=layout, out_dtype=torch.int32
+    )
+    choices = (
+        [aten__int_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
+    )
+    if m * n != 0 and use_triton_template(layout, enable_int32=True):
+        # TODO: Re-enable eager mode implementation once cuBLAS is fixed
+        choices = []
+        for config in int8_mm_configs(m, n, k):
+            mm_template.maybe_append_choice(
+                choices,
+                input_nodes=(mat1, mat2),
+                layout=layout,
+                **mm_options(config, m, n, k, layout),
+            )
+    return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout)
+
+
+@register_lowering(aten.addmm, type_promotion_kind=None)
+def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
+    m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout)
+    if m * n == 0 or not use_max_autotune():
+        choices = (
+            [
+                aten_addmm.bind(
+                    (inp, mat1, mat2),
+                    layout,
+                    alpha=alpha,
+                    beta=beta,
+                )
+            ]
+            if use_aten_gemm_kernels()
+            else []
+        )
+        return autotune_select_algorithm("addmm", choices, [inp, mat1, mat2], layout)
+
+    choices = (
+        [
+            aten_addmm.bind(
+                (inp_expanded, mat1, mat2),
+                layout,
+                alpha=alpha,
+                beta=beta,
+            )
+        ]
+        if use_aten_gemm_kernels()
+        else []
+    )
+
+    if (
+        use_aten_gemm_kernels()
+        and inp_expanded.get_stride()[0] == 0
+        and inp_expanded.get_device().type == "cuda"
+        and inductor_config.triton.autotune_cublasLt
+    ):
+        # unexpand inp to make sure fused addmm from cublasLt is used
+        choices.insert(
+            0,
+            aten_bias_addmm.bind(
+                (inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta
+            ),
+        )
+
+    if use_triton_template(layout):
+        for config in mm_configs(m, n, k):
+            mm_template.maybe_append_choice(
+                choices,
+                input_nodes=(inp_expanded, mat1, mat2),
+                layout=layout,
+                **mm_options(config, m, n, k, layout),
+                prefix_args=1,
+                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
+            )
+
+    if use_cutlass_template(layout):
+        CUTLASSGemmTemplate.add_cutlass_gemm_choices(
+            choices,
+            layout,
+            [mat1, mat2, inp_expanded],
+            alpha=alpha,
+            beta=beta,
+            input_reorder=[2, 0, 1],
+            fuseable=False,
+        )
+
+    return autotune_select_algorithm(
+        "addmm", choices, [inp_expanded, mat1, mat2], layout
+    )
+
+
+def fallback_mixed_mm(mat1, mat2, *, out):
+    return torch.mm(mat1, mat2.to(mat1.dtype), out=out)
+
+
+aten_fallback_mixed_mm = ExternKernelChoice(fallback_mixed_mm, None)
+
+
+@functools.lru_cache(None)
+def _is_sm7x_or_older_gpu(index: Optional[int]) -> bool:
+    props = torch.cuda.get_device_properties(index or 0)
+    return props.major <= 7
+
+
+def tuned_mixed_mm(mat1, mat2, mat2_dtype):
+    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None)
+    choices = [aten_fallback_mixed_mm.bind((mat1, mat2), layout)]
+    if (
+        mat1.layout.dtype != torch.float32 and not mat2.layout.is_contiguous()
+    ) or _is_sm7x_or_older_gpu(layout.device.index):
+        # can't use triton kernel unless one of these is true or if running on v100 (numerical issues)
+        return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
+    if inductor_config.force_mixed_mm:
+        choices = []
+    b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "")
+    has_int8_tensor = _is_int8_mat(mat1) or _is_int8_mat(mat2)
+    for config in mm_configs(m, n, k, has_int8_tensor=has_int8_tensor):
+        mm_template.maybe_append_choice(
+            choices,
+            input_nodes=(mat1, mat2),
+            layout=layout,
+            **mm_options(config, m, n, k, layout, b_prologue_cast_type),
+        )
+    return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout)
+
+
+# This op is a special case of the int_mm op which we use based on the pattern
+# _int_mm -> mul (defined in ../fx_passes/post_grad.py) in order to prevent
+# realization of the int32 _int_mm output by forcing fusion with the mul op.
+# This is only used when config.force_fuse_int_mm_with_mul = True
+def tuned_fused_int_mm_mul(mat1, mat2, mat3, out_dtype, *, layout=None):
+    out_dtype = (
+        torch.promote_types(mat3.get_dtype(), torch.int32)
+        if out_dtype is None
+        else out_dtype
+    )
+    m, n, k, layout, mat1, mat2, mat3 = mm_args(
+        mat1, mat2, mat3, layout=layout, out_dtype=out_dtype
+    )
+    choices: List[Dict[Any, Any]] = []
+    for config in int8_mm_configs(m, n, k):
+        mm_template.maybe_append_choice(
+            choices,
+            input_nodes=(mat1, mat2, mat3),
+            layout=layout,
+            **dict(mm_options(config, m, n, k, layout), ACC_TYPE="tl.int32"),
+            suffix_args=1,
+            epilogue_fn=V.ops.mul,
+        )
+    return autotune_select_algorithm("int_mm", choices, [mat1, mat2, mat3], layout)
venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm_common.py ADDED
@@ -0,0 +1,262 @@
+import functools
+import logging
+from typing import cast, List, Tuple
+
+import sympy
+
+import torch
+from torch._inductor.select_algorithm import realize_inputs
+from torch._inductor.virtualized import V
+
+from .. import config as inductor_config
+from ..utils import ceildiv as cdiv, next_power_of_2
+
+log = logging.getLogger(__name__)
+
+
+def triton_config(num_stages, num_warps, **kwargs):
+    from triton import Config
+
+    return Config(kwargs, num_stages=num_stages, num_warps=num_warps)
+
+
+def filtered_configs(
+    m: int,
+    n: int,
+    k: int,
+    configs: List[Tuple[int, int, int, int, int]],
+    has_int8_tensor=False,
+):
+    """Heuristic to shrink configs when they are bigger than the input size"""
+
+    # According to https://github.com/openai/triton/issues/2156#issuecomment-1695897424
+    # it's safer to use at least [32, 32] block size for int8/uint8
+    # tensors
+    min_block_size = 32 if has_int8_tensor else 16
+    m = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                m, fallback=torch._inductor.config.unbacked_symint_fallback  # type: ignore[arg-type]
+            )
+        ),
+        min_block_size,
+    )
+    n = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                n, fallback=torch._inductor.config.unbacked_symint_fallback  # type: ignore[arg-type]
+            )
+        ),
+        min_block_size,
+    )
+    k = max(
+        next_power_of_2(
+            V.graph.sizevars.size_hint(
+                k, fallback=torch._inductor.config.unbacked_symint_fallback  # type: ignore[arg-type]
+            )
+        ),
+        min_block_size,
+    )
+    used = set()
+    for block_m, block_n, block_k, num_stages, num_warps in configs:
+        # shrink configs for small sizes
+        block_m = max(min(block_m, m), min_block_size)
+        block_n = max(min(block_n, n), min_block_size)
+        block_k = max(min(block_k, k), min_block_size)
+        # each warp computes 16x16 tile = 256
+        num_warps = min(num_warps, block_m * block_n // 256)
+        if torch.version.hip:
+            for matrix_instr_nonkdim in [0, 16]:
+                if matrix_instr_nonkdim != 0 and (
+                    block_m % matrix_instr_nonkdim != 0
+                    or block_n % matrix_instr_nonkdim != 0
+                ):
+                    #  block_m and block_n must be a multiple of matrix_instr_nonkdim
+                    continue
+                if (
+                    block_m,
+                    block_n,
+                    block_k,
+                    num_stages,
+                    num_warps,
+                    matrix_instr_nonkdim,
+                ) not in used:
+                    used.add(
+                        (
+                            block_m,
+                            block_n,
+                            block_k,
+                            num_stages,
+                            num_warps,
+                            matrix_instr_nonkdim,
+                        )
+                    )
+                    yield triton_config(
+                        BLOCK_M=block_m,
+                        BLOCK_N=block_n,
+                        BLOCK_K=block_k,
+                        num_stages=num_stages,
+                        num_warps=num_warps,
+                        matrix_instr_nonkdim=matrix_instr_nonkdim,
+                    )
+        else:
+            if (block_m, block_n, block_k, num_stages, num_warps, 0) not in used:
+                used.add((block_m, block_n, block_k, num_stages, num_warps, 0))
+                yield triton_config(
+                    BLOCK_M=block_m,
+                    BLOCK_N=block_n,
+                    BLOCK_K=block_k,
+                    num_stages=num_stages,
+                    num_warps=num_warps,
+                )
+
+
+# List of dictionaries to store the kernel configs. Configs that evaluate to true
+# will be utilised on the target platform
+mm_kernel_configs = [
+    # "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
+    {"config": (64, 64, 32, 2, 4), "cond": True},
+    {"config": (64, 128, 32, 3, 4), "cond": True},
+    {"config": (128, 64, 32, 3, 4), "cond": True},
+    {"config": (64, 128, 32, 4, 8), "cond": True},
+    {"config": (128, 64, 32, 4, 8), "cond": True},
+    {"config": (64, 32, 32, 5, 8), "cond": True},
+    {"config": (32, 64, 32, 5, 8), "cond": True},
+    {"config": (128, 128, 32, 2, 8), "cond": True},
+    {"config": (64, 64, 64, 3, 8), "cond": True},
+    {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
+    {"config": (64, 64, 16, 2, 4), "cond": True},
+    {"config": (32, 32, 16, 1, 2), "cond": True},
+]
+
+int8_mm_kernel_configs = [
+    {"config": (64, 64, 32, 2, 4), "cond": True},
+    {"config": (64, 128, 32, 3, 4), "cond": True},
+    {"config": (128, 64, 32, 3, 4), "cond": True},
+    {"config": (64, 128, 32, 4, 8), "cond": True},
+    {"config": (128, 64, 32, 4, 8), "cond": True},
+    {"config": (64, 32, 32, 5, 8), "cond": True},
+    {"config": (32, 64, 32, 5, 8), "cond": True},
+    {"config": (128, 128, 32, 2, 8), "cond": True},
+    {"config": (64, 64, 64, 3, 8), "cond": True},
+    # {"config": (32, 32, 128, 2, 4), "cond": True},
+    # {"config": (64, 64, 16, 2, 4), "cond": True},
+    # {"config": (32, 32, 16, 1, 2), "cond": True},
+    {"config": (128, 256, 128, 3, 8), "cond": torch.version.hip is None},
+    {"config": (256, 128, 128, 3, 8), "cond": torch.version.hip is None},
+]
+
+# Create filtered list of configs based on cond evaluation
+
+
+mm_platform_configs = tuple(
+    cast(Tuple[int, int, int, int, int], config["config"])
+    for config in mm_kernel_configs
+    if config["cond"]
+)
+int8_platform_configs = tuple(
+    cast(Tuple[int, int, int, int, int], config["config"])
+    for config in int8_mm_kernel_configs
+    if config["cond"]
+)
+
+# On ROCm convert num_stages to 1 as pipelining provides no benefit
+if torch.version.hip:
+    mm_platform_configs = tuple(
+        (config[0], config[1], config[2], 1, config[4])
+        for config in mm_platform_configs
+    )
+    int8_platform_configs = tuple(
+        (config[0], config[1], config[2], 1, config[4])
+        for config in mm_platform_configs
+    )
+
+mm_configs = functools.partial(
+    filtered_configs,
+    configs=mm_platform_configs,
+)
+
+int8_mm_configs = functools.partial(
+    filtered_configs,
+    configs=int8_platform_configs,
+)
+
+
+def mm_grid(m, n, meta):
+    """
+    The CUDA grid size for matmul triton templates.
+    """
+    return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), 1, 1)
+
+
+def acc_type(dtype):
+    if dtype in (torch.float16, torch.bfloat16):
+        return "tl.float32"
+    return f"tl.{dtype}".replace("torch.", "")
+
+
+def mm_options(config, sym_m, sym_n, sym_k, layout, b_prologue_cast_type=None):
+    """
+    Common options to matmul triton templates.
+    """
+    even_k_symbolic = (
+        # it isn't worth guarding on this
+        sympy.gcd(sym_k, config.kwargs["BLOCK_K"])
+        == config.kwargs["BLOCK_K"]
+    )
+    allow_tf32 = torch.backends.cuda.matmul.allow_tf32 and (
+        not inductor_config.force_same_precision
+        or ((sym_m % 16) == 0 and (sym_n % 16) == 0 and (sym_k % 8) == 0)
+    )
+    return dict(
+        GROUP_M=8,
+        EVEN_K=even_k_symbolic,
+        ALLOW_TF32=allow_tf32,
+        ACC_TYPE=acc_type(layout.dtype),
+        B_PROLOGUE_CAST_TYPE=b_prologue_cast_type,
+        num_stages=config.num_stages,
+        num_warps=config.num_warps,
+        **config.kwargs,
+    )
+
+
+def mm_args(mat1, mat2, *others, layout=None, out_dtype=None, use_4x2_dim=False):
+    """
+    Common arg processing for mm,bmm,addmm,etc
+    """
+    mat1, mat2 = realize_inputs(mat1, mat2)
+    *b1, m, k1 = mat1.get_size()
+    *b2, k2, n = mat2.get_size()
+    b = [V.graph.sizevars.guard_equals(a, b) for a, b in zip(b1, b2)]
+    if use_4x2_dim:
+        k2 = k2 * 2
+    k = V.graph.sizevars.guard_equals(k1, k2)
+    if layout is None:
+        from torch._inductor.ir import FixedLayout
+
+        if out_dtype is None:
+            out_dtype = mat1.get_dtype()
+        layout = FixedLayout(
+            mat1.get_device(),
+            out_dtype,
+            [*b, m, n],
+        )
+    else:
+        assert out_dtype is None, "out_dtype is ignored if layout is specified."
+
+    from ..lowering import expand
+
+    others = [realize_inputs(expand(x, layout.size)) for x in others]
+
+    return [m, n, k, layout, mat1, mat2, *others]
+
+
+def addmm_epilogue(dtype, alpha, beta):
+    def epilogue(acc, bias):
+        if alpha != 1:
+            acc = V.ops.mul(acc, V.ops.constant(alpha, dtype))
+        if beta != 1:
+            bias = V.ops.mul(bias, V.ops.constant(beta, dtype))
+        return V.ops.add(acc, bias)
+
+    return epilogue
venv/lib/python3.10/site-packages/torch/_inductor/kernel/mm_plus_mm.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+
3
+ import torch
4
+
5
+ from ..lowering import lowerings
6
+ from ..select_algorithm import (
7
+ autotune_select_algorithm,
8
+ ExternKernelChoice,
9
+ TritonTemplate,
10
+ )
11
+ from ..utils import use_aten_gemm_kernels, use_triton_template
12
+ from ..virtualized import V
13
+ from .mm_common import mm_args, mm_grid, mm_options
14
+
15
+ aten = torch.ops.aten
16
+
17
+ aten_mm_plus_mm = ExternKernelChoice(
18
+ torch.ops.inductor._mm_plus_mm, "torch::inductor::_mm_plus_mm"
19
+ )
20
+
21
+ mm_plus_mm_template = TritonTemplate(
22
+ name="mm_plus_mm",
23
+ grid=mm_grid,
24
+ debug=False,
25
+ source=r"""
26
+ {{def_kernel("A", "B", "C", "D")}}
27
+ M = {{size("A", 0)}}
28
+ N = {{size("B", 1)}}
29
+ K1 = {{size("A", 1)}}
30
+ if M * N == 0:
31
+ # early exit due to zero-size input(s)
32
+ return
33
+ # K2 = {{size("C", 1)}}
34
+ stride_am = {{stride("A", 0)}}
35
+ stride_ak = {{stride("A", 1)}}
36
+ stride_bk = {{stride("B", 0)}}
37
+ stride_bn = {{stride("B", 1)}}
38
+ stride_cm = {{stride("C", 0)}}
39
+ stride_ck = {{stride("C", 1)}}
40
+ stride_dk = {{stride("D", 0)}}
41
+ stride_dn = {{stride("D", 1)}}
42
+
43
+ # based on triton.ops.matmul
44
+ pid = tl.program_id(0)
45
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
46
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
47
+
48
+ # re-order program ID for better L2 performance
49
+ width = GROUP_M * grid_n
50
+ group_id = pid // width
51
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
52
+ pid_m = group_id * GROUP_M + (pid % group_size)
53
+ pid_n = (pid % width) // (group_size)
54
+
55
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
56
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
57
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
58
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
59
+ rk = tl.arange(0, BLOCK_K)
60
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
61
+ B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
62
+ C = C + (ram[:, None] * stride_cm + rk[None, :] * stride_ck)
63
+ D = D + (rk[:, None] * stride_dk + rbn[None, :] * stride_dn)
64
+
65
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
66
+ for k1 in range(K1, 0, -BLOCK_K):
67
+ # First matmul with A @ B
68
+ if EVEN_K:
69
+ a = tl.load(A)
70
+ b = tl.load(B)
71
+ else:
72
+ a = tl.load(A, mask=rk[None, :] < k1, other=0.)
73
+ b = tl.load(B, mask=rk[:, None] < k1, other=0.)
74
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
75
+ A += BLOCK_K * stride_ak
76
+ B += BLOCK_K * stride_bk
77
+
78
+ for k2 in range(K1, 0, -BLOCK_K):
79
+
80
+ # Second matmul with C @ D
81
+ if EVEN_K:
82
+ c = tl.load(C)
83
+ d = tl.load(D)
84
+ else:
85
+ c = tl.load(C, mask=rk[None, :] < k2, other=0.)
86
+ d = tl.load(D, mask=rk[:, None] < k2, other=0.)
87
+ acc += tl.dot(c, d, allow_tf32=ALLOW_TF32)
88
+ C += BLOCK_K * stride_ck
89
+ D += BLOCK_K * stride_dk
90
+
91
+
92
+ idx_m = rm[:, None]
93
+ idx_n = rn[None, :]
94
+ mask = (idx_m < M) & (idx_n < N)
95
+
96
+ # inductor generates a suffix
97
+ {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
98
+ """,
99
+ )
100
+
101
+
102
+ @functools.lru_cache(None)
103
+ def mm_configs():
104
+ import triton
105
+
106
+ # List of dictionaries to store the kernel configs. Configs whose cond evaluates
107
+ # to true will be used on the target platform
108
+ mm_triton_configs = [
109
+ {
110
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
111
+ "num_stages": 2,
112
+ "num_warps": 4,
113
+ "cond": True,
114
+ },
115
+ {
116
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
117
+ "num_stages": 3,
118
+ "num_warps": 8,
119
+ "cond": True,
120
+ },
121
+ {
122
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 32},
123
+ "num_stages": 4,
124
+ "num_warps": 16,
125
+ "cond": True,
126
+ },
127
+ {
128
+ "config": {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32},
129
+ "num_stages": 4,
130
+ "num_warps": 8,
131
+ "cond": True,
132
+ },
133
+ {
134
+ "config": {"BLOCK_M": 32, "BLOCK_N": 64, "BLOCK_K": 32},
135
+ "num_stages": 4,
136
+ "num_warps": 8,
137
+ "cond": True,
138
+ },
139
+ {
140
+ "config": {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32},
141
+ "num_stages": 1,
142
+ "num_warps": 8,
143
+ "cond": True,
144
+ },
145
+ {
146
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 64},
147
+ "num_stages": 1,
148
+ "num_warps": 8,
149
+ "cond": True,
150
+ },
151
+ {
152
+ "config": {"BLOCK_M": 32, "BLOCK_N": 32, "BLOCK_K": 128},
153
+ "num_stages": 1,
154
+ "num_warps": 8,
155
+ "cond": torch.version.hip is None,
156
+ },
157
+ {
158
+ "config": {"BLOCK_M": 64, "BLOCK_N": 64, "BLOCK_K": 16},
159
+ "num_stages": 2,
160
+ "num_warps": 4,
161
+ "cond": True,
162
+ },
163
+ {
164
+ "config": {"BLOCK_M": 32, "BLOCK_N": 32, "BLOCK_K": 16},
165
+ "num_stages": 1,
166
+ "num_warps": 2,
167
+ "cond": True,
168
+ },
169
+ ]
170
+
171
+ # Filter out configs in which cond evaluates to true
172
+ # On ROCm convert num_stages to 1 as pipelining provides no benefit
173
+ if torch.version.hip:
174
+ filtered_configs = [
175
+ triton.Config(c["config"], num_stages=1, num_warps=c["num_warps"])
176
+ for c in mm_triton_configs
177
+ if c["cond"]
178
+ ]
179
+ else:
180
+ filtered_configs = [
181
+ triton.Config(
182
+ c["config"], num_stages=c["num_stages"], num_warps=c["num_warps"]
183
+ )
184
+ for c in mm_triton_configs
185
+ if c["cond"]
186
+ ]
187
+
188
+ return filtered_configs
189
+
190
+
191
+ def tuned_mm_plus_mm(mat1, mat2, mat3, mat4, *, layout=None):
192
+ """
193
+ Computes mm(mat1, mat2) + mm(mat3, mat4)
194
+ """
195
+ m1, n1, k1, layout1, mat1, mat2 = mm_args(mat1, mat2, layout=layout)
196
+ m2, n2, _, layout2, mat3, mat4 = mm_args(mat3, mat4, layout=layout)
197
+ # Optimization is optional, because we can always just not do the fusion
198
+ if (
199
+ m1 * n1 == 0
200
+ or m2 * n2 == 0
201
+ or not V.graph.sizevars.statically_known_list_equals(
202
+ mat1.get_size(), mat3.get_size()
203
+ )
204
+ or not V.graph.sizevars.statically_known_list_equals(
205
+ mat2.get_size(), mat4.get_size()
206
+ )
207
+ ):
208
+ # TODO(jansel): support different K values when this is fixed:
209
+ # https://github.com/openai/triton/issues/967
210
+ return lowerings[aten.add](
211
+ lowerings[aten.mm](mat1, mat2), lowerings[aten.mm](mat3, mat4)
212
+ )
213
+
214
+ assert layout1 == layout2
215
+ # options to tune from
216
+ choices = (
217
+ [aten_mm_plus_mm.bind((mat1, mat2, mat3, mat4), layout1)]
218
+ if use_aten_gemm_kernels()
219
+ else []
220
+ )
221
+ if use_triton_template(layout1):
222
+ for config in mm_configs():
223
+ # see https://github.com/openai/triton/issues/1298
224
+ # BLOCK_K = K causes llvm error
225
+ if config.kwargs["BLOCK_K"] < k1:
226
+ mm_plus_mm_template.maybe_append_choice(
227
+ choices,
228
+ input_nodes=(mat1, mat2, mat3, mat4),
229
+ layout=layout1,
230
+ **mm_options(config, m1, n1, k1, layout1),
231
+ )
232
+
233
+ return autotune_select_algorithm(
234
+ "mm_plus_mm", choices, [mat1, mat2, mat3, mat4], layout1
235
+ )
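
When the guards above reject the fused path (zero-size inputs or shapes that are not statically known to line up), the lowering falls back to two plain matmuls plus an add, so the fused kernel only ever has to reproduce that result. A small eager-mode sketch of the equivalence being autotuned, with illustrative shapes that satisfy the template's matching-size requirement:

    import torch

    def mm_plus_mm_reference(mat1, mat2, mat3, mat4):
        # The unfused fallback: two matmuls followed by an elementwise add.
        return torch.mm(mat1, mat2) + torch.mm(mat3, mat4)

    a, b = torch.randn(64, 32), torch.randn(32, 48)
    c, d = torch.randn(64, 32), torch.randn(32, 48)  # same M/N and same K as (a, b)
    out = mm_plus_mm_reference(a, b, c, d)
    assert out.shape == (64, 48)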
venv/lib/python3.10/site-packages/torch/_inductor/kernel/unpack_mixed_mm.py ADDED
@@ -0,0 +1,82 @@
1
+ import logging
2
+ from typing import List
3
+
4
+ from ..select_algorithm import autotune_select_algorithm, ChoiceCaller, TritonTemplate
5
+ from .mm_common import mm_args, mm_configs, mm_grid, mm_options
6
+
7
+ log = logging.getLogger(__name__)
8
+
9
+ uint4x2_mixed_mm_template = TritonTemplate(
10
+ name="uint4x2_mixed_mm",
11
+ grid=mm_grid,
12
+ source=r"""
13
+ {{def_kernel("A", "B")}}
14
+ M = {{size("A", 0)}}
15
+ N = {{size("B", 1)}}
16
+ K = {{size("A", 1)}}
17
+ stride_am = {{stride("A", 0)}}
18
+ stride_ak = {{stride("A", 1)}}
19
+ stride_bk = {{stride("B", 0)}}
20
+ stride_bn = {{stride("B", 1)}}
21
+
22
+ # based on triton.ops.matmul
23
+ pid = tl.program_id(0)
24
+ grid_m = (M + BLOCK_M - 1) // BLOCK_M
25
+ grid_n = (N + BLOCK_N - 1) // BLOCK_N
26
+
27
+ # re-order program ID for better L2 performance
28
+ width = GROUP_M * grid_n
29
+ group_id = pid // width
30
+ group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
31
+ pid_m = group_id * GROUP_M + (pid % group_size)
32
+ pid_n = (pid % width) // (group_size)
33
+
34
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
35
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
36
+ ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
37
+ rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
38
+ rk = tl.arange(0, BLOCK_K)
39
+ A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
40
+ B = B + (rk[:, None]//2 * stride_bk + rbn[None, :] * stride_bn)
41
+ b_shifts = 4*(rk%2)
42
+ b_subs = 8*(1-(rk%2))
43
+
44
+ acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
45
+ for k in range(K, 0, -BLOCK_K):
46
+ if EVEN_K:
47
+ a = tl.load(A)
48
+ b = tl.load(B)
49
+ else:
50
+ a = tl.load(A, mask=rk[None, :] < k, other=0.)
51
+ b = tl.load(B, mask=rk[:, None] < k, other=0.)
52
+ b = ((b >> b_shifts[:, None]) & 0xF) - 8
53
+ b = b.to(B_PROLOGUE_CAST_TYPE)
54
+ acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
55
+ A += BLOCK_K * stride_ak
56
+ B += BLOCK_K//2 * stride_bk
57
+
58
+ # rematerialize rm and rn to save registers
59
+ rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
60
+ rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
61
+ idx_m = rm[:, None]
62
+ idx_n = rn[None, :]
63
+ mask = (idx_m < M) & (idx_n < N)
64
+
65
+ # inductor generates a suffix
66
+ {{store_output(("idx_m", "idx_n"), "acc", "mask")}}
67
+ """,
68
+ )
69
+
70
+
71
+ def tuned_uint4x2_mixed_mm(mat1, mat2, mat2_mm_shape, mat2_dtype):
72
+ m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None, use_4x2_dim=True)
73
+ choices: List[ChoiceCaller] = []
74
+ b_prologue_cast_type = f"tl.{mat2_dtype}".replace("torch.", "")
75
+ for config in mm_configs(m, n, k):
76
+ uint4x2_mixed_mm_template.maybe_append_choice(
77
+ choices,
78
+ input_nodes=(mat1, mat2),
79
+ layout=layout,
80
+ **mm_options(config, m, n, k, layout, b_prologue_cast_type),
81
+ )
82
+ return autotune_select_algorithm("uint4x2_mixed_mm", choices, [mat1, mat2], layout)
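
The template above treats each uint8 of B as two packed signed 4-bit values: the shift 4*(rk % 2) picks the low or high nibble and the final "- 8" re-centers the unsigned nibble into [-8, 7]. A plain-Python sketch of just that decode step (the function name is illustrative, not part of the kernel):

    def unpack_uint4x2(byte: int) -> tuple[int, int]:
        # Mirrors the template's ((b >> shift) & 0xF) - 8 decode:
        # even k reads the low nibble, odd k reads the high nibble.
        low = (byte & 0xF) - 8          # shift = 4 * (0 % 2) = 0
        high = ((byte >> 4) & 0xF) - 8  # shift = 4 * (1 % 2) = 4
        return low, high

    assert unpack_uint4x2(0x00) == (-8, -8)
    assert unpack_uint4x2(0xFF) == (7, 7)
    assert unpack_uint4x2(0x7F) == (7, -1)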
venv/lib/python3.10/site-packages/torch/_inductor/lowering.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_inductor/metrics.py ADDED
@@ -0,0 +1,419 @@
1
+ from __future__ import annotations
2
+
3
+ import csv
4
+ import inspect
5
+ import os
6
+ import re
7
+ from dataclasses import dataclass
8
+ from functools import lru_cache
9
+
10
+ from typing import Dict, List, Set, Tuple, TYPE_CHECKING, Union
11
+
12
+ from torch._inductor import config
13
+ from torch._inductor.utils import get_benchmark_name
14
+
15
+ # Prevent circular import
16
+ if TYPE_CHECKING:
17
+ from torch._inductor.scheduler import (
18
+ BaseSchedulerNode,
19
+ ExternKernelSchedulerNode,
20
+ NopKernelSchedulerNode,
21
+ SchedulerNode,
22
+ )
23
+
24
+ # counter for tracking how many kernels have been generated
25
+ generated_kernel_count = 0
26
+ generated_cpp_vec_kernel_count = 0
27
+ num_bytes_accessed = 0
28
+ nodes_num_elem: List[
29
+ Tuple[
30
+ Union[NopKernelSchedulerNode, SchedulerNode, ExternKernelSchedulerNode],
31
+ int,
32
+ ]
33
+ ] = []
34
+ node_runtimes: List[Tuple[BaseSchedulerNode, float]] = []
35
+
36
+ # counters for tracking fusions
37
+ ir_nodes_pre_fusion = 0
38
+
39
+ # counters for tracking to_dtype inserted
40
+ cpp_to_dtype_count = 0
41
+
42
+ # counters for tracking cpp_wrapper disabled
43
+ disable_cpp_wrapper = 0
44
+
45
+
46
+ # reset all counters
47
+ def reset():
48
+ global generated_kernel_count
49
+ global generated_cpp_vec_kernel_count
50
+ global num_bytes_accessed, nodes_num_elem
51
+ global ir_nodes_pre_fusion
52
+ global cpp_to_dtype_count
53
+ global disable_cpp_wrapper
54
+
55
+ generated_kernel_count = 0
56
+ generated_cpp_vec_kernel_count = 0
57
+ num_bytes_accessed = 0
58
+ nodes_num_elem.clear()
59
+ node_runtimes.clear()
60
+ ir_nodes_pre_fusion = 0
61
+ cpp_to_dtype_count = 0
62
+ disable_cpp_wrapper = 0
63
+
64
+
65
+ @dataclass
66
+ class CachedMetricsDeltas:
67
+ """
68
+ The subset of metrics we want update across cache hits, e.g., the
69
+ FxGraphCache.
70
+ """
71
+
72
+ generated_kernel_count: int
73
+ generated_cpp_vec_kernel_count: int
74
+ ir_nodes_pre_fusion: int
75
+ cpp_to_dtype_count: int
76
+
77
+
78
+ class CachedMetricsHelper:
79
+ """
80
+ A helper class to help calculate and apply counter deltas for those
81
+ metrics we want to save with cache entries (e.g., FxGraphCache) and
82
+ apply on a cache hit.
83
+ """
84
+
85
+ def __init__(self):
86
+ global generated_kernel_count
87
+ global generated_cpp_vec_kernel_count
88
+ global ir_nodes_pre_fusion
89
+ global cpp_to_dtype_count
90
+
91
+ self.generated_kernel_count = generated_kernel_count
92
+ self.generated_cpp_vec_kernel_count = generated_cpp_vec_kernel_count
93
+ self.ir_nodes_pre_fusion = ir_nodes_pre_fusion
94
+ self.cpp_to_dtype_count = cpp_to_dtype_count
95
+
96
+ def get_deltas(self) -> CachedMetricsDeltas:
97
+ global generated_kernel_count
98
+ global generated_cpp_vec_kernel_count
99
+ global ir_nodes_pre_fusion
100
+ global cpp_to_dtype_count
101
+
102
+ return CachedMetricsDeltas(
103
+ generated_kernel_count - self.generated_kernel_count,
104
+ generated_cpp_vec_kernel_count - self.generated_cpp_vec_kernel_count,
105
+ ir_nodes_pre_fusion - self.ir_nodes_pre_fusion,
106
+ cpp_to_dtype_count - self.cpp_to_dtype_count,
107
+ )
108
+
109
+ @staticmethod
110
+ def apply_deltas(delta: CachedMetricsDeltas):
111
+ global generated_kernel_count
112
+ global generated_cpp_vec_kernel_count
113
+ global ir_nodes_pre_fusion
114
+ global cpp_to_dtype_count
115
+
116
+ generated_kernel_count += delta.generated_kernel_count
117
+ generated_cpp_vec_kernel_count += delta.generated_cpp_vec_kernel_count
118
+ ir_nodes_pre_fusion += delta.ir_nodes_pre_fusion
119
+ cpp_to_dtype_count += delta.cpp_to_dtype_count
120
+
121
+
122
+ REGISTERED_METRIC_TABLES: Dict[str, MetricTable] = {}
123
+
124
+
125
+ @dataclass
126
+ class MetricTable:
127
+ table_name: str
128
+ column_names: List[str]
129
+
130
+ num_rows_added: int = 0
131
+
132
+ def add_row(self, row_fn):
133
+ if self.table_name not in enabled_metric_tables():
134
+ return
135
+
136
+ row_dict = row_fn()
137
+ assert len(self.column_names) == len(
138
+ row_dict
139
+ ), f"{len(self.column_names)} v.s. {len(row_dict)}"
140
+ assert set(self.column_names) == set(
141
+ row_dict.keys()
142
+ ), f"{set(self.column_names)} v.s. {set(row_dict.keys())}"
143
+
144
+ row = [
145
+ get_benchmark_name(),
146
+ ]
147
+ row += [row_dict[column_name] for column_name in self.column_names]
148
+ self._write_row(row)
149
+
150
+ def output_filename(self):
151
+ return f"metric_table_{self.table_name}.csv"
152
+
153
+ def write_header(self):
154
+ filename = self.output_filename()
155
+ with open(filename, "w") as fd:
156
+ writer = csv.writer(fd, lineterminator="\n")
157
+ writer.writerow(["model_name"] + self.column_names)
158
+
159
+ def _write_row(self, row):
160
+ filename = self.output_filename()
161
+ if self.num_rows_added == 0 and not os.path.exists(filename):
162
+ self.write_header()
163
+
164
+ self.num_rows_added += 1
165
+
166
+ for idx, orig_val in enumerate(row):
167
+ if isinstance(orig_val, float):
168
+ new_val = f"{orig_val:.6f}"
169
+ elif orig_val is None:
170
+ new_val = ""
171
+ else:
172
+ new_val = orig_val
173
+ row[idx] = new_val
174
+
175
+ with open(filename, "a") as fd:
176
+ writer = csv.writer(fd, lineterminator="\n")
177
+ writer.writerow(row)
178
+
179
+ @staticmethod
180
+ def register_table(name, column_names):
181
+ table = MetricTable(name, column_names)
182
+ REGISTERED_METRIC_TABLES[name] = table
183
+
184
+
185
+ MetricTable.register_table(
186
+ "slow_fusion",
187
+ [
188
+ "kernel1_path",
189
+ "kernel1_latency",
190
+ "kernel2_path",
191
+ "kernel2_latency",
192
+ "fused_kernel_path",
193
+ "fused_kernel_latency",
194
+ "slow_down_ratio",
195
+ ],
196
+ )
197
+
198
+ # track the fusion statistics for each graph
199
+ MetricTable.register_table(
200
+ "graph_stats",
201
+ [
202
+ "graph_id",
203
+ "num_nodes_before_fusion",
204
+ "num_nodes_after_fusion",
205
+ ],
206
+ )
207
+
208
+ # track the perf difference between persistent reduction and non-persistent
209
+ # reductions
210
+ MetricTable.register_table(
211
+ "persistent_red_perf",
212
+ [
213
+ "kernel1_name",
214
+ "kernel2_name",
215
+ "kernel1_latency",
216
+ "kernel2_latency",
217
+ "size_hints",
218
+ "reduction_hint",
219
+ "speedup",
220
+ ],
221
+ )
222
+
223
+ # Log metadata for pointwise/reduction kernels. E.g., model name, kernel path, numel, rnumel, reduction hint
224
+ MetricTable.register_table(
225
+ "kernel_metadata",
226
+ [
227
+ "kernel_name",
228
+ "kernel_path",
229
+ "kernel_category", # pointwise/reduction/foreach etc.
230
+ "size_hints",
231
+ "reduction_hint",
232
+ "line_of_code",
233
+ "num_load",
234
+ "num_store",
235
+ "num_for_loop",
236
+ "num_atomic_add",
237
+ "num_args",
238
+ # xyz numel can be different to size_hints since size_hints are rounded
239
+ # up to the nearest power of 2.
240
+ # Inductor kernel will burn in the xyz numel in kernel code for static
241
+ # shape kernels.
242
+ # Logging them will be helpful to find unaligned shape for reduction
243
+ "xnumel",
244
+ "ynumel",
245
+ "rnumel",
246
+ "kernel_args_num_gb",
247
+ ],
248
+ )
249
+
250
+
251
+ def _parse_kernel_fn_code(kernel_module_code):
252
+ """
253
+ The kernel_module_code is the python module that contains kernel function code.
254
+ kernel function is the proper triton kernel function annotated with
255
+ @triton.jit
256
+ """
257
+ from .codecache import PyCodeCache
258
+ from .wrapper_benchmark import get_triton_kernel
259
+
260
+ mod = PyCodeCache.load(kernel_module_code)
261
+ kernel = get_triton_kernel(mod)
262
+ # kernel is a CachingAutotune; kernel.fn is the JITFunction;
263
+ # kernel.fn.fn is the function being decorated by triton.jit
264
+ return inspect.getsource(kernel.fn.fn)
265
+
266
+
267
+ def _parse_kernel_line_of_code(proper_kernel_fn_code):
268
+ """
269
+ Return the line of code for the kernel excluding the decorators.
270
+ """
271
+ return len(proper_kernel_fn_code.splitlines())
272
+
273
+
274
+ def _parse_size_hints(kernel_module_code, kernel_category):
275
+ if kernel_category == "foreach":
276
+ # foreach kernel does not have size_hints
277
+ return None
278
+ m = re.search(r"size_hints=(\[[0-9, ]*\]),", kernel_module_code)
279
+ assert m, "size_hints missing!"
280
+ return m.group(1)
281
+
282
+
283
+ def _parse_reduction_hint(kernel_category, kernel_module_code):
284
+ if kernel_category not in ("reduction", "persistent_reduction"):
285
+ return None
286
+ m = re.search(r"reduction_hint=ReductionHint\.(\w*),", kernel_module_code)
287
+ assert m, "reduction_hint not found in kernel source code!"
288
+ return m.group(1)
289
+
290
+
291
+ def _count_pattern(proper_kernel_fn_code, pattern):
292
+ return proper_kernel_fn_code.count(pattern)
293
+
294
+
295
+ def _count_args(proper_kernel_fn_code):
296
+ def_line = proper_kernel_fn_code.splitlines()[0]
297
+ assert def_line.startswith("def ")
298
+ start_idx = def_line.index("(")
299
+ end_idx = def_line.index("):")
300
+ decl_csv = def_line[start_idx + 1 : end_idx]
301
+ comps = decl_csv.split(",")
302
+ return len(comps)
303
+
304
+
305
+ def _parse_proper_kernel_fn_code(kernel_fn_code):
306
+ """
307
+ Skip decorators.
308
+ """
309
+ start_pos = kernel_fn_code.index("def ")
310
+ return kernel_fn_code[start_pos:]
311
+
312
+
313
+ def _parse_numel(proper_kernel_fn_code, numel_arg_name):
314
+ m = re.search(f"{numel_arg_name} = ([\\d]+)", proper_kernel_fn_code)
315
+ if m:
316
+ return int(m.group(1))
317
+ else:
318
+ return None
319
+
320
+
321
+ def _parse_kernel_args_num_gb(kernel_fn_code, kernel_category):
322
+ """
323
+ inductor meta looks like:
324
+ inductor_meta={... 'mutated_arg_names': [], 'no_x_dim': False, 'kernel_num_gb': 2.0},
325
+ """
326
+ m = re.search(r".kernel_num_gb.:\s*([0-9.]+)", kernel_fn_code)
327
+ if m:
328
+ return float(m.group(1))
329
+ else:
330
+ """
331
+ There are a few cases that kernel_num_gdb field can be missing:
332
+ 1. the field will be missing if config.benchmark_kernel and
333
+ config.profile_bandwidth are false
334
+ 2. even if config.benchmark_kernel or config.profile_bandwidth is true.
335
+ foreach kernel does not have kernel_num_gb field in the metadata
336
+ """
337
+ return None
338
+
339
+
340
+ def log_kernel_metadata(kernel_name, kernel_path, kernel_module_code):
341
+ """
342
+ An utility to log kernel metadata. We may parse metadata from kernel source code here.
343
+
344
+ It's fine to parse the generated kernel code here since the logging is
345
+ disabled by default. It would hurt compilation time.
346
+ """
347
+ from .wrapper_benchmark import get_kernel_category_by_source_code
348
+
349
+ kernel_category = get_kernel_category_by_source_code(kernel_module_code)
350
+ reduction_hint = _parse_reduction_hint(kernel_category, kernel_module_code)
351
+ size_hints = _parse_size_hints(kernel_module_code, kernel_category)
352
+ kernel_fn_code = _parse_kernel_fn_code(kernel_module_code)
353
+
354
+ proper_kernel_fn_code = _parse_proper_kernel_fn_code(kernel_fn_code)
355
+
356
+ # the number of lines of code, excluding the decorators
357
+ kernel_line_of_code = _parse_kernel_line_of_code(proper_kernel_fn_code)
358
+
359
+ get_metric_table("kernel_metadata").add_row(
360
+ lambda: {
361
+ "kernel_name": kernel_name,
362
+ "kernel_path": kernel_path,
363
+ "kernel_category": kernel_category,
364
+ "size_hints": size_hints,
365
+ "reduction_hint": reduction_hint,
366
+ "line_of_code": kernel_line_of_code,
367
+ "num_load": _count_pattern(proper_kernel_fn_code, "tl.load"),
368
+ "num_store": _count_pattern(proper_kernel_fn_code, "tl.store"),
369
+ "num_for_loop": _count_pattern(proper_kernel_fn_code, "for "),
370
+ "num_atomic_add": _count_pattern(proper_kernel_fn_code, "tl.atomic_add"),
371
+ "num_args": _count_args(proper_kernel_fn_code),
372
+ "xnumel": _parse_numel(proper_kernel_fn_code, "xnumel"),
373
+ "ynumel": _parse_numel(proper_kernel_fn_code, "ynumel"),
374
+ "rnumel": _parse_numel(proper_kernel_fn_code, "rnumel"),
375
+ "kernel_args_num_gb": _parse_kernel_args_num_gb(
376
+ kernel_fn_code, kernel_category
377
+ ),
378
+ }
379
+ )
380
+
381
+
382
+ def purge_old_log_files():
383
+ """
384
+ Purge the old log file at the beginning when the benchmark script runs.
385
+ Should do it in the parent process rather than the child processes running
386
+ each individual model.
387
+ """
388
+ for name, table in REGISTERED_METRIC_TABLES.items():
389
+ if name in enabled_metric_tables():
390
+ filename = table.output_filename()
391
+ if os.path.exists(filename):
392
+ os.unlink(filename)
393
+
394
+ table.write_header()
395
+
396
+
397
+ @lru_cache
398
+ def enabled_metric_tables() -> Set[str]:
399
+ config_str = config.enabled_metric_tables
400
+
401
+ enabled = set()
402
+ for name in config_str.split(","):
403
+ name = name.strip()
404
+ if not name:
405
+ continue
406
+ assert (
407
+ name in REGISTERED_METRIC_TABLES
408
+ ), f"Metric table name {name} is not registered"
409
+ enabled.add(name)
410
+ return enabled
411
+
412
+
413
+ def is_metric_table_enabled(name):
414
+ return name in enabled_metric_tables()
415
+
416
+
417
+ def get_metric_table(name):
418
+ assert name in REGISTERED_METRIC_TABLES, f"Metric table {name} is not defined"
419
+ return REGISTERED_METRIC_TABLES[name]
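
Tying the pieces together: a table only records rows when its name appears in config.enabled_metric_tables, and add_row takes a thunk so the row dict is built only when the table is enabled. A hedged usage sketch with made-up values for the graph_stats table registered above:

    from torch._inductor import config, metrics

    # Opt in before the first call, since enabled_metric_tables() is cached.
    config.enabled_metric_tables = "graph_stats"

    metrics.get_metric_table("graph_stats").add_row(
        lambda: {
            "graph_id": 0,
            "num_nodes_before_fusion": 120,
            "num_nodes_after_fusion": 45,
        }
    )
    # Rows are appended to metric_table_graph_stats.csv, prefixed with the
    # benchmark name (which may be empty outside the benchmark harness).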
venv/lib/python3.10/site-packages/torch/_inductor/ops_handler.py ADDED
@@ -0,0 +1,655 @@
1
+ import itertools
2
+ from typing import Any, Callable, Generic, Literal, Optional, Tuple, TypeVar, Union
3
+ from unittest.mock import patch
4
+
5
+ import sympy
6
+ from typing_extensions import Protocol
7
+
8
+ import torch
9
+ import torch.utils._pytree as pytree
10
+ from torch.fx.graph import inplace_methods, magic_methods
11
+ from .utils import IndentedBuffer, reduction_num_outputs, sympy_index_symbol, sympy_str
12
+
13
+ T = TypeVar("T")
14
+ StoreMode = Optional[Literal["atomic_add"]]
15
+ ReductionType = Literal[
16
+ "argmax",
17
+ "argmin",
18
+ "welford_reduce",
19
+ "welford_combine",
20
+ "any",
21
+ "max",
22
+ "min",
23
+ "prod",
24
+ "sum",
25
+ "xor_sum",
26
+ ]
27
+
28
+
29
+ def _arg_str(a) -> str:
30
+ if isinstance(a, sympy.Expr):
31
+ return sympy_str(a)
32
+ return str(a)
33
+
34
+
35
+ # NB: This is not done as a parent class, because our ops handlers
36
+ # implementations make heavy use of __getattr__ magic, and pre-existing
37
+ # stubs for methods would interfere with this mechanism.
38
+ #
39
+ # TODO: A superclass that does desugaring for operations like
40
+ # reciprocal/square might be useful.
41
+ class OpsHandler(Protocol[T]):
42
+ """
43
+ Protocol describing the set of valid operations on ``torch._inductor.virtualized.ops``,
44
+ as well as the contract for op handlers. The type T signifies the domain
45
+ of the abstract analysis AKA what all of the functions return / take as arguments
46
+ anywhere compute occurs.
47
+
48
+ While these operators are typically dtype polymorphic (e.g., you can use mul
49
+ on both integers and floats), they do NOT do promotion and usually return the
50
+ same dtype as the input. You are expected to have handled type promotion
51
+ during ATen decompositions. Most operators correspond exactly to pointwise
52
+ operations as defined by torch, so when in doubt about semantics, check the
53
+ corresponding torch documentation. These are all scalar operations (so they
54
+ are defined to operate on a single element at a time.)
55
+
56
+ For convenience, many operators take a src_dtype which indicates what the dtype
57
+ of the input argument is. Although in principle this can be derived by an
58
+ analysis, providing this for ops where it is useful helps avoid having to repeatedly
59
+ recompute dtype in code generation.
60
+
61
+ Note that this often describes a class of static methods, for stateless
62
+ ops handlers.
63
+
64
+ Handlers are often defined using ``__getattr__`` metaprogramming, which means
65
+ that you cannot declare that a type implements a protocol by inheriting from
66
+ it (as the type stubs count as attribute declarations and impede the getattr
67
+ magic method from being called). Instead, define a function that casts an
68
+ argument of your type to the protocol, which is sufficient to induce mypy to
69
+ test that the protocol is implemented correctly. Search for ``_typecheck_``
70
+ in this file to see some examples. If you see an obscure error where a
71
+ class doesn't implement a Protocol, but mypy doesn't say why, check to see
72
+ that ``__getattr__`` is typed correctly (typically, it is not possible to
73
+ type ``__getattr__`` without typing it as ``Callable[..., Any]``)
74
+ """
75
+
76
+ def constant(self, value: Union[bool, float, int], dtype: torch.dtype) -> T:
77
+ """Produces a scalar constant of type dtype."""
78
+ ...
79
+
80
+ def load_seed(self, name: str, offset: T):
81
+ """Computes inductor_prims.lookup_seed."""
82
+ ...
83
+
84
+ def rand(self, seed: T, offset: T) -> T:
85
+ """Computes inductor_prims.random with mode="rand". offset has dtype int32."""
86
+ ...
87
+
88
+ def randn(self, seed: T, offset: T) -> T:
89
+ """Computes inductor_prims.random with mode="randn". offset has dtype int32."""
90
+ ...
91
+
92
+ def randint64(self, seed: T, offset: T, low: T, high: T) -> T:
93
+ """Computes inductor_prims.randint. offset has dtype int32."""
94
+ ...
95
+
96
+ def masked(self, mask: T, body: Callable[[], T], other: T) -> T:
97
+ """
98
+ Computes body, but only perform loads/stores if the boolean mask
99
+ evaluates to true. For example, you would use this if you needed to
100
+ perform an indirect load that may not be valid on some elements;
101
+ without masking, invalid accesses can cause IMAs. When mask is true,
102
+ the result is the result of body; otherwise it is other.
103
+
104
+ Contrast this with ops.where, which can multiplex between two values
105
+ that have been unconditionally computed.
106
+ """
107
+ ...
108
+
109
+ def where(self, condition: T, input: T, other: T) -> T:
110
+ """
111
+ Computes torch.where: when condition is true, return input; otherwise return other.
112
+ """
113
+ ...
114
+
115
+ def index_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> T:
116
+ """
117
+ Converts a sympy expression into a scalar of type dtype. expr is typically
118
+ an indexing expression, thus the name; however, it can also be used in
119
+ non-indexing situations.
120
+ """
121
+ ...
122
+
123
+ def to_dtype(
124
+ self, x: T, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None
125
+ ) -> T:
126
+ """
127
+ Convert x to dtype. src_dtype can be optionally set to specify what the original
128
+ dtype of x was, which can improve code generation (used by torch to(dtype=dtype)).
129
+ """
130
+ ...
131
+
132
+ def to_dtype_bitcast(self, x: T, dtype: torch.dtype, src_dtype: torch.dtype) -> T:
133
+ """
134
+ Reinterpret cast x to dtype (reinterpreting the bits in memory as another dtype.)
135
+ src_dtype must be the original type of x.
136
+ """
137
+ ...
138
+
139
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
140
+ # These operations are only available in a "kernel" context. Check
141
+ # torch._inductor.codegen.common.CSEProxy for their typical implementation
142
+ # in op handler (routing to their respective implementations in the kernel
143
+ # handler)
144
+ #
145
+ # Importantly, inside a kernel, indexing and mask variables are available
146
+ # in scope, which are typically used by sympy.Expr indexing.
147
+
148
+ def indirect_indexing(
149
+ self, x: T, size: sympy.Expr, check: bool = True
150
+ ) -> sympy.Expr:
151
+ """
152
+ Convert an integral x into a sympy.Expr that can be subsequently used in
153
+ indexing computation. 'size' represents an upper bound on the what valid
154
+ indexes can be; when 'check' is True, we check that the x is in bounds.
155
+
156
+ NB: This is typically mandatory to implement for any analysis, because you
157
+ MUST return a valid sympy.Expr of some sort (even if it's a meaningless symbol).
158
+ """
159
+ ...
160
+
161
+ def load(self, name: str, index: sympy.Expr) -> T:
162
+ """
163
+ Load from the memory location 'name', offset by some indexing expression 'index'.
164
+ """
165
+ ...
166
+
167
+ def store(
168
+ self,
169
+ name: str,
170
+ index: sympy.Expr,
171
+ value: T,
172
+ mode: StoreMode = None,
173
+ ) -> None:
174
+ """
175
+ Store 'value' to the memory location 'name' offset by 'expr'. If
176
+ specified, 'mode' can require the store to be an atomic addition.
177
+ """
178
+ ...
179
+
180
+ # TODO: Better explain how the "collective" semantics of these ops;
181
+ # remember that the input value is a scalar, you can't reduce on it in the
182
+ # traditional sense!
183
+ def reduction(
184
+ self,
185
+ dtype: torch.dtype,
186
+ src_dtype: torch.dtype,
187
+ reduction_type: ReductionType,
188
+ value: T,
189
+ ) -> Union[T, Tuple[T, ...]]:
190
+ """
191
+ Perform a 'reduction_type' reduction on 'value' of dtype 'src_dtype',
192
+ using 'dtype' as the accumulation dtype for the reduction. The result
193
+ is an intermediate computation which should be stored to the final
194
+ location using 'ops.store_reduction'.
195
+
196
+ Valid reduction types are . For Welford reduction types, this
197
+ function returns multiple outputs; consult reduction_num_outputs to
198
+ determine the amount in metaprogramming applications.
199
+ """
200
+ ...
201
+
202
+ # TODO: in practice, this seems to actually return None, but not returning
203
+ # a T makes common __getattr__ idioms not type correctly. Figure out if
204
+ # this should be returning something.
205
+ def store_reduction(self, name: str, index: sympy.Expr, value: T) -> T:
206
+ """
207
+ Store the fully accumulated result of 'reduction' to the memory
208
+ location 'name' offset by 'expr'.
209
+ """
210
+ ...
211
+
212
+ def scan(
213
+ self, dtype: torch.dtype, combine_fn: Callable[[T, T], T], value: T, init: int
214
+ ) -> T:
215
+ """
216
+ Perform an associative scan on 'value'.
217
+ """
218
+ # TODO: Improve the description with some pseudocode
219
+ ...
220
+
221
+ def bucketize(
222
+ self,
223
+ values: T,
224
+ offsets_name: str,
225
+ offsets_size: sympy.Expr,
226
+ indexing_dtype: torch.dtype,
227
+ right: bool,
228
+ ) -> T:
229
+ # See [Note: Inductor bucketize op]
230
+ ...
231
+
232
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
233
+ # The following ops have semantics that correspond exactly to the torch
234
+ # operation with the same corresponding name.
235
+
236
+ def abs(self, x0: T) -> T:
237
+ ...
238
+
239
+ def exp(self, x0: T) -> T:
240
+ ...
241
+
242
+ def exp2(self, x0: T) -> T:
243
+ ...
244
+
245
+ def expm1(self, x0: T) -> T:
246
+ ...
247
+
248
+ def sqrt(self, x0: T) -> T:
249
+ ...
250
+
251
+ def relu(self, x0: T) -> T:
252
+ ...
253
+
254
+ def minimum(self, x0: T, x1: T) -> T:
255
+ ...
256
+
257
+ def maximum(self, x0: T, x1: T) -> T:
258
+ ...
259
+
260
+ def cos(self, x0: T) -> T:
261
+ ...
262
+
263
+ def sin(self, x0: T) -> T:
264
+ ...
265
+
266
+ def lgamma(self, x0: T) -> T:
267
+ ...
268
+
269
+ def erf(self, x0: T) -> T:
270
+ ...
271
+
272
+ def cosh(self, x0: T) -> T:
273
+ ...
274
+
275
+ def sinh(self, x0: T) -> T:
276
+ ...
277
+
278
+ def acos(self, x0: T) -> T:
279
+ ...
280
+
281
+ def acosh(self, x0: T) -> T:
282
+ ...
283
+
284
+ def asin(self, x0: T) -> T:
285
+ ...
286
+
287
+ def asinh(self, x0: T) -> T:
288
+ ...
289
+
290
+ def atan2(self, x0: T, x1: T) -> T:
291
+ ...
292
+
293
+ def atan(self, x0: T) -> T:
294
+ ...
295
+
296
+ def atanh(self, x0: T) -> T:
297
+ ...
298
+
299
+ def copysign(self, x0: T, x1: T) -> T:
300
+ ...
301
+
302
+ def erfc(self, x0: T) -> T:
303
+ ...
304
+
305
+ def erfinv(self, x0: T) -> T:
306
+ ...
307
+
308
+ def frexp(self, x0: T):
309
+ ...
310
+
311
+ def hypot(self, x0: T, x1: T) -> T:
312
+ ...
313
+
314
+ def log10(self, x0: T) -> T:
315
+ ...
316
+
317
+ def nextafter(self, x0: T, x1: T) -> T:
318
+ ...
319
+
320
+ def logical_and(self, x0: T, x1: T) -> T:
321
+ ...
322
+
323
+ def logical_not(self, x0: T) -> T:
324
+ ...
325
+
326
+ def logical_or(self, x0: T, x1: T) -> T:
327
+ ...
328
+
329
+ def logical_xor(self, x0: T, x1: T) -> T:
330
+ ...
331
+
332
+ def bitwise_and(self, x0: T, x1: T) -> T:
333
+ ...
334
+
335
+ def bitwise_not(self, x0: T) -> T:
336
+ ...
337
+
338
+ def bitwise_or(self, x0: T, x1: T) -> T:
339
+ ...
340
+
341
+ def bitwise_xor(self, x0: T, x1: T) -> T:
342
+ ...
343
+
344
+ def bitwise_left_shift(self, x0: T, x1: T) -> T:
345
+ ...
346
+
347
+ def bitwise_right_shift(self, x0: T, x1: T) -> T:
348
+ ...
349
+
350
+ def rsqrt(self, x0: T) -> T:
351
+ ...
352
+
353
+ def log1p(self, x0: T) -> T:
354
+ ...
355
+
356
+ def tan(self, x0: T) -> T:
357
+ ...
358
+
359
+ def tanh(self, x0: T) -> T:
360
+ ...
361
+
362
+ def sigmoid(self, x0: T) -> T:
363
+ ...
364
+
365
+ def signbit(self, x0: T) -> T:
366
+ ...
367
+
368
+ def fmod(self, x0: T, x1: T) -> T:
369
+ ...
370
+
371
+ def log(self, x0: T) -> T:
372
+ ...
373
+
374
+ def isinf(self, x0: T) -> T:
375
+ ...
376
+
377
+ def isnan(self, x0: T) -> T:
378
+ ...
379
+
380
+ def round(self, x0: T) -> T:
381
+ ...
382
+
383
+ def floor(self, x0: T) -> T:
384
+ ...
385
+
386
+ def sign(self, x0: T) -> T:
387
+ ...
388
+
389
+ def to_int(self, x0: T) -> T:
390
+ ...
391
+
392
+ def trunc(self, x0: T) -> T:
393
+ ...
394
+
395
+ def truncdiv(self, x0: T, x1: T) -> T:
396
+ ...
397
+
398
+ def ceil(self, x0: T) -> T:
399
+ ...
400
+
401
+ def neg(self, x0: T) -> T:
402
+ ...
403
+
404
+ def reciprocal(self, x0: T) -> T:
405
+ ...
406
+
407
+ def eq(self, x0: T, x1: T) -> T:
408
+ ...
409
+
410
+ def ne(self, x0: T, x1: T) -> T:
411
+ ...
412
+
413
+ def lt(self, x0: T, x1: T) -> T:
414
+ ...
415
+
416
+ def gt(self, x0: T, x1: T) -> T:
417
+ ...
418
+
419
+ def le(self, x0: T, x1: T) -> T:
420
+ ...
421
+
422
+ def ge(self, x0: T, x1: T) -> T:
423
+ ...
424
+
425
+ def add(self, x0: T, x1: T) -> T:
426
+ ...
427
+
428
+ def sub(self, x0: T, x1: T) -> T:
429
+ ...
430
+
431
+ def mul(self, x0: T, x1: T) -> T:
432
+ ...
433
+
434
+ def floordiv(self, x0: T, x1: T) -> T:
435
+ ...
436
+
437
+ def truediv(self, x0: T, x1: T) -> T:
438
+ ...
439
+
440
+ def div(self, x0: T, x1: T) -> T:
441
+ ...
442
+
443
+ def mod(self, x0: T, x1: T) -> T:
444
+ ...
445
+
446
+ def pow(self, x0: T, x1: T) -> T:
447
+ ...
448
+
449
+ def and_(self, x0: T, x1: T) -> T:
450
+ ...
451
+
452
+ def or_(self, x0: T, x1: T) -> T:
453
+ ...
454
+
455
+ def xor(self, x0: T, x1: T) -> T:
456
+ ...
457
+
458
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
459
+ # In CUDA, optimized implementations of other mathematical operations are
460
+ # offered separately via libdevice for double precision computation (in
461
+ # Triton, these go to tl.math rather than tl). We lower to these
462
+ # operators when doing FP64 on CUDA. Note that some operators
463
+ # unconditional go to tl.math.
464
+ #
465
+ # TODO(ezyang): Is this really the best way to do this? What if we have
466
+ # abs internally route to tl.math automatically when given a double
467
+ # precision input? One reason is that when doing codegen, we often don't
468
+ # know what the dtype of the inputs are! (In principle we do know, but
469
+ # for many analyses it's not conveniently available.)
470
+
471
+ def libdevice_abs(self, x0: T) -> T:
472
+ ...
473
+
474
+ def libdevice_exp(self, x0: T) -> T:
475
+ ...
476
+
477
+ def libdevice_sqrt(self, x0: T) -> T:
478
+ ...
479
+
480
+ def libdevice_cos(self, x0: T) -> T:
481
+ ...
482
+
483
+ def libdevice_sin(self, x0: T) -> T:
484
+ ...
485
+
486
+ def libdevice_sigmoid(self, x0: T) -> T:
487
+ ...
488
+
489
+ def libdevice_log(self, x0: T) -> T:
490
+ ...
491
+
492
+
493
+ class MockHandler:
494
+ def __getattr__(self, name):
495
+ if name == "name":
496
+ return "MockHandler"
497
+
498
+ def inner(*args, **kwargs):
499
+ fargs = [_arg_str(a) for a in args]
500
+ fargs.extend(f"{k}={v}" for k, v in kwargs.items())
501
+ return f"ops.{name}({', '.join(fargs)})"
502
+
503
+ return inner
504
+
505
+ @staticmethod
506
+ def masked(mask, body, other) -> str:
507
+ return f"ops.masked({mask}, {body()}, {other})"
508
+
509
+ @staticmethod
510
+ def frexp(x):
511
+ return (f"ops.frexp({x})[0]", f"ops.frexp({x})[1]")
512
+
513
+ @staticmethod
514
+ def indirect_indexing(index_var, size, check=True) -> sympy.Symbol:
515
+ return sympy_index_symbol(f"({str(index_var)})")
516
+
517
+ @classmethod
518
+ def _init_cls(cls):
519
+ def make_handler(format_string):
520
+ @staticmethod # type: ignore[misc]
521
+ def inner(*args):
522
+ return format_string.format(*args)
523
+
524
+ return inner
525
+
526
+ for name, format_string in itertools.chain(
527
+ magic_methods.items(), inplace_methods.items()
528
+ ):
529
+ setattr(cls, name, make_handler(format_string))
530
+
531
+
532
+ MockHandler._init_cls()
533
+
534
+
535
+ # Use mypy to check protocol implemented correctly
536
+ def _typecheck_MockHandler(h: MockHandler) -> OpsHandler[str]:
537
+ return h
538
+
539
+
540
+ class KernelFormatterHandler:
541
+ def __init__(self, parent_handler):
542
+ self.parent_handler = parent_handler
543
+ self.output = IndentedBuffer(1)
544
+ self.var_counter = itertools.count()
545
+
546
+ @staticmethod
547
+ def ir_to_string(ir_fn, index, rindex=None) -> str:
548
+ from .ir import FlexibleLayout
549
+ from .virtualized import V
550
+
551
+ args = [index, rindex] if rindex is not None else [index]
552
+ names = ["index", "rindex"] if rindex is not None else ["index"]
553
+ formatter = KernelFormatterHandler(MockHandler())
554
+
555
+ with formatter.output.indent(-1):
556
+ formatter.output.writeline(f"def inner_fn({', '.join(names)}):")
557
+ for name, arg in zip(names, args):
558
+ if arg:
559
+ lhs = ", ".join(
560
+ [
561
+ str("_" if isinstance(v, (int, sympy.Integer)) else v)
562
+ for v in arg
563
+ ]
564
+ )
565
+ formatter.output.writeline(f"{lhs} = {name}")
566
+
567
+ with V.set_ops_handler(formatter), patch.object(
568
+ FlexibleLayout, "allow_indexing", True
569
+ ):
570
+ result = ir_fn(*args)
571
+ return formatter.getvalue(result)
572
+
573
+ def __getattr__(self, name) -> Callable[..., Any]:
574
+ def inner(*args, **kwargs):
575
+ line = getattr(self.parent_handler, name)(*args, **kwargs)
576
+ if name == "indirect_indexing":
577
+ return line
578
+
579
+ def write(line):
580
+ # replace line with a new variable name
581
+ varname = f"tmp{next(self.var_counter)}"
582
+ self.output.writeline(f"{varname} = {line}")
583
+ return varname
584
+
585
+ return pytree.tree_map(write, line)
586
+
587
+ return inner
588
+
589
+ def reduction(
590
+ self,
591
+ dtype: torch.dtype,
592
+ src_dtype: torch.dtype,
593
+ reduction_type: ReductionType,
594
+ value: Union[str, Tuple[str, ...]],
595
+ ) -> Union[str, Tuple[str, ...]]:
596
+ line = self.parent_handler.reduction(dtype, src_dtype, reduction_type, value)
597
+ num_values = reduction_num_outputs(reduction_type)
598
+ varnames = [f"tmp{next(self.var_counter)}" for _ in range(num_values)]
599
+ self.output.writeline(f"{','.join(varnames)} = {line}")
600
+ return tuple(varnames) if num_values > 1 else varnames[0]
601
+
602
+ def getvalue(self, result):
603
+ self.output.writeline(f"return {result}")
604
+ return self.output.getvalue()
605
+
606
+
607
+ # Use mypy to check protocol implemented correctly
608
+ def _typecheck_KernelFormatterHandler(h: KernelFormatterHandler) -> OpsHandler[str]:
609
+ return h
610
+
611
+
612
+ class WrapperHandler(Generic[T]):
613
+ def __init__(self, inner: OpsHandler[T]):
614
+ self._inner = inner
615
+
616
+ def __getattr__(self, item):
617
+ return getattr(self._inner, item)
618
+
619
+
620
+ # Use mypy to check protocol implemented correctly
621
+ def _typecheck_WrapperHandler(h: WrapperHandler[T]) -> OpsHandler[T]:
622
+ return h
623
+
624
+
625
+ class OpCounterCSE:
626
+ """Shim to count how many ops are used"""
627
+
628
+ def __init__(self, inner):
629
+ super().__init__()
630
+ self.parent_handler = inner
631
+ self.op_count = 0
632
+ self.var_names = {}
633
+
634
+ def __getattr__(self, name):
635
+ def inner(*args, **kwargs):
636
+ val = getattr(self.parent_handler, name)(*args, **kwargs)
637
+ if name == "indirect_indexing":
638
+ return val
639
+
640
+ def count(val):
641
+ if val not in self.var_names:
642
+ varname = f"tmp{self.op_count}"
643
+ self.op_count += 1
644
+ self.var_names[val] = varname
645
+ return varname
646
+ else:
647
+ return self.var_names[val]
648
+
649
+ return pytree.tree_map(count, val)
650
+
651
+ return inner
652
+
653
+
654
+ def _typecheck_OpCounterCSE(h: OpCounterCSE) -> OpsHandler[str]:
655
+ return h
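
As the OpsHandler docstring notes, handlers are checked structurally rather than by inheritance: a handler just has to be accepted by a _typecheck_* function. A hedged sketch of a custom wrapper in the same style as the handlers above (the class is illustrative, not part of Inductor):

    from typing import Any, Callable

    from torch._inductor.ops_handler import MockHandler, OpsHandler, WrapperHandler


    class LoggingHandler(WrapperHandler[Any]):
        """Illustrative wrapper: print each op as it is dispatched, then delegate."""

        def __getattr__(self, name) -> Callable[..., Any]:
            inner_fn = getattr(self._inner, name)

            def wrapped(*args, **kwargs):
                print(f"dispatching ops.{name}")
                return inner_fn(*args, **kwargs)

            return wrapped


    # Structural typing check, in the same style as the _typecheck_* helpers above.
    def _typecheck_LoggingHandler(h: LoggingHandler) -> OpsHandler[Any]:
        return h


    logged = LoggingHandler(MockHandler())
    print(logged.exp("x"))   # prints "dispatching ops.exp", then "ops.exp(x)"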
venv/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py ADDED
@@ -0,0 +1,118 @@
1
+ import math
2
+
3
+ import sympy
4
+
5
+ import torch
6
+ from torch.utils._sympy.value_ranges import ValueRanges
7
+ from .ir import LoopBody
8
+ from .utils import dominated_nodes
9
+
10
+
11
+ def val_expressable_in_32_bits(val):
12
+ if getattr(val, "is_Boolean", False):
13
+ return True
14
+
15
+ if isinstance(val, sympy.Expr):
16
+ assert val.is_number
17
+ if val.is_Integer or val.is_Boolean:
18
+ val = int(val)
19
+ else:
20
+ val = float(val)
21
+
22
+ # bound within mantissa
23
+ if isinstance(val, float):
24
+ return val <= (2**24) and val >= -(2**24)
25
+
26
+ if isinstance(val, int):
27
+ iinfo = torch.iinfo(torch.int32)
28
+ return val <= iinfo.max and val >= iinfo.min
29
+
30
+ raise Exception(f"Unexpected value {val}")
31
+
32
+
33
+ def range_expressable_in_32_bits(range):
34
+ return val_expressable_in_32_bits(range.lower) and val_expressable_in_32_bits(
35
+ range.upper
36
+ )
37
+
38
+
39
+ def try_to_reduce_precision(node, bounds, indirect_vars, indices, replacement_vals):
40
+ # if a downstream use of a node explicitly converts to int32, or float16/float32/float64,
41
+ # then it's precision is set for that chain of uses, and we don't need to consider those
42
+ # dominated values
43
+ def skip_filter(node):
44
+ return node.target == "to_dtype" and node.args[2] in (
45
+ torch.int32,
46
+ torch.float32,
47
+ torch.float64,
48
+ )
49
+
50
+ # TODO - there are dominated uses whose dtype does not depend on whether
51
+ # we reduce the precision here, e.g. add(int64, int64) one of the args can be reduced to
52
+ # int32 without changing the output precision of the node. this case hasn't shown up
53
+ for dominated in dominated_nodes([node], skip_filter):
54
+ if dominated.target in ["store", "output"]:
55
+ continue
56
+
57
+ if isinstance(dominated.target, str) and "set_indirect" in dominated.target:
58
+ idx = int(dominated.target[len("set_indirect") :])
59
+ indirect_var = indirect_vars[idx]
60
+
61
+ # We check that we can compute all the indices it's involved in with int32
62
+ for index, expr in indices.items():
63
+ if indirect_var in expr.free_symbols:
64
+ index_val = replacement_vals[index]
65
+
66
+ if math.isinf(index_val.lower) or math.isinf(index_val.upper):
67
+ return
68
+
69
+ # all indices are integers, so make sure that we
70
+ # use the bounds of integers instead of floats.
71
+ # TODO - not sure if we should be doing int/float casts while tracing,
72
+ # might interfere with sympy.
73
+
74
+ index_val_int = ValueRanges[sympy.Expr](
75
+ int(index_val.lower), int(index_val.upper)
76
+ )
77
+ if not range_expressable_in_32_bits(index_val_int):
78
+ return
79
+
80
+ if not range_expressable_in_32_bits(bounds[dominated]):
81
+ return
82
+
83
+ args = list(node.args)
84
+ args[2] = torch.int32
85
+ node.args = tuple(args)
86
+
87
+
88
+ def indexing_dtype_strength_reduction(loop_body: LoopBody):
89
+ """
90
+ Performs Value Range Analysis on LoopBody's fx graph to reduce precision of
91
+ intermediaries from int64 to int32
92
+ """
93
+ bv = loop_body.bounds()
94
+
95
+ int64_dtype_nodes = [
96
+ node
97
+ for node in loop_body.get_nodes()
98
+ if (
99
+ node.target == "to_dtype"
100
+ and node.args[2] == torch.int64
101
+ and node not in bv.unbounded_vars
102
+ )
103
+ ]
104
+ if not int64_dtype_nodes:
105
+ return
106
+
107
+ bounds = bv.get_bounds()
108
+
109
+ # TODO - if dominated node of one to_dtype is not expressible in int32,
110
+ # we should short circuit another to_dtype node if that node also dominates
111
+ for node in int64_dtype_nodes:
112
+ try_to_reduce_precision(
113
+ node,
114
+ bounds,
115
+ loop_body.indirect_vars,
116
+ loop_body.indexing_exprs,
117
+ bv.replacement_vals,
118
+ )
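
The 2**24 bound used above comes from float32's 24-bit significand: integers beyond that magnitude are no longer exactly representable, so an index that passes through float arithmetic could silently change value. A quick self-contained check of both bounds:

    import torch

    # float32 represents every integer up to 2**24 exactly, but not 2**24 + 1.
    assert int(torch.tensor(2.0**24, dtype=torch.float32).item()) == 2**24
    assert int(torch.tensor(2.0**24 + 1, dtype=torch.float32).item()) != 2**24 + 1

    # int32 range used for the integer case.
    iinfo = torch.iinfo(torch.int32)
    assert iinfo.min == -(2**31) and iinfo.max == 2**31 - 1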
venv/lib/python3.10/site-packages/torch/_inductor/pattern_matcher.py ADDED
@@ -0,0 +1,1524 @@
1
+ from __future__ import annotations
2
+
3
+ import dataclasses
4
+ import functools
5
+ import inspect
6
+ import itertools
7
+ import logging
8
+ import operator
9
+ import os
10
+ import re
11
+ from collections import defaultdict
12
+ from typing import (
13
+ Any,
14
+ Callable,
15
+ DefaultDict,
16
+ Dict,
17
+ Iterable,
18
+ List,
19
+ NoReturn,
20
+ Optional,
21
+ Set,
22
+ Union,
23
+ )
24
+
25
+ from typing_extensions import TypeGuard
26
+
27
+ import torch
28
+ import torch._guards
29
+ import torch.fx
30
+ import torch.utils._pytree as pytree
31
+ from torch._dispatch.python import enable_python_dispatcher
32
+ from torch._dynamo.utils import counters
33
+ from torch._prims_common import is_integer_dtype
34
+ from torch.fx import Node
35
+ from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
36
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
37
+ from torch.fx.immutable_collections import immutable_dict, immutable_list
38
+
39
+ from .._functorch import config as functorch_config
40
+ from .._functorch.aot_autograd import aot_function, make_boxed_func
41
+ from .._functorch.partitioners import default_partition
42
+ from .._subclasses import FakeTensorMode
43
+ from ..fx import Transformer
44
+ from . import config
45
+ from .decomposition import select_decomp_table
46
+ from .lowering import fallback_node_due_to_unsupported_type
47
+
48
+ log = logging.getLogger(__name__)
49
+ aten = torch.ops.aten
50
+ prims = torch.ops.prims
51
+
52
+ Constant = Any
53
+ NodeOrConstant = Union[Constant, torch.fx.Node]
54
+
55
+
56
+ class Multiple:
57
+ pass
58
+
59
+
60
+ # Sentinel indicating multiple quantities can be matched
61
+ MULTIPLE = Multiple()
62
+
63
+
64
+ class Match:
65
+ """
66
+ Represents a successfully matched pattern.
67
+ """
68
+
69
+ def __init__(self, pattern: PatternExpr, args=None, kwargs=None):
70
+ super().__init__()
71
+ self.pattern = pattern
72
+ # The input nodes that must be passed in to the result
73
+ self.args = args or []
74
+ self.kwargs = kwargs or {}
75
+ # The nodes matched in this expression
76
+ self.nodes: List[torch.fx.Node] = []
77
+ # Mapping CallFunction to the node.target
78
+ self.targets: Dict[_TargetExpr, torch.fx.node.Target] = {}
79
+ self.ctx: Optional[MatchContext] = None
80
+ self.replacement_graph: Optional[torch.fx.Graph] = None
81
+
82
+ @property
83
+ def graph(self) -> torch.fx.Graph:
84
+ assert self.ctx
85
+ return self.ctx.graph
86
+
87
+ def extend(self, other: Match):
88
+ if self.kwargs:
89
+ for key in set(self.kwargs.keys()) & set(other.kwargs.keys()):
90
+ if self.kwargs[key] != other.kwargs[key]:
91
+ raise FailedMatch("kwarg mismatch: {}", key)
92
+ self.args.extend(other.args)
93
+ self.nodes.extend(other.nodes)
94
+ self.kwargs.update(other.kwargs)
95
+ self.targets.update(other.targets)
96
+
97
+ def bundle(self) -> Match:
98
+ # Wrap args in an extra list
99
+ self.args = [tuple(self.args)] if self.args else []
100
+ return self
101
+
102
+ def __repr__(self):
103
+ return f"Match(..., {self.args}, {self.kwargs})"
104
+
105
+ def erase_nodes(self, graph: torch.fx.Graph):
106
+ for n in reversed(self.nodes):
107
+ if not n._erased:
108
+ graph.erase_node(n)
109
+
110
+ def output_nodes(self) -> List[Optional[torch.fx.Node]]:
111
+ assert self.ctx
112
+ return [
113
+ (self.ctx.pattern_to_node[p] if p is not None else None)
114
+ for p in self.ctx.outputs
115
+ ]
116
+
117
+ def output_node(self) -> torch.fx.Node:
118
+ return next(p for p in self.output_nodes() if p)
119
+
120
+ def replace_with_graph(self, replacement_graph, args):
121
+ assert self.ctx
122
+ ReplacementPatternEntry.replace_with_graph(
123
+ self, self.ctx.graph, replacement_graph, args
124
+ )
125
+
126
+ def replace_by_example(self, replacement_fn, args, trace_fn=None, run_dce=True):
127
+ assert self.ctx
128
+ if trace_fn is None:
129
+ trace_fn = functools.partial(fwd_only, run_dce=run_dce)
130
+ replacement = trace_fn(
131
+ replacement_fn, torch.fx.map_arg(args, lambda arg: arg.meta["val"])
132
+ )
133
+ ReplacementPatternEntry.replace_with_graph(
134
+ self,
135
+ self.ctx.graph,
136
+ replacement,
137
+ args,
138
+ )
139
+
140
+
141
+ class FailedMatch(RuntimeError):
142
+ def __init__(self, format_string, *args, **kwargs):
143
+ self.format_string = format_string
144
+ # We want to construct error messages lazily instead of eagerly, as
145
+ # constructing them eagerly can significantly worsen compile times.
146
+ if len(format_string) > 200:
147
+ raise RuntimeError(
148
+ f"Format string too long - use lazy construction of strings instead. Format string is\n {format_string}"
149
+ )
150
+ self.args = args
151
+ self.kwargs = kwargs
152
+
153
+ def __str__(self):
154
+ return self.format_string.format(*self.args, **self.kwargs)
155
+
156
+ def __bool__(self):
157
+ return False
158
+
159
+
160
+ def is_match(m: Union[Match, FailedMatch]) -> TypeGuard[Match]:
161
+ """
162
+ TypeGuards cannot act on `self`. Thus this function exists to let mypy
163
+ recognize FailedMatch.__bool__ as a TypeGuard.
164
+ """
165
+ return bool(m)
166
+
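
FailedMatch doubling as a falsy value is what makes the calling convention work: _match either returns a Match or a FailedMatch, and callers test the result with plain truthiness (is_match exists mainly so mypy narrows the type). A small sketch of consuming a match result, assuming the CallFunction pattern class defined further down in this file:

    import operator

    import torch
    from torch._inductor.pattern_matcher import CallFunction, KeywordArg

    def f(x, y):
        return x + y

    gm = torch.fx.symbolic_trace(f)
    add_node = next(n for n in gm.graph.nodes if n.op == "call_function")

    pattern = CallFunction(operator.add, KeywordArg("a"), KeywordArg("b"))
    m = pattern.match(add_node)
    if not m:                    # FailedMatch.__bool__ returns False
        print(f"no match: {m}")  # the message is formatted lazily in __str__
    else:
        print(m.kwargs)          # fx nodes captured by the KeywordArg patterns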
167
+
168
+ class MatchContext:
169
+ """
170
+ State needed while running PatternExpr._match().
171
+ """
172
+
173
+ def __init__(
174
+ self,
175
+ outputs: List[Optional[PatternExpr]],
176
+ pattern_to_node: Optional[Dict[PatternExpr, Node]] = None,
177
+ *,
178
+ graph: torch.fx.Graph,
179
+ ):
180
+ self.outputs = outputs
181
+ self.pattern_to_node = {} if pattern_to_node is None else pattern_to_node
182
+ self.graph = graph
183
+ self.exclusive_node_set: List[NodeOrConstant] = []
184
+
185
+ def match(self, pattern, node):
186
+ """wrapper to check reused nodes in patterns"""
187
+ if pattern in self.pattern_to_node:
188
+ if self.pattern_to_node[pattern] == node:
189
+ return Match(pattern) # already checked this node
190
+ else:
191
+ return FailedMatch("repeated pattern differs")
192
+ m = pattern._match(node, self)
193
+ assert pattern not in self.pattern_to_node
194
+ self.pattern_to_node[pattern] = node if m else None
195
+ m.ctx = self
196
+ return m
197
+
198
+ def filter_multi_user_patterns(self):
199
+ return {
200
+ pattern: node
201
+ for pattern, node in self.pattern_to_node.items()
202
+ if pattern.has_multiple_users() and node is not None
203
+ }
204
+
205
+
206
+ class PatternExpr:
207
+ """
208
+ Base class for types of patterns
209
+ """
210
+
211
+ def _match(
212
+ self, node: torch.fx.Node, ctx: MatchContext
213
+ ) -> Union[Match, FailedMatch]:
214
+ raise NotImplementedError()
215
+
216
+ def match(self, node: torch.fx.Node) -> Union[Match, FailedMatch]:
217
+ try:
218
+ return MatchContext([self], graph=node.graph).match(self, node)
219
+ except FailedMatch as e:
220
+ return e
221
+
222
+ def has_multiple_users(self) -> bool:
223
+ return False
224
+
225
+ def __repr__(self):
226
+ return self.__class__.__name__ + "()"
227
+
228
+ def find_anchor_nodes(self, ctx: MatchContext, searched):
229
+ if self in ctx.pattern_to_node:
230
+ yield ctx.pattern_to_node[self]
231
+
232
+
233
+ class Arg(PatternExpr):
234
+ """
235
+ Capture an arg which will become an input to the handler. Args are
236
+ passed in depth first order.
237
+ """
238
+
239
+ def _match(self, node: NodeOrConstant, ctx: MatchContext):
240
+ return Match(self, args=[node]) # matches anything
241
+
242
+
243
+ class Ignored(PatternExpr):
244
+ """
245
+ Match an arg, but don't pass it to handler
246
+ """
247
+
248
+ def _match(self, node: NodeOrConstant, ctx: MatchContext):
249
+ return Match(self) # matches anything
250
+
251
+ def __repr__(self):
252
+ return "*"
253
+
254
+ def pretty_print(self, pp: PatternPrettyPrinter):
255
+ return "Ignored()"
256
+
257
+
258
+ class KeywordArg(PatternExpr):
259
+ """
260
+ Capture a kwarg which will become an input to the handler.
261
+ """
262
+
263
+ def __init__(self, name: str):
264
+ super().__init__()
265
+ self.name = name
266
+
267
+ def __repr__(self):
268
+ return f"KeywordArg({self.name!r})"
269
+
270
+ def _match(self, node: NodeOrConstant, ctx: MatchContext):
271
+ return Match(self, kwargs={self.name: node}) # matches anything
272
+
273
+
274
+ class ExclusiveKeywordArg(PatternExpr):
275
+ """
276
+ Capture a kwarg which will become an input to the handler. A given node may only be captured once across all exclusive args in a pattern.
277
+ """
278
+
279
+ def __init__(self, name):
280
+ super().__init__()
281
+ self.name = name
282
+
283
+ def __repr__(self):
284
+ return f"ExclusiveKeywordArg({self.name!r})"
285
+
286
+ def _match(self, node: NodeOrConstant, ctx: MatchContext):
287
+ if node in ctx.exclusive_node_set:
288
+ return FailedMatch("exclusive arg appears twice")
289
+
290
+ ctx.exclusive_node_set.append(node)
291
+ return Match(self, kwargs={self.name: node}) # matches anything
292
+
293
+
294
+ class _TargetExpr(PatternExpr):
295
+ """
296
+ Base class for filtering match by node.target
297
+ """
298
+
299
+ op: Optional[str] = None
300
+
301
+ def __init__(self, fns, users=1):
302
+ if not self.op:
303
+ raise NotImplementedError("Shouldn't directly use _TargetExpr")
304
+ super().__init__()
305
+ fns = [fns] if callable(fns) or isinstance(fns, str) else list(fns)
306
+ for fn in list(fns):
307
+ if isinstance(fn, torch._ops.OpOverloadPacket):
308
+ fns.extend([getattr(fn, overload) for overload in fn.overloads()])
309
+
310
+ self.fns: List[Union[Callable[..., Any], str]] = fns
311
+ self.fns_set: Set[Union[Callable[..., Any], str]] = set(fns)
312
+ self.users: Union[int, Multiple] = users
313
+
314
+ def fns_repr(self) -> str:
315
+ first_repr = self.fns[0]
316
+ if not isinstance(first_repr, str):
317
+ first_repr = first_repr.__name__
318
+
319
+ if len(self.fns) > 1:
320
+ return f"[{first_repr}, ...]"
321
+ elif self.fns[0] is getattr(torch, first_repr, None):
322
+ return f"torch.{first_repr}"
323
+ elif isinstance(self.fns[0], torch._ops.OpOverload):
324
+ return str(self.fns[0])
325
+ else:
326
+ return first_repr
327
+
328
+ def __repr__(self):
329
+ return f"{self.__class__.__name__}({self.fns_repr()})"
330
+
331
+ def has_multiple_users(self) -> bool:
332
+ return isinstance(self.users, Multiple) or self.users > 1
333
+
334
+ def find_anchor_nodes(self, ctx: MatchContext, searched):
335
+ raise NotImplementedError()
336
+
337
+ def _match_fns(self, node: torch.fx.Node):
338
+ return (
339
+ isinstance(node, torch.fx.Node)
340
+ and node.op == self.op
341
+ and extract_target(node) in self.fns_set
342
+ )
343
+
344
+ def _match_users(self, node: torch.fx.Node, ctx: MatchContext):
345
+ return (
346
+ self in ctx.outputs
347
+ or self.users is MULTIPLE
348
+ or len(node.users) == self.users
349
+ )
350
+
351
+
352
+ class _TargetArgsExpr(_TargetExpr):
353
+ """
354
+ Base class for filtering match by node.{target,args,kwargs}
355
+ """
356
+
357
+ def __init__(self, fns, *args, _users=1, **kwargs):
358
+ super().__init__(fns, _users)
359
+ self.args = tuple(args)
360
+ self.kwargs = dict(kwargs)
361
+ if any(
362
+ isinstance(x, (dict, list, tuple))
363
+ for x in itertools.chain(args, kwargs.values())
364
+ ):
365
+ self.flatten = self.pytree_flatten
366
+ else:
367
+ self.flatten = self.simple_flatten
368
+ self.flat_args_kwargs = self.flatten(self.args, self.kwargs)
369
+
370
+ @staticmethod
371
+ def simple_flatten(args, kwargs: Dict[Any, Any]):
372
+ return (*args, *kwargs.values()), (len(args), *kwargs.keys())
373
+
374
+ @staticmethod
375
+ def pytree_flatten(args, kwargs: Dict[Any, Any]):
376
+ def norm_spec(s: pytree.TreeSpec):
377
+ if s.type is None:
378
+ return s
379
+ mapping = {immutable_list: list, tuple: list, immutable_dict: dict}
380
+ return pytree.TreeSpec(
381
+ mapping.get(s.type, s.type),
382
+ s.context,
383
+ list(map(norm_spec, s.children_specs)),
384
+ )
385
+
386
+ flat, spec = pytree.tree_flatten([args, kwargs])
387
+ spec = norm_spec(spec)
388
+ return flat, spec
389
+
390
+ def __repr__(self):
391
+ args = [
392
+ self.fns_repr(),
393
+ *map(repr, self.args),
394
+ *[f"{k}={v}" for k, v in self.kwargs.items()],
395
+ ]
396
+ return f"{self.__class__.__name__}({', '.join(args)})"
397
+
398
+ def pretty_print(self, pp: PatternPrettyPrinter):
399
+ args = [
400
+ self.fns_repr(),
401
+ *(pp.pretty_print(x) for x in self.args),
402
+ *[f"{k}={pp.pretty_print(v)}" for k, v in self.kwargs.items()],
403
+ ]
404
+ if isinstance(self.users, Multiple):
405
+ args.append("_users=MULTIPLE")
406
+ elif self.users > 1:
407
+ args.append(f"_users={self.users}")
408
+
409
+ joiner_str = ", "
410
+ return f"{self.__class__.__name__}({joiner_str.join(args)})"
411
+
412
+ def _match(self, node: torch.fx.Node, ctx: MatchContext):
413
+ if not self._match_fns(node) or len(node.args) != len(self.args):
414
+ return FailedMatch("function_mismatch: node={}, pattern={}", node, self)
415
+
416
+ if not self._match_users(node, ctx):
417
+ return FailedMatch("multiple_users {}", self)
418
+
419
+ _args = node.args
420
+ _kwargs = node.kwargs
421
+ if len(_kwargs) < len(self.kwargs):
422
+ from torch.fx.operator_schemas import normalize_function
423
+
424
+ normalized_args_and_kwargs = normalize_function(
425
+ node.target, node.args, node.kwargs
426
+ )
427
+
428
+ if normalized_args_and_kwargs is None:
429
+ return FailedMatch("function_mismatch: node={}, pattern={}", node, self)
430
+ else:
431
+ _args, _kwargs = normalized_args_and_kwargs
432
+ if len(_args) == len(self.args) and len(_kwargs) >= len(self.kwargs):
433
+ _kwargs = {i: _kwargs[i] for i in _kwargs if i in self.kwargs}
434
+ else:
435
+ return FailedMatch(
436
+ "function_mismatch: node={}, pattern={}", node, self
437
+ )
438
+ else:
439
+ _kwargs = {i: _kwargs[i] for i in _kwargs if i in self.kwargs}
440
+
441
+ node_items, node_spec = self.flatten(_args, _kwargs)
442
+ self_items, self_spec = self.flat_args_kwargs
443
+ if node_spec != self_spec:
444
+ return FailedMatch("args_structure {} {}", node_spec, self_spec)
445
+ assert len(node_items) == len(self_items)
446
+
447
+ m = Match(self)
448
+ for i, pattern, child_node in zip(itertools.count(), self_items, node_items):
449
+ if isinstance(pattern, PatternExpr):
450
+ child_match = ctx.match(pattern, child_node)
451
+ if not child_match:
452
+ return child_match
453
+ m.extend(child_match)
454
+ elif isinstance(child_node, torch.fx.Node) or child_node != pattern:
455
+ return FailedMatch(
456
+ "constant_args: {} {!r}!={!r}", node, child_node, pattern
457
+ )
458
+ m.nodes.append(node)
459
+ m.targets[self] = node.target
460
+ return m
461
+
462
+ def find_anchor_nodes(self, ctx: MatchContext, searched):
463
+ """
464
+ This is used when we are matching a pattern with multiple outputs.
465
+ There is a partial match (stored in ctx) and we want to walk
466
+ this pattern to find a connection to an already-matched node.
467
+
468
+ Yields candidate nodes that `self._match` might like.
469
+ """
470
+ if self in ctx.pattern_to_node:
471
+ yield ctx.pattern_to_node[self]
472
+ return
473
+
474
+ for pattern in self.flat_args_kwargs[0]:
475
+ if isinstance(pattern, PatternExpr):
476
+ for other_node in pattern.find_anchor_nodes(ctx, searched):
477
+ if not isinstance(other_node, torch.fx.Node):
478
+ continue
479
+ for node in other_node.users:
480
+ if node not in searched:
481
+ if self._match_fns(node):
482
+ yield node
483
+ searched.add(node)
484
+
485
+
486
+ class CallFunction(_TargetArgsExpr):
487
+ """
488
+ Matches a call_function node in the FX graphs: `fns[i](*args, **kwargs)`
489
+ """
490
+
491
+ op = "call_function"
492
+
493
+
494
+ class CallMethod(_TargetArgsExpr):
495
+ """
496
+ Matches a call_method node in the FX graphs: `fns[i].method(*args, **kwargs)`
497
+ """
498
+
499
+ op = "call_method"
500
+
501
+
502
+ class CallModule(_TargetArgsExpr):
503
+ """
504
+ Matches a call_module node in the FX graphs: `module(*args, **kwargs)`
505
+ """
506
+
507
+ op = "call_module"
508
+
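+ # Illustrative sketch (not part of the upstream module): a pattern is composed
+ # from the expression classes above and matched against an FX node. The pattern
+ # mirrors the aten.view pattern built in joint_fwd_bwd() further below; `node` is
+ # a placeholder for a torch.fx.Node taken from the graph being scanned.
+ def _example_view_pattern(node):
+     pattern = CallFunction(
+         torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")
+     )
+     m = pattern.match(node)
+     if is_match(m):
+         # KeywordArg captures land in Match.kwargs under their declared names
+         return m.kwargs["arg"], m.kwargs["size"]
+     return None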
509
+
510
+ class _TargetExprVarArgs(_TargetExpr):
511
+ """
512
+ Matches a node of the given op with any arguments, which are captured and passed on to the handler
513
+ """
514
+
515
+ def _match(self, node: torch.fx.Node, ctx: MatchContext):
516
+ if not self._match_fns(node):
517
+ return FailedMatch("function_mismatch")
518
+
519
+ if not self._match_users(node, ctx):
520
+ return FailedMatch("multiple_users")
521
+
522
+ m = Match(self)
523
+ m.nodes.append(node)
524
+ m.targets[self] = node.target
525
+ m.args.extend(node.args)
526
+ m.kwargs.update(node.kwargs)
527
+ return m
528
+
529
+
530
+ class CallFunctionVarArgs(_TargetExprVarArgs):
531
+ op = "call_function"
532
+
533
+
534
+ class CallMethodVarArgs(_TargetExprVarArgs):
535
+ op = "call_method"
536
+
537
+
538
+ class CallModuleVarArgs(_TargetExprVarArgs):
539
+ op = "call_module"
540
+
541
+
542
+ class ListOf(PatternExpr):
543
+ """
544
+ Matches a repeated pattern
545
+ """
546
+
547
+ def __init__(self, pattern: PatternExpr, partial=False):
548
+ super().__init__()
549
+ assert isinstance(pattern, PatternExpr)
550
+ self.pattern = pattern
551
+ self.partial = partial
552
+
553
+ def __repr__(self):
554
+ return f"{self.__class__.__name__}({self.pattern})"
555
+
556
+ def _match(self, node: List[torch.fx.Node], ctx: MatchContext): # type: ignore[override]
557
+ if not isinstance(node, (list, tuple)) or len(node) == 0:
558
+ return FailedMatch("non_list")
559
+ m = Match(self)
560
+ # Propagating patterns with multiple users will ensure we don't revisit
561
+ # the same nodes
562
+ pattern_to_node = ctx.filter_multi_user_patterns()
563
+ matched = False
564
+ for i, child_node in enumerate(node):
565
+ child_ctx = MatchContext(
566
+ ctx.outputs, pattern_to_node, graph=child_node.graph
567
+ )
568
+ child_match = child_ctx.match(self.pattern, child_node)
569
+ pattern_to_node = child_ctx.filter_multi_user_patterns()
570
+ if not child_match:
571
+ if not self.partial:
572
+ return FailedMatch("list[{}]: {}", i, child_match)
573
+ continue
574
+ matched = True
575
+ m.extend(child_match.bundle())
576
+ if not matched:
577
+ return FailedMatch("list: no_match")
578
+ return m.bundle()
579
+
580
+
581
+ class MultiOutputPattern(PatternExpr):
582
+ def __init__(self, outputs):
583
+ super().__init__()
584
+ assert all(isinstance(x, (PatternExpr, type(None))) for x in outputs), outputs
585
+ self.outputs: List[Optional[PatternExpr]] = outputs
586
+
587
+ @property
588
+ def fns(self):
589
+ assert self.outputs[0] and hasattr(self.outputs[0], "fns")
590
+ return self.outputs[0].fns
591
+
592
+ def __repr__(self):
593
+ return f"{self.__class__.__name__}({self.outputs})"
594
+
595
+ def pretty_print(self, pp: PatternPrettyPrinter):
596
+ args = [pp.pretty_print(x) for x in self.outputs]
597
+ joiner_str = f",\n{' '}"
598
+ str_out = f"{self.__class__.__name__}([{joiner_str.join(args)}"
599
+ str_out = f"{str_out}\n])"
600
+ return str_out
601
+
602
+ def _match(self, node: torch.fx.Node, ctx: MatchContext):
603
+ m = ctx.match(self.outputs[0], node)
604
+ if not m:
605
+ return m
606
+
607
+ for pattern in self.outputs[1:]:
608
+ if pattern is None:
609
+ continue
610
+ child_match = self._match_from_anchors(pattern, ctx)
611
+ if not child_match:
612
+ return child_match
613
+ m.extend(child_match)
614
+
615
+ return m
616
+
617
+ def _match_from_anchors(self, pattern, ctx):
618
+ prior = dict(ctx.pattern_to_node)
619
+ m = FailedMatch("no anchor found")
620
+ for node in pattern.find_anchor_nodes(ctx, set()):
621
+ m = ctx.match(pattern, node)
622
+ if m:
623
+ return m
624
+ # revert any partial matches
625
+ ctx.pattern_to_node = dict(prior)
626
+ return m
627
+
628
+ def match(self, node: torch.fx.Node) -> Union[Match, FailedMatch]:
629
+ try:
630
+ return MatchContext(self.outputs, graph=node.graph).match(self, node)
631
+ except FailedMatch as e:
632
+ return e
633
+
634
+
635
+ class RepeatedExpr(PatternExpr):
636
+ """
637
+ Checks for a repeated pattern. Useful for repeated operations after a node such as `split` or `unbind`
638
+ """
639
+
640
+ def __init__(self, inner_pattern: PatternExpr):
641
+ super().__init__()
642
+ assert hasattr(inner_pattern, "fns")
643
+ self.inner_pattern = inner_pattern
644
+
645
+ @property
646
+ def fns(self):
647
+ return self.inner_pattern.fns
648
+
649
+ def _match(self, node: torch.fx.Node, ctx: MatchContext):
650
+ m = ctx.match(self.inner_pattern, node)
651
+ if not m:
652
+ return m
653
+ ctx.pattern_to_node.pop(
654
+ self.inner_pattern,
655
+ )
656
+ # Check all anchor nodes match the pattern
657
+ for anchor_node in self.inner_pattern.find_anchor_nodes(ctx, set()):
658
+ anchor_m = MatchContext([self], graph=node.graph).match(
659
+ self.inner_pattern, anchor_node
660
+ )
661
+ if not anchor_m:
662
+ return anchor_m
663
+ m.extend(anchor_m)
664
+ return m
665
+
666
+
667
+ class PatternPrettyPrinter:
668
+ """
669
+ Serializes Patterns to executable python.
670
+ XXX: currently only used and tested for fuse attention patterns. May not cover
671
+ all patterns.
672
+ """
673
+
674
+ def __init__(self):
675
+ self.namespace = torch.fx.graph._Namespace()
676
+ self.memoized_objs_names: Dict[PatternExpr, str] = {}
677
+ self.memoized_objs_pp: Dict[PatternExpr, str] = {}
678
+
679
+ @staticmethod
680
+ def run(obj: PatternExpr, output_name="output"):
681
+ """
682
+ Serializes obj to python code with obj written out to `output_name`
683
+ """
684
+
685
+ pp = PatternPrettyPrinter()
686
+ assert hasattr(obj, "pretty_print")
687
+ out_str = obj.pretty_print(pp=pp)
688
+
689
+ output = []
690
+ for key in pp.memoized_objs_names:
691
+ output.append(f"{pp.memoized_objs_names[key]} = {pp.memoized_objs_pp[key]}")
692
+
693
+ output.append(f"{output_name} = {out_str}")
694
+
695
+ return "\n".join(output)
696
+
697
+ def pretty_print(self, obj):
698
+ if isinstance(obj, _TargetArgsExpr):
699
+ if memoized_name := self.memoized_objs_names.get(obj):
700
+ return memoized_name
701
+ else:
702
+ return self.memoize(obj)
703
+ if hasattr(obj, "pretty_print"):
704
+ return obj.pretty_print(self)
705
+
706
+ return repr(obj)
707
+
708
+ def memoize(self, obj):
709
+ obj_str = obj.pretty_print(self)
710
+ obj_name = obj.fns_repr()
711
+ for prefix in ("aten.", "torch.", "prims."):
712
+ obj_name = obj_name.replace(prefix, "")
713
+
714
+ tmp_name = self.namespace.create_name(obj_name, None)
715
+ self.memoized_objs_names[obj] = tmp_name
716
+ self.memoized_objs_pp[obj] = obj_str
717
+ return tmp_name
718
+
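+ # Illustrative sketch (not part of the upstream module): PatternPrettyPrinter.run
+ # serializes a pattern back to executable Python, hoisting repeated sub-patterns
+ # into named temporaries via memoize(). `pattern` is a placeholder for any
+ # PatternExpr that defines pretty_print.
+ def _example_pretty_print(pattern):
+     src = PatternPrettyPrinter.run(pattern, output_name="serialized_pattern")
+     # the last line of `src` has the form "serialized_pattern = <expr>"
+     return src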
719
+
720
+ @dataclasses.dataclass
721
+ class PatternEntry:
722
+ pattern: PatternExpr
723
+ extra_check: Callable[[Match], bool]
724
+
725
+ def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
726
+ raise NotImplementedError()
727
+
728
+ def register(self, pass_dicts, target=None, prepend=False):
729
+ if target is None:
730
+ assert hasattr(self.pattern, "fns")
731
+ for fn in self.pattern.fns:
732
+ self.register(pass_dicts, fn, prepend=prepend)
733
+ elif isinstance(pass_dicts, (dict, PatternMatcherPass)):
734
+ if prepend:
735
+ pass_dicts[target].insert(0, self)
736
+ else:
737
+ pass_dicts[target].append(self)
738
+ else:
739
+ for x in pass_dicts:
740
+ self.register(x, target, prepend=prepend)
741
+
742
+
743
+ @dataclasses.dataclass
744
+ class LoweringPatternEntry(PatternEntry):
745
+ handler: Callable[..., Any]
746
+
747
+ def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
748
+ handler = functools.wraps(self.handler)(functools.partial(self.handler, match))
749
+ with graph.inserting_before(node):
750
+ replacement = graph.call_function(handler, tuple(match.args), match.kwargs)
751
+ replacement.meta.update(node.meta)
752
+ node.replace_all_uses_with(replacement)
753
+ assert match.nodes[-1] is node
754
+ match.erase_nodes(graph)
755
+
756
+
757
+ @dataclasses.dataclass
758
+ class GraphPatternEntry(PatternEntry):
759
+ """
760
+ A pattern that runs a function on the FX graph
761
+ """
762
+
763
+ handler: Callable[..., Any]
764
+
765
+ def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
766
+ with graph.inserting_before(node):
767
+ self.handler(match, *match.args, **match.kwargs)
768
+
769
+
770
+ @dataclasses.dataclass
771
+ class ReplacementPatternEntry(PatternEntry):
772
+ normalize_args: Callable[..., List[Any]]
773
+
774
+ @staticmethod
775
+ def replace_with_graph(
776
+ match: Match,
777
+ graph: torch.fx.Graph,
778
+ replacement_graph: torch.fx.Graph,
779
+ args: List[Any],
780
+ ):
781
+ output_nodes = match.output_nodes()
782
+ first_node = output_nodes[0]
783
+
784
+ class Replacer(torch.fx.Interpreter):
785
+ call_method = None # type: ignore[assignment]
786
+ call_module = None # type: ignore[assignment]
787
+ get_attr = None # type: ignore[assignment]
788
+
789
+ def run_node(self, node) -> Any:
790
+ if node.op in ("placeholder", "output"):
791
+ return super().run_node(node)
792
+ if node.op == "call_function":
793
+ target = node.target
794
+ args, kwargs = self.fetch_args_kwargs_from_env(node)
795
+ result = graph.call_function(target, args, kwargs)
796
+ if "val" in node.meta and "val" not in result.meta:
797
+ result.meta["val"] = node.meta["val"]
798
+ if isinstance(node.meta["val"], torch.Tensor):
799
+ assert "tensor_meta" in node.meta
800
+ result.meta["tensor_meta"] = node.meta["tensor_meta"]
801
+ return result
802
+ raise NotImplementedError(f"unhandled {node}")
803
+
804
+ output_nodes = match.output_nodes()
805
+
806
+ if len(output_nodes) == 1:
807
+ last_node = output_nodes[0]
808
+ else:
809
+ assert output_nodes[0]
810
+ nodes = list(output_nodes[0].graph.nodes)
811
+ indices = [
812
+ (nodes.index(n), n)
813
+ for n in output_nodes
814
+ if isinstance(n, torch.fx.Node)
815
+ ]
816
+ last_node = min(indices, key=lambda tup: tup[0])[1]
817
+
818
+ def percolate_tags(node, recompute_tag, input_stops):
819
+ queue = [node]
820
+ visited = set()
821
+
822
+ while queue:
823
+ arg = queue.pop()
824
+ if (
825
+ arg not in visited
826
+ and arg not in input_stops
827
+ and hasattr(arg, "meta")
828
+ ):
829
+ visited.add(arg)
830
+ arg.meta["recompute"] = recompute_tag
831
+ queue.extend(arg.all_input_nodes)
832
+
833
+ with graph.inserting_before(last_node):
834
+ replacement = Replacer(replacement_graph).run(*args)
835
+ if isinstance(replacement, torch.fx.Node):
836
+ replacement = [replacement]
837
+
838
+ def maybe_getitem(node):
839
+ if node.op != "call_function":
840
+ return None
841
+ if node.target != operator.getitem:
842
+ return None
843
+ assert len(node.args) == 2
844
+ return node.args[1]
845
+
846
+ def replace(old, new):
847
+ if old is None:
848
+ assert new is None
849
+ return
850
+ assert isinstance(old, torch.fx.Node)
851
+ if new is None:
852
+ old.replace_all_uses_with(None)
853
+ graph.erase_node(old)
854
+ return
855
+ if isinstance(new, torch.fx.Node):
856
+ if "val" not in new.meta:
857
+ new.meta.update(old.meta)
858
+
859
+ # Preserve the recompute tags in the replacement graph. We
860
+ # look at the recompute tags of the original output node to
861
+ # propagate the tag from the output all the way to the input
862
+ # args (named as args in the replace_with_graph).
863
+ # Note that this is best effort. Since patterns are from
864
+ # many to many, there is no easy way to correctly map the
865
+ # recomputable tags. It is possible in some scenarios that we
866
+ # incorrectly tag some nodes as recomputables.
867
+ if "recompute" in old.meta:
868
+ percolate_tags(new, old.meta["recompute"], args)
869
+
870
+ old.replace_all_uses_with(new)
871
+ graph.erase_node(old)
872
+ return
873
+
874
+ # `new` is not a node: it's a list of nodes.
875
+ #
876
+ # This happens when we want to replace a node that has a single
877
+ # packed return with multiple unpacked returns. We need to do
878
+ # some graph surgery here.
879
+ #
880
+ # Example:
881
+ # def original_graph(x):
882
+ # a = op(x)
883
+ # b = a[0]
884
+ # c = a[1]
885
+ # ...
886
+ #
887
+ # Assume that we want to replace op(x) with the graph
888
+ # def new_op(x):
889
+ # w = x + 1
890
+ # z = x + 2
891
+ # return (w, z)
892
+ #
893
+ # We need to replace `op` with the contents of `new_op`,
894
+ # and then rewrite a[0] to be w and a[1] to be z, as so:
895
+ # def new_graph(x):
896
+ # w = x + 1
897
+ # z = x + 2
898
+ # b = w
899
+ # c = z
900
+ # ...
901
+ old_uses = list(old.users.keys())
902
+ for user in old_uses:
903
+ idx = maybe_getitem(user)
904
+ if idx is None:
905
+ raise AssertionError("can't handle")
906
+ replace(user, new[idx])
907
+ graph.erase_node(old)
908
+
909
+ if len(output_nodes) == len(replacement):
910
+ for old, new in zip(output_nodes, replacement):
911
+ replace(old, new)
912
+ else:
913
+ assert len(output_nodes) == 1
914
+ replace(output_nodes[0], replacement)
915
+
916
+ match.erase_nodes(graph)
917
+
918
+ def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
919
+ self.replace_with_graph(
920
+ match,
921
+ graph,
922
+ match.replacement_graph, # type: ignore[arg-type]
923
+ self.normalize_args(*match.args, **match.kwargs),
924
+ )
925
+
926
+
927
+ def _return_true(match):
928
+ return True
929
+
930
+
931
+ def log_trace_failure(search_fn, e):
932
+ log.info(
933
+ "Replacement pattern %s failed to apply due to shape mismatch: %s",
934
+ search_fn.__name__,
935
+ e,
936
+ )
937
+
938
+
939
+ def register_replacement(
940
+ search_fn,
941
+ replace_fn,
942
+ example_inputs: Iterable[Any],
943
+ trace_fn: Callable[[Callable[..., Any], Iterable[Any]], torch.fx.GraphModule],
944
+ pass_dicts,
945
+ extra_check=_return_true,
946
+ scalar_workaround=(),
947
+ exclusive_arg_names=(),
948
+ search_fn_pattern=None,
949
+ ):
950
+ """
951
+ Create a replacement rule based on example functions that get traced
952
+ to create patterns. This supports both training and inference when
953
+ run on a joint forward+backward graph.
954
+
955
+ Args:
956
+ search_fn: traced to give original pattern
957
+ replace_fn: traced to give replacement graph
958
+ example_inputs: example inputs for initial trace
959
+ trace_fn: fwd_only or joint_fwd_bwd
960
+ pass_dicts: dict of passes to register to
961
+ extra_check: additional check to run on the match (using real shapes)
962
+ """
963
+ argnames_static = [*inspect.signature(search_fn).parameters.keys()]
964
+
965
+ def check_fn(match: Match):
966
+ """
967
+ Often shapes get burned into the pattern, so our initial match ran with
968
+ `ignore_types=(int, ...)`.
969
+
970
+ Recheck the match with the correct shapes.
971
+ """
972
+ argnames = list(argnames_static)
973
+ for name in argnames:
974
+ if name not in match.kwargs:
975
+ raise RuntimeError(
976
+ f"Not all inputs to pattern found in match.kwargs. Perhaps one "
977
+ f"of the inputs is unused? argnames={argnames}, match.kwargs={match.kwargs}"
978
+ )
979
+
980
+ args = list(
981
+ torch.fx.map_arg(
982
+ [match.kwargs[name] for name in argnames], lambda n: n.meta["val"]
983
+ )
984
+ )
985
+ sym_args: List[torch.SymInt] = []
986
+ with torch._dynamo.utils.detect_fake_mode(args):
987
+ for i, grad in enumerate(requires_grad):
988
+ if isinstance(args[i], torch.Tensor):
989
+ if grad and is_integer_dtype(args[i].dtype):
990
+ return False
991
+
992
+ args[i] = torch.empty_strided(
993
+ args[i].size(),
994
+ args[i].stride(),
995
+ dtype=args[i].dtype,
996
+ device=args[i].device,
997
+ requires_grad=grad,
998
+ )
999
+ for v in itertools.chain(args[i].shape, args[i].stride()):
1000
+ if isinstance(v, torch.SymInt) and all(
1001
+ guard_size_oblivious(v != a) for a in sym_args
1002
+ ):
1003
+ sym_args.append(v)
1004
+
1005
+ if sym_args:
1006
+ # AOT Autograd and make fx will dedupe symbolic shape size
1007
+ # accesses of sym ints that appear as inputs
1008
+ # We don't want the sym_size uses to interfere with pattern matching
1009
+ # so we provide them as inputs.
1010
+ # Later, when we actually do the replacement, the symbolic shape
1011
+ # sizes will get re-traced and added to the graph.
1012
+
1013
+ def search_fn_new(*args_new):
1014
+ return search_fn(*args_new[len(args_new) - len(args) :])
1015
+
1016
+ try:
1017
+ specific_graph = trace_fn(search_fn_new, sym_args + args)
1018
+ except RuntimeError as e:
1019
+ log_trace_failure(search_fn, e)
1020
+ return False
1021
+
1022
+ # correct argnames in the graph
1023
+ sym_arg_names = []
1024
+ for i, placeholder in zip(
1025
+ range(len(sym_args) + len(args)),
1026
+ specific_graph.graph.nodes,
1027
+ ):
1028
+ if i < len(sym_args):
1029
+ sym_arg_names.append(placeholder.target)
1030
+ continue
1031
+
1032
+ with specific_graph.graph.inserting_after(placeholder):
1033
+ new_node = specific_graph.graph.placeholder(
1034
+ argnames[i - len(sym_args)]
1035
+ )
1036
+ new_node.target = new_node.name
1037
+ placeholder.replace_all_uses_with(new_node)
1038
+ specific_graph.graph.erase_node(placeholder)
1039
+
1040
+ argnames = sym_arg_names + argnames
1041
+ else:
1042
+ try:
1043
+ specific_graph = trace_fn(search_fn, args)
1044
+ except RuntimeError as e:
1045
+ log_trace_failure(search_fn, e)
1046
+ return False
1047
+
1048
+ specific_pattern = fx_to_pattern(
1049
+ specific_graph,
1050
+ argnames=argnames,
1051
+ exclusive_arg_names=exclusive_arg_names,
1052
+ scalar_workaround=scalar_workaround,
1053
+ )
1054
+ specific_pattern_match = specific_pattern.match(match.output_nodes()[0]) # type: ignore[arg-type]
1055
+ if specific_pattern_match and extra_check(specific_pattern_match):
1056
+ # trace the pattern using the shapes from the user program
1057
+ match.replacement_graph = trace_fn(replace_fn, args) # type: ignore[assignment]
1058
+ return True
1059
+ return False
1060
+
1061
+ def normalize_args(**kwargs):
1062
+ args = []
1063
+ for name in argnames_static:
1064
+ args.append(kwargs.pop(name))
1065
+ for i in range(1, len(kwargs) + 1):
1066
+ if f"tangents_{i}" not in kwargs:
1067
+ break
1068
+ args.append(kwargs.pop(f"tangents_{i}"))
1069
+ assert not kwargs, f"leftover kwargs: {kwargs!r}"
1070
+ return args
1071
+
1072
+ if trace_fn is joint_fwd_bwd:
1073
+ # If inference mode is enabled during compilation, assume that we don't
1074
+ # want to match on any training graph patterns
1075
+ if torch.is_inference_mode_enabled():
1076
+ return False
1077
+
1078
+ # TODO: Revisit the functionalize_rng_ops for lowmem dropout
1079
+ with functorch_config.patch(functionalize_rng_ops=False):
1080
+ requires_grad: List[bool] = [
1081
+ isinstance(x, torch.Tensor) and x.requires_grad for x in example_inputs
1082
+ ]
1083
+ if search_fn_pattern is None:
1084
+ pattern = gen_pattern(
1085
+ search_fn,
1086
+ example_inputs,
1087
+ trace_fn,
1088
+ scalar_workaround,
1089
+ exclusive_arg_names,
1090
+ )
1091
+ else:
1092
+ pattern = search_fn_pattern
1093
+
1094
+ pattern_repr = PatternPrettyPrinter.run(pattern)
1095
+ assert pattern_repr not in _seen_patterns
1096
+ _seen_patterns.add(pattern_repr)
1097
+ pattern = ReplacementPatternEntry(
1098
+ pattern=pattern,
1099
+ extra_check=check_fn,
1100
+ normalize_args=normalize_args,
1101
+ )
1102
+ pattern.register(pass_dicts)
1103
+ return pattern.pattern
1104
+
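+ # Illustrative sketch (not part of the upstream module): register_replacement()
+ # traces two functionally equivalent callables over example inputs and rewrites
+ # occurrences of the first into the second. `my_pass` is a placeholder for the
+ # PatternMatcherPass (or dict of passes) the rule should be registered into.
+ def _example_register_replacement(my_pass):
+     def search_fn(x, y):
+         return torch.add(x, y).relu()
+
+     def replace_fn(x, y):
+         # equivalent computation expressed with different ops
+         return torch.clamp(torch.add(x, y), min=0)
+
+     register_replacement(
+         search_fn,
+         replace_fn,
+         [torch.empty(8), torch.empty(8)],
+         fwd_only,
+         my_pass,
+     )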
1105
+
1106
+ @functorch_config.patch(functionalize_rng_ops=False)
1107
+ def gen_pattern(
1108
+ search_fn, example_inputs, trace_fn, scalar_workaround=(), exclusive_arg_names=()
1109
+ ) -> PatternExpr:
1110
+ argnames = [*inspect.signature(search_fn).parameters.keys()]
1111
+
1112
+ if scalar_workaround == ():
1113
+ scalar_workaround = {}
1114
+ flat_inputs = []
1115
+ input_idx = 0 # Positional arguments index
1116
+
1117
+ for argname in argnames:
1118
+ if argname in scalar_workaround:
1119
+ flat_inputs.append(scalar_workaround[argname])
1120
+ else:
1121
+ flat_inputs.append(example_inputs[input_idx])
1122
+ input_idx += 1
1123
+
1124
+ search_gm = trace_fn(search_fn, flat_inputs)
1125
+ return fx_to_pattern(
1126
+ search_gm,
1127
+ ignore_types=(int, float, list, torch.device, torch.dtype),
1128
+ argnames=argnames,
1129
+ scalar_workaround=scalar_workaround,
1130
+ exclusive_arg_names=exclusive_arg_names,
1131
+ )
1132
+
1133
+
1134
+ def register_lowering_pattern(
1135
+ pattern: PatternExpr, extra_check=_return_true, *, pass_dict, prepend=False
1136
+ ):
1137
+ """
1138
+ Register an aten to inductor IR replacement pattern. The decorated
1139
+ function is saved and then called at lowering time, allowing direct
1140
+ pattern to inductor IR conversion.
1141
+ """
1142
+
1143
+ def decorator(handler):
1144
+ assert callable(handler)
1145
+ LoweringPatternEntry(
1146
+ pattern=pattern, extra_check=extra_check, handler=handler
1147
+ ).register(pass_dict, prepend=prepend)
1148
+ handler._inductor_lowering_function = True
1149
+ return handler
1150
+
1151
+ return decorator
1152
+
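+ # Illustrative sketch (not part of the upstream module): the decorator binds a
+ # PatternExpr to a handler invoked at lowering time with the Match followed by
+ # the captured inputs. `my_pass` is a placeholder for the PatternMatcherPass (or
+ # dict of passes) this entry should be registered into.
+ def _example_register_lowering_pattern(my_pass):
+     @register_lowering_pattern(
+         CallFunction(torch.ops.aten.relu.default, KeywordArg("x")),
+         pass_dict=my_pass,
+     )
+     def relu_lowering(match: Match, x):
+         # build and return inductor IR for the matched subgraph here
+         ...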
1153
+
1154
+ def register_graph_pattern(
1155
+ pattern: PatternExpr, extra_check=_return_true, *, pass_dict, prepend=False
1156
+ ):
1157
+ """
1158
+ Register a pattern that runs a function on the FX graph, allowing
1159
+ custom transformation code.
1160
+ """
1161
+
1162
+ def decorator(handler):
1163
+ assert callable(handler)
1164
+ GraphPatternEntry(
1165
+ pattern=pattern, extra_check=extra_check, handler=handler
1166
+ ).register(pass_dict, prepend=prepend)
1167
+ return handler
1168
+
1169
+ return decorator
1170
+
1171
+
1172
+ def is_start_of_fx_graph(graph: torch.fx.Graph, node: torch.fx.Node) -> bool:
1173
+ # first node in the graph
1174
+ return node is next(iter(graph.nodes))
1175
+
1176
+
1177
+ # match: copy_, relu_, _set_grad_enabled, manual_seed, enter_functional_autocast, etc
1178
+ _mutation_op_re = re.compile(r"_$|_[.]|(\b|_)(set|enter|exit|seed)(\b|_)")
1179
+
1180
+
1181
+ def is_mutation_op(node: torch.fx.Node) -> bool:
1182
+ if node.op == "call_function":
1183
+ if _mutation_op_re.search(node.target.__name__): # type: ignore[union-attr]
1184
+ return True
1185
+ elif node.op == "call_method":
1186
+ if _mutation_op_re.search(node.target): # type: ignore[union-attr, arg-type]
1187
+ return True
1188
+ return node.kwargs.get("out") is not None
1189
+
1190
+
1191
+ def get_mutation_region_id(graph: torch.fx.Graph, node: torch.fx.Node) -> int:
1192
+ n = node
1193
+ while "mutation_region_id" not in n.meta and not is_start_of_fx_graph(graph, n):
1194
+ n = n.prev
1195
+ mutation_region_id = n.meta.get("mutation_region_id", 0)
1196
+ while n is not node:
1197
+ n = n.next
1198
+ if is_mutation_op(n):
1199
+ mutation_region_id += 1
1200
+ n.meta["mutation_region_id"] = mutation_region_id
1201
+ return mutation_region_id
1202
+
1203
+
1204
+ def should_compute_mutation_region_ids(graph: torch.fx.GraphModule) -> bool:
1205
+ return "mutation_region_id" not in next(iter(graph.nodes)).meta
1206
+
1207
+
1208
+ def compute_mutation_region_ids(graph: torch.fx.GraphModule):
1209
+ mutation_region_id = 0
1210
+ for nd in graph.nodes:
1211
+ if is_mutation_op(nd):
1212
+ mutation_region_id += 1
1213
+ nd.meta["mutation_region_id"] = mutation_region_id
1214
+
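+ # Worked illustration (hypothetical graph, not part of the upstream module): each
+ # mutation op bumps the counter, so nodes on opposite sides of a mutation land in
+ # different regions:
+ #
+ #   a = torch.ops.aten.add(x, y)    # mutation_region_id == 0
+ #   torch.ops.aten.relu_(a)         # "_." matches _mutation_op_re -> id becomes 1
+ #   b = torch.ops.aten.add(a, y)    # mutation_region_id == 1
+ #
+ # A candidate match covering both add nodes sees region ids {0, 1} and is
+ # discarded by PatternMatcherPass below when prevent_match_across_mutations is set.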
1215
+
1216
+ class PatternMatcherPass:
1217
+ def __init__(
1218
+ self, prevent_match_across_mutations=False, pass_name: Optional[str] = None
1219
+ ):
1220
+ super().__init__()
1221
+ self.patterns: DefaultDict[
1222
+ torch.fx.node.Target, List[PatternEntry]
1223
+ ] = defaultdict(list)
1224
+ self.prevent_match_across_mutations = prevent_match_across_mutations
1225
+ self.pass_name = pass_name
1226
+
1227
+ def __getitem__(self, item: torch.fx.node.Target) -> List[PatternEntry]:
1228
+ return self.patterns[item]
1229
+
1230
+ def apply(self, graph: torch.fx.GraphModule) -> int:
1231
+ if not self.patterns:
1232
+ return 0
1233
+ if isinstance(graph, torch.fx.GraphModule):
1234
+ graph = graph.graph
1235
+ if self.prevent_match_across_mutations:
1236
+ if should_compute_mutation_region_ids(graph):
1237
+ compute_mutation_region_ids(graph)
1238
+ get_mutation_region_id_partial = functools.partial(
1239
+ get_mutation_region_id, graph
1240
+ )
1241
+ count = 0
1242
+ for node in reversed(graph.nodes):
1243
+ target = extract_target(node)
1244
+ if (
1245
+ node.op in ["call_function", "call_method", "call_module"]
1246
+ and target in self.patterns
1247
+ ):
1248
+ # conservatively not applying pattern for cpu input,
1249
+ # since some of the patterns induce codegen and split nodes.
1250
+ # Note: we will only skip cpu compute if disable_cpp_codegen=True
1251
+ if fallback_node_due_to_unsupported_type(node, allow_cpu_inputs=False):
1252
+ continue
1253
+
1254
+ for entry in self.patterns[target]:
1255
+ if node._erased:
1256
+ break
1257
+ m = entry.pattern.match(node)
1258
+ # pattern match crosses mutation barrier - discard
1259
+ if (
1260
+ self.prevent_match_across_mutations
1261
+ and is_match(m)
1262
+ and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1 # type: ignore[possibly-undefined]
1263
+ ):
1264
+ continue
1265
+ if os.environ.get("TORCHINDUCTOR_PATTERN_MATCH_DEBUG") == node.name:
1266
+ log.warning("%s%s %s %s", node, node.args, m, entry.pattern)
1267
+ if is_match(m) and entry.extra_check(m):
1268
+ count += 1
1269
+ entry.apply(m, graph, node) # type: ignore[arg-type]
1270
+ counters["inductor"]["pattern_matcher_count"] += 1
1271
+ counters["inductor"]["pattern_matcher_nodes"] += len(m.nodes)
1272
+ return count
1273
+
1274
+ def clear(self):
1275
+ self.patterns.clear()
1276
+
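+ # Illustrative sketch (not part of the upstream module): a typical pass registers
+ # entries into a PatternMatcherPass and then applies it to a GraphModule.
+ # `gm` and `my_handler` are placeholders.
+ def _example_pattern_matcher_pass(gm, my_handler):
+     my_pass = PatternMatcherPass(prevent_match_across_mutations=True)
+     register_graph_pattern(
+         CallFunction(torch.ops.aten.view.default, KeywordArg("arg"), Ignored()),
+         pass_dict=my_pass,
+     )(my_handler)
+     return my_pass.apply(gm)  # number of matches that were rewritten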
1277
+
1278
+ def _not_implemented(*args, **kwargs) -> NoReturn:
1279
+ raise NotImplementedError()
1280
+
1281
+
1282
+ def fx_to_pattern(
1283
+ gm,
1284
+ ignore_types=(),
1285
+ argnames=(),
1286
+ scalar_workaround=(),
1287
+ exclusive_arg_names=(),
1288
+ ) -> PatternExpr:
1289
+ """
1290
+ Convert an FX graph into a PatternExpr. This is useful for simple
1291
+ patterns that can only match single functions and fixed-length lists.
1292
+ """
1293
+ # scalar_workaround is a hack to capture dropout_p
1294
+ # see https://github.com/pytorch/pytorch/issues/97894
1295
+ scalar_workaround = scalar_workaround or {}
1296
+ inv_scalar_workaround = {v: k for k, v in scalar_workaround.items()}
1297
+ assert len(inv_scalar_workaround) == len(scalar_workaround)
1298
+
1299
+ def process_arg(x):
1300
+ if isinstance(x, (float, int)) and x in inv_scalar_workaround:
1301
+ return KeywordArg(inv_scalar_workaround[x])
1302
+ if type(x) in ignore_types:
1303
+ return Ignored()
1304
+ if isinstance(x, list) and all(isinstance(y, Ignored) for y in x) and x:
1305
+ return Ignored()
1306
+ return x
1307
+
1308
+ argnum = itertools.count()
1309
+
1310
+ class Converter(torch.fx.Interpreter):
1311
+ call_method = _not_implemented
1312
+ call_module = _not_implemented
1313
+ get_attr = _not_implemented
1314
+
1315
+ def placeholder(self, target, args, kwargs):
1316
+ n = next(argnum)
1317
+ if n < len(argnames):
1318
+ name = argnames[n]
1319
+ elif argnames:
1320
+ assert target.startswith("tangent")
1321
+ name = target
1322
+ else:
1323
+ target = re.sub(r"_\d+$", "", target) # de-mangle arg name
1324
+ name = target
1325
+ if name in exclusive_arg_names:
1326
+ return ExclusiveKeywordArg(name)
1327
+ else:
1328
+ return KeywordArg(name)
1329
+
1330
+ def call_function(self, target, args, kwargs):
1331
+ args, kwargs = pytree.tree_map(process_arg, (args, kwargs))
1332
+ if list in ignore_types:
1333
+ # Handle burned-in tensor sizes, which are now [Ignored(), Ignored(), ...]
1334
+ args = [process_arg(a) for a in args]
1335
+ kwargs = {k: process_arg(a) for k, a in kwargs.items()}
1336
+ return CallFunction(target, *args, **kwargs)
1337
+
1338
+ def run_node(self, n):
1339
+ rv = super().run_node(n)
1340
+ if n.op == "output" and isinstance(rv, tuple):
1341
+ assert len(rv) == len(n.args[0])
1342
+ for r, arg in zip(rv, n.args[0]):
1343
+ r.users = len(arg.users)
1344
+ else:
1345
+ rv.users = len(n.users)
1346
+ return rv
1347
+
1348
+ pattern = Converter(gm).run()
1349
+ if not isinstance(pattern, PatternExpr):
1350
+ return MultiOutputPattern(pytree.tree_leaves(pattern))
1351
+ return pattern
1352
+
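+ # Illustrative sketch (not part of the upstream module): fx_to_pattern is usually
+ # fed a graph produced by fwd_only or joint_fwd_bwd (defined just below), turning
+ # a traced callable into a matchable PatternExpr.
+ def _example_fx_to_pattern():
+     def f(x, y):
+         return torch.relu(x + y)
+
+     gm = fwd_only(f, [torch.empty(4), torch.empty(4)])
+     return fx_to_pattern(gm, ignore_types=(int, float), argnames=["x", "y"])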
1353
+
1354
+ @torch.no_grad()
1355
+ def fwd_only(fn, args, *, run_dce=True) -> torch.fx.GraphModule:
1356
+ """Build a normalized inference graph, for use with fx_to_pattern"""
1357
+ # TODO - look into using aot autograd, asserting no mutating ops here
1358
+ with enable_python_dispatcher():
1359
+ mode = (
1360
+ "real" if not torch._inductor.utils.any_is_symbolic(*args) else "symbolic"
1361
+ )
1362
+ gm = make_fx(fn, select_decomp_table(), tracing_mode=mode)(*args)
1363
+ if run_dce:
1364
+ gm.graph.eliminate_dead_code()
1365
+ gm.recompile()
1366
+ return gm
1367
+
1368
+
1369
+ @torch.enable_grad()
1370
+ def joint_fwd_bwd(fn, args) -> torch.fx.GraphModule:
1371
+ """Build a normalized training graph, for use with fx_to_pattern"""
1372
+ gm: Optional[torch.fx.GraphModule] = None
1373
+
1374
+ def record_joint_graph(joint_graph, inputs, **kwargs):
1375
+ nonlocal gm
1376
+ assert not gm
1377
+ gm = clone_graph(joint_graph)
1378
+ return default_partition(joint_graph, inputs, **kwargs)
1379
+
1380
+ with torch._guards.tracing(None):
1381
+ aot_function(
1382
+ fn,
1383
+ lambda g, i: make_boxed_func(g),
1384
+ partition_fn=record_joint_graph,
1385
+ decompositions=select_decomp_table(),
1386
+ keep_inference_input_mutations=True,
1387
+ enable_log=False,
1388
+ )(*args)
1389
+ assert gm
1390
+
1391
+ from .fx_passes.joint_graph import pointless_view
1392
+
1393
+ matcher_pass = PatternMatcherPass()
1394
+
1395
+ pattern = CallFunction(
1396
+ torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")
1397
+ )
1398
+ GraphPatternEntry(
1399
+ pattern=pattern, handler=pointless_view, extra_check=_return_true
1400
+ ).register(matcher_pass.patterns)
1401
+ matcher_pass.apply(gm.graph) # type: ignore[arg-type]
1402
+
1403
+ # remove in/out specs
1404
+ gm.graph._codegen = torch.fx.graph.CodeGen()
1405
+ gm.graph.eliminate_dead_code()
1406
+ gm.recompile()
1407
+ return gm
1408
+
1409
+
1410
+ def _args(n: torch.fx.Node) -> List[torch.fx.node.Argument]:
1411
+ args: List[torch.fx.node.Argument] = list()
1412
+ torch.fx.map_arg((n.args, n.kwargs), args.append)
1413
+ return args
1414
+
1415
+
1416
+ def stable_topological_sort(graph: torch.fx.Graph):
1417
+ # Nodes are in exactly one of these three collections:
1418
+
1419
+ # - Nodes in `pending` are waiting to be processed (in reverse order):
1420
+ pending = list(reversed(graph.nodes))
1421
+
1422
+ # - Nodes in `ready` have been processed and are already in the correct
1423
+ # order.
1424
+ ready = set()
1425
+
1426
+ # - `waiting` is a mapping from a dependency to nodes which depend on that
1427
+ # dependency.
1428
+ waiting = defaultdict(list)
1429
+
1430
+ # The cursor indicates the last processed node so we can add new nodes
1431
+ # after it.
1432
+ cursor = None
1433
+ while pending:
1434
+ node = pending.pop()
1435
+ waiting_for = [x for x in _args(node) if x not in ready]
1436
+ if waiting_for:
1437
+ # We have unprocessed input nodes. Might as well wait for the last
1438
+ # arg so an already sorted list will only recheck this node once.
1439
+ waiting[waiting_for[-1]].append(node)
1440
+ else:
1441
+ ready.add(node)
1442
+ if cursor and cursor.next is not node:
1443
+ cursor.append(node)
1444
+ cursor = node
1445
+ # Mark the nodes that have been waiting for this node to finish as
1446
+ # ready to check again.
1447
+ pending.extend(reversed(waiting.pop(node, ())))
1448
+
1449
+ assert not waiting and len(ready) == len(graph.nodes)
1450
+
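+ # Illustrative sketch (not part of the upstream module): the sort is stable, so a
+ # graph that is already in topological order is left untouched; only nodes with
+ # unmet inputs get moved after their last missing dependency.
+ def _example_stable_sort():
+     g = torch.fx.Graph()
+     x = g.placeholder("x")
+     y = g.call_function(torch.relu, (x,))
+     g.output(y)
+     stable_topological_sort(g)  # no-op here: x, relu, output already ordered
+     return g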
1451
+
1452
+ def init_once_fakemode(fn: Callable[..., Any]):
1453
+ """Wrapper around lazy init functions in fx_passes/"""
1454
+
1455
+ @functools.lru_cache(None)
1456
+ @functools.wraps(fn)
1457
+ def lazy_init():
1458
+ counters_ref = counters["inductor"].copy()
1459
+
1460
+ with torch._guards.tracing(
1461
+ None
1462
+ ), maybe_disable_fake_tensor_mode(), FakeTensorMode():
1463
+ result = fn()
1464
+
1465
+ # clear view matches encountered during tracing
1466
+ counters["inductor"] = counters_ref
1467
+
1468
+ return result
1469
+
1470
+ return lazy_init
1471
+
1472
+
1473
+ def config_flag(name):
1474
+ """Function for extra_check to put pass behind a flag"""
1475
+
1476
+ def flag_check(match):
1477
+ return getattr(config, name)
1478
+
1479
+ return flag_check
1480
+
1481
+
1482
+ def clone_graph(input_graph: torch.fx.GraphModule) -> torch.fx.GraphModule:
1483
+ class CopyGraph(Transformer):
1484
+ def run_node(self, old_node):
1485
+ new_node = super().run_node(old_node)
1486
+ if isinstance(new_node, torch.fx.Proxy):
1487
+ new_node.node.meta.update(old_node.meta)
1488
+ new_node.node.name = self.new_graph._graph_namespace.create_name(
1489
+ old_node.name, None
1490
+ )
1491
+ return new_node
1492
+
1493
+ return CopyGraph(input_graph).transform()
1494
+
1495
+
1496
+ _seen_patterns: Set[str] = set()
1497
+
1498
+
1499
+ def get_arg_value(
1500
+ node: torch.fx.Node, arg_number: int, kwarg_name: Optional[str] = None
1501
+ ):
1502
+ return (
1503
+ node.args[arg_number]
1504
+ if len(node.args) > arg_number
1505
+ else node.kwargs.get(kwarg_name) # type: ignore[arg-type]
1506
+ )
1507
+
1508
+
1509
+ def filter_nodes(nodes: Iterable[torch.fx.Node], fn) -> List[torch.fx.Node]:
1510
+ fns = [fn]
1511
+ if isinstance(fn, torch._ops.OpOverloadPacket):
1512
+ fns.extend([getattr(fn, overload) for overload in fn.overloads()])
1513
+
1514
+ return [node for node in nodes if node.target in fns]
1515
+
1516
+
1517
+ def extract_target(node: Node):
1518
+ """For call_function and call_method, we directly use the target function;
1519
+ For call_module, the target is string, and we treat the module class
1520
+ as a function.
1521
+ """
1522
+ if node.op == "call_module":
1523
+ return getattr(node.graph.owning_module, node.target).__class__ # type: ignore[arg-type]
1524
+ return node.target
venv/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py ADDED
@@ -0,0 +1,15 @@
1
+ import torch
2
+
3
+
4
+ def register_quantized_ops():
5
+ from . import lowering
6
+
7
+ quantized = torch.ops.quantized
8
+
9
+ lowering.add_needs_realized_inputs(
10
+ [
11
+ quantized.max_pool2d,
12
+ ]
13
+ )
14
+
15
+ lowering.make_fallback(quantized.max_pool2d)
venv/lib/python3.10/site-packages/torch/_inductor/scheduler.py ADDED
@@ -0,0 +1,2445 @@
1
+ import collections
2
+ import dataclasses
3
+ import functools
4
+ import itertools
5
+ import logging
6
+ import math
7
+ import operator
8
+ import os
9
+ import pprint
10
+ import textwrap
11
+ from typing import (
12
+ Any,
13
+ Counter,
14
+ DefaultDict,
15
+ Dict,
16
+ Generic,
17
+ List,
18
+ Optional,
19
+ Sequence,
20
+ Set,
21
+ Tuple,
22
+ TypeVar,
23
+ Union,
24
+ )
25
+
26
+ import sympy
27
+
28
+ import torch
29
+ from torch._dynamo.utils import dynamo_timed
30
+ from torch._inductor.metrics import get_metric_table, is_metric_table_enabled
31
+ from torch.utils._triton import has_triton
32
+
33
+ from . import comms, config, dependencies, ir, metrics
34
+ from .codegen.common import get_scheduling_for_device, Kernel
35
+ from .comm_analysis import estimate_nccl_collective_runtime
36
+ from .dependencies import Dep, MemoryDep, StarDep, WeakDep
37
+ from .ir import ComputedBuffer, MultiOutput, MultiOutputLayout
38
+ from .sizevars import SimplifyIndexing
39
+ from .utils import (
40
+ cache_on_self,
41
+ cmp,
42
+ free_symbol_has,
43
+ get_device_tflops,
44
+ get_dtype_size,
45
+ get_gpu_dram_gbps,
46
+ green_text,
47
+ is_collective,
48
+ is_wait,
49
+ red_text,
50
+ sympy_product,
51
+ )
52
+ from .virtualized import V
53
+
54
+
55
+ log = logging.getLogger(__name__)
56
+ fusion_log = torch._logging.getArtifactLogger(__name__, "fusion")
57
+
58
+
59
+ class WhyNoFuse:
60
+ # TODO when we drop support for Python < 3.10, we can use
61
+ # @dataclass(slots=True) instead of manually specifying __slots__.
62
+ __slots__ = ["node1", "node2", "reason", "args"]
63
+ reason: str
64
+ args: Tuple[Any, ...]
65
+
66
+ def __init__(self, node1: "BaseSchedulerNode", node2: "BaseSchedulerNode"):
67
+ self.node1 = node1
68
+ self.node2 = node2
69
+
70
+ def __call__(self, reason, *args):
71
+ self.reason = reason
72
+ self.args = args
73
+ fusion_log.debug(self)
74
+
75
+ def __str__(self):
76
+ return f"cannot fuse {self.node1.get_name()} with {self.node2.get_name()}: " + (
77
+ self.reason % self.args
78
+ )
79
+
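+ # Illustrative sketch (not part of the upstream module): the scheduler creates a
+ # WhyNoFuse per candidate pair and only pays the string-formatting cost when the
+ # "fusion" artifact log actually emits the record. `node1`/`node2` are
+ # placeholders for BaseSchedulerNode instances.
+ def _example_why_no_fuse(node1, node2):
+     why = WhyNoFuse(node1, node2)
+     # __call__ stashes the reason and args, then logs the object itself;
+     # __str__ (and the % interpolation) runs only if the record is handled.
+     why("nodes are on different devices: %s vs %s", node1.get_device(), node2.get_device())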
80
+
81
+ def pformat(obj):
82
+ if isinstance(obj, set):
83
+ # pformat has trouble with sets of sympy exprs
84
+ obj = sorted(obj, key=str)
85
+ result = pprint.pformat(obj, indent=4)
86
+ if "\n" in result:
87
+ return f"\n{textwrap.indent(result, ' '*4)}"
88
+ return result
89
+
90
+
91
+ class OutputNode:
92
+ def __init__(self, dep):
93
+ self.unmet_dependencies = {dep}
94
+ self.inverse_users = []
95
+
96
+ def is_reduction(self):
97
+ return False
98
+
99
+ def get_alias_names(self):
100
+ return ()
101
+
102
+ def get_name(self):
103
+ return "OUTPUT"
104
+
105
+ __repr__ = get_name
106
+
107
+
108
+ def _prune_redundant_deps(node, name_to_fused_node):
109
+ """
110
+ Prunes weakdeps intended for mutation ordering
111
+ on an upstream fused node if after fusion there is another dependency
112
+ on the fused upstream node, making the weakdep redundant
113
+
114
+ In essence this enforces an ordering on fusions. As fusions occur, weakdeps will
115
+ be incrementally removed, enabling other fusions, ensuring they are fused in order.
116
+ """
117
+ name_to_dep_count: Counter[str] = collections.Counter()
118
+
119
+ for dep in node.unmet_dependencies:
120
+ if not isinstance(dep, WeakDep):
121
+ name_to_dep_count[name_to_fused_node[dep.name].get_name()] += 1
122
+
123
+ def should_prune(dep):
124
+ if isinstance(dep, WeakDep):
125
+ is_redundant = (
126
+ name_to_dep_count[name_to_fused_node[dep.name].get_name()] > 0
127
+ )
128
+ # These can occur because fused nodes always gather deps from their snodes
129
+ # If B has a weakdep on A
130
+ # B gets fused with C, then any time BC is fused, the weakdep will reappear
131
+ is_self_dep = name_to_fused_node[dep.name] == node
132
+ return is_redundant or is_self_dep
133
+ else:
134
+ return False
135
+
136
+ deps_to_prune = {dep for dep in node.unmet_dependencies if should_prune(dep)}
137
+
138
+ if deps_to_prune:
139
+ node.unmet_dependencies = node.unmet_dependencies - deps_to_prune
140
+ node.set_read_writes(node.read_writes.remove_reads(deps_to_prune))
141
+
142
+
143
+ # TODO(xmfan): reuse an existing mapping for this if it exists, or formalize this into ir.py:ExternKernel
144
+ kernel_name_to_op = {
145
+ "extern_kernels.convolution": torch.ops.aten.convolution,
146
+ "extern_kernels.mm": torch.ops.aten.mm,
147
+ "extern_kernels.bmm": torch.ops.aten.bmm,
148
+ "extern_kernels.addmm": torch.ops.aten.addmm,
149
+ }
150
+
151
+
152
+ class BaseSchedulerNode:
153
+ def __init__(self, scheduler: "Scheduler", node: ir.Buffer):
154
+ self.scheduler: Scheduler = scheduler
155
+ self.node: ir.Buffer = node
156
+ self.users: List[NodeUser] = []
157
+ self.inverse_users: List[BaseSchedulerNode] = []
158
+ self.node_users: List[BaseSchedulerNode] = []
159
+ self.set_read_writes(node.get_read_writes())
160
+ self.ancestors: Set[str] = set()
161
+ self.min_order: int
162
+ self.max_order: int
163
+ self.last_usage: Set[
164
+ str
165
+ ] = set() # buffers that won't be used after this kernel
166
+ self.written = False
167
+
168
+ def __repr__(self):
169
+ return f"{type(self).__name__}(name={self.get_name()!r})"
170
+
171
+ def debug_str(self) -> str:
172
+ """Longer form printout for trace logs"""
173
+ name = self.get_name()
174
+ lines = [
175
+ f"{name}: {type(self).__name__}({type(getattr(self, 'node', None)).__name__})",
176
+ f"{name}.writes = {pformat(self.read_writes.writes)}",
177
+ f"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}",
178
+ f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
179
+ f"{name}.users = {self.users}",
180
+ ]
181
+ try:
182
+ lines += [
183
+ self.debug_str_extra(),
184
+ ]
185
+ except Exception:
186
+ log.warning("Ignoring error in debug_str()", exc_info=True)
187
+
188
+ return "\n".join(lines).rstrip()
189
+
190
+ def debug_str_extra(self) -> str:
191
+ return ""
192
+
193
+ def log_details(self):
194
+ log.info(
195
+ "%s: unmet_dependencies = %s, writes = %s",
196
+ self,
197
+ self.unmet_dependencies,
198
+ self.read_writes.writes,
199
+ )
200
+
201
+ def update_mutated_names(self, renames: Dict[str, str]):
202
+ self.set_read_writes(self.read_writes.rename(renames))
203
+
204
+ def add_mutation_dep(self, dep):
205
+ self.set_read_writes(self.read_writes.with_read(dep))
206
+
207
+ def add_fake_dep(self, dep):
208
+ self.set_read_writes(self.read_writes.with_read(dep))
209
+
210
+ def set_users(self, users: List["NodeUser"]):
211
+ # deduplicate
212
+ result: Dict[int, NodeUser] = {}
213
+ for use in users:
214
+ if id(use.node) in result:
215
+ result[id(use.node)] = use.merge(result[id(use.node)])
216
+ else:
217
+ result[id(use.node)] = use
218
+ self.users = list(result.values())
219
+
220
+ def set_last_usage(
221
+ self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
222
+ ):
223
+ used_buffers = self.used_or_aliased_buffer_names()
224
+ used_buffers = {mutation_real_name.get(k, k) for k in used_buffers}
225
+ self.last_usage = used_buffers - future_used_buffers
226
+
227
+ def get_aliases(self):
228
+ return self.node.get_alias_names()
229
+
230
+ def get_mutations(self):
231
+ return self.node.get_mutation_names()
232
+
233
+ def has_aliasing_or_mutation(self):
234
+ return bool(self.get_aliases() or self.get_mutations())
235
+
236
+ def set_read_writes(self, rw: dependencies.ReadWrites):
237
+ self.read_writes: dependencies.ReadWrites = rw
238
+ self.unmet_dependencies = self.read_writes.reads
239
+ self.prune_deps()
240
+
241
+ def op_counts(self):
242
+ return self.read_writes.op_counts
243
+
244
+ def used_buffer_names(self) -> Set[str]:
245
+ return {
246
+ dep.name
247
+ for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes)
248
+ }
249
+
250
+ def used_or_aliased_buffer_names(self) -> Set[str]:
251
+ used_names = set()
252
+
253
+ for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes):
254
+ used_names.add(dep.name)
255
+ if V.graph.name_to_buffer.get(dep.name):
256
+ layout = V.graph.name_to_buffer[dep.name].get_layout()
257
+ # needed to avoid deallocating an aliased buffer
258
+ # if there are still uses of aliases ahead
259
+ if isinstance(layout, ir.AliasedLayout):
260
+ used_names.add(layout.view.data.get_name())
261
+ return used_names
262
+
263
+ def prune_deps(self):
264
+ self.unmet_dependencies = {
265
+ dep
266
+ for dep in self.unmet_dependencies
267
+ if dep.name not in self.scheduler.available_buffer_names
268
+ }
269
+
270
+ def prune_weak_deps(self):
271
+ # Prune weak dependencies on buffers that have been removed
272
+ def should_prune(dep):
273
+ return isinstance(dep, WeakDep) and dep.name in V.graph.removed_buffers
274
+
275
+ to_remove = {dep for dep in self.read_writes.reads if should_prune(dep)}
276
+ self.set_read_writes(self.read_writes.remove_reads(to_remove))
277
+
278
+ def prune_redundant_deps(self, name_to_fused_node):
279
+ _prune_redundant_deps(self, name_to_fused_node)
280
+
281
+ def get_name(self) -> str:
282
+ return self.node.get_name()
283
+
284
+ def get_first_name(self) -> str:
285
+ return self.get_name()
286
+
287
+ def get_names(self) -> Set[str]:
288
+ return {self.get_name()}
289
+
290
+ def get_nodes(self) -> Sequence["BaseSchedulerNode"]:
291
+ return [self]
292
+
293
+ def get_device(self):
294
+ return self.node.get_device()
295
+
296
+ def is_reduction(self):
297
+ return False
298
+
299
+ def is_split_scan(self):
300
+ return False
301
+
302
+ def is_template(self):
303
+ return False
304
+
305
+ def is_extern(self):
306
+ return False
307
+
308
+ def is_foreach(self):
309
+ return False
310
+
311
+ def can_inplace(self, read_dep: dependencies.MemoryDep):
312
+ return False
313
+
314
+ def has_side_effects(self):
315
+ return False
316
+
317
+ def decide_inplace_update(self):
318
+ """
319
+ Decide if there should be inplace updates for the node
320
+ and record the decision in the active kernel.
321
+ """
322
+ if not self.node.should_allocate():
323
+ return
324
+
325
+ if isinstance(self, (SchedulerNode,)) and (
326
+ self.node.get_alias_names() or self.node.get_mutation_names()
327
+ ):
328
+ return
329
+
330
+ if (
331
+ (
332
+ isinstance(self, (SchedulerNode,))
333
+ # TODO: this special case should be turned into a proper API
334
+ or (
335
+ isinstance(self, ExternKernelSchedulerNode)
336
+ and isinstance(self.node, (ir.AllReduce, ir.InPlaceHint))
337
+ )
338
+ )
339
+ and config.inplace_buffers
340
+ and (
341
+ not isinstance(V.kernel, torch._inductor.codegen.triton.TritonKernel)
342
+ or getattr(V.kernel, "mutations", None) is not None
343
+ )
344
+ ):
345
+ from .codegen.wrapper import buffer_reuse_key
346
+
347
+ ordered_reads = sorted(self.read_writes.reads, key=lambda x: x.name)
348
+
349
+ for read in ordered_reads:
350
+ input_node: Optional[
351
+ BaseSchedulerNode
352
+ ] = self.scheduler.name_to_node.get(read.name)
353
+ if input_node and V.graph.wrapper_code.can_reuse(input_node, self):
354
+ assert input_node.users is not None
355
+ remaining_uses = [
356
+ x
357
+ for x in input_node.users
358
+ if x.node.get_name()
359
+ not in self.scheduler.available_buffer_names
360
+ ]
361
+ if (
362
+ len(remaining_uses) == 1
363
+ and remaining_uses[0].can_inplace
364
+ and remaining_uses[0].node is self
365
+ and not isinstance(
366
+ input_node.node.get_layout(),
367
+ (
368
+ ir.MultiOutputLayout,
369
+ ir.MutationLayout,
370
+ ir.AliasedLayout,
371
+ ),
372
+ )
373
+ and not (
374
+ isinstance(
375
+ input_node.node, (ir.FallbackKernel, ir.MultiOutput)
376
+ )
377
+ and len(input_node.node.get_alias_names()) > 0
378
+ )
379
+ and buffer_reuse_key(input_node.node)
380
+ == buffer_reuse_key(self.node)
381
+ ):
382
+ # hacky check for whether V.kernel is a real kernel or a NullHandler
383
+ if hasattr(V.kernel, "args"):
384
+ # if there isn't a triton kernel, then we don't need to call triton-specific things.
385
+ # but TODO: this might be a convenient place to signal to collective kernels that they can run in place
386
+ # (and, can we make "kernel" less generic of a name?)
387
+ V.kernel.args.make_inplace(
388
+ input_node.get_name(), self.get_name()
389
+ )
390
+ # mutations not tracked in cpp kernels
391
+ if isinstance(
392
+ V.kernel, torch._inductor.codegen.triton.TritonKernel
393
+ ):
394
+ V.kernel.mutations.add(input_node.get_name())
395
+ V.kernel.mutations.add(self.get_name())
396
+
397
+ # update last usage of reused node
398
+ self.last_usage.discard(input_node.get_name())
399
+
400
+ V.kernel.inplace_update_buffers[
401
+ self.get_name()
402
+ ] = input_node.get_name()
403
+ break
404
+
405
+ def allocate(self):
406
+ if not self.node.should_allocate():
407
+ return
408
+
409
+ if isinstance(self, (SchedulerNode,)) and (
410
+ self.node.get_alias_names() or self.node.get_mutation_names()
411
+ ):
412
+ V.graph.wrapper_code.codegen_allocation(self.node)
413
+ return
414
+
415
+ # hacky check for whether V.kernel is a real kernel or a NullHandler
416
+ if (
417
+ hasattr(V.kernel, "args")
418
+ and self.get_name() in V.kernel.inplace_update_buffers
419
+ ):
420
+ V.graph.wrapper_code.codegen_inplace_reuse(
421
+ self.scheduler.name_to_node[
422
+ V.kernel.inplace_update_buffers[self.get_name()]
423
+ ].node,
424
+ self.node,
425
+ )
426
+ else:
427
+ V.graph.wrapper_code.codegen_allocation(self.node)
428
+
429
+ def can_free(self):
430
+ # There's no real allocated buffer, no need to free it
431
+ if isinstance(self.node.layout, ir.NoneLayout):
432
+ return False
433
+ for use in self.users:
434
+ if isinstance(use.node, OutputNode):
435
+ return False
436
+ return True
437
+
438
+ def codegen_originating_info(self, buffer, only_once=True):
439
+ if not config.comment_origin:
440
+ return
441
+
442
+ if only_once and self.written:
443
+ return
444
+ origins = self.node.origins
445
+ out_lines = []
446
+
447
+ for o in origins:
448
+ if o.op == "output":
449
+ # Output origins carry no useful information; skip them
450
+ continue
451
+
452
+ out_lines.append("")
453
+ # TODO(voz): Should the pragma be constant somewhere?
454
+ out_lines.append("#pragma CMT ORIGIN:")
455
+ op_info_str = f"#pragma CMT {o.op} {o.target}"
456
+ if "seq_nr" in o.meta:
457
+ op_info_str = op_info_str + f" seq_nr:{o.meta['seq_nr']}"
458
+ out_lines.append(op_info_str)
459
+ if "stack_trace" in o.meta:
460
+ stack_trace = f"{o.meta['stack_trace']}"
461
+ stack_trace_last_line = stack_trace.split("|")[-1]
462
+ out_lines.append(
463
+ "#pragma CMT "
464
+ + stack_trace_last_line.replace("{", "{{")
465
+ .replace("}", "}}")
466
+ .replace("\n", "\\")
467
+ )
468
+ out_lines.append("#pragma CMT END ORIGIN")
469
+ out_lines.append("")
470
+
471
+ if len(out_lines) == 0:
472
+ return
473
+
474
+ # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does
475
+ # not use BracesBuffer, so we have no good indicator of a C++ buffer atm.
476
+ buffer.writelines(out_lines)
477
+ self.written = True
478
+
479
+ def get_read_write_buffers_sizes(self) -> int:
480
+ """
481
+ Counting the number of bytes accessed for a kernel is
482
+ surprisingly tricky. In particular, there is a differentiation
483
+ between 'theoretical' memory accesses and practical memory
484
+ accesses. For example, a layernorm kernel may actually access an
485
+ input 3 times, but in theory, it only needs to access its input
486
+ once (and may be optimized to do so through say, persistent
487
+ reductions)
488
+
489
+ Another example is that even though a buffer is passed in, we may
490
+ not access the entire buffer. This may occur if we are accessing
491
+ a slice of the buffer. Another tricky case is for indirect
492
+ indexing, where the amount of bytes accessed depends on the
493
+ values of the input.
494
+
495
+ What this function aims to compute is the memory accesses for
496
+ worst-case inputs under best-case optimization. What this means is
497
+ that for each buffer we compute the number of potential accesses in two ways and take the minimum:
498
+
499
+ 1. Numel in ranges multiplied by number of deps the buffer has
500
+ 2. The buffer size
501
+ """
502
+ if isinstance(self, NopKernelSchedulerNode):
503
+ return 0
504
+ if isinstance(self, ExternKernelSchedulerNode) and isinstance(
505
+ self.node, MultiOutput
506
+ ):
507
+ return 0
508
+
509
+ if isinstance(self, SchedulerNode):
510
+ node_numel = V.graph.sizevars.size_hint(
511
+ sympy_product(self.get_ranges()[0])
512
+ * sympy_product(self.get_ranges()[1])
513
+ )
514
+ else:
515
+ node_numel = int(1e9)
516
+ buf_accesses = collections.defaultdict(list)
517
+ for dep in self.read_writes.reads | self.read_writes.writes:
518
+ buf_accesses[dep.name].append(dep)
519
+
520
+ reads = {dep.name for dep in self.read_writes.reads}
521
+ writes = {dep.name for dep in self.read_writes.writes}
522
+
523
+ def is_materialized(buf, snodes):
524
+ users = self.scheduler.name_to_node[buf].users
525
+ buf_uses = {user.node for user in users}
526
+ return len(buf_uses - set(snodes)) > 0
527
+
528
+ if isinstance(self, FusedSchedulerNode):
529
+ removed_buffers = {
530
+ dep for dep in writes if not is_materialized(dep, self.snodes)
531
+ }
532
+ writes = writes - removed_buffers
533
+ reads = reads - removed_buffers
534
+ node_bytes = 0
535
+
536
+ for buf_name in reads | writes:
537
+ buf_accessed_elems = sum([node_numel for dep in buf_accesses[buf_name]])
538
+ buf: Union[ir.Buffer, ir.TensorBox]
539
+ if buf_name in V.graph.name_to_buffer:
540
+ buf = V.graph.name_to_buffer[buf_name]
541
+ elif buf_name in V.graph.graph_inputs:
542
+ buf = V.graph.graph_inputs[buf_name]
543
+ else:
544
+ continue
545
+
546
+ def get_buf_elems(buf):
547
+ return V.graph.sizevars.size_hint(sympy_product(buf.get_size()))
548
+
549
+ # Kind of a lazy way to get the MultiOutput nodes corresponding to
550
+ # a MultiOutputLayout
551
+ if isinstance(buf.layout, MultiOutputLayout):
552
+ users = self.scheduler.name_to_node[buf.get_name()].users
553
+ buf_elems = sum(get_buf_elems(user.node.node) for user in users)
554
+ else:
555
+ buf_elems = get_buf_elems(buf)
556
+
557
+ node_bytes += min(buf_elems, buf_accessed_elems) * get_dtype_size(
558
+ buf.get_dtype()
559
+ )
560
+
561
+ return node_bytes
562
+
563
+ def get_estimated_runtime(self) -> float:
564
+ """
565
+ Returns estimated op runtime in nanoseconds (ns)
566
+ """
567
+ layout = None
568
+ dtype = None
569
+ if not hasattr(self, "node") or not self.node:
570
+ assert isinstance(
571
+ self, (FusedSchedulerNode, ForeachKernelSchedulerNode)
572
+ ), f"{type(self)=}"
573
+ assert self.snodes
574
+ if not self.snodes[0].node:
575
+ return 0
576
+ layout = self.snodes[0].node.get_layout()
577
+ dtype = self.snodes[0].node.get_dtype()
578
+ else:
579
+ layout = self.node.get_layout()
580
+ dtype = self.node.get_dtype()
581
+
582
+ if "cuda" != layout.device.type:
583
+ # default to no reordering based on runtime
584
+ return 0
585
+
586
+ # Collective kernels
587
+ if is_collective(self.node):
588
+ return estimate_nccl_collective_runtime(self.node)
589
+ elif is_wait(self.node):
590
+ # ir.Wait is only used for collective ops.
591
+ # The time needed for the collective op is already estimated and considered
592
+ # when we are processing the collective op IR node, so ir.Wait takes 0 time
593
+ # since it doesn't take extra time to get the result after the collective is completed.
594
+ return 0
595
+
596
+ try:
597
+ gpu_memory_bandwidth = get_gpu_dram_gbps()
598
+ gpu_flops = get_device_tflops(dtype) * 10**12
599
+ except Exception:
600
+ return 0
601
+
602
+ if isinstance(self, ExternKernelSchedulerNode):
603
+ assert isinstance(self.node, ir.ExternKernel), f"{type(self.node)=}"
604
+ op = kernel_name_to_op.get(
605
+ getattr(self.node, "python_kernel_name", ""), None
606
+ )
607
+
608
+ # if there is a resolved op, dry-run using fake mode and record flop count
609
+ if op is not None:
610
+ from torch._subclasses.fake_tensor import FakeTensorMode
611
+ from torch.utils.flop_counter import FlopCounterMode
612
+
613
+ with FakeTensorMode(), FlopCounterMode(
614
+ display=False
615
+ ) as flop_counter_mode:
616
+ from .ir import ir_node_to_tensor
617
+
618
+ fake_inputs = [
619
+ ir_node_to_tensor(input, guard_shape=False)
620
+ for input in self.node.inputs
621
+ ]
622
+ cls = self.node.__class__
623
+ cls.process_kernel(op, *fake_inputs, **self.node.kwargs)
624
+
625
+ # TODO(xmfan): find a better heuristic to model FLOPS/latency relationship
626
+ factor = 1.0
627
+ counted_flops = flop_counter_mode.get_total_flops()
628
+ counted_bytes = self.get_read_write_buffers_sizes()
629
+ compute_time = (factor * counted_flops / gpu_flops) * 1e9
630
+ transfer_time = counted_bytes / gpu_memory_bandwidth
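+ # Roofline-style intuition (made-up numbers, not from the source): 2e12 FLOPs
+ # on a ~100 TFLOP/s GPU needs roughly 20 ms of compute, while moving 1e8 bytes
+ # at ~1 TB/s needs roughly 0.1 ms, so max(compute_time, transfer_time) below
+ # would be compute bound in that case.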
631
+
632
+ # Return estimated runtime in nanoseconds
633
+ return max(compute_time, transfer_time)
634
+
635
+ elif isinstance(self, FusedSchedulerNode) or isinstance(
636
+ self.node, ComputedBuffer
637
+ ):
638
+ # Return estimated runtime in nanoseconds (bytes / gbps)
639
+ return self.get_read_write_buffers_sizes() / gpu_memory_bandwidth
640
+
641
+ return 0
642
+
643
+
644
+ class ExternKernelSchedulerNode(BaseSchedulerNode):
645
+ def debug_str_extra(self) -> str:
646
+ return f"{self.get_name()}.node.kernel = {getattr(self.node, 'python_kernel_name', None)}"
647
+
648
+ def is_extern(self):
649
+ return True
650
+
651
+ def has_side_effects(self):
652
+ return hasattr(self.node, "has_side_effects") and self.node.has_side_effects()
653
+
654
+ def can_inplace(self, read_dep: dependencies.MemoryDep):
655
+ if self.get_aliases() or self.is_template():
656
+ return False
657
+
658
+ if read_dep.name not in self.scheduler.name_to_node:
659
+ # don't allow reuse of an 'input' buffer; we don't own it
660
+ # (would this have been fixed if I tracked mutations properly above?)
661
+ return False
662
+ if not isinstance(
663
+ self.node, (torch._inductor.ir.AllReduce, torch._inductor.ir.InPlaceHint)
664
+ ):
665
+ # TODO make this a property of the IR
666
+ return False
667
+
668
+ if len(self.read_writes.writes) == 1:
669
+ write_dep = next(iter(self.read_writes.writes))
670
+ numel_diff = read_dep.get_numel() - write_dep.get_numel()
671
+ return V.graph.sizevars.simplify(numel_diff) == 0
672
+
673
+ return False
674
+
675
+
676
+ class NopKernelSchedulerNode(BaseSchedulerNode):
677
+ pass
678
+
679
+
680
+ class SchedulerNode(BaseSchedulerNode):
681
+ def __init__(
682
+ self,
683
+ scheduler: "Scheduler",
684
+ node: Union[ir.ComputedBuffer, ir.TemplateBuffer],
685
+ ):
686
+ super().__init__(scheduler, node)
687
+ self._compute_attrs()
688
+
689
+ def _compute_attrs(
690
+ self,
691
+ extra_indexing_constraints: Optional[Tuple[Dict[Any, Any], List[Any]]] = None,
692
+ ):
693
+ assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer))
694
+ self._sizes, self._body = self.node.simplify_and_reorder(
695
+ extra_indexing_constraints=extra_indexing_constraints
696
+ )
697
+
698
+ group_fn = self.scheduler.get_backend(self.node.get_device()).group_fn
699
+ self.group = (self.node.get_device(), group_fn(self._sizes))
700
+
701
+ if isinstance(self.node, ir.TemplateBuffer):
702
+ self.set_read_writes(self.node.normalized_read_writes())
703
+ else:
704
+ self.set_read_writes(
705
+ dependencies.extract_read_writes(
706
+ self._body, *self._sizes, normalize=True
707
+ )
708
+ )
709
+
710
+ def recompute_size_and_body(
711
+ self, extra_indexing_constraints: Tuple[Dict[Any, Any], List[Any]]
712
+ ):
713
+ self._compute_attrs(extra_indexing_constraints=extra_indexing_constraints)
714
+
715
+ def debug_str_extra(self) -> str:
716
+ name = self.get_name()
717
+ lines = [
718
+ f"{name}.group.device = {self.group[0]}",
719
+ f"{name}.group.iteration = {self.group[1]}",
720
+ f"{name}.sizes = {self._sizes}",
721
+ ]
722
+ if self.get_aliases():
723
+ lines.append(f"{name}.aliases = {pformat(self.get_aliases())}")
724
+ if self.get_mutations():
725
+ lines.append(f"{name}.mutations = {pformat(self.get_mutations())}")
726
+ if isinstance(self._body, ir.LoopBody):
727
+ lines.append(f"class {name}_loop_body:")
728
+ lines.append(textwrap.indent(self._body.debug_str(), " "))
729
+ return "\n".join(lines)
730
+
731
+ def get_ranges(self):
732
+ return self._sizes
733
+
734
+ def is_reduction(self):
735
+ assert isinstance(
736
+ self.node, (ir.ComputedBuffer, ir.TemplateBuffer)
737
+ ), f"{type(self.node)=}"
738
+ return bool(self.node.get_reduction_type())
739
+
740
+ def is_split_scan(self):
741
+ assert isinstance(
742
+ self.node, (ir.ComputedBuffer, ir.TemplateBuffer)
743
+ ), f"{type(self.node)=}"
744
+ return isinstance(self.node, ir.ComputedBuffer) and isinstance(
745
+ self.node.data, ir.SplitScan
746
+ )
747
+
748
+ def is_template(self):
749
+ return isinstance(self.node, ir.TemplateBuffer)
750
+
751
+ def get_template_node(self):
752
+ return self.node if self.is_template() else None
753
+
754
+ def run(self, *index_vars):
755
+ self.decide_inplace_update()
756
+ self.mark_run()
757
+ self.codegen(index_vars)
758
+
759
+ def mark_run(self):
760
+ self.allocate()
761
+
762
+ def ranges_from_index_vars(self, index_vars):
763
+ sizes = self._sizes
764
+ assert sum(map(len, sizes)) == sum(map(len, index_vars))
765
+ var_ranges = dict(
766
+ zip(
767
+ itertools.chain.from_iterable(index_vars),
768
+ itertools.chain.from_iterable(sizes),
769
+ )
770
+ )
771
+ return var_ranges
772
+
773
+ def codegen(self, index_vars):
774
+ var_ranges = self.ranges_from_index_vars(index_vars)
775
+ try:
776
+ with V.set_ops_handler(
777
+ SimplifyIndexing(V.get_ops_handler(), var_ranges)
778
+ ), V.kernel.set_current_node(self):
779
+ self._body(*index_vars)
780
+ except Exception:
781
+ log.fatal("Error in codegen for %s", self.node)
782
+ raise
783
+
784
+ def pointwise_read_writes(self):
785
+ """
786
+ Get the memory dependencies in the non-reduction axis.
787
+ """
788
+ sizes, reduction_sizes = self._sizes
789
+
790
+ def fn(index):
791
+ return self._body(index, [sympy.Integer(0) for _ in reduction_sizes])
792
+
793
+ return dependencies.extract_read_writes(fn, sizes)
794
+
795
+ def can_inplace(self, read_dep: dependencies.MemoryDep):
796
+ if self.get_aliases() or self.is_template():
797
+ return False
798
+ if len(self.read_writes.writes) == 1 and isinstance(
799
+ read_dep, dependencies.MemoryDep
800
+ ):
801
+ write_dep = next(iter(self.read_writes.writes))
802
+ assert isinstance(write_dep, dependencies.MemoryDep), f"{type(write_dep)=}"
803
+ return read_dep.index == write_dep.index and read_dep.size == write_dep.size
804
+ return False
805
+
806
+ @cache_on_self
807
+ def _get_atomic_add_buffers(self) -> Set[str]:
808
+ buffers_store_as_atomic_add = set()
809
+ if isinstance(self._body, ir.LoopBody):
810
+ for node in self._body.get_nodes():
811
+ if (
812
+ node.op == "call_method"
813
+ and node.target == "store"
814
+ and (
815
+ ("mode" in node.kwargs and node.kwargs["mode"] == "atomic_add")
816
+ or (len(node.args) == 5 and node.args[4] == "atomic_add")
817
+ )
818
+ ):
819
+ buffers_store_as_atomic_add.add(
820
+ node.kwargs["name"]
821
+ if "name" in node.kwargs
822
+ else (node.args[1] if len(node.args) >= 2 else "")
823
+ )
824
+ return buffers_store_as_atomic_add
825
+
826
+ def has_atomic_add(self, check_buf):
827
+ return check_buf in self._get_atomic_add_buffers()
828
+
829
+
830
+ class FusedSchedulerNode(BaseSchedulerNode):
831
+ """
832
+ This is a "fake" scheduler node that represents a group of scheduler nodes
833
+ that are meant to be fused together. The way it does this is by maintaining
834
+ its unmet dependencies as the union of its constituent nodes.
835
+ """
836
+
837
+ @classmethod
838
+ def fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
839
+ assert node1.scheduler is node2.scheduler
840
+ assert isinstance(node1, (SchedulerNode, FusedSchedulerNode)) and isinstance(
841
+ node2, (SchedulerNode, FusedSchedulerNode)
842
+ )
843
+ return cls(node1.scheduler, list(node1.get_nodes()) + list(node2.get_nodes())) # type: ignore[arg-type]
844
+
845
+ def __init__(self, scheduler: "Scheduler", snodes: List[SchedulerNode]):
846
+ # NB: No need to call super().__init__() because we don't need to re-use any of its logic.
847
+ self.snodes = snodes
848
+ self.scheduler = scheduler
849
+ self.node: ir.Buffer = None # type: ignore[assignment]
850
+ self.users: List[NodeUser] = []
851
+ self.inverse_users = []
852
+ self.node_users = []
853
+ self.group = max(snodes, key=lambda x: int(x.is_reduction())).group
854
+ self.ancestors = set.union(
855
+ *[x.ancestors for x in snodes if x.ancestors is not None]
856
+ )
857
+
858
+ self.set_read_writes(
859
+ dependencies.ReadWrites.merge_list([x.read_writes for x in snodes])
860
+ )
861
+
862
+ self.unmet_dependencies = {
863
+ dep
864
+ for dep in set.union(*[x.unmet_dependencies for x in snodes])
865
+ if dep.name not in self.get_names()
866
+ } - self.read_writes.writes
867
+ self.min_order = min([x.min_order for x in self.snodes])
868
+ self.max_order = max([x.max_order for x in self.snodes])
869
+
870
+ @cache_on_self
871
+ def get_name(self) -> str:
872
+ return "_".join([x.get_name() for x in self.snodes])
873
+
874
+ def get_first_name(self) -> str:
875
+ return self.snodes[0].get_name()
876
+
877
+ @cache_on_self
878
+ def get_names(self) -> Set[str]:
879
+ return set.union(*[x.get_names() for x in self.snodes])
880
+
881
+ def debug_str_extra(self) -> str:
882
+ lines = [
883
+ f"{self.get_name()}.snodes[{i}] =\n{node.debug_str()}"
884
+ for i, node in enumerate(self.snodes)
885
+ ]
886
+ return textwrap.indent("\n".join(lines).rstrip(), " ")
887
+
888
+ def set_last_usage(
889
+ self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
890
+ ):
891
+ # Set self.last_usage using the global information
892
+ # This will be used for inter-kernel optimisations
893
+ super().set_last_usage(future_used_buffers, mutation_real_name)
894
+ # Set self.last_usage on the snodes
895
+ # This will be used for optimisations within the kernel
896
+ future_used_buffers: Set[str] = set()
897
+ for node in reversed(self.snodes):
898
+ node.set_last_usage(future_used_buffers, mutation_real_name)
899
+ future_used_buffers.update(node.last_usage) # type: ignore[arg-type]
900
+
901
+ @cache_on_self
902
+ def used_buffer_names(self) -> Set[str]:
903
+ return set.union(*[x.used_buffer_names() for x in self.snodes])
904
+
905
+ @cache_on_self
906
+ def used_or_aliased_buffer_names(self) -> Set[str]:
907
+ return set.union(*[x.used_or_aliased_buffer_names() for x in self.snodes])
908
+
909
+ def get_nodes(self) -> List[SchedulerNode]:
910
+ return self.snodes
911
+
912
+ def __repr__(self):
913
+ return f"{type(self).__name__}(nodes={self.get_name()})"
914
+
915
+ @cache_on_self
916
+ def is_reduction(self):
917
+ return any(x.is_reduction() for x in self.snodes)
918
+
919
+ @cache_on_self
920
+ def is_split_scan(self):
921
+ return any(x.is_split_scan() for x in self.snodes)
922
+
923
+ @cache_on_self
924
+ def is_template(self):
925
+ return any(x.is_template() for x in self.snodes)
926
+
927
+ @cache_on_self
928
+ def get_template_node(self):
929
+ for node in self.snodes:
930
+ if node.is_template():
931
+ return node
932
+ return None
933
+
934
+ def get_device(self):
935
+ return self.group[0]
936
+
937
+ @cache_on_self
938
+ def has_aliasing_or_mutation(self):
939
+ return any(x.has_aliasing_or_mutation() for x in self.snodes)
940
+
941
+ @cache_on_self
942
+ def op_counts(self):
943
+ op_counts: Counter[str] = collections.Counter()
944
+ for node in self.snodes:
945
+ op_counts.update(node.op_counts())
946
+ return op_counts
947
+
948
+ def has_atomic_add(self, check_buf):
949
+ return any(
950
+ (
951
+ isinstance(sub_schedule_node1, SchedulerNode)
952
+ and sub_schedule_node1.has_atomic_add(check_buf)
953
+ )
954
+ for sub_schedule_node1 in self.get_nodes()
955
+ )
956
+
957
+ # None of these need to be implemented, as a FusedSchedulerNode is just an
958
+ # abstraction for scheduling purposes
959
+ def update_mutated_names(self, renames: Dict[str, str]):
960
+ raise NotImplementedError
961
+
962
+ def add_mutation_dep(self, name):
963
+ raise NotImplementedError
964
+
965
+ def set_users(self, users: List["NodeUser"]):
966
+ raise NotImplementedError
967
+
968
+ def get_aliases(self):
969
+ raise NotImplementedError
970
+
971
+ def get_mutations(self):
972
+ raise NotImplementedError
973
+
974
+ def can_inplace(self, read_dep: dependencies.MemoryDep):
975
+ raise NotImplementedError
976
+
977
+ def allocate(self):
978
+ raise NotImplementedError
979
+
980
+ def can_free(self):
981
+ raise NotImplementedError
982
+
983
+ def debug_str(self) -> str:
984
+ """Longer form printout for trace logs"""
985
+ name = self.get_name()
986
+ node_typestr = ",".join(type(n).__name__ for n in self.snodes)
987
+ lines = [
988
+ f"{name}: {type(self).__name__}({node_typestr})",
989
+ f"{name}.writes = {pformat(self.read_writes.writes)}",
990
+ f"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}",
991
+ f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
992
+ f"{name}.users = {self.users}",
993
+ ]
994
+ try:
995
+ lines += [
996
+ self.debug_str_extra(),
997
+ ]
998
+ except Exception:
999
+ log.warning("Ignoring error in debug_str()", exc_info=True)
1000
+
1001
+ return "\n".join(lines).rstrip()
1002
+
1003
+
1004
+ class ForeachKernelSchedulerNode(FusedSchedulerNode):
1005
+ """Scheduler node which consists of a list of scheduler nodes that each operate on a
1006
+ distinct tensor in a list of tensors."""
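+ # For example (illustrative, no specific op implied): a single
+ # torch._foreach_add over a list of N tensors lowers to N subnodes, one per
+ # list element, which this class batches into one horizontally fused kernel.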
1007
+
1008
+ def get_consumer_subnode_for(self, producer):
1009
+ if producer.get_name() in self.read_to_node:
1010
+ return self.read_to_node[producer.get_name()]
1011
+
1012
+ return None
1013
+
1014
+ def get_producer_subnode_for(self, consumer):
1015
+ for rd in consumer.read_writes.reads:
1016
+ if rd.name in self.name_to_node:
1017
+ return self.name_to_node[rd.name]
1018
+
1019
+ return None
1020
+
1021
+ @classmethod
1022
+ def can_fuse(cls, producer, consumer):
1023
+ why = WhyNoFuse(producer, consumer)
1024
+ if producer.is_foreach() and consumer.is_foreach():
1025
+ foreach_match = len(producer.snodes) == len(consumer.snodes)
1026
+ if not foreach_match:
1027
+ why("foreach do not have same length")
1028
+ return foreach_match and all(
1029
+ producer.scheduler.can_fuse(l, r)
1030
+ for l, r in zip(producer.snodes, consumer.snodes)
1031
+ )
1032
+ elif consumer.is_foreach():
1033
+ consumer_subnode = consumer.get_consumer_subnode_for(producer)
1034
+ if consumer_subnode is not None:
1035
+ return consumer.scheduler.can_fuse(producer, consumer_subnode)
1036
+
1037
+ why("candidate producer is not dep of any foreach consumer")
1038
+ return False
1039
+
1040
+ elif producer.is_foreach():
1041
+ producer_subnode = producer.get_producer_subnode_for(consumer)
1042
+ if producer_subnode is not None:
1043
+ return producer.scheduler.can_fuse(producer_subnode, consumer)
1044
+
1045
+ why("candidate consumer has no dep in any foreach producer")
1046
+ return False
1047
+
1048
+ raise AssertionError(
1049
+ "At least one node passed to ForeachKernelSchedulerNode.can_fuse should be a foreach node"
1050
+ )
1051
+
1052
+ @classmethod
1053
+ def fuse(cls, producer, consumer):
1054
+ assert producer.is_foreach() or consumer.is_foreach()
1055
+ prev_node_1 = None
1056
+ prev_node_2 = None
1057
+ if producer.is_foreach() and consumer.is_foreach():
1058
+ fused_nodes = [
1059
+ FusedSchedulerNode.fuse(l, r)
1060
+ for l, r in zip(producer.snodes, consumer.snodes)
1061
+ ]
1062
+ elif producer.is_foreach():
1063
+ producer_subnode = producer.get_producer_subnode_for(consumer)
1064
+ fused_nodes = []
1065
+ prev_node_1 = producer
1066
+ prev_node_2 = None
1067
+ for node in producer.snodes:
1068
+ if node is producer_subnode:
1069
+ new_node = FusedSchedulerNode.fuse(node, consumer)
1070
+ prev_node_2 = new_node
1071
+ fused_nodes.append(new_node)
1072
+ else:
1073
+ fused_nodes.append(node)
1074
+
1075
+ elif consumer.is_foreach():
1076
+ consumer_subnode = consumer.get_consumer_subnode_for(producer)
1077
+ fused_nodes = []
1078
+ prev_node_1 = consumer
1079
+ prev_node_2 = None
1080
+
1081
+ for node in consumer.snodes:
1082
+ if node is consumer_subnode:
1083
+ new_node = FusedSchedulerNode.fuse(producer, node)
1084
+ prev_node_2 = new_node
1085
+ fused_nodes.append(new_node)
1086
+ else:
1087
+ fused_nodes.append(node)
1088
+
1089
+ return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2) # type: ignore[possibly-undefined]
1090
+
1091
+ def __init__(
1092
+ self,
1093
+ scheduler: "Scheduler",
1094
+ nodes: List[SchedulerNode],
1095
+ prev_node_1=None,
1096
+ prev_node_2=None,
1097
+ ):
1098
+ self.read_to_node = {}
1099
+ self.name_to_node = {}
1100
+
1101
+ if prev_node_1 is None or prev_node_2 is None:
1102
+ super().__init__(scheduler, nodes)
1103
+
1104
+ for node in nodes:
1105
+ for read in node.read_writes.reads:
1106
+ self.read_to_node[read.name] = node
1107
+
1108
+ for name in node.get_names():
1109
+ self.name_to_node[name] = node
1110
+ else:
1111
+ self.scheduler = scheduler
1112
+ self.snodes = nodes
1113
+ self.node: ir.Buffer = None # type: ignore[assignment]
1114
+ self.users: List[NodeUser] = []
1115
+
1116
+ self.set_read_writes(
1117
+ dependencies.ReadWrites.merge_list(
1118
+ [prev_node_1.read_writes, prev_node_2.read_writes]
1119
+ )
1120
+ )
1121
+
1122
+ self.unmet_dependencies = {
1123
+ dep
1124
+ for dep in set.union(
1125
+ prev_node_1.unmet_dependencies, prev_node_2.unmet_dependencies
1126
+ )
1127
+ if dep.name not in self.get_names()
1128
+ } - self.read_writes.writes
1129
+
1130
+ self.min_order = min([prev_node_1.min_order, prev_node_2.min_order])
1131
+ self.max_order = max([prev_node_1.max_order, prev_node_2.max_order])
1132
+
1133
+ foreach_node = prev_node_1 if prev_node_1.is_foreach() else prev_node_2
1134
+ other_node = prev_node_2 if prev_node_1.is_foreach() else prev_node_1
1135
+
1136
+ self.ancestors = foreach_node.ancestors
1137
+ self.ancestors.update(other_node.ancestors)
1138
+
1139
+ self.name_to_node = foreach_node.name_to_node
1140
+ for name in other_node.get_names():
1141
+ self.name_to_node[name] = other_node
1142
+
1143
+ self.group = (nodes[0].get_device(), "foreach")
1144
+
1145
+ self.origins: Set[torch.fx.Node] = set()
1146
+
1147
+ def mark_run(self):
1148
+ raise NotImplementedError
1149
+
1150
+ def codegen(self):
1151
+ assert isinstance(self.node, ir.ComputedBuffer), f"{type(self.node)=}"
1152
+ self.node.get_store_function()(self.node.make_loader()())
1153
+
1154
+ def can_free(self):
1155
+ return NotImplementedError
1156
+
1157
+ def is_foreach(self):
1158
+ return True
1159
+
1160
+ def get_subkernel_nodes(self):
1161
+ """Returns a list of nodes which comprise the foreach kernel, operating on corresponding elements of our input lists.
1162
+ These nodes may be vertically fused."""
1163
+ return list(self.snodes)
1164
+
1165
+ def get_nodes(self):
1166
+ """Returns all nodes contained in this kernel, unpacking fused nodes into their constituent scheduler nodes."""
1167
+ return list(itertools.chain.from_iterable(x.get_nodes() for x in self.snodes))
1168
+
1169
+ def get_first_name(self):
1170
+ return self.snodes[0].get_first_name()
1171
+
1172
+ def prune_redundant_deps(self, name_to_fused_node):
1173
+ _prune_redundant_deps(self, name_to_fused_node)
1174
+
1175
+ for node in self.snodes:
1176
+ node.prune_redundant_deps(name_to_fused_node)
1177
+
1178
+
1179
+ def pick_loop_order(stride_lengths, sizes, priority_idx=()):
1180
+ """
1181
+ A heuristic to decide loop iteration orders. This has not been well
1182
+ tuned and may be something we should autotune.
1183
+ """
1184
+
1185
+ @functools.cmp_to_key
1186
+ def index_cmp(a, b):
1187
+ if sizes[a] == 1 or sizes[b] == 1:
1188
+ # 1-sizes don't matter, just move them to the end
1189
+ return cmp(sizes[a] == 1, sizes[b] == 1)
1190
+
1191
+ stride_len_a = [sl[a] for sl in stride_lengths]
1192
+ stride_len_b = [sl[b] for sl in stride_lengths]
1193
+
1194
+ # equivalent to
1195
+ # np.logical_or(stride_lengths[:, b] == 0, stride_lengths[:, a] < stride_lengths[:, b]).all()
1196
+ a_first = sum(
1197
+ sl_b == 0 or sl_a < sl_b for sl_a, sl_b in zip(stride_len_a, stride_len_b)
1198
+ )
1199
+ b_first = sum(
1200
+ sl_a == 0 or sl_b < sl_a for sl_a, sl_b in zip(stride_len_a, stride_len_b)
1201
+ )
1202
+ if a_first > b_first:
1203
+ return -1
1204
+ if b_first > a_first:
1205
+ return 1
1206
+
1207
+ # otherwise contiguous
1208
+ return cmp(b, a)
1209
+
1210
+ order = list(reversed(range(len(stride_lengths[0]))))
1211
+ if len(priority_idx) > 0:
1212
+ # if we have priority node, only use that node's order
1213
+ stride_lengths = [stride_lengths[pi] for pi in priority_idx]
1214
+ if config.pick_loop_orders:
1215
+ order.sort(key=index_cmp)
1216
+ return order
1217
+
1218
+
1219
+ @dataclasses.dataclass
1220
+ class NodeUser:
1221
+ node: BaseSchedulerNode
1222
+ can_inplace: bool = False
1223
+
1224
+ # A weak user must be scheduled after a given node, but doesn't actually
1225
+ # use the result
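+ # e.g. the WeakDep added for mutation ordering produces a weak user;
+ # dead_node_elimination() treats buffers whose users are all weak as removable.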
1226
+ is_weak: bool = False
1227
+
1228
+ def __hash__(self):
1229
+ return hash((self.node.get_name(), self.can_inplace, self.is_weak))
1230
+
1231
+ def __eq__(self, other):
1232
+ return (
1233
+ self.get_name() == other.get_name()
1234
+ and self.can_inplace == other.can_inplace
1235
+ and self.is_weak == other.is_weak
1236
+ )
1237
+
1238
+ def get_name(self):
1239
+ return self.node.get_name()
1240
+
1241
+ def merge(self, other: "NodeUser") -> "NodeUser":
1242
+ assert self.node is other.node
1243
+ return NodeUser(
1244
+ self.node,
1245
+ self.can_inplace and other.can_inplace,
1246
+ self.is_weak and other.is_weak,
1247
+ )
1248
+
1249
+
1250
+ _post_grad_graph_counter = itertools.count()
1251
+
1252
+
1253
+ class Scheduler:
1254
+ @dynamo_timed
1255
+ def __init__(self, nodes):
1256
+ super().__init__()
1257
+ self.backends = {}
1258
+ self.fuse_cache = {}
1259
+ self.post_grad_graph_id = next(_post_grad_graph_counter)
1260
+
1261
+ self.nodes = []
1262
+ self.available_buffer_names = {
1263
+ *V.graph.graph_inputs.keys(),
1264
+ *V.graph.constants.keys(),
1265
+ }
1266
+
1267
+ self.nodes = [self.create_scheduler_node(n) for n in nodes]
1268
+
1269
+ # some new constants could have been created above
1270
+ self.available_buffer_names.update(V.graph.constants.keys())
1271
+ for node in self.nodes:
1272
+ node.prune_deps()
1273
+
1274
+ self.name_to_node: Dict[str, BaseSchedulerNode] = {
1275
+ n.get_name(): n for n in self.nodes
1276
+ }
1277
+ self.name_to_fused_node: Dict[
1278
+ str, BaseSchedulerNode
1279
+ ] = dict() # set in fuse_nodes()
1280
+
1281
+ # mutation_real_name: Maps back to the original name for codegen
1282
+ # Example:
1283
+ # If you mutate buf0 inside of buf1's kernel, then:
1284
+ # mutation_real_name = {"buf0" : "buf1"}
1285
+ # all subsequent uses of buf0 become buf1's usage in dependency graph
1286
+ self.mutation_real_name = {}
1287
+
1288
+ # We handle mutation by renaming modified versions of the same
1289
+ # buffer in the dependency graph to prevent cycles.
1290
+ # mutation_renames: tracks the current name for a given buffer
1291
+ # (changed once per mutation)
1292
+ # Example:
1293
+ # If you mutate buf0 inside of buf1's kernel, then:
1294
+ # mutation_renames = {"buf1" : "buf0"}
1295
+ # in codegen we only use buf0, never buf1
1296
+ self.mutation_renames = {}
1297
+
1298
+ self.compute_dependencies()
1299
+ self.topological_sort_schedule()
1300
+ self.dead_node_elimination()
1301
+ if config.reorder_for_compute_comm_overlap:
1302
+ comms.decide_global_ordering_of_comms(self.nodes)
1303
+ self.compute_ancestors()
1304
+
1305
+ metrics.ir_nodes_pre_fusion += len(self.nodes)
1306
+ V.debug.ir_pre_fusion(self.nodes)
1307
+ self.num_orig_nodes = len(self.nodes)
1308
+ self.name_to_fused_node = {n.get_name(): n for n in self.nodes}
1309
+ self.create_foreach_nodes()
1310
+ self.topological_sort_schedule()
1311
+ self.logged_slow_fusion = set()
1312
+ self.fuse_nodes()
1313
+ if config.reorder_for_compute_comm_overlap:
1314
+ # Refresh node_users and inverse_users to reflect fused nodes
1315
+ self.compute_node_users()
1316
+ self.nodes = comms.reorder_compute_and_comm_for_overlap(self.nodes)
1317
+ self.compute_last_usage()
1318
+ V.debug.ir_post_fusion(self.nodes)
1319
+ V.debug.graph_diagram(self.nodes)
1320
+ self.debug_draw_graph()
1321
+
1322
+ # used during codegen:
1323
+ self.current_device: torch.device = None # type: ignore[assignment]
1324
+ self.buffer_names_to_free = set()
1325
+
1326
+ # fx graph node to the position it appears in the graph
1327
+ # for debug attribution
1328
+ self.origin_to_index = {}
1329
+
1330
+ get_metric_table("graph_stats").add_row(
1331
+ lambda: {
1332
+ "graph_id": self.post_grad_graph_id,
1333
+ "num_nodes_before_fusion": self.num_orig_nodes,
1334
+ "num_nodes_after_fusion": len(self.nodes),
1335
+ }
1336
+ )
1337
+
1338
+ def debug_draw_graph(self):
1339
+ """Generate an image of the graph for debugging"""
1340
+ if os.environ.get("INDUCTOR_WRITE_SCHEDULER_GRAPH", None) == "1":
1341
+ from .debug import draw_buffers
1342
+
1343
+ draw_buffers(self.nodes, print_graph=True)
1344
+
1345
+ def debug_print_nodes(self, label):
1346
+ if log.isEnabledFor(logging.INFO):
1347
+ log.info("%s:", label)
1348
+ for node in self.nodes:
1349
+ node.log_details()
1350
+
1351
+ def create_scheduler_node(self, node):
1352
+ assert (
1353
+ node.origins is not None
1354
+ ), "All nodes passed to scheduling must have an origin"
1355
+ if node.is_no_op():
1356
+ return NopKernelSchedulerNode(self, node)
1357
+ elif isinstance(node, (ir.ComputedBuffer, ir.TemplateBuffer)):
1358
+ return SchedulerNode(self, node)
1359
+ elif isinstance(node, ir.ExternKernel):
1360
+ return ExternKernelSchedulerNode(self, node)
1361
+ else:
1362
+ raise NotImplementedError(node)
1363
+
1364
+ def create_foreach_nodes(self):
1365
+ removed_node_names = set()
1366
+ fe_nodes = []
1367
+ kept_node_names = self.name_to_fused_node.keys()
1368
+
1369
+ for names in V.graph.lists.values():
1370
+ names = [
1371
+ name
1372
+ for name in names
1373
+ if name in kept_node_names
1374
+ and not isinstance(self.name_to_node[name], NopKernelSchedulerNode)
1375
+ ]
1376
+ if not names:
1377
+ # All nodes eliminated
1378
+ continue
1379
+
1380
+ removed_node_names.update(names)
1381
+ snodes = [self.name_to_node[name] for name in names]
1382
+
1383
+ fe_node = ForeachKernelSchedulerNode(self, snodes) # type: ignore[arg-type]
1384
+
1385
+ fe_nodes.append(fe_node)
1386
+
1387
+ for name in names:
1388
+ self.name_to_fused_node[name] = fe_node
1389
+
1390
+ self.nodes = [
1391
+ node for node in self.nodes if node.get_name() not in removed_node_names
1392
+ ] + fe_nodes
1393
+
1394
+ def compute_dependencies(self):
1395
+ """
1396
+ Create dependency edges between nodes, handling aliasing and
1397
+ mutation properly.
1398
+ """
1399
+
1400
+ T = TypeVar("T")
1401
+
1402
+ class DedupList(Generic[T]):
1403
+ """
1404
+ This data structure behaves like a list except it makes sure the
1405
+ elements remain unique.
1406
+ Normally one could use a set/dict for this purpose however
1407
+ the list in question gets elements appended as it is being
1408
+ iterated over which means that we need to keep the list
1409
+ semantics.
1410
+ """
1411
+
1412
+ def __init__(self, items=None, membership=None):
1413
+ self.items = items or list()
1414
+ self.membership = membership or set()
1415
+
1416
+ def append(self, node_user: T) -> None:
1417
+ if node_user in self.membership:
1418
+ return
1419
+ self.items.append(node_user)
1420
+ self.membership.add(node_user)
1421
+
1422
+ def __add__(self, other: "DedupList[T]") -> "DedupList[T]":
1423
+ new_membership = set.union(self.membership, other.membership)
1424
+ new_items = self.items + [
1425
+ x for x in other.items if x not in self.membership
1426
+ ]
1427
+ return DedupList(new_items, new_membership)
1428
+
1429
+ name_to_users: DefaultDict[str, DedupList[NodeUser]] = collections.defaultdict(
1430
+ DedupList
1431
+ )
1432
+
1433
+ # handle aliasing by using python aliasing in name_to_users
1434
+ # if foo aliases bar then we will make name_to_users["foo"] point
1435
+ # to the same python list as name_to_users["bar"]
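+ # e.g. once two names are aliased, appending a user via name_to_users["foo"]
+ # is also visible through name_to_users["bar"], because both keys reference
+ # the same DedupList object.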
1436
+ for node1 in self.nodes:
1437
+ node1_name = node1.get_name()
1438
+ for node2_name in node1.get_aliases():
1439
+ if node1_name in name_to_users and node2_name in name_to_users:
1440
+ # merge the two
1441
+ list1 = name_to_users[node1_name]
1442
+ list2 = name_to_users[node2_name]
1443
+ combined = list1 + list2
1444
+ for key in name_to_users.keys():
1445
+ if name_to_users[key] is list1 or name_to_users[key] is list2:
1446
+ name_to_users[key] = combined
1447
+ elif node1_name in name_to_users:
1448
+ name_to_users[node2_name] = name_to_users[node1_name]
1449
+ else:
1450
+ name_to_users[node1_name] = name_to_users[node2_name]
1451
+
1452
+ def rename(n):
1453
+ if n in self.mutation_renames:
1454
+ return rename(self.mutation_renames[n])
1455
+ return n
1456
+
1457
+ def dep_closure(node_name):
1458
+ reachable_names = {node_name}
1459
+ node = self.name_to_node[node_name]
1460
+ write_dep = next(iter(node.read_writes.writes))
1461
+ for read_dep in node.read_writes.reads:
1462
+ if (
1463
+ read_dep.name in self.name_to_node
1464
+ and isinstance(read_dep, dependencies.MemoryDep)
1465
+ and isinstance(write_dep, dependencies.MemoryDep)
1466
+ and read_dep.index == write_dep.index
1467
+ and read_dep.size == write_dep.size
1468
+ ):
1469
+ reachable_names.update(dep_closure(read_dep.name))
1470
+ return reachable_names
1471
+
1472
+ def add_user(used_by_name, user_node, can_inplace=False, is_weak=False):
1473
+ name_to_users[rename(used_by_name)].append(
1474
+ NodeUser(user_node, can_inplace, is_weak)
1475
+ )
1476
+
1477
+ unbacked_symbol_to_origin_node = {}
1478
+
1479
+ for node in self.nodes:
1480
+ log.debug("scheduling %s", node.node)
1481
+
1482
+ # unbacked symbols don't follow ordinary buffer dependencies, so
1483
+ # we track their def/uses separately
1484
+ unbacked_symbol_defs = sorted(
1485
+ node.node.get_unbacked_symbol_defs(), key=lambda x: x.name
1486
+ )
1487
+ for s in unbacked_symbol_defs:
1488
+ assert isinstance(s, sympy.Symbol)
1489
+ # Pick the first definer as canonical. There may be multiple
1490
+ # because if a MultiOutputLayout buffer propagates an unbacked
1491
+ # symint to multiple outputs, they will all claim to def it.
1492
+ if s not in unbacked_symbol_to_origin_node:
1493
+ unbacked_symbol_to_origin_node[s] = node
1494
+
1495
+ unbacked_symbol_uses = sorted(
1496
+ node.node.get_unbacked_symbol_uses(), key=lambda x: x.name
1497
+ )
1498
+ # if a kernel takes unbacked symints, register dependencies
1499
+ for s in unbacked_symbol_uses:
1500
+ assert (
1501
+ s in unbacked_symbol_to_origin_node
1502
+ ), f"{s} not in {unbacked_symbol_to_origin_node}"
1503
+ node.add_fake_dep(StarDep(unbacked_symbol_to_origin_node[s].get_name()))
1504
+
1505
+ # a node will mutate either 0 or 1 buffers
1506
+ assert len(node.get_mutations()) <= 1
1507
+ for alt_name in node.get_mutations():
1508
+ alt_name = rename(alt_name)
1509
+ # this node must run after the prior writer
1510
+ add_user(alt_name, node)
1511
+ node.add_mutation_dep(StarDep(alt_name))
1512
+ for other_node in name_to_users[alt_name].items:
1513
+ # this node must run after all prior readers
1514
+ other_name = rename(other_node.get_name())
1515
+ known_dep_node_names = dep_closure(node.get_name())
1516
+ if other_name not in known_dep_node_names:
1517
+ # If this node already directly or indirectly depends on other_node,
1518
+ # we don't need to insert an extra dep.
1519
+ node.add_mutation_dep(WeakDep(other_name))
1520
+ add_user(other_name, node, is_weak=True)
1521
+
1522
+ # add normal non-mutation dependencies
1523
+ for read in node.read_writes.reads:
1524
+ is_weak = isinstance(read, WeakDep)
1525
+ add_user(read.name, node, node.can_inplace(read), is_weak)
1526
+
1527
+ node.update_mutated_names(self.mutation_renames)
1528
+
1529
+ # update our renaming scheme for the next iteration
1530
+ for alt_name in node.get_mutations():
1531
+ self.mutation_renames[rename(alt_name)] = node.get_name()
1532
+ self.mutation_renames[alt_name] = node.get_name()
1533
+ self.mutation_real_name[node.get_name()] = self.mutation_real_name.get(
1534
+ alt_name, alt_name
1535
+ )
1536
+
1537
+ # make sure outputs aren't dead-code-eliminated
1538
+ for node_name in V.graph.get_output_names():
1539
+ log.debug("scheduling output %s", node_name)
1540
+ add_user(node_name, OutputNode(StarDep(node_name)))
1541
+
1542
+ # make sure unbacked symints aren't dead-code-eliminated
1543
+ for node in V.graph.graph_outputs:
1544
+ for s in node.get_unbacked_symbol_uses():
1545
+ assert (
1546
+ s in unbacked_symbol_to_origin_node
1547
+ ), f"{s} not in {unbacked_symbol_to_origin_node.keys()}"
1548
+ node_name = unbacked_symbol_to_origin_node[s].node.name
1549
+ log.debug("scheduling output %s for unbacked symint %s", node_name, s)
1550
+ add_user(node_name, OutputNode(StarDep(node_name)))
1551
+
1552
+ # make sure input mutation isn't dead-code-eliminated
1553
+ for name in self.mutation_renames:
1554
+ if name in V.graph.graph_inputs:
1555
+ add_user(name, OutputNode(StarDep(name)))
1556
+ V.graph.mutated_inputs.add(name)
1557
+
1558
+ inp_names = {
1559
+ name: index for index, name in enumerate(V.graph.graph_inputs.keys())
1560
+ }
1561
+ V.graph.mutated_input_idxs = [
1562
+ inp_names[name] for name in V.graph.mutated_inputs
1563
+ ]
1564
+
1565
+ # copy users information onto the nodes
1566
+ for node in self.nodes:
1567
+ node.set_users(name_to_users[node.get_name()].items)
1568
+
1569
+ # populate inverse_users
1570
+ for node in self.nodes:
1571
+ for user in node.users:
1572
+ user.node.inverse_users.append(node)
1573
+
1574
+ def compute_node_users(self):
1575
+ # set up buffer name to (fused)snode mapping
1576
+ buf_to_snode = {}
1577
+ for node in self.nodes:
1578
+ if isinstance(node, FusedSchedulerNode):
1579
+ for x in node.snodes:
1580
+ buf_to_snode[x.get_name()] = node
1581
+ buf_to_snode[node.get_name()] = node
1582
+
1583
+ for node in self.nodes:
1584
+ node.node_users = []
1585
+ node.inverse_users = []
1586
+
1587
+ # compute inverse_users
1588
+ for node in self.nodes:
1589
+ inverse_users = []
1590
+ for dep in node.unmet_dependencies:
1591
+ assert dep.name in buf_to_snode
1592
+ dep_node = buf_to_snode[dep.name]
1593
+ inverse_users.append(dep_node)
1594
+ node.inverse_users = inverse_users
1595
+
1596
+ # compute node_users
1597
+ # TODO: ideally, we should deduplicate .users and .node_users,
1598
+ # but currently .users contains extra information that's difficult to
1599
+ # extract into a standalone container.
1600
+ node_to_users: Dict[BaseSchedulerNode, List[BaseSchedulerNode]] = {}
1601
+ for node in self.nodes:
1602
+ for inverse_user in node.inverse_users:
1603
+ node_to_users.setdefault(inverse_user, []).append(node)
1604
+ for node, users in node_to_users.items():
1605
+ node.node_users = users
1606
+
1607
+ def dead_node_elimination(self):
1608
+ """
1609
+ Remove any nodes without users
1610
+ """
1611
+ again = True # repeat until a fixed point
1612
+ while again:
1613
+ updated_nodes = []
1614
+ for node in self.nodes:
1615
+
1616
+ def can_eliminate_user(user: NodeUser):
1617
+ return user.is_weak or user.get_name() in V.graph.removed_buffers
1618
+
1619
+ can_eliminate = not node.has_side_effects() and all(
1620
+ can_eliminate_user(u) for u in node.users
1621
+ )
1622
+
1623
+ if not can_eliminate:
1624
+ updated_nodes.append(node)
1625
+ else:
1626
+ # dead code
1627
+ log.debug("removed dead node: %s", node.get_name())
1628
+ V.graph.removed_buffers.add(node.get_name())
1629
+
1630
+ again = len(self.nodes) > len(updated_nodes)
1631
+ self.nodes = updated_nodes
1632
+
1633
+ # Prune any WeakDeps no longer needed
1634
+ for node in self.nodes:
1635
+ node.prune_weak_deps()
1636
+
1637
+ def topological_sort_schedule(self):
1638
+ """
1639
+ Ensure self.nodes is in topologically sorted order
1640
+ """
1641
+ seen: Set[ir.Buffer] = set()
1642
+ name_to_node: Dict[str, ir.Buffer] = dict()
1643
+ result: List[ir.Buffer] = []
1644
+
1645
+ def visit(n):
1646
+ if n not in seen:
1647
+ seen.add(n)
1648
+ for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):
1649
+ visit(name_to_node[dep.name])
1650
+ result.append(n)
1651
+
1652
+ for node in self.nodes:
1653
+ for name in node.get_names():
1654
+ name_to_node[name] = node
1655
+ for node in self.nodes:
1656
+ visit(node)
1657
+ self.nodes = result
1658
+
1659
+ def compute_ancestors(self):
1660
+ """
1661
+ Populate each node.ancestors
1662
+ """
1663
+ # note self.nodes is topologically sorted
1664
+ name_to_ancestors: Dict[str, Set[str]] = {}
1665
+ for node in self.nodes:
1666
+ ancestors = set()
1667
+ for dep in node.unmet_dependencies:
1668
+ ancestors.add(dep.name)
1669
+ ancestors |= name_to_ancestors[dep.name]
1670
+ name_to_ancestors[node.get_name()] = ancestors
1671
+ node.ancestors = ancestors
1672
+
1673
+ for order, node in enumerate(self.nodes):
1674
+ node.min_order = order
1675
+ node.max_order = order
1676
+
1677
+ def fuse_nodes(self):
1678
+ """
1679
+ Mutates self.nodes to combine nodes into FusedSchedulerNodes.
1680
+ """
1681
+ for i in range(10):
1682
+ old_len = len(self.nodes)
1683
+ fusion_log.debug(
1684
+ "===== attempting fusion (%d/10): %d nodes =====", i + 1, old_len
1685
+ )
1686
+ self.fuse_nodes_once()
1687
+ new_len = len(self.nodes)
1688
+ fusion_log.debug(
1689
+ "completed fusion round (%d/10): fused %d nodes into %d nodes\n",
1690
+ i + 1,
1691
+ old_len,
1692
+ new_len,
1693
+ )
1694
+ if new_len == old_len or new_len == 1:
1695
+ fusion_log.debug("===== fusion complete (%d iterations) =====", i + 1)
1696
+ break
1697
+
1698
+ def benchmark_fused_nodes(self, nodes):
1699
+ """
1700
+ Benchmark fused list of nodes and return the execution time
1701
+ in milliseconds on randomly generated inputs.
1702
+ """
1703
+ assert len(nodes) > 0
1704
+ device = nodes[0].get_device()
1705
+ V.graph.scheduler = self
1706
+ self.current_device = device
1707
+ backend = self.get_backend(device)
1708
+ return backend.benchmark_fused_nodes(nodes)
1709
+
1710
+ def speedup_by_fusion(self, node1, node2):
1711
+ """
1712
+ If config.benchmark_fusion is False, always return True.
1713
+ Otherwise, return True only if fusing brings a speedup.
1714
+ """
1715
+ if not config.benchmark_fusion:
1716
+ return True
1717
+
1718
+ if (
1719
+ node1.is_template()
1720
+ and not isinstance(node1.get_template_node(), ir.TritonTemplateBuffer)
1721
+ or node1.is_foreach()
1722
+ or node2.is_foreach()
1723
+ ):
1724
+ # TODO support benchmarking epilogue fusion
1725
+ return True
1726
+
1727
+ node_list_1 = node1.get_nodes()
1728
+ device = node_list_1[0].get_device()
1729
+
1730
+ # don't support benchmark fusion for CPU right now.
1731
+ if device.type == "cpu":
1732
+ return True
1733
+
1734
+ node_list_2 = node2.get_nodes()
1735
+ node_list_fused = node_list_1 + node_list_2
1736
+
1737
+ # We cannot accurately benchmark kernels that use atomic_add
1738
+ # due to how we generate random integer inputs.
1739
+ # Skip benchmarking them by allowing fusion.
1740
+ if any(
1741
+ hasattr(n.node, "data")
1742
+ and hasattr(n.node.data, "scatter_mode")
1743
+ and n.node.data.scatter_mode == "atomic_add"
1744
+ for n in node_list_fused
1745
+ ):
1746
+ return True
1747
+
1748
+ from triton.compiler.errors import CompilationError
1749
+
1750
+ why = WhyNoFuse(node1, node2)
1751
+
1752
+ try:
1753
+ ms1, path1 = self.benchmark_fused_nodes(node_list_1)
1754
+ if math.isinf(ms1):
1755
+ why("register spilling of the first kernel")
1756
+ return False
1757
+ ms2, path2 = self.benchmark_fused_nodes(node_list_2)
1758
+ if math.isinf(ms2):
1759
+ why("register spilling of the second kernel")
1760
+ return False
1761
+ ms_fused, path_fused = self.benchmark_fused_nodes(node_list_fused)
1762
+ if math.isinf(ms_fused):
1763
+ why("register spilling of the fused kernel")
1764
+ return False
1765
+ except CompilationError as e:
1766
+ # workaround triton issue: https://github.com/openai/triton/issues/2151
1767
+ if "Loop-carried variable" in str(e):
1768
+ return True # allow fusion
1769
+ else:
1770
+ raise
1771
+
1772
+ if fusion_log.isEnabledFor(logging.DEBUG):
1773
+ if ms_fused < ms1 + ms2:
1774
+ fusion_log.debug(
1775
+ "can fuse (benchmark): fusing %s with %s cause %sx speedup",
1776
+ node1.get_names(),
1777
+ node2.get_names(),
1778
+ green_text(f"{(ms1 + ms2) / ms_fused:.3f}"),
1779
+ )
1780
+ else:
1781
+ fusion_log.debug(
1782
+ "cannot fuse (benchmark): fusing %s with %s cause %sx slowdown",
1783
+ node1.get_names(),
1784
+ node2.get_names(),
1785
+ red_text(f"{ms_fused / (ms1 + ms2):.3f}"),
1786
+ )
1787
+
1788
+ if (
1789
+ is_metric_table_enabled("slow_fusion")
1790
+ and ms_fused >= ms1 + ms2
1791
+ and (path1, path2) not in self.logged_slow_fusion
1792
+ ):
1793
+ self.logged_slow_fusion.add((path1, path2))
1794
+ get_metric_table("slow_fusion").add_row(
1795
+ lambda: {
1796
+ "kernel1_path": path1,
1797
+ "kernel1_latency": ms1,
1798
+ "kernel2_path": path2,
1799
+ "kernel2_latency": ms2,
1800
+ "fused_kernel_path": path_fused,
1801
+ "fused_kernel_latency": ms_fused,
1802
+ "slow_down_ratio": ms_fused / (ms1 + ms2),
1803
+ }
1804
+ )
1805
+ return ms_fused < ms1 + ms2
1806
+
1807
+ def fuse_nodes_once(self):
1808
+ """
1809
+ Mutates self.nodes to combine nodes into FusedSchedulerNodes.
1810
+
1811
+ This relies on two key functions to control the logic:
1812
+ - self.can_fuse(): checks if a fusion is legal
1813
+ - self.score_fusion(): assigns priority to a given fusion
1814
+ """
1815
+ fused_nodes = set(self.nodes)
1816
+ for node1, node2 in self.get_possible_fusions():
1817
+ node1 = self.name_to_fused_node[node1.get_first_name()]
1818
+ node2 = self.name_to_fused_node[node2.get_first_name()]
1819
+ if self.can_fuse(node1, node2) and not self.will_fusion_create_cycle(
1820
+ node1, node2
1821
+ ):
1822
+ if not self.speedup_by_fusion(node1, node2):
1823
+ continue
1824
+ fusion_log.debug(
1825
+ "fusing %s with %s", node1.get_name(), node2.get_name()
1826
+ )
1827
+
1828
+ # above can_fuse asserts that node2 has the same device
1829
+ device = node1.get_device()
1830
+ node3 = self.get_backend(device).fuse(node1, node2)
1831
+ fused_nodes.remove(node1)
1832
+ fused_nodes.remove(node2)
1833
+ fused_nodes.add(node3)
1834
+ self.name_to_fused_node.update(
1835
+ {n.get_name(): node3 for n in node3.get_nodes()}
1836
+ )
1837
+ self.nodes = sorted(fused_nodes, key=lambda x: x.min_order)
1838
+ self.topological_sort_schedule()
1839
+ self.prune_redundant_deps()
1840
+
1841
+ def prune_redundant_deps(self):
1842
+ for node in self.nodes:
1843
+ node.prune_redundant_deps(self.name_to_fused_node)
1844
+
1845
+ def get_possible_fusions(self):
1846
+ """
1847
+ Helper to find all legal fusion opportunities, sorted by self.score_fusion()
1848
+ """
1849
+ possible_fusions = []
1850
+ seen = set()
1851
+
1852
+ def check_all_pairs(nodes):
1853
+ for node1_index, node1 in enumerate(nodes):
1854
+ for node2 in nodes[node1_index + 1 :]:
1855
+ key = (node1, node2)
1856
+ if key in seen:
1857
+ continue
1858
+ seen.add(key)
1859
+
1860
+ if self.can_fuse(node1, node2):
1861
+ possible_fusions.append(key)
1862
+ elif (node2.is_template() or node2.is_foreach()) and self.can_fuse(
1863
+ node2, node1
1864
+ ):
1865
+ # foreach fusions and epilogue fusions are order dependent
1866
+ possible_fusions.append((node2, node1))
1867
+
1868
+ buffer_names_grouping = collections.defaultdict(list)
1869
+ for node in self.nodes:
1870
+ for buf in node.used_buffer_names():
1871
+ buffer_names_grouping[buf].append(node)
1872
+ for node_grouping in buffer_names_grouping.values():
1873
+ check_all_pairs(node_grouping)
1874
+
1875
+ if config.aggressive_fusion:
1876
+ group_grouping = collections.defaultdict(list)
1877
+ for node in self.nodes:
1878
+ group = getattr(node, "group", None)
1879
+ if group:
1880
+ group_grouping[group].append(node)
1881
+ for node_grouping in group_grouping.values():
1882
+ check_all_pairs(node_grouping)
1883
+
1884
+ possible_fusions.sort(key=self.score_fusion_key, reverse=True)
1885
+ fusion_log.debug("found %d possible fusions", len(possible_fusions))
1886
+ return possible_fusions
1887
+
1888
+ def will_fusion_create_cycle(self, node1, node2):
1889
+ """
1890
+ Finds whether there's a path from node1 to node2 (or vice-versa)
1891
+ caused indirectly by other fusions.
1892
+ """
1893
+
1894
+ def found_path(node):
1895
+ # only fused nodes can introduce new ancestors.
1896
+ if isinstance(node, FusedSchedulerNode) and node not in visited:
1897
+ visited.add(node)
1898
+ if node.get_names().issubset(combined_ancestors):
1899
+ # All fusion outputs are in the ancestors of node1 and node2, and thus
+ # cannot introduce a new path:
+ #
+ # 1. if the output is a descendant of neither node1 nor node2, the
+ # output cannot introduce a path
+ # 2. due to [can_fuse]: if WLOG the output is a descendant of node1, it cannot
+ # be on path(node1->node2), hence it cannot be an ancestor of node2
+ # 3. due to [acyclic]: if WLOG the output is a descendant of node1, it cannot
+ # be an ancestor of node1
1908
+ return False
1909
+ else:
1910
+ # continue DFS of new ancestors introduced by the fusion
1911
+ return bool(combined_names & node.ancestors) or any(
1912
+ found_path(self.name_to_fused_node[n])
1913
+ for n in node.ancestors - combined_ancestors
1914
+ )
1915
+ return False
1916
+
1917
+ visited = set()
1918
+ combined_names = node1.get_names() | node2.get_names()
1919
+ combined_ancestors = (node1.ancestors | node2.ancestors) - combined_names
1920
+ cycle = any(found_path(self.name_to_fused_node[n]) for n in combined_ancestors)
1921
+ if cycle:
1922
+ WhyNoFuse(node1, node2)("will create cycle")
1923
+ return cycle
1924
+
1925
+ def can_fusion_increase_peak_memory(
1926
+ self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
1927
+ ):
1928
+ """
1929
+ This function prevents fusion for nodes that can increase memory
1930
+ footprint. This problem is more common in horizontal fusion, where nodes
1931
+ that are far apart in the original order get fused, lengthening the live
1932
+ intervals of tensors. This is very evident in models with activation
1933
+ checkpointing, where the recomputed nodes from different checkpointed
1934
+ regions get fused and significantly increase the memory footprint.
1935
+
1936
+ The current attempt is a quick, possibly hacky, heuristic to prevent the
1937
+ fusion of nodes that are far away in the original order.
1938
+
1939
+ A better, but harder to implement, heuristic would be to use the live
+ intervals of the buffers: find the region of peak pressure in the original
+ program and prevent fusions that cross that peak region. We might need
+ special care or a good approximation in this implementation, as fusing
+ nodes changes live intervals, and re-computing live intervals and peak
+ memory after each fusion can introduce large compilation overhead.
1945
+ """
1946
+ proximity_score = max(
1947
+ abs(node1.min_order - node2.max_order),
1948
+ abs(node2.min_order - node1.max_order),
1949
+ )
1950
+ return proximity_score > 64
1951
+
1952
+ def can_fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
1953
+ """
1954
+ Determine if it is possible to combine node1 and node2 into a
1955
+ single fused node.
1956
+ """
1957
+
1958
+ if node1 is node2:
1959
+ return False
1960
+
1961
+ why = WhyNoFuse(node1, node2)
1962
+
1963
+ if (
1964
+ isinstance(node1, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
1965
+ and not node1.is_template()
1966
+ ):
1967
+ why("node1 is extern or nop")
1968
+ return False
1969
+ if (
1970
+ isinstance(node2, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
1971
+ and not node2.is_template()
1972
+ ):
1973
+ why("node2 is extern or nop")
1974
+ return False
1975
+
1976
+ if node2.get_names() & node1.ancestors:
1977
+ why("node1 must go before node2")
1978
+ return False
1979
+
1980
+ if (
1981
+ isinstance(node1, (FusedSchedulerNode, SchedulerNode))
1982
+ and isinstance(node2, SchedulerNode)
1983
+ and isinstance(node2._body, ir.LoopBody)
1984
+ ):
1985
+ # Fix issue: https://github.com/pytorch/pytorch/issues/108963
+ # Check: if node2 reads a buffer that is a mutation buffer of node1 (a
+ # SchedulerNode) or of any node inside node1 (a FusedSchedulerNode), look up
+ # the corresponding mutation buffer and check whether it is stored via
+ # atomic_add. If so, disable the fusion of node1 and node2.
1990
+ if any(
1991
+ (
1992
+ node2_used_buf in self.mutation_renames
1993
+ and node1.has_atomic_add(self.mutation_renames[node2_used_buf])
1994
+ )
1995
+ for node2_used_buf in node2._body.reads_name2expr.keys()
1996
+ ):
1997
+ return False
1998
+
1999
+ if node2.is_template():
2000
+ why("templates can only fuse epilogues")
2001
+ return False
2002
+ if node1.is_template() and (
2003
+ node2.has_aliasing_or_mutation()
2004
+ or node2.is_reduction()
2005
+ or not config.epilogue_fusion
2006
+ ):
2007
+ why("template epilogue not satisfied")
2008
+ return False
2009
+
2010
+ device = node1.get_device()
2011
+ device2 = node2.get_device()
2012
+ if device != device2:
2013
+ why("device mismatch (%s vs %s)", device, device2)
2014
+ return False
2015
+ del device2
2016
+
2017
+ no_shared_data = self.score_fusion_memory(node1, node2) == 0
2018
+ if no_shared_data and (
2019
+ not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()
2020
+ ):
2021
+ why("no shared data")
2022
+ return False # heuristic not needed for correctness
2023
+
2024
+ if (
2025
+ not node1.is_foreach()
2026
+ and not node2.is_foreach()
2027
+ and len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size
2028
+ ):
2029
+ why("exceeds max fusion")
2030
+ return False # heuristic not needed for correctness
2031
+
2032
+ if node1.get_names() & node2.ancestors:
2033
+ # node2 depends on node1 outputs
2034
+ if not self.can_fuse_vertical(node1, node2):
2035
+ return False
2036
+ return self.get_backend(device).can_fuse_vertical(node1, node2)
2037
+ else: # nodes don't depend on each other, but may have common reads
2038
+ if self.can_fusion_increase_peak_memory(node1, node2):
2039
+ why("will increase peak memory")
2040
+ return False
2041
+ return self.get_backend(device).can_fuse_horizontal(node1, node2)
2042
+
2043
+ def can_fuse_vertical(self, node1, node2):
2044
+ """
2045
+ Check if it is legal to fuse a consumer (node2) into a producer (node1).
2046
+
2047
+ We can fuse them if all the reads of node2 either match
2048
+ corresponding writes in node1, or are written by nodes that can
2049
+ be scheduled before the fusion of node1 and node2.
2050
+
2051
+ We also disable fusion of a write subsequent to a read if the reads
2052
+ and writes do not align.
2053
+ """
2054
+ node1_names = node1.get_names()
2055
+ computed_deps = set()
2056
+ why = WhyNoFuse(node1, node2)
2057
+
2058
+ # A StarDep doesn't match a MemoryDep, and different indices don't match.
+ # However, broadcasting sometimes strips dimensions; when that is the case
+ # we can still match the unmet dep.
+ # If there's indirect indexing, don't match it.
2062
+ def fusable_read_and_write(read: Dep, write: Dep):
2063
+ return (
2064
+ self.mutation_renames.get(read.name, read.name) == write.name
2065
+ and (isinstance(read, MemoryDep) and isinstance(write, MemoryDep))
2066
+ and not free_symbol_has(read.index, "tmp")
2067
+ and not free_symbol_has(write.index, "tmp")
2068
+ and read.index == write.index
2069
+ and len(read.size) >= len(write.size)
2070
+ and read.size[: len(write.size)] == write.size
2071
+ )
2072
+
2073
+ for rd in node2.unmet_dependencies:
2074
+ for cd in node1.read_writes.writes:
2075
+ if fusable_read_and_write(rd, cd):
2076
+ computed_deps.add(rd)
2077
+
2078
+ remaining_deps = {dep.name for dep in node2.unmet_dependencies - computed_deps}
2079
+ if remaining_deps & node1_names:
2080
+ # MemoryDeps didn't match and read different locations of the same buffer.
2081
+ # Examples here include:
2082
+ # - MemoryDep("foo", x) != MemoryDep("foo", x + 1)
2083
+ # - MemoryDep("foo", x) != StarDep("foo")
2084
+ why("memory deps did not match")
2085
+ return False
2086
+ for name in remaining_deps:
2087
+ if node1_names & self.name_to_fused_node[name].ancestors:
2088
+ why("intermediate nodes between node1 & node2")
2089
+ return False
2090
+
2091
+ # similar to can_inplace, if we are going to fuse a write subsequent to a read
2092
+ # require that the indexing and size is the same
2093
+ for write in node2.read_writes.writes:
2094
+ for read in node1.read_writes.reads:
2095
+ if write.name != self.mutation_renames.get(read.name, read.name):
2096
+ continue
2097
+
2098
+ # bail on StarDep
2099
+ if not fusable_read_and_write(read=read, write=write):
2100
+ why("fusing a write into a read with different indexing formula")
2101
+ return False
2102
+
2103
+ return True
2104
+
2105
+ def score_fusion(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
2106
+ """
2107
+ Assign a score (higher comes first) to the fusion of node1
2108
+ and node2. When different fusions conflict with each other,
2109
+ this is the way we decide what order to run them in.
2110
+
2111
+ Our current score is based on:
2112
+ - Estimate of the saved memory operations
2113
+ - Fusions closer together in original order
2114
+ """
2115
+ memory_score = self.score_fusion_memory(node1, node2)
2116
+ proximity_score = -max(
2117
+ abs(node1.min_order - node2.max_order),
2118
+ abs(node2.min_order - node1.max_order),
2119
+ )
2120
+ return (
2121
+ node1.is_template() == config.epilogue_fusion_first and memory_score > 0,
2122
+ node1.is_reduction() == node2.is_reduction() and memory_score > 0,
2123
+ memory_score,
2124
+ proximity_score,
2125
+ )
2126
+
2127
+ def score_fusion_memory(self, node1, node2):
2128
+ """
2129
+ The first term in our fusion score that estimates number of saved memory operations.
2130
+ """
2131
+ common_memory_deps = (node1.read_writes.reads | node1.read_writes.writes) & (
2132
+ node2.read_writes.reads | node2.read_writes.writes
2133
+ )
2134
+ common_memory_deps = {
2135
+ dep for dep in common_memory_deps if not dep.has_unbacked_symbols()
2136
+ }
2137
+ return sum(dep.numbytes_hint() for dep in common_memory_deps)
2138
+
2139
+ def score_fusion_key(self, nodes):
2140
+ """
2141
+ Shim for list.sort(key=...)
2142
+ """
2143
+ node1, node2 = nodes
2144
+ return self.score_fusion(node1, node2)
2145
+
2146
+ def compute_last_usage(self):
2147
+ """
2148
+ Populate node.last_usage recursively (also for the nodes within a FusedSchedulerNode)
2149
+ """
2150
+
2151
+ future_used_buffers = set()
2152
+ for node_name in V.graph.get_output_names():
2153
+ future_used_buffers.add(node_name)
2154
+
2155
+ for node in reversed(self.nodes):
2156
+ node.set_last_usage(future_used_buffers, self.mutation_real_name)
2157
+ future_used_buffers.update(node.last_usage)
2158
+
2159
+ def free_buffers(self):
2160
+ """Free any buffers that are no longer needed"""
2161
+ for name in sorted(
2162
+ self.buffer_names_to_free
2163
+ - V.graph.removed_buffers
2164
+ - V.graph.wrapper_code.freed
2165
+ ):
2166
+ if name in self.name_to_node:
2167
+ node = self.name_to_node[name]
2168
+ if node.can_free():
2169
+ V.graph.wrapper_code.codegen_free(node.node)
2170
+ elif name in V.graph.graph_inputs:
2171
+ storage = V.graph.graph_inputs[name].data
2172
+ assert isinstance(storage, ir.StorageBox) and storage.is_input_buffer()
2173
+ V.graph.wrapper_code.codegen_free(storage.data)
2174
+
2175
+ self.buffer_names_to_free.clear()
2176
+
2177
+ def remove_kernel_local_buffers(self):
2178
+ """
2179
+ Any buffers that are both created and have a last use in the
2180
+ same kernel can be removed.
2181
+ """
2182
+
2183
+ # V.kernel.store_buffer_names should represent the set of nodes
+ # that get fused
2185
+ fused_node_names = V.kernel.store_buffer_names
2186
+ names_to_remove = []
2187
+ for out_buf in V.kernel.store_buffer_names:
2188
+ users = self.name_to_node[out_buf].users
2189
+ assert users is not None
2190
+ users = {user.get_name() for user in users if not user.is_weak}
2191
+ if users.issubset(fused_node_names):
2192
+ names_to_remove.append(out_buf)
2193
+
2194
+ def remove_filter(n):
2195
+ return (
2196
+ n not in V.kernel.must_keep_buffers
2197
+ and n not in V.kernel.args.input_buffers
2198
+ and n not in self.mutation_renames
2199
+ and n not in self.mutation_real_name
2200
+ )
2201
+
2202
+ names_to_remove = list(filter(remove_filter, names_to_remove))
2203
+
2204
+ for name in names_to_remove:
2205
+ if name in V.kernel.args.inplace_buffers:
2206
+ buf = V.kernel.args.inplace_buffers[name]
2207
+ if isinstance(buf, str) and buf.startswith("REMOVED"):
2208
+ continue
2209
+ remove = all(n in names_to_remove for n in buf.other_names)
2210
+ if remove:
2211
+ self.remove_inplace_buffer(name)
2212
+ V.kernel.inplaced_to_remove.add(name)
2213
+ else:
2214
+ self.remove_buffer(name)
2215
+
2216
+ def remove_buffer(self, name):
2217
+ # Assign a special value instead of deleting the entry
2218
+ # because we still rely on output_buffers's length to
2219
+ # generate unique arg name.
2220
+ log.debug("remove_buffer(%r)", name)
2221
+ V.kernel.args.output_buffers[name] = "REMOVED"
2222
+ V.kernel.removed_buffers.add(name)
2223
+
2224
+ def remove_inplace_buffer(self, name):
2225
+ log.debug("removing_inplace_buffer(%r)", name)
2226
+ inner_name = V.kernel.args.inplace_buffers[name].inner_name
2227
+ V.kernel.args.inplace_buffers[name] = inner_name.replace(
2228
+ "in_out_ptr", "REMOVED"
2229
+ )
2230
+ V.kernel.removed_buffers.add(name)
2231
+
2232
+ def flush(self):
2233
+ for backend in self.backends.values():
2234
+ backend.flush()
2235
+ self.free_buffers()
2236
+
2237
+ def codegen_extern_call(self, scheduler_node: ExternKernelSchedulerNode):
2238
+ assert isinstance(scheduler_node, ExternKernelSchedulerNode)
2239
+ # 'decide_inplace_update' stores the inplace update decisions in
+ # the current kernel, from which 'allocate' retrieves those decisions.
+ # We have to make sure there is a non-NULL kernel handler to store
+ # those inplace update decisions.
2243
+ with V.set_kernel_handler(Kernel(increase_kernel_count=False)):
2244
+ scheduler_node.decide_inplace_update()
2245
+ scheduler_node.allocate()
2246
+ node = scheduler_node.node
2247
+ assert isinstance(node, ir.ExternKernel), f"{type(node)=}"
2248
+ node.codegen(V.graph.wrapper_code)
2249
+ self.free_buffers()
2250
+
2251
+ def create_backend(self, device: torch.device):
2252
+ assert (
2253
+ device.type != "cuda" or device.index is not None
2254
+ ), f"{device} should have been normalized in lowering"
2255
+ V.graph.add_device_info(device)
2256
+
2257
+ device_scheduling = get_scheduling_for_device(device.type)
2258
+ if device_scheduling is None:
2259
+ raise RuntimeError(f"Unsupported device type: {device.type}")
2260
+
2261
+ if device.type == "cuda" and not has_triton():
2262
+ device_props = torch.cuda.get_device_properties(device)
2263
+ if device_props.major < 7:
2264
+ raise RuntimeError(
2265
+ f"Found {device_props.name} which is too old to be supported by the triton GPU compiler, which is used as the backend. Triton only supports devices of CUDA Capability >= 7.0, but your device is of CUDA capability {device_props.major}.{device_props.minor}" # noqa: B950
2266
+ )
2267
+ else:
2268
+ raise RuntimeError(
2269
+ "Cannot find a working triton installation. More information on installing Triton can be found at https://github.com/openai/triton" # noqa: B950
2270
+ )
2271
+
2272
+ return device_scheduling(self)
2273
+
2274
+ def get_backend(self, device: torch.device):
2275
+ if device not in self.backends:
2276
+ self.backends[device] = self.create_backend(device)
2277
+ return self.backends[device]
2278
+
2279
+ def enter_context(self, node):
2280
+ def get_order(n):
2281
+ if n not in self.origin_to_index:
2282
+ self.origin_to_index.update({n: i for i, n in enumerate(n.graph.nodes)})
2283
+ return self.origin_to_index[n]
2284
+
2285
+ # Use a dict to have ordering
2286
+ origins = {
2287
+ (get_order(e), e): None for n in node.get_nodes() for e in n.node.origins
2288
+ }
2289
+ origins = list(origins.keys())
2290
+ if origins:
2291
+ _, last = max(origins, key=operator.itemgetter(0))
2292
+ V.graph.wrapper_code.enter_context(last)
2293
+
2294
+ @dynamo_timed
2295
+ def codegen(self):
2296
+ for node in self.nodes:
2297
+ try:
2298
+ log.debug(
2299
+ "Generating code for node %s with estimated runtime %f",
2300
+ node.get_name(),
2301
+ node.get_estimated_runtime(),
2302
+ )
2303
+ except Exception as e:
2304
+ log.debug(
2305
+ "Generating code for node %s with estimated runtime 0.0",
2306
+ node.get_name(),
2307
+ )
2308
+
2309
+ self.enter_context(node)
2310
+
2311
+ if not isinstance(node, NopKernelSchedulerNode):
2312
+ device = node.get_device()
2313
+ if (
2314
+ device != self.current_device
2315
+ or node.is_extern()
2316
+ or node.is_template()
2317
+ ):
2318
+ self.flush()
2319
+ if device != self.current_device:
2320
+ if device.type == "cuda":
2321
+ if self.current_device and self.current_device.type == "cuda":
2322
+ V.graph.wrapper_code.codegen_device_guard_exit()
2323
+ assert device.index is not None, "device should have an index"
2324
+ V.graph.wrapper_code.codegen_device_guard_enter(device.index)
2325
+ elif self.current_device and self.current_device.type == "cuda":
2326
+ V.graph.wrapper_code.codegen_device_guard_exit()
2327
+ self.current_device = device
2328
+
2329
+ self.buffer_names_to_free.update(node.last_usage)
2330
+
2331
+ if node.is_template():
2332
+ node, *epilogue = node.get_nodes()
2333
+ self.get_backend(device).codegen_template(node, epilogue) # type: ignore[possibly-undefined]
2334
+ elif node.is_extern():
2335
+ self.codegen_extern_call(node)
2336
+ elif node.is_foreach():
2337
+ self.get_backend(device).codegen_foreach(node) # type: ignore[possibly-undefined]
2338
+ elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
2339
+ self.get_backend(device).codegen_nodes(node.get_nodes()) # type: ignore[possibly-undefined]
2340
+ else:
2341
+ assert isinstance(node, NopKernelSchedulerNode)
2342
+ node.allocate()
2343
+
2344
+ if config.debug_check_inf_and_nan:
2345
+ V.graph.wrapper_code.generate_inf_and_nan_checker(node)
2346
+
2347
+ if config.triton.debug_sync_kernel:
2348
+ self.get_backend(device).codegen_sync() # type: ignore[possibly-undefined]
2349
+
2350
+ self.available_buffer_names.update(node.get_names())
2351
+
2352
+ if not isinstance(node, NopKernelSchedulerNode):
2353
+ device = node.get_device()
2354
+ if self.get_backend(device).ready_to_flush():
2355
+ self.flush()
2356
+
2357
+ if self.current_device and self.current_device.type == "cuda":
2358
+ # exit the outermost CUDA device guard. this is
2359
+ # important for nested indentation codegen-ing.
2360
+ V.graph.wrapper_code.codegen_device_guard_exit()
2361
+
2362
+ self.flush()
2363
+
2364
+ def is_unaligned_buffer(self, buf_name):
2365
+ if buf_name in V.graph.graph_inputs or buf_name in V.graph.constants:
2366
+ # all graph inputs or constants are assumed to be aligned
2367
+ return False
2368
+ node = self.name_to_node[buf_name]
2369
+ layout = node.node.get_layout()
2370
+ if isinstance(layout, ir.AliasedLayout):
2371
+ return not layout.maybe_guard_aligned()
2372
+ else:
2373
+ return False
2374
+
2375
+
2376
+ class BaseScheduling:
2377
+ def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
2378
+ """
2379
+ Check whether node1 and node2 can be vertically fused or not.
2380
+ """
2381
+ raise NotImplementedError()
2382
+
2383
+ def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
2384
+ """
2385
+ Check whether node1 and node2 can be horizontally fused or not.
2386
+ """
2387
+ raise NotImplementedError()
2388
+
2389
+ def fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
2390
+ """
2391
+ Fuse two nodes
2392
+ """
2393
+ if node1.is_foreach() or node2.is_foreach():
2394
+ return ForeachKernelSchedulerNode.fuse(node1, node2)
2395
+ else:
2396
+ return FusedSchedulerNode.fuse(node1, node2)
2397
+
2398
+ def group_fn(self, sizes):
2399
+ """
2400
+ Process the iteration sizes in case a transformation needs to be applied.
2401
+ """
2402
+ raise NotImplementedError()
2403
+
2404
+ def codegen_template(
2405
+ self, template_node: SchedulerNode, epilogue_nodes: List[SchedulerNode]
2406
+ ):
2407
+ """
2408
+ Given a template node, generate a kernel.
2409
+
2410
+ This function is currently only available for the Triton backend. A third-party backend
+ that subclasses TritonScheduling can override or reuse it.
2412
+ """
2413
+ raise NotImplementedError()
2414
+
2415
+ def codegen_nodes(self, nodes: List[SchedulerNode]):
2416
+ """
2417
+ Generate a kernel given a list of pre-fused nodes.
2418
+ """
2419
+ raise NotImplementedError()
2420
+
2421
+ def codegen_sync(self):
2422
+ """
2423
+ Generate synchronization code for the kernel. This method depends on the hardware characteristics.
2424
+ """
2425
+ raise NotImplementedError()
2426
+
2427
+ def ready_to_flush(self) -> bool:
2428
+ """
2429
+ Check whether the backend is requesting the scheduler to flush the generated kernel.
2430
+ If not supported, please return False.
2431
+ """
2432
+ return False
2433
+
2434
+ def flush(self):
2435
+ """
2436
+ Flush the generated kernel and python wrapper code to the source code file.
2437
+ """
2438
+ raise NotImplementedError()
2439
+
2440
+ def benchmark_fused_nodes(self, nodes):
2441
+ """
2442
+ Benchmark fused list of nodes and return the execution time
2443
+ in milliseconds on randomly generated inputs.
2444
+ """
2445
+ raise NotImplementedError()
venv/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py ADDED
@@ -0,0 +1,1156 @@
1
+ import builtins
2
+ import functools
3
+ import inspect
4
+ import itertools
5
+ import logging
6
+ import operator
7
+ import sys
8
+ import textwrap
9
+ import time
10
+ from concurrent.futures import ThreadPoolExecutor
11
+ from io import StringIO
12
+
13
+ from typing import Any, Callable, Dict, List, Optional, Union
14
+ from unittest.mock import patch
15
+
16
+ import sympy
17
+
18
+ import torch
19
+ from torch._dynamo.testing import rand_strided
20
+ from torch._dynamo.utils import counters, identity, preserve_rng_state
21
+
22
+ from . import config, ir
23
+ from .autotune_process import TensorMeta, TritonBenchmarkRequest
24
+ from .codecache import code_hash, PersistentCache, PyCodeCache
25
+ from .codegen.common import (
26
+ ChoiceCaller,
27
+ IndentedBuffer,
28
+ KernelTemplate,
29
+ PrimitiveInfoType,
30
+ )
31
+ from .codegen.triton import (
32
+ gen_common_triton_imports,
33
+ texpr,
34
+ TritonKernel,
35
+ TritonPrinter,
36
+ TritonScheduling,
37
+ )
38
+ from .codegen.triton_utils import config_of, signature_to_meta
39
+ from .exc import CUDACompileError
40
+ from .utils import (
41
+ do_bench,
42
+ get_dtype_size,
43
+ Placeholder,
44
+ sympy_dot,
45
+ sympy_product,
46
+ unique,
47
+ )
48
+ from .virtualized import V
49
+
50
+ log = logging.getLogger(__name__)
51
+
52
+ # correctness checks struggle with fp16/tf32
53
+ VERIFY: Dict[str, Any] = dict()
54
+ PRINT_AUTOTUNE = True
55
+ DEBUG = False
56
+
57
+
58
+ class KernelNamespace:
59
+ pass
60
+
61
+
62
+ # these objects are imported from the generated wrapper code
63
+ extern_kernels = KernelNamespace()
64
+
65
+
66
+ class PartialRender:
67
+ """
68
+ Some parts of a template need to be generated at the end, but
69
+ inserted into the template at the start. This allows doing a bunch
70
+ of replacements after the initial render.
71
+ """
72
+
73
+ def __init__(self, code, replacement_hooks):
74
+ super().__init__()
75
+ self.code = code
76
+ self.replacement_hooks = replacement_hooks
77
+
78
+ def finalize(self):
79
+ code = self.code
80
+ assert code is not None, "can only be called once"
81
+ self.code = None
82
+ for key, fn in self.replacement_hooks.items():
83
+ code = code.replace(key, fn())
84
+ return code
85
+
86
+
87
+ class TritonTemplateKernel(TritonKernel):
88
+ def __init__(
89
+ self,
90
+ kernel_name,
91
+ input_nodes,
92
+ output_node,
93
+ defines,
94
+ num_stages,
95
+ num_warps,
96
+ grid_fn,
97
+ meta,
98
+ call_sizes,
99
+ use_jit=True,
100
+ prefix_args=0,
101
+ suffix_args=0,
102
+ epilogue_fn=identity,
103
+ *,
104
+ index_dtype,
105
+ ):
106
+ super().__init__(
107
+ sympy_product(output_node.get_size()),
108
+ sympy.Integer(1),
109
+ index_dtype=index_dtype,
110
+ )
111
+ self.input_nodes = input_nodes
112
+ self.output_node = output_node
113
+ self.named_input_nodes = {}
114
+ self.defines = defines
115
+ self.kernel_name = kernel_name
116
+ self.template_mask = None
117
+ self.use_jit = use_jit
118
+ self.num_stages = num_stages
119
+ self.num_warps = num_warps
120
+ self.grid_fn = grid_fn
121
+ self.meta = meta
122
+ self.call_sizes = call_sizes
123
+ # for templates with fixed epilogues
124
+ self.prefix_args = prefix_args
125
+ self.suffix_args = suffix_args
126
+ self.epilogue_fn = epilogue_fn
127
+ self.render_hooks = dict()
128
+ self.triton_meta: Optional[Dict[str, object]] = None
129
+
130
+ def need_numel_args(self):
131
+ return False
132
+
133
+ def estimate_kernel_num_bytes(self):
134
+ """
135
+ Estimate the total number of bytes this kernel takes.
136
+ For in/out nodes, sizes are counted twice: once for reading and
137
+ once for writing.
138
+ """
139
+ ninplace_args = len(unique(self.args.inplace_buffers.values()))
140
+ num_bytes = []
141
+ for i, inp in enumerate(itertools.chain(self.input_nodes, (self.output_node,))):
142
+ size = V.graph.sizevars.size_hints(inp.get_size())
143
+ numel = functools.reduce(operator.mul, size)
144
+ dtype_size = get_dtype_size(inp.get_dtype())
145
+ num_bytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))
146
+ return sum(num_bytes)
147
+
148
+ def jit_lines(self):
149
+ if self.use_jit:
150
+ return "@triton.jit"
151
+
152
+ argdefs, _, signature = self.args.python_argdefs()
153
+ triton_meta = {
154
+ "signature": signature_to_meta(signature, size_dtype=self.index_dtype),
155
+ "device": V.graph.scheduler.current_device.index,
156
+ "device_type": V.graph.scheduler.current_device.type,
157
+ "constants": {},
158
+ }
159
+ triton_meta["configs"] = [config_of(signature)]
160
+ for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index]
161
+ triton_meta["constants"][arg_num] = 1 # type: ignore[index]
162
+ self.triton_meta = triton_meta
163
+
164
+ inductor_meta = {
165
+ "kernel_name": str(Placeholder.DESCRIPTIVE_NAME),
166
+ "backend_hash": torch.utils._triton.triton_hash_with_backend(),
167
+ }
168
+ if config.profile_bandwidth or config.benchmark_kernel:
169
+ num_gb = self.estimate_kernel_num_bytes() / 1e9
170
+ inductor_meta["kernel_num_gb"] = num_gb
171
+ return f"""
172
+ @triton_heuristics.template(
173
+ num_stages={self.num_stages},
174
+ num_warps={self.num_warps},
175
+ triton_meta={triton_meta!r},
176
+ inductor_meta={inductor_meta!r},
177
+ )
178
+ @triton.jit
179
+ """
180
+
181
+ def def_kernel(self, *argnames):
182
+ """
183
+ Hook called from template code to generate function def and
184
+ needed args.
185
+ """
186
+ assert all(isinstance(x, str) for x in argnames)
187
+ renames = IndentedBuffer(initial_indent=1)
188
+
189
+ named_args = self.input_nodes[
190
+ self.prefix_args : len(self.input_nodes) - self.suffix_args
191
+ ]
192
+
193
+ assert len(argnames) == len(named_args), (
194
+ len(argnames),
195
+ len(named_args),
196
+ self.prefix_args,
197
+ len(self.input_nodes),
198
+ )
199
+
200
+ for input_node in self.input_nodes[: self.prefix_args]:
201
+ # get args in correct order
202
+ self.args.input(input_node.get_name())
203
+
204
+ for name, input_node in zip(argnames, named_args):
205
+ arg_name = f"arg_{name}"
206
+ self.named_input_nodes[name] = input_node
207
+ self.args.input_buffers[input_node.get_name()] = arg_name
208
+
209
+ # The args may be duplicated, so renaming must be after args are de-duplicated.
210
+ for name in argnames:
211
+ input_node = self.named_input_nodes[name]
212
+ arg_name = self.args.input_buffers[input_node.get_name()]
213
+ if input_node.get_layout().offset == 0:
214
+ renames.writeline(f"{name} = {arg_name}")
215
+ else:
216
+ offset = texpr(self.rename_indexing(input_node.get_layout().offset))
217
+ renames.writeline(f"{name} = {arg_name} + {offset}")
218
+
219
+ for input_node in self.input_nodes[len(self.input_nodes) - self.suffix_args :]:
220
+ # get args in correct order
221
+ self.args.input(input_node.get_name())
222
+
223
+ def hook():
224
+ # python_argdefs() cannot be run until after the rest of the template lazily adds more args
225
+ arg_defs, *_ = self.args.python_argdefs()
226
+ code = IndentedBuffer()
227
+ code.splice(gen_common_triton_imports())
228
+ code.splice(self.jit_lines())
229
+ code.writeline(f"def {self.kernel_name}({', '.join(arg_defs)}):")
230
+ with code.indent():
231
+ code.splice(self.defines)
232
+ code.splice(renames.getvalue())
233
+ return code.getvalue()
234
+
235
+ assert "<DEF_KERNEL>" not in self.render_hooks
236
+ self.render_hooks["<DEF_KERNEL>"] = hook
237
+ return "<DEF_KERNEL>"
238
+
239
+ def size(self, name: str, index: int):
240
+ """
241
+ Hook called from template code to get the size of an arg.
242
+ Will add needed args to pass it in if it is dynamic.
243
+ """
244
+ assert isinstance(index, int)
245
+ if name is None:
246
+ val = self.output_node.get_size()[index]
247
+ else:
248
+ assert isinstance(name, str)
249
+ val = self.named_input_nodes[name].get_size()[index]
250
+ return texpr(self.rename_indexing(val))
251
+
252
+ def stride(self, name, index):
253
+ """
254
+ Hook called from template code to get the stride of an arg.
255
+ Will add needed args to pass it in if it is dynamic.
256
+ """
257
+ assert isinstance(index, int)
258
+ if name is None:
259
+ val = self.output_node.get_stride()[index]
260
+ else:
261
+ assert isinstance(name, str)
262
+ val = self.named_input_nodes[name].get_stride()[index]
263
+ return texpr(self.rename_indexing(val))
264
+
265
+ def store_output(self, indices, val, mask):
266
+ """
267
+ Hook called from template code to store the final output
268
+ (if the buffer hasn't been optimized away), then append any
269
+ epilogue fusions.
270
+ """
271
+ assert isinstance(indices, (list, tuple))
272
+ assert isinstance(val, str)
273
+ assert isinstance(mask, str)
274
+ assert self.template_mask is None
275
+ indices = list(map(TritonPrinter.paren, indices))
276
+ index_symbols = [sympy.Symbol(x) for x in indices]
277
+ lengths = [V.graph.sizevars.simplify(s) for s in self.output_node.get_size()]
278
+ assert len(indices) == len(lengths)
279
+
280
+ # glue to make generated code use same indexing from template
281
+ for name, range_tree_entry in zip(
282
+ indices, self.range_trees[0].construct_entries(lengths)
283
+ ):
284
+ range_tree_entry.set_name(name)
285
+ contiguous_index = sympy_dot(
286
+ ir.FlexibleLayout.contiguous_strides(lengths), index_symbols
287
+ )
288
+ contiguous_index = self.rename_indexing(contiguous_index)
289
+ self.body.writeline("xindex = " + texpr(contiguous_index))
290
+ self.range_trees[0].lookup(sympy.Integer(1), sympy_product(lengths)).set_name(
291
+ "xindex"
292
+ )
293
+ self.template_mask = mask
294
+ self.template_indices = indices
295
+ output_index = self.output_node.get_layout().make_indexer()(index_symbols)
296
+ output_index = self.rename_indexing(output_index)
297
+ if output_index == contiguous_index:
298
+ output_index = sympy.Symbol("xindex")
299
+
300
+ epilogue_args = [val]
301
+ for input_node in itertools.chain(
302
+ self.input_nodes[: self.prefix_args],
303
+ self.input_nodes[len(self.input_nodes) - self.suffix_args :],
304
+ ):
305
+ input_node.freeze_layout()
306
+ epilogue_args.append(input_node.make_loader()(index_symbols))
307
+
308
+ V.ops.store(
309
+ self.output_node.get_name(),
310
+ output_index,
311
+ self.epilogue_fn(*epilogue_args),
312
+ )
313
+ self.codegen_body()
314
+
315
+ def hook():
316
+ # more stuff might have been added since the codegen_body above
317
+ self.codegen_body()
318
+ return textwrap.indent(self.body.getvalue(), " ").strip()
319
+
320
+ assert "<STORE_OUTPUT>" not in self.render_hooks
321
+ self.render_hooks["<STORE_OUTPUT>"] = hook
322
+ return "<STORE_OUTPUT>"
323
+
324
+ def render(self, template, kwargs):
325
+ return PartialRender(
326
+ template.render(**self.template_env(), **kwargs),
327
+ self.render_hooks,
328
+ )
329
+
330
+ def make_load(self, name, indices, mask):
331
+ """
332
+ Optional helper called from template code to generate the code
333
+ needed to load from an tensor.
334
+ """
335
+ assert isinstance(indices, (list, tuple))
336
+ assert isinstance(name, str)
337
+ assert isinstance(mask, str)
338
+ stride = self.named_input_nodes[name].get_stride()
339
+ indices = list(map(TritonPrinter.paren, indices))
340
+ assert len(indices) == len(stride)
341
+ index = " + ".join(
342
+ f"{texpr(self.rename_indexing(s))} * {i}" for s, i in zip(stride, indices)
343
+ )
344
+ return f"tl.load({name} + ({index}), {mask})"
345
+
346
+ def template_env(self):
347
+ """
348
+ Generate the namespace visible in the template.
349
+ """
350
+ return {
351
+ fn.__name__: fn
352
+ for fn in [
353
+ self.def_kernel,
354
+ self.size,
355
+ self.stride,
356
+ self.store_output,
357
+ self.make_load,
358
+ ]
359
+ }
360
+
361
+ def indexing(
362
+ self,
363
+ index: sympy.Expr,
364
+ *,
365
+ dense_indexing=False,
366
+ copy_shape=None,
367
+ override_mask=None,
368
+ block_ptr=False,
369
+ ):
370
+ """
371
+ Override the default indexing to use our custom mask and force
372
+ dense indexing.
373
+ """
374
+ return super().indexing(
375
+ index,
376
+ dense_indexing=False,
377
+ copy_shape=self.template_mask,
378
+ override_mask=self.template_mask,
379
+ block_ptr=block_ptr,
380
+ )
381
+
382
+ def initialize_range_tree(self, pid_cache):
383
+ super().initialize_range_tree(pid_cache)
384
+ # ignore default codegen
385
+ self.body.clear()
386
+ self.indexing_code.clear()
387
+
388
+ def call_kernel(self, name: str, node: Optional[ir.IRNode] = None):
389
+ wrapper = V.graph.wrapper_code
390
+ _, call_args, _ = self.args.python_argdefs()
391
+ call_args = [str(a) for a in call_args]
392
+
393
+ for i in range(len(call_args)):
394
+ if V.graph.is_unspec_arg(call_args[i]):
395
+ call_args[i] = call_args[i] + ".item()"
396
+ if isinstance(call_args[i], sympy.Symbol):
397
+ call_args[i] = texpr(call_args[i])
398
+
399
+ if V.graph.cpp_wrapper:
400
+ # In the cpp_wrapper case, we have to compute CUDA launch grid at runtime
401
+ # if any dynamic dimension is involved. We rely on the Python version
402
+ # of the grid function to generate those grid configs, which may contain
403
+ # symbolic values. The wrapper will use cexpr to print out C++ code
404
+ # appropriately for the grid configs.
405
+ grid_args = [V.graph.sizevars.simplify(s) for s in self.call_sizes] + [
406
+ self.meta
407
+ ]
408
+ grid = self.grid_fn(*grid_args)
409
+
410
+ wrapper.generate_kernel_call(
411
+ name,
412
+ call_args,
413
+ device_index=V.graph.scheduler.current_device.index,
414
+ grid=grid,
415
+ triton_meta=self.triton_meta,
416
+ )
417
+ else:
418
+ stream_name = wrapper.write_get_raw_stream(
419
+ V.graph.scheduler.current_device.index
420
+ )
421
+
422
+ wrapper.add_import_once(f"import {self.grid_fn.__module__}")
423
+ meta = wrapper.add_meta_once(self.meta)
424
+
425
+ grid_call = [
426
+ texpr(V.graph.sizevars.simplify(s)) for s in self.call_sizes
427
+ ] + [meta]
428
+ grid_call = f"{self.grid_fn.__module__}.{self.grid_fn.__name__}({', '.join(grid_call)})"
429
+ wrapper.writeline(
430
+ f"{name}.run({', '.join(call_args)}, grid={grid_call}, stream={stream_name})"
431
+ )
432
+
433
+
434
+ @functools.lru_cache(None)
435
+ def _jinja2_env():
436
+ try:
437
+ import jinja2
438
+
439
+ return jinja2.Environment(
440
+ undefined=jinja2.StrictUndefined,
441
+ )
442
+ except ImportError:
443
+ return None
444
+
445
+
446
+ class TritonTemplate(KernelTemplate):
447
+ index_counter = itertools.count()
448
+ all_templates: Dict[str, "TritonTemplate"] = dict()
449
+
450
+ def __init__(self, name: str, grid: Any, source: str, debug=False):
451
+ super().__init__(name)
452
+ self.grid = grid
453
+ self.template = self._template_from_string(source)
454
+ assert name not in self.all_templates, "duplicate template name"
455
+ self.all_templates[name] = self
456
+ self.debug = debug
457
+
458
+ def generate(
459
+ self,
460
+ input_nodes,
461
+ layout,
462
+ num_stages,
463
+ num_warps,
464
+ prefix_args=0,
465
+ suffix_args=0,
466
+ epilogue_fn=identity,
467
+ **kwargs,
468
+ ):
469
+ assert self.template, "requires jinja2"
470
+ defines = StringIO()
471
+ for name, val in kwargs.items():
472
+ defines.write(f" {name} : tl.constexpr = {val}\n")
473
+ defines = defines.getvalue()
474
+
475
+ fake_out = ir.Buffer("buf_out", layout)
476
+ kernel_name = f"triton_{self.name}"
477
+
478
+ numel = sympy_product(layout.size)
479
+ buffers = itertools.chain(input_nodes, (fake_out,))
480
+ if not TritonScheduling.can_use_32bit_indexing(numel, buffers):
481
+ raise NotImplementedError(
482
+ "64-bit indexing is not yet implemented for triton templates"
483
+ )
484
+
485
+ kernel_options = dict(
486
+ input_nodes=input_nodes,
487
+ defines=defines,
488
+ num_stages=num_stages,
489
+ num_warps=num_warps,
490
+ grid_fn=self.grid,
491
+ meta=kwargs,
492
+ call_sizes=layout.size,
493
+ prefix_args=prefix_args,
494
+ suffix_args=suffix_args,
495
+ epilogue_fn=epilogue_fn,
496
+ index_dtype="tl.int32",
497
+ )
498
+ with patch.object(
499
+ V.graph, "get_dtype", self._fake_get_dtype(fake_out)
500
+ ), TritonTemplateKernel(
501
+ kernel_name=kernel_name,
502
+ output_node=fake_out,
503
+ use_jit=True,
504
+ **kernel_options,
505
+ ) as kernel:
506
+ try:
507
+ code = kernel.render(self.template, kwargs).finalize()
508
+ except ZeroDivisionError:
509
+ # TODO(nmacchioni): fix sympy division by zero
510
+ return None
511
+ if self.debug:
512
+ print("Generated Code:\n", code)
513
+ extra = (
514
+ "-".join(
515
+ [
516
+ *[
517
+ f"{kwarg}={repr(kwargs[kwarg])}"
518
+ for kwarg in sorted(kwargs.keys())
519
+ ],
520
+ f"num_stages={num_stages}",
521
+ f"num_warps={num_warps}",
522
+ ]
523
+ )
524
+ + "-"
525
+ )
526
+ mod = PyCodeCache.load(code, extra)
527
+ _, call_args, _ = kernel.args.python_argdefs()
528
+
529
+ expected_args = list(unique(x.get_name() for x in input_nodes))
530
+ expected_args.extend([fake_out.get_name()])
531
+ assert list(call_args)[: len(expected_args)] == expected_args, (
532
+ call_args,
533
+ expected_args,
534
+ )
535
+ extra_args = V.graph.sizevars.size_hints(
536
+ map(sympy.expand, call_args[len(expected_args) :]),
537
+ fallback=config.unbacked_symint_fallback,
538
+ )
539
+
540
+ kernel_hash_name = f"triton_{self.name}_{next(self.index_counter)}"
541
+
542
+ def make_kernel_render(out_node):
543
+ kernel = TritonTemplateKernel(
544
+ kernel_name=str(Placeholder.KERNEL_NAME),
545
+ output_node=out_node,
546
+ use_jit=False,
547
+ **kernel_options,
548
+ )
549
+ render = functools.partial(
550
+ kernel.render,
551
+ self.template,
552
+ kwargs,
553
+ )
554
+ return kernel, render
555
+
556
+ # create the BenchmarkRequest
557
+ assert mod.__file__ is not None
558
+ grid = self.grid(
559
+ *V.graph.sizevars.size_hints(
560
+ layout.size,
561
+ fallback=config.unbacked_symint_fallback,
562
+ ),
563
+ kwargs,
564
+ )
565
+ bmreq = TritonBenchmarkRequest(
566
+ module_path=mod.__file__,
567
+ module_cache_key=mod.key,
568
+ kernel_name=kernel_name,
569
+ grid=grid,
570
+ extra_args=extra_args,
571
+ num_stages=num_stages,
572
+ num_warps=num_warps,
573
+ matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
574
+ input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
575
+ output_tensor_meta=TensorMeta.from_irnodes(layout),
576
+ )
577
+
578
+ return TritonTemplateCaller(
579
+ kernel_hash_name,
580
+ input_nodes,
581
+ layout,
582
+ make_kernel_render,
583
+ extra.strip("-").replace("-", ", "),
584
+ bmreq,
585
+ log_info={
586
+ "tile_shape": str(
587
+ (
588
+ kwargs.get("BLOCK_M", -1),
589
+ kwargs.get("BLOCK_K", -1),
590
+ kwargs.get("BLOCK_N", -1),
591
+ )
592
+ ),
593
+ "num_stages": num_stages,
594
+ "num_warps": num_warps,
595
+ "allow_tf32": str(kwargs.get("ALLOW_TF32", None)),
596
+ "acc_type": str(kwargs.get("ACC_TYPE", None)),
597
+ },
598
+ )
599
+
600
+
601
+ class ExternKernelChoice:
602
+ def __init__(
603
+ self,
604
+ kernel,
605
+ cpp_kernel=None,
606
+ *,
607
+ name=None,
608
+ has_out_variant=True,
609
+ op_overload=None,
610
+ use_fallback_kernel=False,
611
+ ):
612
+ super().__init__()
613
+ name = name or kernel.__name__
614
+ assert callable(kernel)
615
+ assert not hasattr(extern_kernels, name), "duplicate extern kernel"
616
+ self.name = name
617
+ self.cpp_kernel_name = cpp_kernel
618
+ self.has_out_variant = has_out_variant
619
+ setattr(extern_kernels, name, kernel)
620
+ self.op_overload = op_overload
621
+ self.use_fallback_kernel = use_fallback_kernel
622
+
623
+ def to_callable(self):
624
+ return getattr(extern_kernels, self.name)
625
+
626
+ def call_name(self):
627
+ return f"extern_kernels.{self.name}"
628
+
629
+ @functools.lru_cache(None)
630
+ def hash_key(self):
631
+ fn = self.to_callable()
632
+ parts = [
633
+ self.name,
634
+ getattr(fn, "__name__", ""),
635
+ getattr(fn, "__module__", ""),
636
+ ]
637
+ try:
638
+ parts.append(inspect.getsource(fn))
639
+ except Exception:
640
+ pass
641
+ return code_hash("-".join(parts))
642
+
643
+ def bind(
644
+ self,
645
+ input_nodes,
646
+ layout,
647
+ ordered_kwargs_for_cpp_kernel=(),
648
+ **kwargs,
649
+ ):
650
+ self.ordered_kwargs_for_cpp_kernel = ordered_kwargs_for_cpp_kernel
651
+ return ExternKernelCaller(
652
+ self, input_nodes, layout, kwargs, has_out_variant=self.has_out_variant
653
+ )
654
+
655
+
656
+ class TritonTemplateCaller(ChoiceCaller):
657
+ def __init__(
658
+ self,
659
+ name,
660
+ input_nodes,
661
+ layout,
662
+ make_kernel_render,
663
+ debug_extra,
664
+ bmreq,
665
+ log_info: Optional[
666
+ Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]
667
+ ] = None,
668
+ ):
669
+ super().__init__(name, input_nodes, layout)
670
+ self.make_kernel_render = make_kernel_render
671
+ self.debug_extra = debug_extra
672
+ self.bmreq: TritonBenchmarkRequest = bmreq
673
+ if log_info is None:
674
+ log_info = {}
675
+ self.log_info: Dict[str, Any] = log_info
676
+ self.log_info.update(
677
+ {
678
+ "backend": "Triton",
679
+ "grid": str(self.bmreq.grid),
680
+ "num_stages": self.bmreq.num_stages,
681
+ "num_warps": self.bmreq.num_warps,
682
+ }
683
+ )
684
+
685
+ def benchmark(self, *args, out):
686
+ assert self.bmreq is not None
687
+ return self.bmreq.benchmark(*args, output_tensor=out)
688
+
689
+ def __str__(self):
690
+ return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})"
691
+
692
+ def call_name(self):
693
+ return f"template_kernels.{self.name}"
694
+
695
+ def hash_key(self):
696
+ return "-".join(
697
+ [
698
+ self.name.rsplit("_", 1)[0],
699
+ self.bmreq.module_cache_key,
700
+ ]
701
+ )
702
+
703
+ def output_node(self):
704
+ return ir.TensorBox.create(
705
+ ir.TritonTemplateBuffer(
706
+ layout=self.layout,
707
+ inputs=self.input_nodes,
708
+ make_kernel_render=self.make_kernel_render,
709
+ )
710
+ )
711
+
712
+ def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]:
713
+ """Information returned here is logged to the autotune log file when that is enabled."""
714
+ return self.log_info
715
+
716
+
717
+ class ExternKernelCaller(ChoiceCaller):
718
+ def __init__(
719
+ self,
720
+ choice: ExternKernelChoice,
721
+ input_nodes,
722
+ layout,
723
+ kwargs=None,
724
+ *,
725
+ has_out_variant=True,
726
+ ):
727
+ super().__init__(choice.name, input_nodes, layout)
728
+ self.choice = choice
729
+ self.kwargs = kwargs or {}
730
+ self.has_out_variant = has_out_variant
731
+
732
+ def __str__(self):
733
+ return f"ExternKernelCaller({self.choice.call_name()})"
734
+
735
+ def benchmark(self, *args, out):
736
+ if self.has_out_variant:
737
+ return super().benchmark(*args, out=out)
738
+ else:
739
+ algo = self.to_callable()
740
+ out_new = algo(*args)
741
+ torch._C._dynamo.guards.assert_size_stride(
742
+ out_new, tuple(out.size()), tuple(out.stride())
743
+ )
744
+ out.copy_(out_new) # for correctness checking
745
+ return do_bench(lambda: algo(*args))
746
+
747
+ def to_callable(self):
748
+ fn = self.choice.to_callable()
749
+ if self.kwargs:
750
+ return functools.partial(fn, **self.kwargs)
751
+ else:
752
+ return fn
753
+
754
+ def hash_key(self):
755
+ return "-".join(
756
+ [
757
+ self.choice.name,
758
+ *[
759
+ f"{kwarg}={repr(self.kwargs[kwarg])}"
760
+ for kwarg in sorted(self.kwargs.keys())
761
+ ],
762
+ self.choice.hash_key(),
763
+ ]
764
+ )
765
+
766
+ def output_node(self):
767
+ if config.abi_compatible and self.choice.use_fallback_kernel:
768
+ assert (
769
+ self.choice.op_overload is not None
770
+ ), "Please provide an op_overload to use ir.FallbackKernel"
771
+ inner = ir.FallbackKernel.create(
772
+ self.choice.op_overload, *self.input_nodes, **self.kwargs
773
+ )
774
+ else:
775
+ cls = ir.ExternKernelOut if self.has_out_variant else ir.ExternKernelAlloc
776
+ inner = cls(
777
+ layout=self.layout,
778
+ inputs=self.input_nodes,
779
+ python_kernel_name=self.choice.call_name(),
780
+ cpp_kernel_name=self.choice.cpp_kernel_name,
781
+ ordered_kwargs_for_cpp_kernel=self.choice.ordered_kwargs_for_cpp_kernel,
782
+ op_overload=self.choice.op_overload,
783
+ kwargs=self.kwargs,
784
+ )
785
+
786
+ return ir.TensorBox.create(inner)
787
+
788
+ def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]:
789
+ """Information returned here is logged to the autotune log file when that is enabled."""
790
+ return {
791
+ "backend": "extern",
792
+ "kernel_call_name": self.choice.call_name(),
793
+ }
794
+
795
+
796
+ class ErrorFromChoice(RuntimeError):
797
+ def __init__(self, msg, choice: ChoiceCaller, inputs_str):
798
+ msg += f"\nFrom choice {choice}\n{inputs_str}"
799
+ super().__init__(msg)
800
+ self.choice = choice
801
+
802
+
803
+ class AlgorithmSelectorCache(PersistentCache):
804
+ def __call__(
805
+ self,
806
+ name,
807
+ choices: List[ChoiceCaller],
808
+ input_nodes,
809
+ layout,
810
+ # optional dict mapping arg indices to the functions
811
+ # generating a torch.Tensor for that input from the
812
+ # corresponding ir.Buffer. if passed for a given
813
+ # arg, the function will be called instead of
814
+ # generating a random torch.Tensor for benchmarking.
815
+ input_gen_fns: Optional[Dict[int, Callable[[ir.Buffer], torch.Tensor]]] = None,
816
+ precompilation_timeout_seconds: int = 60 * 60,
817
+ ):
818
+ from .codegen.cuda.cuda_kernel import CUDATemplateCaller
819
+
820
+ # TODO(nmacchioni): remove once CI tests are fixed
821
+ choices = [choice for choice in choices if choice is not None]
822
+ if len(choices) == 0:
823
+ raise RuntimeError(
824
+ "No choices to select, please consider adding ATEN into max_autotune_gemm_backends "
825
+ "config (defined in torch/_inductor/config.py) to allow at least one choice. "
826
+ )
827
+ log.debug("Max autotune selects from %s choices.", str(len(choices)))
828
+
829
+ if len(choices) == 1:
830
+ if not isinstance(choices[0], CUDATemplateCaller):
831
+ # CUDATemplateCaller still needs to go through autotuning process to retrieve workspace size.
832
+ return choices[0].output_node()
833
+
834
+ @functools.lru_cache(None)
835
+ def make_benchmark_fn():
836
+ return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns)
837
+
838
+ def precompile(choices):
839
+ if (
840
+ precompilation_timeout_seconds is None
841
+ or precompilation_timeout_seconds <= 0
842
+ ):
843
+ return
844
+ num_workers = min(
845
+ config.compile_threads,
846
+ torch.get_num_threads(),
847
+ len(choices),
848
+ )
849
+ if num_workers <= 0:
850
+ return
851
+ log.info(
852
+ "Multithreaded precompilation for %d choices using %d worker threads",
853
+ len(choices),
854
+ num_workers,
855
+ )
856
+ with ThreadPoolExecutor(max_workers=num_workers) as executor:
857
+ futures = executor.map(
858
+ lambda c: c.precompile(),
859
+ [c for c in choices if hasattr(c, "precompile")],
860
+ timeout=precompilation_timeout_seconds,
861
+ )
862
+ try:
863
+ iterator = iter(futures)
864
+ while True:
865
+ try:
866
+ next(iterator)
867
+ except CUDACompileError:
868
+ log.error( # noqa: G201
869
+ "CUDA Compilation error", exc_info=True
870
+ )
871
+ except TimeoutError:
872
+ log.warning(
873
+ f"Precompilation timed out after {precompilation_timeout_seconds} seconds." # noqa: G004
874
+ )
875
+ except StopIteration:
876
+ pass
877
+ executor.shutdown(wait=True)
878
+
879
+ def autotune(choices):
880
+ try:
881
+ precompile(choices)
882
+ except TimeoutError:
883
+ log.warning(
884
+ "Precompilation phase took longer than timeout allowed. Continuing"
885
+ )
886
+ pass
887
+ return make_benchmark_fn()(choices)
888
+
889
+ if config.autotune_in_subproc:
890
+ from .autotune_process import tuning_pool
891
+
892
+ # do the optional warmup
893
+ tuning_pool.initialize()
894
+
895
+ autotune_start_ts = time.time()
896
+ timings = self.lookup(
897
+ choices,
898
+ name,
899
+ repr([self.key_of(x) for x in input_nodes]),
900
+ autotune,
901
+ )
902
+ autotune_elapse = time.time() - autotune_start_ts
903
+ if timings == {} or choices[0] not in timings:
904
+ return choices[0].output_node()
905
+
906
+ if make_benchmark_fn.cache_info().currsize:
907
+ counters["inductor"]["select_algorithm_autotune"] += 1
908
+ if (
909
+ make_benchmark_fn.cache_info().currsize
910
+ or log.getEffectiveLevel() == logging.DEBUG
911
+ or config.trace.log_autotuning_results
912
+ ):
913
+ self.log_results(name, input_nodes, timings, autotune_elapse)
914
+ selected_choice = builtins.min(timings, key=timings.__getitem__).output_node()
915
+ log.debug("selected choice: %s", str(selected_choice))
916
+ return selected_choice
917
+
918
+ @classmethod
919
+ def make_benchmark_fn(
920
+ cls,
921
+ choices,
922
+ input_nodes,
923
+ layout,
924
+ input_gen_fns=None,
925
+ ):
926
+ if input_gen_fns is None:
927
+ input_gen_fns = {}
928
+
929
+ # de-duplicate args
930
+ unique_example_inputs = {
931
+ x.get_name(): input_gen_fns.get(i, cls.benchmark_example_value)(x)
932
+ for i, x in enumerate(input_nodes)
933
+ }
934
+ example_inputs = list(unique_example_inputs.values())
935
+ example_inputs_extern = [
936
+ torch.as_strided(
937
+ unique_example_inputs[input_node.get_name()],
938
+ V.graph.sizevars.size_hints(
939
+ input_node.get_size(),
940
+ fallback=config.unbacked_symint_fallback,
941
+ ),
942
+ V.graph.sizevars.size_hints(
943
+ input_node.get_stride(),
944
+ fallback=config.unbacked_symint_fallback,
945
+ ),
946
+ V.graph.sizevars.size_hint(
947
+ input_node.get_layout().offset,
948
+ fallback=config.unbacked_symint_fallback,
949
+ ),
950
+ )
951
+ for input_node in input_nodes
952
+ ]
953
+
954
+ out = cls.benchmark_example_value(layout)
955
+ out_extern = torch.as_strided(
956
+ out, out.size(), out.stride(), V.graph.sizevars.size_hint(layout.offset)
957
+ )
958
+ if VERIFY:
959
+ choices[0].benchmark(*example_inputs_extern, out=out_extern)
960
+ expected = out_extern.clone()
961
+
962
+ if DEBUG:
963
+ print(f"{len(choices)} tuning requests:")
964
+
965
+ def debug_str():
966
+ def tensor_repr(x):
967
+ return (
968
+ f"torch.empty_strided({tuple(x.size())!r}, {tuple(x.stride())!r}, "
969
+ f"dtype={x.dtype!r}, device={x.device.type!r})"
970
+ )
971
+
972
+ lines = [
973
+ "inputs = [",
974
+ ]
975
+ for x in example_inputs:
976
+ lines.append(f" {tensor_repr(x)},")
977
+ lines += ["]", f"out = {tensor_repr(out)}", ""]
978
+ return "\n".join(lines)
979
+
980
+ def benchmark_choice_in_current_process(choice):
981
+ out.zero_()
982
+ if isinstance(choice, ExternKernelCaller):
983
+ # aten kernels want the offset baked in for sliced tensors
984
+ result = choice.benchmark(*example_inputs_extern, out=out_extern)
985
+ else:
986
+ # triton templates want the base pointer for sliced tensors
987
+ result = choice.benchmark(*example_inputs, out=out)
988
+ if VERIFY:
989
+ torch.testing.assert_close(out_extern, expected, **VERIFY)
990
+ torch.cuda.synchronize() # shake out any CUDA errors
991
+ return result
992
+
993
+ def benchmark_in_current_process(choices):
994
+ timings = {}
995
+ for choice in choices:
996
+ try:
997
+ timing = benchmark_choice_in_current_process(choice)
998
+ except CUDACompileError as e:
999
+ log.warning(
1000
+ "CUDA compilation error: \n%s. \nIgnore this choice.", str(e)
1001
+ )
1002
+ timing = float("inf")
1003
+ except RuntimeError as e:
1004
+ msg = str(e)
1005
+ if "invalid argument" in msg:
1006
+ msg += "\n\nThis may mean this GPU is too small for max_autotune mode.\n\n"
1007
+ log.warning(msg)
1008
+ timing = float("inf")
1009
+ else:
1010
+ if "illegal memory access" in msg:
1011
+ msg += "\n\nEither error in template or triton bug.\n"
1012
+ raise ErrorFromChoice(msg, choice, debug_str()) # noqa: TRY200
1013
+ except AssertionError as e:
1014
+ raise AssertionError( # noqa: TRY200
1015
+ f"Incorrect result from choice {choice}\n\n{e}"
1016
+ )
1017
+
1018
+ timings[choice] = timing
1019
+
1020
+ return timings
1021
+
1022
+ def benchmark_in_sub_process(choices):
1023
+ from . import autotune_process
1024
+
1025
+ # only benchmark triton kernel in sub process for now.
1026
+ # ATen/Extern kernel are still benchmarked in the current process.
1027
+ extern = [c for c in choices if isinstance(c, ExternKernelCaller)]
1028
+ triton = [c for c in choices if not isinstance(c, ExternKernelCaller)]
1029
+
1030
+ timings = benchmark_in_current_process(extern)
1031
+ timings.update(autotune_process.benchmark_in_sub_process(triton))
1032
+ return timings
1033
+
1034
+ benchmark = (
1035
+ benchmark_in_sub_process
1036
+ if config.autotune_in_subproc
1037
+ else benchmark_in_current_process
1038
+ )
1039
+
1040
+ return benchmark
1041
+
1042
+ @staticmethod
1043
+ def log_results(
1044
+ name: str,
1045
+ input_nodes: List[ir.IRNode],
1046
+ timings: Dict[ChoiceCaller, float],
1047
+ elapse: float,
1048
+ ):
1049
+ V.debug.log_autotuning_results(name, input_nodes, timings, elapse)
1050
+ if not (config.max_autotune or config.max_autotune_gemm) or not PRINT_AUTOTUNE:
1051
+ return
1052
+ sizes = ", ".join(
1053
+ [
1054
+ "x".join(
1055
+ map(
1056
+ str,
1057
+ V.graph.sizevars.size_hints(
1058
+ n.get_size(), fallback=config.unbacked_symint_fallback
1059
+ ),
1060
+ )
1061
+ )
1062
+ for n in input_nodes
1063
+ ]
1064
+ )
1065
+ n = None if log.getEffectiveLevel() == logging.DEBUG else 10
1066
+ top_k = sorted(timings, key=timings.__getitem__)[:n]
1067
+ best = top_k[0]
1068
+ best_time = timings[best]
1069
+ sys.stderr.write(f"AUTOTUNE {name}({sizes})\n")
1070
+ for choice in top_k:
1071
+ result = timings[choice]
1072
+ if result:
1073
+ sys.stderr.write(
1074
+ f" {choice.name} {result:.4f} ms {best_time/result:.1%}\n"
1075
+ )
1076
+ else:
1077
+ sys.stderr.write(
1078
+ f" {choice.name} {result:.4f} ms <DIVIDED BY ZERO ERROR>\n"
1079
+ )
1080
+
1081
+ autotune_type_str = (
1082
+ "SubProcess" if config.autotune_in_subproc else "SingleProcess"
1083
+ )
1084
+ sys.stderr.write(f"{autotune_type_str} AUTOTUNE takes {elapse:.4f} seconds\n")
1085
+
1086
+ @staticmethod
1087
+ def benchmark_example_value(node):
1088
+ """
1089
+ Convert an ir.Buffer into a concrete torch.Tensor we can use for
1090
+ benchmarking.
1091
+ """
1092
+ if isinstance(node, ir.Layout):
1093
+ node = ir.Buffer("fake", node)
1094
+ # triton templates want the base tensor.
1095
+ if isinstance(node, ir.BaseView):
1096
+ node = node.unwrap_view()
1097
+ # preserve rng states so that the rand_strided call below does not change
1098
+ # the rng states for the real model code.
1099
+ with preserve_rng_state():
1100
+ return rand_strided(
1101
+ V.graph.sizevars.size_hints(
1102
+ node.get_size(),
1103
+ fallback=config.unbacked_symint_fallback,
1104
+ ),
1105
+ V.graph.sizevars.size_hints(
1106
+ node.get_stride(),
1107
+ fallback=config.unbacked_symint_fallback,
1108
+ ),
1109
+ device=node.get_device(),
1110
+ dtype=node.get_dtype(),
1111
+ extra_size=node.layout.offset,
1112
+ )
1113
+
1114
+ @staticmethod
1115
+ def key_of(node):
1116
+ """
1117
+ Extract the pieces of an ir.Buffer that we should invalidate cached
1118
+ autotuning results on.
1119
+ """
1120
+ sizevars = V.graph.sizevars
1121
+ return (
1122
+ node.get_device().type,
1123
+ str(node.get_dtype()),
1124
+ *sizevars.size_hints(
1125
+ node.get_size(),
1126
+ fallback=config.unbacked_symint_fallback,
1127
+ ),
1128
+ *sizevars.size_hints(
1129
+ node.get_stride(),
1130
+ fallback=config.unbacked_symint_fallback,
1131
+ ),
1132
+ sizevars.size_hint(
1133
+ node.get_layout().offset,
1134
+ fallback=config.unbacked_symint_fallback,
1135
+ ),
1136
+ )
1137
+
1138
+
1139
+ _ALGORITHM_SELECTOR_CACHE: Optional[AlgorithmSelectorCache] = None
1140
+
1141
+
1142
+ def autotune_select_algorithm(*args, **kwargs):
1143
+ global _ALGORITHM_SELECTOR_CACHE
1144
+ if _ALGORITHM_SELECTOR_CACHE is None:
1145
+ _ALGORITHM_SELECTOR_CACHE = AlgorithmSelectorCache()
1146
+ return _ALGORITHM_SELECTOR_CACHE(*args, **kwargs)
1147
+
1148
+
1149
+ def realize_inputs(*args):
1150
+ if len(args) == 1:
1151
+ return ir.ExternKernel.require_stride1(ir.ExternKernel.realize_input(args[0]))
1152
+ return [realize_inputs(x) for x in args]
1153
+
1154
+
1155
+ # ensure lowering is imported so that `extern_kernels.*` is populated
1156
+ from . import lowering # noqa: F401
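A minimal usage sketch (not part of the diff) of the two helpers above: a GEMM-style lowering is expected to realize its inputs and then hand a pre-built list of ChoiceCaller objects to autotune_select_algorithm. The (name, choices, input_nodes, layout) argument order mirrors the mm/bmm kernels added in this same commit; tuned_example_mm and its parameters are hypothetical placeholders.

    from torch._inductor.select_algorithm import (
        autotune_select_algorithm,
        realize_inputs,
    )

    def tuned_example_mm(mat1, mat2, layout, choices):
        # realize_inputs applies ExternKernel.realize_input + require_stride1
        # so both ATen and Triton candidates see benchmarkable buffers
        mat1, mat2 = realize_inputs(mat1, mat2)
        # benchmark every ChoiceCaller (in-process or in a subprocess, depending
        # on config.autotune_in_subproc) and return the fastest one's output node
        return autotune_select_algorithm("example_mm", choices, [mat1, mat2], layout)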
venv/lib/python3.10/site-packages/torch/_inductor/sizevars.py ADDED
@@ -0,0 +1,643 @@
1
+ import functools
2
+ import itertools
3
+ import logging
4
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
5
+
6
+ import sympy
7
+ from sympy import Expr
8
+
9
+ from torch.fx.experimental.symbolic_shapes import ShapeEnv
10
+ from torch.utils._sympy.functions import FloorDiv, ModularIndexing
11
+ from torch.utils._sympy.value_ranges import bound_sympy
12
+
13
+ from .utils import sympy_index_symbol, sympy_subs, VarRanges
14
+ from .virtualized import V
15
+
16
+ log = logging.getLogger(__name__)
17
+
18
+
19
+ # This class is a little awkward, because ShapeEnv is doing most of the heavy
20
+ # lifting and in some cases we should be directly passing through to ShapeEnv,
21
+ # but there is some extra inductor logic that needs to be handled here
22
+ class SizeVarAllocator:
23
+ def __init__(self, shape_env=None):
24
+ super().__init__()
25
+ if shape_env is None:
26
+ shape_env = ShapeEnv()
27
+ self.shape_env = shape_env
28
+ self.var_to_val = self.shape_env.var_to_val
29
+ self.replacements: Dict[sympy.Symbol, Expr] = self.shape_env.replacements
30
+ # Maps of dynamic sizes that have to be precomputed on the host to the kernel args.
31
+ # The basic idea is if we have some complicated sympy expression
32
+ # f(s0), we may choose to precompute it on the host and then replace
33
+ # all occurrences of that sympy expression with ps0, so that when we
34
+ # codegen we simply reference ps0 directly without repeating
35
+ # f(s0). Unlike regular size variables, ps variables cannot be
36
+ # guarded upon; so if we are asked to guard on a Sympy expression
37
+ # which potentially could have already had a precomputed replacement
38
+ # on it, we are obligated to invert the precomputed replacements
39
+ # (inv_precomputed_replacements).  (A short usage sketch follows __init__ below.)
40
+ self.precomputed_replacements: Dict[Expr, sympy.Symbol] = dict()
41
+ self.inv_precomputed_replacements: Dict[sympy.Symbol, Expr] = dict()
42
+ self.stride_vars = self.make_stride_vars_cache()
43
+ self.simplify_with_ranges = self.make_simplify_with_ranges_cache()
44
+ self._simplify_loops = self.make_simplify_loops_cache()
45
+
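To make the precomputed-replacement bookkeeping described in __init__ concrete, here is a small sketch (an illustration only; the free-standing sympy symbol stands in for a ShapeEnv-created size symbol) exercising lookup_precomputed_size, which is defined further down in this file:

    import sympy
    from torch._inductor.sizevars import SizeVarAllocator

    sv = SizeVarAllocator()  # builds its own ShapeEnv; no graph is needed here
    s0 = sympy.Symbol("s0", integer=True, positive=True)

    # a compound expression is assigned a fresh host-precomputed symbol ps0 ...
    ps = sv.lookup_precomputed_size(s0 * s0 + s0)
    # ... and the inverse table lets guard_equals/expect_true substitute it back
    assert sv.inv_precomputed_replacements[ps] == s0 * s0 + s0
    # repeated lookups of an equal expression reuse the same ps symbol
    assert sv.lookup_precomputed_size(s0 * s0 + s0) == ps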
46
+ def simplify(self, expr: Expr):
47
+ return sympy.expand(expr).xreplace(self.replacements)
48
+
49
+ def make_simplify_with_ranges_cache(self) -> Callable[[Expr, VarRanges], Expr]:
50
+ """
51
+ self._simplify_with_ranges() can be expensive, cache its results
52
+ """
53
+ cache: Dict[Tuple[Any, ...], Expr] = dict()
54
+ replacement_count = len(self.replacements)
55
+
56
+ def simplify_with_ranges(expr: Expr, var_ranges: VarRanges) -> Expr:
57
+ nonlocal replacement_count
58
+ if replacement_count != len(self.replacements):
59
+ # new replacements invalidate cached results
60
+ cache.clear()
61
+ replacement_count = len(self.replacements)
62
+ key = (expr, *var_ranges.items())
63
+ result = cache.get(key, None)
64
+ if result is None:
65
+ result = self._simplify_with_ranges(expr, var_ranges)
66
+ cache[key] = result
67
+ return result
68
+
69
+ return simplify_with_ranges
70
+
71
+ def make_simplify_loops_cache(self):
72
+ """
73
+ self._simplify_loops_impl() can be expensive, cache its results
74
+ """
75
+ cache: Dict[Tuple[Any, ...], Any] = dict()
76
+ replacement_count = len(self.replacements)
77
+
78
+ def simplify_loops(index_vars, sizes, index_formulas):
79
+ nonlocal replacement_count
80
+ if replacement_count != len(self.replacements):
81
+ # new replacements invalidate cached results
82
+ cache.clear()
83
+ replacement_count = len(self.replacements)
84
+ key = (*index_vars, *sizes, *index_formulas)
85
+ result = cache.get(key, None)
86
+ if result is None:
87
+ result = self._simplify_loops_impl(index_vars, sizes, index_formulas)
88
+ cache[key] = result
89
+ return result
90
+
91
+ return simplify_loops
92
+
93
+ def _simplify_with_ranges(self, expr: Expr, var_ranges: VarRanges) -> Expr:
94
+ """
95
+ Simplify indexing expression with knowledge of the ranges of
96
+ iteration variables.
97
+ """
98
+
99
+ expr = join_dimensions(self.simplify(expr))
100
+ original_expr = expr
101
+
102
+ def remove_zero_terms(base, divisor):
103
+ """Symbols smaller than the divisor are zero"""
104
+ for v in base.free_symbols:
105
+ if v in var_ranges:
106
+ # var smaller than divisor can be removed
107
+ # if the rest is guaranteed to be a multiple of the divisor
108
+ rest = sympy.Wild("_rest", exclude=[v])
109
+ m = base.match(v + rest)
110
+ if m and v not in m[rest].free_symbols:
111
+ gcd = sympy.gcd(m[rest], divisor)
112
+ if gcd == divisor:
113
+ if self.statically_known_leq(var_ranges[v], divisor):
114
+ base = m[rest]
115
+ return base
116
+
117
+ def visit_indexing_div(base, divisor):
118
+ return FloorDiv(remove_zero_terms(base, divisor), divisor)
119
+
120
+ def visit_modular_indexing(base, divisor, modulus):
121
+ base = remove_zero_terms(base, divisor)
122
+ base_pos = True
123
+ if isinstance(base, ModularIndexing):
124
+ # for modular indexing, the biggest values from the ranges don't necessarily give
125
+ # the biggest result; the biggest possible result is modulus - 1
126
+ base_s = base.args[2] - 1
127
+ elif not base.has(ModularIndexing):
128
+ # the actual iteration range goes up to size-1
129
+ iter_ranges_zero = {k: 0 for k, v in var_ranges.items()}
130
+ base_lowest = sympy_subs(base, iter_ranges_zero)
131
+ if self.statically_known_leq(0, base_lowest): # type: ignore[arg-type]
132
+ # can't replace with indexing div if base can be negative
133
+ base_pos = True
134
+ else:
135
+ base_pos = False
136
+ iter_ranges = {k: v - 1 for k, v in var_ranges.items()}
137
+ base_s = sympy_subs(base, iter_ranges)
138
+ else:
139
+ base_s = base
140
+ if self.statically_known_lt(base_s, modulus * divisor) and base_pos:
141
+ return FloorDiv(base, divisor)
142
+ return ModularIndexing(base, divisor, modulus)
143
+
144
+ if expr.has(ModularIndexing):
145
+ expr = expr.replace(
146
+ ModularIndexing(
147
+ sympy.Wild("base"),
148
+ sympy.Wild("divisor"),
149
+ sympy.Wild("modulus"),
150
+ ),
151
+ visit_modular_indexing,
152
+ )
153
+
154
+ if expr.has(FloorDiv):
155
+ expr = expr.replace(
156
+ FloorDiv(
157
+ sympy.Wild("base"),
158
+ sympy.Wild("divisor"),
159
+ ),
160
+ visit_indexing_div,
161
+ )
162
+
163
+ if expr != original_expr:
164
+ return self._simplify_with_ranges(expr, var_ranges)
165
+ return expr
166
+
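As a concrete check of the rewrite above (a sketch; a plain sympy symbol stands in for an inductor iteration variable), ModularIndexing collapses when the variable's iteration range cannot reach the modulus and is kept otherwise:

    import sympy
    from torch.utils._sympy.functions import ModularIndexing
    from torch._inductor.sizevars import SizeVarAllocator

    sv = SizeVarAllocator()
    x = sympy.Symbol("x", integer=True, nonnegative=True)

    # x iterates over range(16), so ModularIndexing(x, 1, 32) is just x
    assert sv.simplify_with_ranges(ModularIndexing(x, 1, 32), {x: 16}) == x
    # with range(64) the wrap-around is real and the expression is preserved
    assert sv.simplify_with_ranges(
        ModularIndexing(x, 1, 32), {x: 64}
    ) == ModularIndexing(x, 1, 32)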
167
+ def _simplify_loops_impl(
168
+ self, index_vars: List[sympy.Symbol], sizes, index_formulas
169
+ ):
170
+ """
171
+ Try to remove as many axes from the loop iterations as possible, by:
172
+ 1) removing size==1 dimensions
173
+ 2) fusing contiguous dimensions into a single loop
174
+ If channel_last = True, we will prevent the last dim from being fused with other dims
175
+ """
176
+ sizes = list(map(self.simplify, sizes))
177
+
178
+ strides = [self.stride_vars(x, index_vars) for x in index_formulas]
179
+ assert len(sizes) == len(strides[0]), (len(sizes), len(strides[0]))
180
+
181
+ for i in range(len(sizes)):
182
+ if sizes[i] == 1:
183
+ # remove dim
184
+ sizes[i] = None
185
+
186
+ def can_merge_dims(a, b):
187
+ for k in range(len(strides)):
188
+ if self.simplify(strides[k][a] * sizes[a]) == self.simplify(
189
+ strides[k][b]
190
+ ):
191
+ # approximate test passed, try sound version
192
+ va = index_vars[a]
193
+ vb = index_vars[b]
194
+ v = sympy_index_symbol("_merge_tester")
195
+ expr1 = sympy_subs(index_formulas[k], {va: v * sizes[a], vb: 0})
196
+ expr2 = sympy_subs(index_formulas[k], {va: 0, vb: v})
197
+ if self.simplify(expr1) == self.simplify(expr2):
198
+ continue
199
+ return False
200
+ return True
201
+
202
+ changed = True
203
+ while changed:
204
+ changed = False
205
+ for i, j in itertools.product(
206
+ reversed(range(len(sizes))), reversed(range(len(sizes)))
207
+ ):
208
+ if i == j or sizes[i] is None or sizes[j] is None:
209
+ continue
210
+ if can_merge_dims(i, j):
211
+ changed = True
212
+ sizes[i] = sizes[i] * sizes[j]
213
+ sizes[j] = None
214
+
215
+ def reindex(index):
216
+ it = list(reversed(index))
217
+ new_index = []
218
+ for size in sizes:
219
+ if size is None:
220
+ new_index.append(sympy.Integer(0))
221
+ else:
222
+ new_index.append(it.pop())
223
+ assert not it
224
+ return new_index
225
+
226
+ def prune(index):
227
+ assert len(index) == len(sizes)
228
+ return [i for i, s in zip(index, sizes) if s is not None]
229
+
230
+ return [x for x in sizes if x is not None], reindex, prune
231
+
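A worked example of the dimension fusing described above (a sketch with plain sympy symbols standing in for iteration variables): iterating a contiguous (8, 16) buffer with index 16*x0 + x1 collapses into a single loop of 128 whose flat variable is the offset itself.

    import sympy
    from torch._inductor.sizevars import SizeVarAllocator

    sv = SizeVarAllocator()
    x0, x1 = sympy.symbols("x0 x1", integer=True, nonnegative=True)

    new_sizes, reindex, prune = sv._simplify_loops([x0, x1], [8, 16], [16 * x0 + x1])

    assert new_sizes == [128]        # the two loops were fused into one
    z = sympy.Symbol("z", integer=True, nonnegative=True)
    assert reindex([z]) == [0, z]    # new flat variable fills the old (x0, x1) slots
    assert prune([x0, x1]) == [x1]   # dropped dimensions are pruned from index lists
    # substituting reindex([z]) into 16*x0 + x1 gives plain z, the flat offset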
232
+ # Note - [On Statically Known]
233
+ #
234
+ # The statically_known_* family of functions below replaces a prior system, called maybe_guard_*. The prior system
235
+ # operated by providing essentially a question, where the size hinted values were evaluated. If the condition was
236
+ # true, we add a guard and return True, otherwise, False.
237
+ #
238
+ # def maybe_guard_foo(args):
239
+ # if size_hinted_check(args):
240
+ # return False # No guard, no optim
241
+ # guard(args) # Make a guard
242
+ # return True # Safe to apply optimization
243
+ #
244
+ # The prior system incurred a guard, and green lit an optimization.
245
+ #
246
+ # The new system works in reverse - in the new system, if we know that the inputs are static, and evaluate the
247
+ # condition as true, we green light the optimization, and we do not incur a guard. If we cannot prove that, we
248
+ # return False.
249
+ #
250
+ # def maybe_guard_foo(args):
251
+ # if all_static(args):
252
+ # return True # Safe to apply optimization
253
+ # else:
254
+ # return False # No guard, no optim
255
+
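The contrast is easy to see on a fresh allocator (a sketch; the free-standing symbol is for illustration only, real size symbols come from the ShapeEnv):

    import sympy
    from torch._inductor.sizevars import SizeVarAllocator

    sv = SizeVarAllocator()
    s0 = sympy.Symbol("s0", integer=True, positive=True)

    # provable without any hint: green-light the optimization, no guard is added
    assert sv.statically_known_equals(s0 * 2, s0 + s0)
    # not provable: refuse the optimization instead of installing a guard
    assert not sv.statically_known_equals(s0, sympy.Integer(4))
    # guard_equals/guard_lt below, by contrast, assert the relation on the
    # ShapeEnv and must only be used when the caller already knows it holds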
256
+ # See Note - [On Statically Known]
257
+
258
+ def is_expr_static_and_true(self, expr: Union[Expr, int]) -> bool:
259
+ if expr in (True, False):
260
+ return bool(expr)
261
+
262
+ try:
263
+ simplified = self.shape_env._maybe_evaluate_static(expr)
264
+ if simplified is not None:
265
+ return bool(simplified)
266
+ except Exception:
267
+ log.debug("Could not simplify %s", expr)
268
+
269
+ return False
270
+
271
+ def statically_known_equals(self, left: Expr, right: Expr) -> bool:
272
+ """
273
+ Returns a bool indicating if it is sound to optimize as if left and right are equal.
274
+ """
275
+ return self.is_expr_static_and_true(sympy.Eq(left, right)) # type: ignore[arg-type]
276
+
277
+ # See Note - [On Statically Known]
278
+ def statically_known_list_equals(self, left: List[Expr], right: List[Expr]) -> bool:
279
+ """
280
+ Returns a bool indicating if it is sound to optimize as if left and right lists are equal.
281
+ """
282
+ if len(left) != len(right):
283
+ return False
284
+ if all(self.statically_known_equals(l, r) for l, r in zip(left, right)):
285
+ return True
286
+ return False
287
+
288
+ # See Note - [On Statically Known]
289
+ def statically_known_leq(self, left: Expr, right: Expr) -> bool:
290
+ """
291
+ Returns a bool indicating if it is sound to optimize as if left is less than or equal to right.
292
+ """
293
+ expr = left <= right
294
+ return self.is_expr_static_and_true(expr)
295
+
296
+ # See Note - [On Statically Known]
297
+ def statically_known_lt(self, left: Expr, right: Expr) -> bool:
298
+ """
299
+ Returns a bool indicating if it is sound to optimize as if left is less than right.
300
+ """
301
+ expr = left < right
302
+ return self.is_expr_static_and_true(expr)
303
+
304
+ # See Note - [On Statically Known]
305
+ def statically_known_multiple_of(self, numerator: Expr, denominator: Expr) -> bool:
306
+ """
307
+ Return a bool indicating if it is sound to optimize for the numerator being a multiple of the denominator.
308
+ """
309
+ expr = sympy.Eq(numerator % denominator, 0)
310
+ return self.is_expr_static_and_true(expr) # type: ignore[arg-type]
311
+
312
+ # The guard functions require you to ALREADY KNOW that a particular
313
+ # condition holds. If you don't know (you want to guard on an expression
314
+ # being a particular value, and then get access to that value), use
315
+ # the evaluate functions.
316
+
317
+ def guard_equals(self, left: Expr, right: Expr) -> Expr:
318
+ if isinstance(left, Expr):
319
+ left = sympy_subs(left, self.inv_precomputed_replacements) # type: ignore[arg-type]
320
+ if isinstance(right, Expr):
321
+ right = sympy_subs(right, self.inv_precomputed_replacements) # type: ignore[arg-type]
322
+ assert self.shape_env.evaluate_expr(sympy.Eq(left, right))
323
+ return left
324
+
325
+ def guard_leq(self, left: Expr, right: Expr) -> None:
326
+ return self.guard_lt(left, right + 1)
327
+
328
+ def guard_lt(self, left: Expr, right: Expr) -> None:
329
+ assert self.shape_env.evaluate_expr(sympy.Lt(left, right))
330
+
331
+ def expect_true(self, expr: Expr, *, msg: str) -> None:
332
+ expr = sympy_subs(expr, self.inv_precomputed_replacements) # type: ignore[arg-type]
333
+ self.shape_env.defer_runtime_assert(expr, msg, fx_node=None)
334
+
335
+ def expect_equals(self, left: Expr, right: Expr, *, msg: str) -> Expr:
336
+ # Prefer returning the expression without unbacked symints
337
+ if self.shape_env.is_unbacked_symint(left):
338
+ self.expect_true(sympy.Eq(left, right), msg=msg) # type: ignore[arg-type]
339
+ return right
340
+ elif self.shape_env.is_unbacked_symint(right):
341
+ self.expect_true(sympy.Eq(left, right), msg=msg) # type: ignore[arg-type]
342
+ return left
343
+ else:
344
+ return self.guard_equals(left, right)
345
+
346
+ def guarded_order(self, seq):
347
+ """
348
+ Return the order of a sequence as a permutation of range(len(seq)) and guard on that order not changing.
349
+ Used for generating block_ptrs.
350
+ """
351
+ seq = [*map(self.remove_precomputed_replacements, seq)]
352
+ seq = [(self.size_hint(var), orig_idx, var) for orig_idx, var in enumerate(seq)]
353
+ seq.sort()
354
+ order = [-1] * len(seq)
355
+ last_var = None
356
+ for new_index, (_, orig_index, var) in enumerate(seq):
357
+ order[orig_index] = new_index
358
+ if last_var is not None:
359
+ self.guard_leq(last_var, var)
360
+ last_var = var
361
+ return order
362
+
363
+ # The evaluate functions evaluate some symbolic sympy expression
364
+ # (NB: not necessarily an Expr) and return what the concrete result
365
+ # is, guarding on the expression being that result
366
+
367
+ # NB: write evaluate_expr(sympy.Lt(a, b)) rather than evaluate_expr(a < b)
368
+ # as this will ensure that you actually have a sympy'ified expression,
369
+ # and will prevent you from incorrectly writing evaluate_expr(a == b)
370
+ # which does the wrong thing if a or b is a sympy expression
371
+ def evaluate_expr(self, left: Union[Expr, sympy.logic.boolalg.Boolean]) -> bool:
372
+ assert isinstance(left, (Expr, sympy.logic.boolalg.Boolean)), type(left)
373
+ return self.shape_env.evaluate_expr(sympy.sympify(left))
374
+
375
+ def evaluate_min(self, left: Expr, right: Expr) -> Expr:
376
+ """return the smaller of left and right, and guard on that choice"""
377
+ lv = self.size_hint(left)
378
+ rv = self.size_hint(right)
379
+ if lv <= rv:
380
+ self.guard_leq(left, right)
381
+ return left
382
+ else:
383
+ self.guard_leq(right, left)
384
+ return right
385
+
386
+ def evaluate_max(self, left: Expr, right: Expr) -> Expr:
387
+ """return the larger of left and right, and guard on that choice"""
388
+ # Always choose the opposite of eval min for consistency
389
+ # This means min(a, b) and max(a, b) produce the same guards
390
+ min_val = self.evaluate_min(left, right)
391
+ return right if min_val is left else left
392
+
393
+ def evaluate_static_shape(self, left: Expr) -> int:
394
+ right = self.size_hint(left)
395
+ self.guard_equals(left, sympy.Integer(right))
396
+ return int(right)
397
+
398
+ def evaluate_static_shapes(self, left: List[Expr]) -> List[int]:
399
+ return [self.evaluate_static_shape(x) for x in left]
400
+
401
+ def remove_precomputed_replacements(self, expr: Expr) -> Expr:
402
+ if any(s.name.startswith("ps") for s in expr.free_symbols): # type: ignore[attr-defined]
403
+ return sympy_subs(expr, self.inv_precomputed_replacements) # type: ignore[arg-type]
404
+ return expr
405
+
406
+ def symbolic_hint(self, expr: Expr) -> Expr:
407
+ # Substitute all hints into expr, but leave unbacked symints alone
408
+ if not isinstance(expr, Expr):
409
+ assert isinstance(expr, int)
410
+ return expr
411
+ free_symbols = expr.free_symbols
412
+ if not free_symbols:
413
+ return int(expr) # type: ignore[return-value]
414
+ expr = self.remove_precomputed_replacements(expr)
415
+ return sympy_subs(expr, self.var_to_val)
416
+
417
+ def size_hint(self, expr: Expr, *, fallback: Optional[int] = None) -> int:
418
+ out = self.symbolic_hint(expr)
419
+ if not isinstance(out, (int, sympy.Integer)) and fallback is not None:
420
+ # Use the provided heuristic fallback hint
421
+ sym_vrs = {
422
+ s: self.shape_env.var_to_range.get(s, None) for s in expr.free_symbols
423
+ }
424
+ if all(vr is not None for vr in sym_vrs.values()):
425
+ expr_vr = bound_sympy(expr, sym_vrs) # type: ignore[arg-type]
426
+ lower = self.size_hint(expr_vr.lower) # type: ignore[arg-type]
427
+ upper = self.size_hint(expr_vr.upper) # type: ignore[arg-type]
428
+ fallback = min(max(fallback, lower), upper)
429
+ return fallback
430
+ try:
431
+ return int(out)
432
+ except Exception:
433
+ log.debug("failed on: %s", out)
434
+ raise
435
+
436
+ def size_hints(
437
+ self,
438
+ exprs: Iterable[Expr],
439
+ *,
440
+ fallback: Optional[int] = None,
441
+ ) -> Tuple[int, ...]:
442
+ return tuple(self.size_hint(x, fallback=fallback) for x in exprs)
443
+
444
+ def _lru_cache(self, fn, maxsize=None):
445
+ """
446
+ Wrapper around functools.lru_cache that clears when replacements
447
+ have been invalidated.
448
+ """
449
+ fn_cache = functools.lru_cache(maxsize)(fn)
450
+ prior_len = len(self.replacements)
451
+
452
+ @functools.wraps(fn)
453
+ def wrapper(*args, **kwargs):
454
+ nonlocal prior_len
455
+ if prior_len != len(self.replacements):
456
+ prior_len = len(self.replacements)
457
+ fn_cache.cache_clear()
458
+ return fn_cache(*args, **kwargs)
459
+
460
+ return wrapper
461
+
462
+ def make_stride_vars_cache(self):
463
+ cache = self._lru_cache(self._stride_vars)
464
+
465
+ def stride_vars(
466
+ index: Expr,
467
+ vars: List[sympy.Symbol],
468
+ support_vars: Optional[List[sympy.Symbol]] = None,
469
+ ) -> List[Expr]:
470
+ if not support_vars:
471
+ support_vars = vars
472
+ return cache(index, tuple(vars), tuple(support_vars))
473
+
474
+ return stride_vars
475
+
476
+ def _stride_vars(
477
+ self, index: Expr, vars: List[sympy.Symbol], support_vars: List[sympy.Symbol]
478
+ ) -> List[Expr]:
479
+ """Convert an indexing expression back into strides
480
+
481
+ NOTE: This is only valid if the index is a standard strided offset
482
+ calculation. e.g. 10 * ModularIndexing(i0 + 1, 1, 2) would give a
483
+ stride of -10 because the index wraps around after the first element
484
+
485
+ """
486
+ strides = []
487
+ index = self.simplify(index)
488
+ # remove any offset
489
+ index = index - sympy_subs(
490
+ index, {v: sympy.Integer(0) for v in support_vars if v != 0}
491
+ )
492
+ for i in range(len(vars)):
493
+ # drop all the other dims
494
+ index_dim = sympy_subs(
495
+ index,
496
+ {
497
+ support_vars[j]: sympy.Integer(0)
498
+ for j in range(len(support_vars))
499
+ if vars[i] != support_vars[j] and support_vars[j] != 0
500
+ },
501
+ )
502
+ v = vars[i]
503
+ if v == 0:
504
+ strides.append(sympy.Integer(0))
505
+ else:
506
+ # TODO(jansel): should we use sympy.diff here?
507
+ strides.append(
508
+ sympy_subs(index_dim, {v: sympy.Integer(1)})
509
+ - sympy_subs(index_dim, {v: sympy.Integer(0)})
510
+ )
511
+ return strides
512
+
513
+ def offset_var(self, index: Expr, vars: List[sympy.Symbol]) -> Expr:
514
+ """Extract offset part of an indexing expression"""
515
+ index = self.simplify(index)
516
+ return sympy_subs(index, {v: sympy.Integer(0) for v in vars if v != 0})
517
+
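For a standard strided indexing expression this inversion is exact; a small sketch (plain sympy symbols used as stand-ins for iteration variables):

    import sympy
    from torch._inductor.sizevars import SizeVarAllocator

    sv = SizeVarAllocator()
    i0, i1 = sympy.symbols("i0 i1", integer=True, nonnegative=True)
    index = 3 * i0 + 7 * i1 + 5

    assert sv.stride_vars(index, [i0, i1]) == [3, 7]   # per-variable strides
    assert sv.offset_var(index, [i0, i1]) == 5         # the constant offset term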
518
+ def stride_hints(
519
+ self,
520
+ index: Expr,
521
+ vars: List[sympy.Symbol],
522
+ support_vars: Optional[List[sympy.Symbol]] = None,
523
+ ) -> List[int]:
524
+ for v in index.free_symbols:
525
+ if v.name.startswith("indirect"): # type: ignore[attr-defined]
526
+ index = sympy_subs(index, {v: 0}) # type: ignore[dict-item]
527
+ result = []
528
+ for s in self.stride_vars(index, vars, support_vars):
529
+ try:
530
+ result.append(self.size_hint(s))
531
+ except TypeError:
532
+ result.append(0)
533
+ return result
534
+
535
+ def stride_order(self, index: Expr, vars: List[sympy.Symbol]) -> List[int]:
536
+ strides = tuple(map(abs, self.stride_hints(index, vars)))
537
+ order = list(range(len(strides)))
538
+ order.sort(key=lambda x: (strides[x] == 0, strides[x]))
539
+ return order
540
+
541
+ def lookup_precomputed_size(self, expr: Expr) -> Expr:
542
+ if (
543
+ isinstance(expr, (int, sympy.Symbol, sympy.Number))
544
+ or expr.is_number
545
+ or expr.is_symbol
546
+ ):
547
+ return expr
548
+ expr = self.remove_precomputed_replacements(expr)
549
+ if expr not in self.precomputed_replacements:
550
+ sym = sympy_index_symbol(f"ps{len(self.precomputed_replacements)}")
551
+ self.precomputed_replacements[expr] = sym
552
+ self.inv_precomputed_replacements[sym] = expr
553
+ return self.precomputed_replacements[expr]
554
+
555
+ def free_symbols(self) -> Set[sympy.Symbol]:
556
+ return set(self.var_to_val.keys()) - set(self.replacements.keys())
557
+
558
+
559
+ def join_dimensions(expr: Expr) -> Expr:
560
+ if not isinstance(expr, sympy.Add) or not expr.has(ModularIndexing):
561
+ return expr # fast exit path
562
+ return _join_dimensions_cached(expr)
563
+
564
+
565
+ @functools.lru_cache(256)
566
+ def _join_dimensions_cached(expr: Expr) -> Expr:
567
+ """
568
+ ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4)
569
+ becomes
570
+ ModularIndexing(i0, 1, 128)
571
+ ModularIndexing(i0, 1, 32) + 32 * FloorDiv(i0, 32)
572
+ becomes i0
573
+
574
+
575
+ This type of pattern can come from view operations
576
+ """
577
+ assert isinstance(expr, sympy.Add)
578
+
579
+ scale = sympy.Wild("scale", exclude=[0])
580
+ base = sympy.Wild("base")
581
+ divisor = sympy.Wild("divisor")
582
+ mod1 = sympy.Wild("modulus")
583
+ mod2 = sympy.Wild("modulus2")
584
+ for term1 in expr.args:
585
+ m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))
586
+ if m1:
587
+ for term2 in expr.args:
588
+ m2 = term2.match(
589
+ m1[scale]
590
+ * m1[mod1]
591
+ * ModularIndexing(m1[base], m1[divisor] * m1[mod1], mod2)
592
+ )
593
+ if m2 and term1 != term2:
594
+ expr = join_dimensions(
595
+ expr
596
+ - term1
597
+ - term2
598
+ + m1[scale]
599
+ * ModularIndexing(m1[base], m1[divisor], m1[mod1] * m2[mod2])
600
+ )
601
+ return expr
602
+ for term1 in expr.args:
603
+ m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))
604
+ if m1:
605
+ for term2 in expr.args:
606
+ m2 = term2.match(
607
+ m1[scale] * m1[mod1] * FloorDiv(m1[base], m1[divisor] * m1[mod1])
608
+ )
609
+ if m2 is not None: # in case of success we get an empty dict here
610
+ expr = join_dimensions(
611
+ expr
612
+ - term1
613
+ - term2
614
+ + m1[scale] * FloorDiv(m1[base], m1[divisor])
615
+ )
616
+ return expr
617
+ return expr
618
+
619
+
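The two rewrites from the docstring can be checked directly (a sketch; it relies only on join_dimensions above and the sympy helpers already imported by this file):

    import sympy
    from torch.utils._sympy.functions import FloorDiv, ModularIndexing
    from torch._inductor.sizevars import join_dimensions

    i0 = sympy.Symbol("i0", integer=True, nonnegative=True)

    # two nested ModularIndexing terms over the same base collapse into one
    assert join_dimensions(
        ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4)
    ) == ModularIndexing(i0, 1, 128)

    # ModularIndexing plus the matching FloorDiv term reconstructs the base variable
    assert join_dimensions(ModularIndexing(i0, 1, 32) + 32 * FloorDiv(i0, 32)) == i0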
620
+ class SimplifyIndexing(V.WrapperHandler): # type: ignore[name-defined]
621
+ """
622
+ A wrapper around .virtualized.ops that uses var range information to
623
+ simplify ModularIndexing/FloorDiv.
624
+ """
625
+
626
+ def __init__(self, inner, var_ranges: VarRanges):
627
+ super().__init__(inner)
628
+ self.name = "SimplifyIndexing"
629
+ self._simplify: Callable[
630
+ [Expr], Expr
631
+ ] = lambda index: V.graph.sizevars.simplify_with_ranges(index, var_ranges)
632
+
633
+ def load(self, name: str, index: sympy.Expr):
634
+ return self._inner.load(name, self._simplify(index))
635
+
636
+ def store(self, name, index, value, mode=None):
637
+ return self._inner.store(name, self._simplify(index), value, mode=mode)
638
+
639
+ def store_reduction(self, name, index, value):
640
+ return self._inner.store_reduction(name, self._simplify(index), value)
641
+
642
+ def index_expr(self, index, dtype):
643
+ return self._inner.index_expr(self._simplify(index), dtype)