applied-ai-018 committed on
Commit c496e1d · verified · 1 Parent(s): 2b04270

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_inductor/__init__.py +142 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_inductor/autotune_process.py +616 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_inductor/bounds.py +122 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codecache.py +2492 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cuda_combined_scheduling.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/memory_planning.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp +239 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/common.py +1295 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h +410 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_template.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_epilogue_gen.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_utils.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/gemm_template.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py +360 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__init__.py +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py +186 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py +257 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py +706 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda_combined_scheduling.py +75 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/memory_planning.py +799 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py +249 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py +96 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py +250 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/_inductor/comms.py +365 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/_inductor/compile_fx.py +1302 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/_inductor/config.py +664 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/_inductor/constant_folding.py +190 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py +315 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py +2157 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/_inductor/debug.py +561 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/_inductor/decomposition.py +613 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/_inductor/dependencies.py +444 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/_inductor/exc.py +98 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/_inductor/freezing.py +266 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_utils.py +178 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/_inductor/graph.py +1133 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/_inductor/hooks.py +24 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/_inductor/index_propagation.py +262 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py +90 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/_inductor/ir.py +0 -0
env-llmeval/lib/python3.10/site-packages/torch/_inductor/__init__.py ADDED
@@ -0,0 +1,142 @@
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ import torch.fx
4
+ import torch.utils._pytree as pytree
5
+
6
+ __all__ = ["compile", "list_mode_options", "list_options", "cudagraph_mark_step_begin"]
7
+
8
+
9
+ def compile(
10
+ gm: torch.fx.GraphModule,
11
+ example_inputs: List[torch.Tensor],
12
+ options: Optional[Dict[str, Any]] = None,
13
+ ):
14
+ """
15
+ Compile a given FX graph with TorchInductor. This allows compiling
16
+ FX graphs captured without using TorchDynamo.
17
+
18
+ Args:
19
+ gm: The FX graph to compile.
20
+ example_inputs: List of tensor inputs.
21
+ options: Optional dict of config options. See `torch._inductor.config`.
22
+
23
+ Returns:
24
+ Callable with same behavior as gm but faster.
25
+ """
26
+ from .compile_fx import compile_fx
27
+
28
+ return compile_fx(gm, example_inputs, config_patches=options)
29
+
30
+
31
+ # TODO: aot_compile only works with fx graphs generated by export. This will be removed
32
+ # later to prevent people from calling it with an arbitrary fx graph.
33
+ def aot_compile(
34
+ gm: torch.fx.GraphModule,
35
+ example_inputs: List[torch.Tensor],
36
+ options: Optional[Dict[str, Any]] = None,
37
+ ) -> str:
38
+ """
39
+ Ahead-of-time compile a given FX graph with TorchInductor into a shared library.
40
+
41
+ Args:
42
+ gm: The FX graph to compile.
43
+ example_inputs: List of tensor inputs.
44
+ options: Optional dict of config options. See `torch._inductor.config`.
45
+
46
+ Returns:
47
+ Path to the generated shared library
48
+ """
49
+ from .compile_fx import compile_fx_aot
50
+
51
+ # We will serialize the pytree info into the .so as constant strings
52
+ serialized_in_spec = ""
53
+ serialized_out_spec = ""
54
+ if isinstance(gm.graph._codegen, torch.fx.graph._PyTreeCodeGen):
55
+ codegen = gm.graph._codegen
56
+ gm.graph._codegen = torch.fx.graph.CodeGen()
57
+ gm.recompile()
58
+
59
+ if codegen.pytree_info.in_spec is not None:
60
+ serialized_in_spec = pytree.treespec_dumps(codegen.pytree_info.in_spec)
61
+
62
+ if codegen.pytree_info.out_spec is not None:
63
+ serialized_out_spec = pytree.treespec_dumps(codegen.pytree_info.out_spec)
64
+
65
+ options = (
66
+ {
67
+ "aot_inductor.serialized_in_spec": serialized_in_spec,
68
+ "aot_inductor.serialized_out_spec": serialized_out_spec,
69
+ }
70
+ if options is None
71
+ else {
72
+ **options,
73
+ "aot_inductor.serialized_in_spec": serialized_in_spec,
74
+ "aot_inductor.serialized_out_spec": serialized_out_spec,
75
+ }
76
+ )
77
+
78
+ return compile_fx_aot(
79
+ gm,
80
+ example_inputs,
81
+ config_patches=options,
82
+ )
83
+
84
+
85
+ def list_mode_options(
86
+ mode: Optional[str] = None, dynamic: Optional[bool] = None
87
+ ) -> Dict[str, Any]:
88
+ r"""Returns a dictionary describing the optimizations that each of the available
89
+ modes passed to `torch.compile()` performs.
90
+
91
+ Args:
92
+ mode (str, optional): The mode to return the optimizations for.
93
+ If None, returns optimizations for all modes
94
+ dynamic (bool, optional): Whether dynamic shape is enabled.
95
+
96
+ Example::
97
+ >>> torch._inductor.list_mode_options()
98
+ """
99
+
100
+ mode_options: Dict[str, Dict[str, bool]] = {
101
+ "default": {},
102
+ # enable cudagraphs
103
+ "reduce-overhead": {
104
+ "triton.cudagraphs": True,
105
+ },
106
+ # enable max-autotune
107
+ "max-autotune-no-cudagraphs": {
108
+ "max_autotune": True,
109
+ },
110
+ # enable max-autotune
111
+ # enable cudagraphs
112
+ "max-autotune": {
113
+ "max_autotune": True,
114
+ "triton.cudagraphs": True,
115
+ },
116
+ }
117
+ return mode_options[mode] if mode else mode_options # type: ignore[return-value]
118
+
119
+
120
+ def list_options() -> List[str]:
121
+ r"""Returns a list of the optimization and debug configuration options
122
+ that are available to `torch.compile()`.
123
+
124
+ The options are documented in `torch._inductor.config`.
125
+
126
+ Example::
127
+
128
+ >>> torch._inductor.list_options()
129
+ """
130
+
131
+ from torch._inductor import config
132
+
133
+ current_config: Dict[str, Any] = config.shallow_copy_dict()
134
+
135
+ return list(current_config.keys())
136
+
137
+
138
+ def cudagraph_mark_step_begin():
139
+ "Indicates that a new iteration of inference or training is about to begin."
140
+ from .cudagraph_trees import mark_step_begin
141
+
142
+ mark_step_begin()
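The compile() and list_mode_options() helpers added above compose: the dict returned for a mode can be passed back in as the `options` argument. A minimal usage sketch (not part of the files in this commit; the traced function and shapes are made up for illustration, and the final call follows the docstring's "same behavior as gm" contract):

    import torch
    import torch._inductor
    import torch.fx

    def f(x, y):
        return torch.relu(x) + y

    # Capture an FX graph without TorchDynamo, as the compile() docstring describes.
    gm = torch.fx.symbolic_trace(f)
    example_inputs = [torch.randn(8), torch.randn(8)]

    # Config patches that the "max-autotune" mode would apply ...
    opts = torch._inductor.list_mode_options("max-autotune")
    # ... handed to compile(); per the docstring, the result behaves like gm but faster.
    compiled = torch._inductor.compile(gm, example_inputs, options=opts)
    out = compiled(*example_inputs)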
env-llmeval/lib/python3.10/site-packages/torch/_inductor/autotune_process.py ADDED
@@ -0,0 +1,616 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import dataclasses
5
+ import functools
6
+ import logging
7
+ import os
8
+ import queue
9
+ import time
10
+ import warnings
11
+ from concurrent.futures import ThreadPoolExecutor
12
+ from ctypes import byref, c_size_t, c_void_p
13
+ from multiprocessing.process import BaseProcess
14
+ from multiprocessing.queues import Queue
15
+ from typing import (
16
+ Any,
17
+ Callable,
18
+ Dict,
19
+ Iterable,
20
+ List,
21
+ Optional,
22
+ Sequence,
23
+ TYPE_CHECKING,
24
+ Union,
25
+ )
26
+
27
+ import torch
28
+ from torch import multiprocessing
29
+ from torch._dynamo.testing import rand_strided
30
+
31
+ from torch._inductor import ir
32
+ from torch._inductor.codecache import CUDACodeCache, DLLWrapper, PyCodeCache
33
+
34
+ if TYPE_CHECKING:
35
+ from torch._inductor.select_algorithm import TritonTemplateCaller
36
+
37
+ from . import config
38
+ from .utils import do_bench
39
+ from .virtualized import V
40
+
41
+ CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES"
42
+ EXIT_HANDLER_REGISTERED = False
43
+
44
+ log = logging.getLogger(__name__)
45
+
46
+
47
+ # Used to synchronize between parent and child processes
48
+ class Ping:
49
+ pass
50
+
51
+
52
+ class Pong:
53
+ pass
54
+
55
+
56
+ @contextlib.contextmanager
57
+ def set_cuda_visible_device(device: Optional[int]):
58
+ """
59
+ Context manager to set the CUDA_VISIBLE_DEVICES environment variable to the
60
+ specified single device. If device is None, don't manipulate the environment.
61
+ """
62
+ if device is None:
63
+ yield
64
+ return
65
+
66
+ current = os.environ.get(CUDA_VISIBLE_DEVICES)
67
+ os.environ[CUDA_VISIBLE_DEVICES] = str(device)
68
+ try:
69
+ yield
70
+ finally:
71
+ if current is None:
72
+ del os.environ[CUDA_VISIBLE_DEVICES]
73
+ else:
74
+ os.environ[CUDA_VISIBLE_DEVICES] = current
75
+
76
+
77
+ @dataclasses.dataclass
78
+ class TuningProcess:
79
+ """
80
+ Abstraction for launching a helper process to benchmark kernels. Spawns
81
+ a child process and uses multiprocessing queues to send benchmark
82
+ requests and receive results.
83
+ """
84
+
85
+ device: Optional[int] = None
86
+ process: Optional[BaseProcess] = None
87
+ request_queue: Optional[Queue[Any]] = None
88
+ response_queue: Optional[Queue[Any]] = None
89
+
90
+ @staticmethod
91
+ def process_main(
92
+ request_queue: Queue[Any],
93
+ response_queue: Queue[Any],
94
+ ) -> None:
95
+ """
96
+ Entry point for the child process.
97
+ """
98
+ log.debug(
99
+ "Entering TuningProcess child. Visible devices = %s",
100
+ os.environ.get(CUDA_VISIBLE_DEVICES),
101
+ )
102
+ try:
103
+ TuningProcess.workloop(request_queue, response_queue)
104
+ except Exception as ex:
105
+ log.exception("Exception in TuningProcess: %s", ex)
106
+
107
+ @staticmethod
108
+ def workloop(request_queue: Queue[Any], response_queue: Queue[Any]) -> None:
109
+ """
110
+ Work loop for the benchmarking subprocess.
111
+ """
112
+ while True:
113
+ obj = request_queue.get()
114
+
115
+ if obj is None:
116
+ break # None is a sentinel for the child to terminate
117
+ elif isinstance(obj, Ping):
118
+ response_queue.put(Pong())
119
+ elif isinstance(obj, BenchmarkRequest):
120
+ response_queue.put(obj.benchmark())
121
+ else:
122
+ raise RuntimeError(f"Invalid request type {type(obj)}")
123
+
124
+ def valid(self) -> bool:
125
+ """
126
+ True if the sub-process has been initialized.
127
+ """
128
+ return (
129
+ self.process is not None
130
+ and self.request_queue is not None
131
+ and self.response_queue is not None
132
+ )
133
+
134
+ def clear(self) -> None:
135
+ """
136
+ Reset to an uninitialized state.
137
+ """
138
+ self.process = self.request_queue = self.response_queue = None
139
+
140
+ def initialize(self) -> None:
141
+ """
142
+ Create child process, request/response queues, and do the warm up.
143
+ Set the environment to make only the provided GPU device visible
144
+ to the process.
145
+ """
146
+ if self.valid():
147
+ return
148
+
149
+ # cuda runtime does not work with "fork", use "spawn" to start processes.
150
+ ctx = multiprocessing.get_context("spawn")
151
+ self.request_queue = ctx.Queue()
152
+ self.response_queue = ctx.Queue()
153
+
154
+ self.process = ctx.Process(
155
+ target=self.process_main,
156
+ args=(
157
+ self.request_queue,
158
+ self.response_queue,
159
+ ),
160
+ )
161
+ assert self.process is not None
162
+ with set_cuda_visible_device(self.device):
163
+ self.process.start()
164
+
165
+ def put(self, obj: Any) -> None:
166
+ """
167
+ Push a work item to the child process.
168
+ """
169
+ # In case of a prior crash, ensure the subprocess is running
170
+ self.initialize()
171
+ assert self.request_queue is not None
172
+ self.request_queue.put(obj)
173
+
174
+ def get(self) -> Any:
175
+ """
176
+ Get a response from the child process.
177
+ """
178
+ assert self.process is not None
179
+ assert self.response_queue is not None
180
+ while True:
181
+ try:
182
+ return self.response_queue.get(timeout=1.0)
183
+ except queue.Empty:
184
+ status = self.process.exitcode
185
+ if status is None:
186
+ # child process is still running
187
+ continue
188
+ # child process crashed
189
+ self.clear()
190
+ raise
191
+
192
+ def terminate(self) -> None:
193
+ """
194
+ Signal the child process to terminate.
195
+ """
196
+ if self.valid():
197
+ assert self.process is not None
198
+ assert self.request_queue is not None
199
+ self.request_queue.put(None)
200
+
201
+ def wait(self) -> None:
202
+ """
203
+ Wait for the child process to exit.
204
+ """
205
+ if self.process is not None:
206
+ self.process.join()
207
+ self.clear()
208
+
209
+
210
+ @dataclasses.dataclass
211
+ class TuningProcessPool:
212
+ """
213
+ Maintains a pool of TuningProcesses to benchmark kernels in parallel
214
+ across devices. By default, we create one TuningProcess per device and
215
+ set the sub-process environment to make only that device visible.
216
+ """
217
+
218
+ processes: Optional[queue.Queue[TuningProcess]] = None
219
+ executor: Optional[ThreadPoolExecutor] = None
220
+
221
+ def initialize(self) -> None:
222
+ """
223
+ Start the child processes.
224
+ """
225
+ assert (self.processes is None) == (self.executor is None)
226
+ if self.processes is not None:
227
+ return
228
+
229
+ devices = self.get_device_list()
230
+ log.debug("Sub-process autotune device list: %s", devices)
231
+
232
+ # Launch the child processes and push a msg to "warm up"
233
+ self.processes = queue.Queue()
234
+ for device in devices:
235
+ p = TuningProcess(device=device)
236
+ p.initialize()
237
+ p.put(Ping())
238
+ self.processes.put(p)
239
+
240
+ # Wait for the initialization to finish
241
+ for p in self.processes.queue:
242
+ assert isinstance(p.get(), Pong)
243
+
244
+ # Use a thread pool to manage distributing work to the subprocesses.
245
+ # Threads block on an available process, so it makes sense to match
246
+ # the number of threads with the number of devices.
247
+ self.executor = ThreadPoolExecutor(max_workers=len(devices))
248
+
249
+ # Register the exit handler for the parent process so it will terminate
250
+ # the child processes.
251
+ global EXIT_HANDLER_REGISTERED
252
+ if not EXIT_HANDLER_REGISTERED:
253
+ EXIT_HANDLER_REGISTERED = True
254
+ import atexit
255
+
256
+ atexit.register(self.terminate)
257
+
258
+ def get_device_list(self) -> Sequence[Optional[int]]:
259
+ """
260
+ Gather the list of devices to be used in the pool.
261
+ """
262
+ if not config.autotune_multi_device:
263
+ # Don't use multiple devices
264
+ return [None]
265
+
266
+ count = torch.cuda.device_count()
267
+
268
+ # If the user specified the visible devices in the env, use those.
269
+ if CUDA_VISIBLE_DEVICES in os.environ:
270
+ devices = [int(d) for d in os.environ[CUDA_VISIBLE_DEVICES].split(",")]
271
+ assert len(devices) <= count
272
+ return devices
273
+
274
+ return list(range(count))
275
+
276
+ def terminate(self) -> None:
277
+ """
278
+ Signal all child processes to terminate.
279
+ """
280
+ if self.executor is not None:
281
+ self.executor.shutdown()
282
+ self.executor = None
283
+
284
+ if self.processes is not None:
285
+ for p in self.processes.queue:
286
+ p.terminate()
287
+ for p in self.processes.queue:
288
+ p.wait()
289
+ self.processes = None
290
+
291
+ def target(self, choice: TritonTemplateCaller) -> float:
292
+ """
293
+ Entry point for the thread-pool helper threads: Wait for an open TuningProcess,
294
+ remove it from the queue, execute the benchmark in that subprocess, and return
295
+ the TuningProcess to the queue.
296
+ """
297
+ assert choice.bmreq is not None
298
+ assert self.processes is not None
299
+
300
+ process = self.processes.get()
301
+ process.put(choice.bmreq)
302
+ try:
303
+ return process.get()
304
+ except queue.Empty:
305
+ warnings.warn(
306
+ f"Failed to benchmark choice '{choice}'. It will be ignored. "
307
+ "Please debug the root cause in case the choice can bring perf gains."
308
+ )
309
+ # set to INF so this choice will be ignored
310
+ return float("inf")
311
+ finally:
312
+ self.processes.put(process)
313
+
314
+ def benchmark(
315
+ self,
316
+ choices: List[TritonTemplateCaller],
317
+ ) -> Dict[TritonTemplateCaller, float]:
318
+ """
319
+ Benchmark each choice in a separate process.
320
+ """
321
+ assert self.processes is not None, "Tuning process pool is not initialized"
322
+ assert self.executor is not None
323
+
324
+ results = {}
325
+
326
+ # Use a ThreadExecutorPool to spread the work across the subprocesses and
327
+ # to grab subprocesses as soon as they're free.
328
+ for choice, result in zip(choices, self.executor.map(self.target, choices)):
329
+ results[choice] = result
330
+
331
+ return results
332
+
333
+
334
+ tuning_pool = TuningProcessPool()
335
+
336
+
337
+ LayoutOrBuffer = Union[ir.Layout, ir.Buffer]
338
+
339
+
340
+ @dataclasses.dataclass
341
+ class TensorMeta:
342
+ device: torch.device
343
+ dtype: torch.dtype
344
+ sizes: torch._prims_common.ShapeType
345
+ strides: torch._prims_common.StrideType
346
+ offset: int
347
+
348
+ @classmethod
349
+ def from_irnodes(
350
+ cls, irnodes: Union[LayoutOrBuffer, Sequence[LayoutOrBuffer]]
351
+ ) -> Union[TensorMeta, List[TensorMeta]]:
352
+ if isinstance(irnodes, Sequence):
353
+ result: List[Any] = [cls.from_irnodes(x) for x in irnodes]
354
+ assert all(isinstance(x, TensorMeta) for x in result)
355
+ return result
356
+
357
+ node = irnodes
358
+ if isinstance(node, ir.Layout):
359
+ node = ir.Buffer("fake", node)
360
+
361
+ dtype = node.get_dtype()
362
+ assert dtype is not None
363
+
364
+ return TensorMeta(
365
+ device=node.get_device(),
366
+ dtype=dtype,
367
+ sizes=V.graph.sizevars.size_hints(
368
+ node.get_size(),
369
+ fallback=config.unbacked_symint_fallback,
370
+ ),
371
+ strides=V.graph.sizevars.size_hints(
372
+ node.get_stride(),
373
+ fallback=config.unbacked_symint_fallback,
374
+ ),
375
+ offset=V.graph.sizevars.size_hint(
376
+ node.get_layout().offset,
377
+ fallback=config.unbacked_symint_fallback,
378
+ ),
379
+ )
380
+
381
+ def to_tensor(self) -> torch.Tensor:
382
+ return rand_strided(
383
+ self.sizes,
384
+ self.strides,
385
+ device=self.device,
386
+ dtype=self.dtype,
387
+ extra_size=self.offset,
388
+ )
389
+
390
+
391
+ @dataclasses.dataclass
392
+ class BenchmarkRequest:
393
+ """
394
+ Only handles Triton template benchmarks for now. Extern kernel benchmarks
395
+ can be done inside the same process since they usually don't cause crashes.
396
+ """
397
+
398
+ def __init__(
399
+ self,
400
+ kernel_name: str,
401
+ input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
402
+ output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
403
+ extra_args: Iterable[Any],
404
+ ):
405
+ # the kernel name defined in the module
406
+ self.kernel_name = kernel_name
407
+
408
+ if isinstance(input_tensor_meta, TensorMeta):
409
+ input_tensor_meta = [input_tensor_meta]
410
+ self.input_tensor_meta = input_tensor_meta
411
+
412
+ if isinstance(output_tensor_meta, (tuple, list)):
413
+ assert len(output_tensor_meta) == 1
414
+ output_tensor_meta = output_tensor_meta[0]
415
+ self.output_tensor_meta = output_tensor_meta
416
+
417
+ self.extra_args = extra_args
418
+
419
+ def make_run_fn(
420
+ self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
421
+ ) -> Callable[[], None]:
422
+ raise NotImplementedError()
423
+
424
+ def cleanup_run_fn(self) -> None:
425
+ pass
426
+
427
+ def benchmark(
428
+ self,
429
+ *input_tensors: torch.Tensor,
430
+ output_tensor: Optional[torch.Tensor] = None,
431
+ ) -> float:
432
+ debug = log.isEnabledFor(logging.DEBUG)
433
+ if debug:
434
+ start_ts = time.time()
435
+
436
+ # create args and out tensor
437
+ if output_tensor is None:
438
+ assert len(input_tensors) == 0
439
+ input_tensors = tuple(x.to_tensor() for x in self.input_tensor_meta)
440
+ output_tensor = self.output_tensor_meta.to_tensor()
441
+
442
+ if debug:
443
+ create_tensor_elapse = time.time() - start_ts
444
+ start_ts = time.time()
445
+
446
+ fn = self.make_run_fn(*input_tensors, output_tensor=output_tensor)
447
+
448
+ if debug:
449
+ load_elapse = time.time() - start_ts
450
+ start_ts = time.time()
451
+
452
+ out = do_bench(fn)
453
+ torch.cuda.synchronize() # shake out any CUDA errors
454
+
455
+ if debug:
456
+ bench_elapse = time.time() - start_ts
457
+ log.debug(
458
+ "InChildProcess %s: load %f, create tensor %f, bench %f",
459
+ str(self),
460
+ load_elapse,
461
+ create_tensor_elapse,
462
+ bench_elapse,
463
+ )
464
+ self.cleanup_run_fn()
465
+ return out
466
+
467
+
468
+ class TestBenchmarkRequest(BenchmarkRequest):
469
+ """
470
+ Supports unit testing. Defined in this file so that the TuningProcess
471
+ sub-process knows how to unpickle these objects.
472
+ """
473
+
474
+ def __init__(self, value: Optional[float] = None) -> None:
475
+ self.value = value
476
+
477
+ def benchmark(
478
+ self, *input_tensors: torch.Tensor, output_tensor: Optional[torch.Tensor] = None
479
+ ) -> float:
480
+ if self.value is None:
481
+ raise Exception("Failed to run")
482
+ return self.value
483
+
484
+
485
+ class TritonBenchmarkRequest(BenchmarkRequest):
486
+ def __init__(
487
+ self,
488
+ kernel_name: str,
489
+ input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
490
+ output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
491
+ extra_args: Iterable[Any],
492
+ module_path: str, # the path of the module defining the triton kernel
493
+ module_cache_key: str,
494
+ grid: List[int],
495
+ num_stages: int,
496
+ num_warps: int,
497
+ ):
498
+ super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
499
+ self.module_path = module_path
500
+ self.module_cache_key = module_cache_key
501
+ self.grid = grid
502
+ self.num_stages = num_stages
503
+ self.num_warps = num_warps
504
+
505
+ def make_run_fn(
506
+ self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
507
+ ) -> Callable[[], None]:
508
+ mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path)
509
+ log.debug(
510
+ "benchmark module key: %s, path: %s",
511
+ self.module_cache_key,
512
+ self.module_path,
513
+ )
514
+
515
+ run_method = getattr(mod, self.kernel_name).run
516
+
517
+ return functools.partial(
518
+ run_method,
519
+ *input_tensors,
520
+ output_tensor,
521
+ *self.extra_args,
522
+ grid=self.grid,
523
+ num_stages=self.num_stages,
524
+ num_warps=self.num_warps,
525
+ stream=torch.cuda.current_stream().cuda_stream,
526
+ )
527
+
528
+ def __str__(self) -> str:
529
+ return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}"
530
+
531
+
532
+ class CUDABenchmarkRequest(BenchmarkRequest):
533
+ def __init__(
534
+ self,
535
+ kernel_name: str,
536
+ input_tensor_meta: Union[TensorMeta, List[TensorMeta]],
537
+ output_tensor_meta: Union[TensorMeta, List[TensorMeta]],
538
+ extra_args: Iterable[Any],
539
+ source_code: str,
540
+ ):
541
+ super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
542
+ self.source_code = source_code
543
+ self.workspace_size: int = 0
544
+ self.workspace: Optional[torch.Tensor] = None
545
+ self.DLL: Optional[DLLWrapper] = None
546
+ self.hash_key: str = ""
547
+ self.source_file: str = ""
548
+ self.hash_key, self.source_file = CUDACodeCache.write(self.source_code, "so")
549
+
550
+ def make_run_fn(
551
+ self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor
552
+ ) -> Callable[[], None]:
553
+ self.DLL, self.hash_key, self.source_file = CUDACodeCache.load(
554
+ self.source_code, "so"
555
+ )
556
+ args = [
557
+ c_void_p(tensor.data_ptr())
558
+ for tensor in list(input_tensors) + [output_tensor]
559
+ ]
560
+ log.debug(
561
+ "make_run_fn: self.kernel_name=%s, self.source_file=%s, self.hash_key=%s, self.DLL=%s, args=%s, self.extra_args=%s",
562
+ self.kernel_name,
563
+ self.source_file,
564
+ self.hash_key,
565
+ self.DLL,
566
+ args,
567
+ self.extra_args,
568
+ )
569
+ run_method = getattr(self.DLL, self.kernel_name)
570
+ stream_ptr = c_void_p(torch.cuda.current_stream().cuda_stream)
571
+
572
+ # Retrieve workspace_size and initialize workspace.
573
+ c_workspace_size = c_size_t()
574
+ run_method(
575
+ *args, # input ptrs and output ptrs
576
+ *self.extra_args,
577
+ byref(
578
+ c_workspace_size
579
+ ), # set workspace size ptr to retrieve workspace size
580
+ None, # null workspace ptr
581
+ stream_ptr,
582
+ )
583
+ self.workspace_size = c_workspace_size.value
584
+ # TODO: Support non-zero workspace_size.
585
+ assert self.workspace_size == 0, (
586
+ "Things need to be fixed to support non-zero workspace_size: "
587
+ "1) max autotune cache needs to store workspace size; "
588
+ "2) memory allocation needs to allocate / deallocate workspace correctly; "
589
+ )
590
+
591
+ # Generate partial function.
592
+ return functools.partial(
593
+ run_method,
594
+ *args,
595
+ *self.extra_args,
596
+ None, # null workspace size ptr
597
+ None, # set workspace ptr, TODO: update it to a real ptr if workspace_size > 0
598
+ stream_ptr,
599
+ )
600
+
601
+ def cleanup_run_fn(self) -> None:
602
+ if self.DLL is not None:
603
+ self.DLL.close()
604
+ self.workspace = None
605
+
606
+ def __str__(self) -> str:
607
+ return f"{self.kernel_name=}, {self.source_file=}, {self.hash_key=}"
608
+
609
+
610
+ def benchmark_in_sub_process(
611
+ choices: List[TritonTemplateCaller],
612
+ ) -> Dict[TritonTemplateCaller, float]:
613
+ """
614
+ Do benchmarking in a subprocess and return the perf number (latency).
615
+ """
616
+ return tuning_pool.benchmark(choices)
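TuningProcess and TuningProcessPool above implement a common benchmarking-isolation pattern: a "spawn"-context child process (fork does not play well with the CUDA runtime), a request queue plus a response queue, a Ping/Pong warm-up exchange, and None as the shutdown sentinel. A minimal standalone sketch of that pattern (illustrative names only, not part of the files in this commit):

    import multiprocessing as mp

    class Ping:  # warm-up request
        pass

    class Pong:  # warm-up reply
        pass

    def workloop(request_queue, response_queue):
        while True:
            obj = request_queue.get()
            if obj is None:            # sentinel: the parent asked us to exit
                break
            if isinstance(obj, Ping):
                response_queue.put(Pong())
            else:
                # the real code would call BenchmarkRequest.benchmark() here
                response_queue.put(obj)

    if __name__ == "__main__":
        ctx = mp.get_context("spawn")   # CUDA state does not survive fork()
        req, resp = ctx.Queue(), ctx.Queue()
        proc = ctx.Process(target=workloop, args=(req, resp))
        proc.start()

        req.put(Ping())                 # warm up and verify the child is alive
        assert isinstance(resp.get(), Pong)

        req.put(None)                   # ask the child to terminate
        proc.join()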
env-llmeval/lib/python3.10/site-packages/torch/_inductor/bounds.py ADDED
@@ -0,0 +1,122 @@
1
+ import operator
2
+ from functools import partial
3
+ from typing import Any, Callable, Dict
4
+
5
+ from sympy import Expr
6
+
7
+ import torch
8
+ from torch.utils._sympy.value_ranges import bound_sympy, ValueRangeAnalysis, ValueRanges
9
+ from .ir import InterpreterShim, LoopBody, LoopBodyBlock
10
+ from .utils import cache_on_self, dominated_nodes
11
+ from .virtualized import V
12
+
13
+
14
+ class BoundVars:
15
+ """
16
+ Performs Value Range Analysis on LoopBody's fx graph by calling BoundVars.run()
17
+ It exposes the ranges of the nodes in the `bounds` variable
18
+
19
+ Note: a current limitation of this analysis is that it works only on a per-loop basis.
20
+ We should be able to propagate the bounds across the whole graph. This may benefit
21
+ the case where a bounded variable is returned by a kernel and fed into another.
22
+ """
23
+
24
+ def __init__(self, loop_body: LoopBody) -> None:
25
+ self.loop_body = loop_body
26
+ self.replacement_vals = {
27
+ k: ValueRanges(0, v - 1)
28
+ if (isinstance(v, int) or v.is_number)
29
+ else bound_sympy(v)
30
+ for k, v in loop_body.var_ranges.items()
31
+ }
32
+ # avoid computing these values, pessimistically assume that they are unbounded
33
+ self.unbounded_vars = dominated_nodes(
34
+ node
35
+ for node in self.loop_body.get_nodes()
36
+ if node.target in ["load", "reduction", operator.getitem]
37
+ or "masked_subblock" in node.target
38
+ )
39
+ # To access this variable call `get_bounds()`
40
+ self._bounds: Dict[torch.fx.Node, ValueRanges] = {}
41
+
42
+ @cache_on_self
43
+ def get_bounds(self) -> Dict[torch.fx.Node, ValueRanges]:
44
+ submodules = self.swap_submodules(self.loop_body.submodules)
45
+
46
+ # Initialize the environment with the unbounded variables
47
+ for node in self.unbounded_vars:
48
+ # we need to evaluate masked_subblock to recurse, and we need to set indirect values
49
+ if not isinstance(node.target, str) or (
50
+ "masked_subblock" not in node.target
51
+ and "set_indirect" not in node.target
52
+ ):
53
+ self._bounds[node] = ValueRanges.unknown()
54
+
55
+ with V.set_ops_handler(ValueRangeAnalysis()):
56
+ interpreter = InterpreterShim(self.loop_body.root_block.graph, submodules)
57
+ interpreter.run(V.get_ops_handler(), initial_env=self._bounds)
58
+ return self._bounds
59
+
60
+ def swap_submodules(
61
+ self, submodules: Dict[str, Callable[..., Any]]
62
+ ) -> Dict[str, Callable[..., ValueRanges]]:
63
+ result: Dict[str, Callable[..., ValueRanges]] = {}
64
+ for key in submodules.keys():
65
+ if key == "get_index":
66
+ result[key] = self.get_index
67
+ elif "masked_subblock" in key:
68
+ subblock = self.loop_body.subblocks[key]
69
+ # The result within the lambda will reference to the final
70
+ # set of modules at the end of the for-loop as it stores a reference to it
71
+
72
+ # bind subblock in a function because python lambdas close over by reference
73
+ # moving the lambda out of make_fn would close over the reference to subblock,
74
+ # so all lambdas would have the same subblock reference that is the final
75
+ # subblock in the loop
76
+ def make_fn(subblock):
77
+ return lambda mask, value: self.masked_subblock(
78
+ subblock, self._bounds, mask, value, result
79
+ )
80
+
81
+ result[key] = make_fn(subblock)
82
+
83
+ else:
84
+ assert "set_indirect" in key
85
+ idx = int(key[len("set_indirect") :])
86
+ var = self.loop_body.indirect_vars[idx]
87
+ indirect = partial(self.set_indirect, var)
88
+ result[key] = indirect
89
+
90
+ return result
91
+
92
+ def masked_subblock(
93
+ self,
94
+ subblock: LoopBodyBlock,
95
+ env: Dict[torch.fx.Node, ValueRanges],
96
+ mask: Any,
97
+ value: Any,
98
+ submodules: Dict[str, Callable[..., Any]],
99
+ ) -> ValueRanges:
100
+ interp = InterpreterShim(subblock.graph, submodules)
101
+ interp.run(V.get_ops_handler(), initial_env=env)
102
+ output = [node for node in subblock.graph.nodes if node.target == "output"]
103
+ assert len(output) == 1
104
+ # dont bother unioning with value since the load from buffer will be
105
+ # pessimistically assumed to be inf anyway
106
+ return interp.env[output[0]]
107
+
108
+ def set_indirect(self, old: Expr, new: ValueRanges) -> ValueRanges:
109
+ assert isinstance(new, ValueRanges)
110
+ self.replacement_vals[old] = new
111
+ return new
112
+
113
+ def get_index(self, name: Expr) -> ValueRanges:
114
+ expr = self.loop_body.indexing_exprs[name]
115
+ bound = self.replacement_vals.get(expr)
116
+ if bound is None:
117
+ bound = bound_sympy(expr, self.replacement_vals)
118
+ # The following assertion is true at the time of this writing
119
+ # We don't assert it so as to avoid executing bound_sympy when bound is not None
120
+ # assert bound is None or bound == bound_sympy(expr, self.replacement_vals)
121
+ self.replacement_vals[name] = bound
122
+ return bound
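BoundVars above performs interval (value-range) propagation over the ops of a LoopBody: each loop variable starts as a [0, size - 1] range and ranges are combined as the expression is evaluated. A minimal standalone sketch of the idea, independent of torch (all names are illustrative):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class ValueRange:
        lower: int
        upper: int

        def __add__(self, other: "ValueRange") -> "ValueRange":
            return ValueRange(self.lower + other.lower, self.upper + other.upper)

        def __mul__(self, other: "ValueRange") -> "ValueRange":
            corners = [a * b for a in (self.lower, self.upper)
                       for b in (other.lower, other.upper)]
            return ValueRange(min(corners), max(corners))

    # A loop variable i in range(64), a stride of 4, and an offset of 2:
    i = ValueRange(0, 63)        # analogous to ValueRanges(0, v - 1) in __init__ above
    index = i * ValueRange(4, 4) + ValueRange(2, 2)
    print(index)                 # ValueRange(lower=2, upper=254), so index < 256 is safe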
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codecache.py ADDED
@@ -0,0 +1,2492 @@
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import copyreg
5
+ import dataclasses
6
+ import functools
7
+ import hashlib
8
+ import importlib
9
+ import io
10
+ import json
11
+ import logging
12
+ import multiprocessing
13
+ import os
14
+ import pathlib
15
+ import pickle
16
+ import pkgutil
17
+ import platform
18
+ import re
19
+ import shlex
20
+ import shutil
21
+ import signal
22
+ import subprocess
23
+ import sys
24
+ import sysconfig
25
+ import tempfile
26
+ import threading
27
+ import warnings
28
+ import weakref
29
+ from bisect import bisect_right
30
+ from concurrent.futures import Future, ProcessPoolExecutor, ThreadPoolExecutor
31
+ from copy import copy
32
+ from ctypes import c_void_p, cdll, CDLL
33
+ from dataclasses import field
34
+ from functools import partial
35
+ from importlib import abc
36
+ from pathlib import Path
37
+ from threading import Thread
38
+ from time import sleep, time
39
+ from types import ModuleType
40
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union
41
+
42
+ import torch
43
+
44
+ from torch._dynamo.device_interface import (
45
+ get_interface_for_device,
46
+ get_registered_device_interfaces,
47
+ )
48
+ from torch._dynamo.utils import counters
49
+ from torch._inductor import config, exc
50
+ from torch._inductor.codegen.cuda import cuda_env
51
+ from torch._inductor.utils import cache_dir, developer_warning, is_linux
52
+ from torch._prims_common import suggest_memory_format
53
+ from torch.fx.experimental.symbolic_shapes import has_hint, hint_int, ShapeEnv
54
+
55
+ if TYPE_CHECKING:
56
+ from torch._inductor.graph import GraphLowering
57
+ from torch._inductor.select_algorithm import ChoiceCaller
58
+
59
+ from torch.hub import _Faketqdm, tqdm
60
+
61
+ _HERE = os.path.abspath(__file__)
62
+ _TORCH_PATH = os.path.dirname(os.path.dirname(_HERE))
63
+
64
+ if config.is_fbcode():
65
+ from triton.fb import build_paths
66
+ from triton.fb.build import _run_build_command
67
+
68
+ from torch._inductor.fb.utils import (
69
+ log_global_cache_errors,
70
+ log_global_cache_stats,
71
+ log_global_cache_vals,
72
+ use_global_cache,
73
+ )
74
+ else:
75
+
76
+ def log_global_cache_errors(*args, **kwargs):
77
+ pass
78
+
79
+ def log_global_cache_stats(*args, **kwargs):
80
+ pass
81
+
82
+ def log_global_cache_vals(*args, **kwargs):
83
+ pass
84
+
85
+ def use_global_cache() -> bool:
86
+ return False
87
+
88
+
89
+ LOCK_TIMEOUT = 600
90
+
91
+ # timing metrics for time spent in the compilation
92
+ _cumulative_compile_time = 0.0
93
+ _t0 = None
94
+
95
+
96
+ def _compile_start() -> None:
97
+ global _t0
98
+ if _t0 is None:
99
+ _t0 = time()
100
+
101
+
102
+ def _compile_end() -> None:
103
+ global _cumulative_compile_time, _t0
104
+ if _t0 is not None:
105
+ t1 = time()
106
+ _cumulative_compile_time += t1 - _t0
107
+ _t0 = None
108
+ # print("CUMULATIVE COMPILE TIME", _cumulative_compile_time)
109
+
110
+
111
+ log = logging.getLogger(__name__)
112
+
113
+
114
+ def cpp_wrapper_cache_dir(name: str) -> str:
115
+ cu_str = (
116
+ "cpu"
117
+ if torch.version.cuda is None
118
+ else f'cu{torch.version.cuda.replace(".", "")}'
119
+ )
120
+ python_version = f"py{sys.version_info.major}{sys.version_info.minor}"
121
+ build_folder = f"{python_version}_{cu_str}"
122
+
123
+ cpp_wrapper_dir = os.path.join(cache_dir(), build_folder)
124
+ cpp_wrapper_build_directory = os.path.join(cpp_wrapper_dir, name)
125
+ os.makedirs(cpp_wrapper_build_directory, exist_ok=True)
126
+ return cpp_wrapper_build_directory
127
+
128
+
129
+ def get_cpp_wrapper_cubin_path_name():
130
+ return "cubin_path" if torch.version.hip is None else "hsaco_path"
131
+
132
+
133
+ class CacheBase:
134
+ @staticmethod
135
+ @functools.lru_cache(None)
136
+ def get_system() -> Dict[str, Any]:
137
+ try:
138
+ import triton
139
+
140
+ triton_version = triton.__version__
141
+ except ModuleNotFoundError:
142
+ triton_version = None
143
+
144
+ try:
145
+ system: Dict[str, Any] = {
146
+ "device": {
147
+ "name": torch.cuda.get_device_properties(
148
+ torch.cuda.current_device()
149
+ ).name,
150
+ },
151
+ "version": {
152
+ "cuda": torch.version.cuda,
153
+ "triton": triton_version,
154
+ },
155
+ "other": {
156
+ "allow_tf32": torch.backends.cuda.matmul.allow_tf32,
157
+ },
158
+ }
159
+ except (AssertionError, RuntimeError):
160
+ # If cuda is not installed, none of the above config is relevant.
161
+ system = {}
162
+
163
+ system["hash"] = hashlib.sha256(
164
+ json.dumps(system, sort_keys=True).encode("utf-8")
165
+ ).hexdigest()
166
+
167
+ return system
168
+
169
+ @staticmethod
170
+ @functools.lru_cache(None)
171
+ def get_local_cache_path() -> Path:
172
+ return Path(os.path.join(cache_dir(), "cache", CacheBase.get_system()["hash"]))
173
+
174
+ @staticmethod
175
+ @functools.lru_cache(None)
176
+ def get_global_cache_path() -> Optional[Path]:
177
+ return (
178
+ Path(os.path.join(config.global_cache_dir, CacheBase.get_system()["hash"]))
179
+ if config.global_cache_dir is not None
180
+ else None
181
+ )
182
+
183
+ def __init__(self) -> None:
184
+ if not torch.cuda.is_available():
185
+ return
186
+
187
+ self.system = CacheBase.get_system()
188
+
189
+ self.local_cache_path = CacheBase.get_local_cache_path()
190
+ self.global_cache_path = CacheBase.get_global_cache_path()
191
+
192
+ def get_local_cache(self) -> Dict[str, Any]:
193
+ if not self.local_cache_path.is_file():
194
+ return {}
195
+ with open(self.local_cache_path) as local_cache_fp:
196
+ local_cache = json.load(local_cache_fp)
197
+ return local_cache["cache"]
198
+
199
+ def update_local_cache(self, local_cache: Dict[str, Any]) -> None:
200
+ if not os.path.exists(self.local_cache_path.parent):
201
+ os.makedirs(self.local_cache_path.parent, exist_ok=True)
202
+ write_atomic(
203
+ str(self.local_cache_path),
204
+ json.dumps({"system": self.system, "cache": local_cache}, indent=4),
205
+ )
206
+
207
+
208
+ class LocalCache(CacheBase):
209
+ def lookup(self, *keys: str) -> Optional[Dict[str, Any]]:
210
+ cache = self.get_local_cache()
211
+
212
+ sub_cache = cache
213
+ for key in keys:
214
+ if key in cache:
215
+ sub_cache = cache[key]
216
+ else:
217
+ return None
218
+
219
+ return sub_cache
220
+
221
+ def set_value(self, *keys: str, value: Any) -> None:
222
+ cache = self.get_local_cache()
223
+
224
+ sub_cache = cache
225
+ for key in keys[0:-1]:
226
+ sub_cache.setdefault(key, {})
227
+ sub_cache = sub_cache[key]
228
+ sub_cache[keys[-1]] = value
229
+
230
+ self.update_local_cache(cache)
231
+
232
+
233
+ class PersistentCache(CacheBase):
234
+ @functools.lru_cache(None)
235
+ def get_global_cache(self):
236
+ if self.global_cache_path is None or not self.global_cache_path.is_file():
237
+ return {}
238
+ with open(self.global_cache_path) as global_cache_fp:
239
+ global_cache = json.load(global_cache_fp)
240
+ return global_cache["cache"]
241
+
242
+ def lookup(
243
+ self,
244
+ choices: List[ChoiceCaller],
245
+ name: str,
246
+ inputs: str,
247
+ benchmark: Callable[[Any], Dict[ChoiceCaller, float]],
248
+ ) -> Dict[ChoiceCaller, float]:
249
+ """
250
+ Check to see if we have benchmarked the given choice callers. For each
251
+ choice caller:
252
+
253
+ 1. Check global_cache[name][inputs][choice], return benchmark if cached.
254
+ 2. Check local_cache[name][inputs][choice], return benchmark if cached.
255
+ 3.
256
+ a. `max_autotune_gemm=True`: benchmark the choice, update
257
+ local_cache[name][inputs][choice], and return the benchmark.
258
+ b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing.
259
+ """
260
+
261
+ log_stats = partial(log_global_cache_stats, self.system, name, inputs)
262
+ log_vals = partial(log_global_cache_vals, self.system, name, inputs)
263
+ log_errors = partial(log_global_cache_errors, self.system, name, inputs)
264
+ timings = {}
265
+
266
+ def check_cache(cache, callback=None) -> bool:
267
+ """Check if `cache` contains data for all the choices"""
268
+ hit = True
269
+ for choice in choices:
270
+ choice_hash = choice.hash_key()
271
+ if choice_hash in cache.get(name, {}).get(inputs, {}):
272
+ # cache hit
273
+ timings[choice] = cache[name][inputs][choice_hash]
274
+ else:
275
+ # cache miss
276
+ hit = False
277
+ break
278
+ if callback:
279
+ callback(cached=hit)
280
+ return hit
281
+
282
+ if config.max_autotune or config.max_autotune_gemm:
283
+ local_cache = self.get_local_cache()
284
+ # check local cache first since it is data specific to the current machine
285
+ if not check_cache(local_cache) and not (
286
+ use_global_cache()
287
+ and check_cache(self.get_global_cache(), callback=log_stats)
288
+ ):
289
+ try:
290
+ # re-benchmark everything to try to get consistent numbers from the same machine
291
+ timings = benchmark(choices)
292
+ assert all(choice in timings for choice in choices)
293
+
294
+ local_cache.setdefault(name, {})
295
+ local_cache[name].setdefault(inputs, {})
296
+ for choice, timing in timings.items():
297
+ local_cache[name][inputs][choice.hash_key()] = timing
298
+ except RuntimeError as e:
299
+ # catch and log autotuning failures
300
+ log_errors(e)
301
+ raise e
302
+
303
+ self.update_local_cache(local_cache)
304
+
305
+ timings_to_log = {
306
+ choice.hash_key(): timings[choice] for choice in choices
307
+ }
308
+ log_vals(timings_to_log)
309
+ elif use_global_cache():
310
+ # only check global cache, not local one
311
+ check_cache(self.get_global_cache(), callback=log_stats)
312
+ # may have a partial cache hit, where not everything is benchmarked
313
+
314
+ return timings
315
+
316
+
317
+ def get_lock_dir() -> str:
318
+ lock_dir = os.path.join(cache_dir(), "locks")
319
+ if not os.path.exists(lock_dir):
320
+ os.makedirs(lock_dir, exist_ok=True)
321
+ return lock_dir
322
+
323
+
324
+ def sha256_hash(data: bytes) -> str:
325
+ # [:51] to strip off the "Q====" suffix common to every hash value.
326
+ return base64.b32encode(hashlib.sha256(data).digest())[:51].decode("utf-8").lower()
327
+
328
+
329
+ def code_hash(code: Union[str, bytes], extra: str = ""):
330
+ hashing_str = code if isinstance(code, bytes) else code.encode("utf-8")
331
+ if extra != "":
332
+ hashing_str = hashing_str + b"||" + extra.encode("utf-8")
333
+ return "c" + sha256_hash(hashing_str)
334
+
335
+
336
+ def get_path(
337
+ basename: str, extension: str, specified_dir: str = ""
338
+ ) -> Tuple[str, str, str]:
339
+ if specified_dir:
340
+ if os.path.isabs(specified_dir):
341
+ subdir = specified_dir
342
+ else:
343
+ subdir = os.path.join(cache_dir(), specified_dir)
344
+ else:
345
+ subdir = os.path.join(cache_dir(), basename[1:3])
346
+ path = os.path.join(subdir, f"{basename}.{extension}")
347
+ return basename, subdir, path
348
+
349
+
350
+ def get_hash(content: Union[str, bytes], extra: str = "", hash_type: str = "code"):
351
+ if hash_type == "code":
352
+ return code_hash(content, extra)
353
+ if hash_type in ["cubin", "hsaco"]:
354
+ return code_hash(repr(content))
355
+ raise AssertionError(f"Unknown hash type {hash_type}")
356
+
357
+
358
+ def write(
359
+ content: Union[str, bytes],
360
+ extension: str,
361
+ extra: str = "",
362
+ hash_type: str = "code",
363
+ specified_dir: str = "",
364
+ ) -> Tuple[str, str]:
365
+ # use stripped content to compute the hash so we don't end up with different
366
+ # hashes just because the content begins/ends with a different number of
367
+ # spaces.
368
+ key: str = get_hash(content.strip(), extra, hash_type)
369
+ basename, subdir, path = get_path(key, extension, specified_dir)
370
+ if not os.path.exists(subdir):
371
+ os.makedirs(subdir, exist_ok=True)
372
+ if not os.path.exists(path):
373
+ write_atomic(path, content)
374
+ return basename, path
375
+
376
+
377
+ def write_atomic(path: str, content: Union[str, bytes]) -> None:
378
+ # Write into temporary file first to avoid conflicts between threads
379
+ # Avoid using a named temporary file, as those have restricted permissions
380
+ assert isinstance(
381
+ content, (str, bytes)
382
+ ), "Only strings and byte arrays can be saved in the cache"
383
+ path = pathlib.Path(path)
384
+ tmp_path = path.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
385
+ write_mode = "w" if isinstance(content, str) else "wb"
386
+ with tmp_path.open(write_mode) as f:
387
+ f.write(content)
388
+ tmp_path.rename(path)
389
+
390
+
391
+ @dataclasses.dataclass
392
+ class TensorMetadata:
393
+ """
394
+ The Tensor metadata relevant when hashing FxGraph cache keys.
395
+ """
396
+
397
+ dtype: torch.dtype
398
+ shape: torch.Size
399
+ stride: Tuple[Any, ...]
400
+ device: torch.device
401
+ layout: torch.layout
402
+ memory_format: Optional[torch.memory_format]
403
+ storage_offset: int
404
+ requires_grad: bool
405
+ is_quantized: bool
406
+ is_conj: bool
407
+ is_neg: bool
408
+ is_coalesced: bool
409
+ dense_dim: int
410
+ sparse_dim: int
411
+
412
+
413
+ @dataclasses.dataclass
414
+ class TensorMetadataAndValues:
415
+ """
416
+ TensorMetadata plus the elements as a list of raw values.
417
+ Used for hashing inlined constants.
418
+ """
419
+
420
+ tensor_metadata: TensorMetadata
421
+ values: List[Any]
422
+
423
+
424
+ def extract_tensor_metadata(t: torch.Tensor) -> TensorMetadata:
425
+ """
426
+ Extract the TensorMetadata of a tensor.
427
+ """
428
+ memory_format: Optional[torch.memory_format] = suggest_memory_format(t)
429
+ if not t.is_contiguous(memory_format=memory_format):
430
+ memory_format = None
431
+
432
+ return TensorMetadata(
433
+ dtype=t.dtype,
434
+ shape=t.shape,
435
+ stride=t.stride() if t.layout == torch.strided else (),
436
+ device=t.device,
437
+ layout=t.layout,
438
+ memory_format=memory_format,
439
+ storage_offset=t.storage_offset(),
440
+ requires_grad=t.requires_grad,
441
+ is_quantized=t.is_quantized,
442
+ is_conj=t.is_conj(),
443
+ is_neg=t.is_neg(),
444
+ is_coalesced=t.is_coalesced() if t.is_sparse else False,
445
+ dense_dim=t.dense_dim() if t.is_sparse else False,
446
+ sparse_dim=t.sparse_dim() if t.is_sparse else False,
447
+ )
448
+
449
+
450
+ def _ident(x: Any) -> Any:
451
+ return x
452
+
453
+
454
+ def _reduce_fake_tensor(t):
455
+ """
456
+ See FxGraphCachePickler. Custom reducer to pickle FakeTensors.
457
+ """
458
+ metadata = extract_tensor_metadata(t)
459
+ return (_ident, (metadata,))
460
+
461
+
462
+ def _reduce_tensor(t):
463
+ """
464
+ See FxGraphCachePickler. Custom reducer to pickle Tensors.
465
+ """
466
+ # If we see tensors, we know they're constants stored as attributes on
467
+ # the GraphModule. See tensor lowering; small constants are inlined. If
468
+ # we see a small tensor, therefore, no reference will ultimately remain
469
+ # in the generated code. So we need to include its value in the cache key.
470
+ # Large constants are effectively treated as inputs and we consider only
471
+ # their metadata.
472
+ metadata = extract_tensor_metadata(t)
473
+ if len(t.shape) == 0 or torch._inductor.graph.GraphLowering.can_inline_constant(t):
474
+ return (_ident, (TensorMetadataAndValues(metadata, t.tolist()),))
475
+ else:
476
+ return (_ident, (metadata,))
477
+
478
+
479
+ def _reduce_symint(s):
480
+ """
481
+ See FxGraphCachePickler. Custom reducer to pickle SymInts.
482
+ """
483
+ # For hashing purposes, we only care about the name of the symbol and
484
+ # not the backed value. We evaluate guards stored with a cached graph
485
+ # to ensure a cached entity with SymInt args is safe to reuse.
486
+ return (_ident, (str(s),))
487
+
488
+
489
+ class FxGraphCachePickler(pickle.Pickler):
490
+ """
491
+ Custom pickler to customize the pickling of some objects (Tensors), only for the
492
+ purpose of computing a hash for keying into the FxGraphCache. Tensors contain
493
+ objects that don't pickle and/or vary between runs, and we want to capture the
494
+ data that allow us to compute a stable, but safe hash.
495
+ """
496
+
497
+ dispatch_table = copyreg.dispatch_table.copy()
498
+ dispatch_table[torch._subclasses.fake_tensor.FakeTensor] = _reduce_fake_tensor
499
+ dispatch_table[torch.Tensor] = _reduce_tensor
500
+ dispatch_table[torch.SymInt] = _reduce_symint
501
+
502
+ @staticmethod
503
+ def dumps(obj) -> bytes:
504
+ """
505
+ Pickle an object using the FxGraphCachePickler.
506
+ """
507
+ with io.BytesIO() as stream:
508
+ pickler = FxGraphCachePickler(stream)
509
+ pickler.dump(obj)
510
+ return stream.getvalue()
511
+
512
+ @staticmethod
513
+ def get_hash(obj: Any) -> str:
514
+ """
515
+ Serialize an object using the FxGraphCachePickler and return a hash
516
+ of the pickled object.
517
+ """
518
+ serialized_data = FxGraphCachePickler.dumps(obj)
519
+ return sha256_hash(serialized_data)
520
+
521
+
522
+ @functools.lru_cache(None)
523
+ def get_inductor_code_hash() -> bytes:
524
+ """
525
+ Compute a hash of all inductor code modules. Used by the FxGraph cache
526
+ so any inductor code changes would result in new cache keys.
527
+ """
528
+ inductor_root = os.path.dirname(__file__)
529
+
530
+ contents: Dict[str, bytes] = {}
531
+ for lib in pkgutil.iter_modules([inductor_root]):
532
+ spec = lib.module_finder.find_spec(lib.name, None)
533
+ assert spec is not None
534
+ module = spec.origin
535
+ assert module is not None
536
+ with open(module, "rb") as f:
537
+ contents[module] = f.read()
538
+
539
+ return hashlib.sha256(pickle.dumps(contents)).digest()
540
+
541
+
542
+ @dataclasses.dataclass
543
+ class OrderedSetHolder:
544
+ """
545
+ See FxGraphHashDetails. Holds a sorted list to support stable hashing
546
+ of set kwargs.
547
+ """
548
+
549
+ items: List[Any]
550
+
551
+
552
+ class FxGraphHashDetails:
553
+ """
554
+ Object to capture all the details for a compiled FX graph relevant to computing
555
+ a safe and stable cache key.
556
+ """
557
+
558
+ # Excluded kwargs params that are not stable between runs
559
+ EXCLUDED_KWARGS = ["graph_id"]
560
+
561
+ def __init__(
562
+ self,
563
+ gm: torch.fx.GraphModule,
564
+ example_inputs: List[torch.Tensor],
565
+ fx_kwargs: Dict[str, Any],
566
+ ):
567
+ self.gm = gm
568
+ self.example_inputs = example_inputs
569
+
570
+ # Order kwargs so hashing is stable to changes in kwarg order.
571
+ self.fx_kwargs = {}
572
+ for k in sorted(fx_kwargs):
573
+ if k not in self.EXCLUDED_KWARGS:
574
+ if type(fx_kwargs[k]) is set:
575
+ # Special case to handle set params. Python sets can't be
576
+ # ordered, so sort the elements and store them in a proxy.
577
+ self.fx_kwargs[k] = OrderedSetHolder(sorted(fx_kwargs[k]))
578
+ else:
579
+ self.fx_kwargs[k] = fx_kwargs[k]
580
+
581
+ # Also hash on various system info (including the triton compiler version), as
582
+ # well as the inductor configuration and code.
583
+ self.torch_version = torch.__version__
584
+ self.system_info = CacheBase.get_system()
585
+
586
+ self.inductor_config = config.save_config()
587
+ self.inductor_code_hash = get_inductor_code_hash()
588
+
589
+ def debug_str(self) -> str:
590
+ """
591
+ Get a printable string describing in more detail all the attributes
592
+ comprising this object. Useful for debugging when one graph hashes
593
+ to a different value than another.
594
+ """
595
+
596
+ def get_str(obj) -> str:
597
+ if isinstance(obj, torch.Tensor):
598
+ return str(extract_tensor_metadata(obj))
599
+ elif isinstance(obj, bytes):
600
+ return "<bytes>"
601
+ else:
602
+ return str(obj)
603
+
604
+ lines = []
605
+ for attr, obj in vars(self).items():
606
+ if isinstance(obj, list):
607
+ for ii in range(len(obj)):
608
+ h = FxGraphCachePickler.get_hash(obj[ii])
609
+ lines.append(f"[{h}] {attr}[{ii}]: {get_str(obj[ii])}")
610
+ elif isinstance(obj, dict):
611
+ for k, v in obj.items():
612
+ h = FxGraphCachePickler.get_hash(v)
613
+ lines.append(f"[{h}] {attr}[{k}]: {get_str(v)}")
614
+ else:
615
+ h = FxGraphCachePickler.get_hash(obj)
616
+ lines.append(f"[{h}] {attr}: {get_str(obj)}")
617
+ return "\n".join(lines)
618
+
619
+
620
+ def compiled_fx_graph_hash(
621
+ gm: torch.fx.GraphModule,
622
+ example_inputs: List[torch.Tensor],
623
+ fx_kwargs: Dict[str, Any],
624
+ ) -> str:
625
+ """
626
+ Generate a unique hash of the FX graph for caching.
627
+ """
628
+ details = FxGraphHashDetails(gm, example_inputs, fx_kwargs)
629
+ # The prefix distinguishes among the other kinds of objects we
630
+ # cache in this module.
631
+ key = "f" + FxGraphCachePickler.get_hash(details)
632
+ log.debug("FX graph cache hash details for key %s:\n%s", key, details.debug_str())
633
+ return key
634
+
635
+
636
+ class FxGraphCache:
637
+ """
638
+ Supports caching and reusing compiled Fx graphs.
639
+
640
+ The overall strategy is as follows:
641
+ - This cache stores entries on disk. When saving an entry, we can't
642
+ serialize callables (that could be C++, Triton, etc.), so we serialize
643
+ their own disk cache location. We then recreate the compiled artifact
644
+ after fetching from disk.
645
+ - For indexing the cache, we gather the fields relevant to identifying an
646
+ FxGraph (the graph module, graph inputs, system settings etc.) into an
647
+ FxGraphCacheDetails object, pickle it, and compute a hash for the key.
648
+ See FxGraphCachePickler.
649
+ - Among the metadata we store, we also include a guards expression that's
650
+ appropriate for validating any symbols for Tensor arguments that have
651
+ symbolic bounds. On cache lookup then, we evaluate those guards in the
652
+ current context to validate that a cached entry can be served.
653
+ - A given graph could have multiple compiled versions, corresponding to
654
+ different sets of guards. Therefore, we store cache entries in the form:
655
+ <temp dir>/<fx graph hash>/<serialized metadata>
656
+ - On lookup, we compute the key from the graph details, iterate over all
657
+ leaf files in the corresponding subdirectory, deserialize the entry, and
658
+ evaluate its guards expression. If the evaluation succeeds, we have a
659
+ cache hit. If it fails, we compile the graph and store a new entry.
660
+ - Finally, on a cache hit, we need to make sure any guards that would
661
+ have been created during compilation are added to the current context.
662
+ """
663
+
664
+ # TODO(masnesral): Investigate whether it's beneficial to store compiled graphs
665
+ # in an in-memory cache after loading from disk.
666
+ @staticmethod
667
+ def _get_tmp_dir() -> str:
668
+ """
669
+ Get the toplevel temporary directory for storing compiled graphs.
670
+ """
671
+ return os.path.join(cache_dir(), "fxgraph")
672
+
673
+ @staticmethod
674
+ def _get_tmp_dir_for_key(key: str) -> str:
675
+ """
676
+ Return the disk location for a given cache key.
677
+ """
678
+ return os.path.join(FxGraphCache._get_tmp_dir(), key[1:3], key)
679
+
680
+ @staticmethod
681
+ def _filter_symints(inputs: List[Any]) -> List[torch.SymInt]:
682
+ """
683
+ Get the SymInt objects from the input list.
684
+ """
685
+ return [s for s in inputs if isinstance(s, torch.SymInt)]
686
+
687
+ @staticmethod
688
+ def _get_shape_env() -> ShapeEnv:
689
+ """
690
+ Helper to get the shape env from the tracing context.
691
+ """
692
+ return torch._guards.TracingContext.get().fake_mode.shape_env
693
+
694
+ @staticmethod
695
+ def _lookup_graph(
696
+ key: str,
697
+ example_inputs: List[torch.Tensor],
698
+ ) -> Optional[CompiledFxGraph]:
699
+ """
700
+ Lookup a compiled graph in the cache by key. On a hit, return the
701
+ deserialized CompiledFxGraph object. On a miss, return None.
702
+ """
703
+ subdir = FxGraphCache._get_tmp_dir_for_key(key)
704
+ if not os.path.exists(subdir):
705
+ return None
706
+
707
+ # Iterate over any entries in the subdir for this key and evaluate
708
+ # their guards to determine whether there's a hit.
709
+ for path in sorted(os.listdir(subdir)):
710
+ with open(os.path.join(subdir, path), "rb") as f:
711
+ graph: CompiledFxGraph = pickle.load(f)
712
+
713
+ guards_expr = graph.guards_expr
714
+ if not guards_expr:
715
+ # No guards to evaluate
716
+ return graph
717
+
718
+ # Evaluate the guard expression in the current context.
719
+ shape_env = FxGraphCache._get_shape_env()
720
+ symints = FxGraphCache._filter_symints(example_inputs)
721
+
722
+ # If there's not a cache hit, we don't want the evaluation to
723
+ # affect the current env, e.g., cause the creation of new guards,
724
+ # so we evaluate with the hints instead of the symbols.
725
+ assert all(has_hint(s) for s in symints)
726
+ hints = [hint_int(s) for s in symints]
727
+ hit = bool(shape_env.evaluate_guards_expression(guards_expr, hints))
728
+ log.debug(
729
+ "fx graph cache key %s evaluating guards for %s with values %s => %s",
730
+ key,
731
+ guards_expr,
732
+ hints,
733
+ hit,
734
+ )
735
+ if hit:
736
+ # Now re-evaluate with the symints to add any guards to the current env.
737
+ check = bool(shape_env.evaluate_guards_expression(guards_expr, symints))
738
+ assert check is True
739
+ log.debug(
740
+ "fx graph cache key %s post-load guards: %s",
741
+ key,
742
+ shape_env.guards,
743
+ )
744
+ return graph
745
+
746
+ return None
747
+
748
+ @staticmethod
749
+ def _save_graph(
750
+ key: str, compiled_graph: CompiledFxGraph, example_inputs: List[torch.Tensor]
751
+ ):
752
+ """
753
+ Store a serialized CompiledFxGraph on disk.
754
+ """
755
+ disk_compiled_graph = copy(compiled_graph)
756
+ # Important as compiled models are not pickleable:
757
+ disk_compiled_graph.compiled_artifact = None
758
+
759
+ # Before serializing, compute the guard expression that will be used to
760
+ # ensure that a CompiledFxGraph is valid when loaded from the cache. It's
761
+ # sufficient to consider only the SymInt args to the fx graph since the
762
+ # Tensor shapes are already captured in the hash for the cache key. Any
763
+ # Tensor arg with a symbolic shape will have a SymInt arg for the graph.
764
+ shape_env = FxGraphCache._get_shape_env()
765
+ symints = FxGraphCache._filter_symints(example_inputs)
766
+ disk_compiled_graph.guards_expr = shape_env.produce_guards_expression(symints)
767
+
768
+ content = pickle.dumps(disk_compiled_graph)
769
+
770
+ subdir = FxGraphCache._get_tmp_dir_for_key(key)
771
+ if not os.path.exists(subdir):
772
+ os.makedirs(subdir, exist_ok=True)
773
+
774
+ # Use a hash of the serialized CompiledFxGraph to get a unique file
775
+ # name. The specific name doesn't matter since a lookup involves
776
+ # iterating over all entries in the parent subdir.
777
+ path = os.path.join(subdir, sha256_hash(content))
778
+ write_atomic(path, content)
779
+
780
+ @staticmethod
781
+ def load(
782
+ compile_fx_fn: Callable[..., Any],
783
+ gm: torch.fx.GraphModule,
784
+ example_inputs: List[torch.Tensor],
785
+ fx_kwargs: Dict[str, Any],
786
+ ):
787
+ """
788
+ Load a compiled graph from the cache. If a cached entry does not exist,
789
+ compile the graph and save it to the cache.
790
+ """
791
+ from filelock import FileLock
792
+
793
+ key = compiled_fx_graph_hash(gm, example_inputs, fx_kwargs)
794
+
795
+ lock_dir = get_lock_dir()
796
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
797
+ with lock:
798
+ compiled_graph = FxGraphCache._lookup_graph(key, example_inputs)
799
+ if compiled_graph is None:
800
+ log.debug("fx graph cache miss for key %s", key)
801
+ counters["inductor"]["fxgraph_cache_miss"] += 1
802
+ compiled_graph = compile_fx_fn(gm, example_inputs, **fx_kwargs)
803
+ FxGraphCache._save_graph(key, compiled_graph, example_inputs)
804
+ else:
805
+ log.debug("fx graph cache hit for key %s", key)
806
+ counters["inductor"]["fxgraph_cache_hit"] += 1
807
+
808
+ return compiled_graph
809
+
810
+ @staticmethod
811
+ def clear():
812
+ """
813
+ Clear out the on-disk cache.
814
+ """
815
+ shutil.rmtree(FxGraphCache._get_tmp_dir())
816
+
817
+
818
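# A minimal call-pattern sketch for the cache above. The callback name and the
# empty fx_kwargs are hypothetical stand-ins for compile_fx's inner compile
# function; this is not an officially documented entry point.
def _example_fx_graph_cache_load(gm, example_inputs):
    def my_compile_fn(gm, example_inputs, **fx_kwargs):
        ...  # compile the graph and return a CompiledFxGraph

    # On a miss, my_compile_fn runs and the result is persisted under
    # <cache dir>/fxgraph/<key prefix>/<key>; on a hit, the stored entry is
    # deserialized and its guards expression is re-evaluated first.
    return FxGraphCache.load(my_compile_fn, gm, example_inputs, fx_kwargs={})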
+ @dataclasses.dataclass
819
+ class CompiledFxGraph:
820
+ """
821
+ Class holding a compiled FX graph. This is the object serialized on disk
822
+ to support FxGraph caching.
823
+ """
824
+
825
+ compiled_artifact: Optional[Callable[..., Any]] = None
826
+ current_callable: Optional[Callable[..., Any]] = None
827
+ cache_key: Optional[str] = None
828
+ artifact_path: Optional[str] = None
829
+ cache_linemap: Optional[List[Tuple[int, str]]] = None
830
+ device_types: Set[str] = field(default_factory=set)
831
+ device_idxs: Set[int] = field(default_factory=set)
832
+ mutated_inputs: Set[str] = field(default_factory=set)
833
+ mutated_input_idxs: Set[int] = field(default_factory=set)
834
+ constants: Dict[str, torch.Tensor] = field(default_factory=dict)
835
+ output_strides: Optional[List[Optional[Tuple[int, ...]]]] = None
836
+ # This is a string representation of an expression we serialize
837
+ # with the object so the guards can be evaluated in a different
838
+ # context in order to verify the validity of serving a cached
839
+ # fx graph. The expression must be generated by:
840
+ # ShapeEnv.produce_guards_expression()
841
+ guards_expr: Optional[str] = None
842
+
843
+ _boxed_call: Optional[bool] = None
844
+
845
+ def __init__(
846
+ self,
847
+ compiled_artifact: Optional[Callable[..., Any]],
848
+ graph: GraphLowering,
849
+ output_strides: List[Optional[Tuple[int, ...]]],
850
+ ):
851
+ self.compiled_artifact = compiled_artifact
852
+ self.cache_key = graph.cache_key
853
+ self.artifact_path = graph.cache_path
854
+ self.cache_linemap = graph.cache_linemap
855
+ self.device_types = graph.device_types
856
+ self.device_idxs = graph.device_idxs
857
+ self.mutated_inputs = graph.mutated_inputs
858
+ self.mutated_input_idxs = set(graph.mutated_input_idxs)
859
+ self.constants = graph.constants
860
+ self.output_strides = output_strides
861
+ self.guards_expr = None
862
+
863
+ def __call__(self, inputs: List[Any]) -> Any:
864
+ return self.get_current_callable()(inputs)
865
+
866
+ def get_current_callable(self) -> Callable[..., Any]:
867
+ if self.current_callable is None:
868
+ # This prevents a circular reference that makes CompiledFxGraph
869
+ # get stuck without getting garbage collected
870
+ return functools.partial(_run_from_cache, weakref.proxy(self))
871
+ else:
872
+ return self.current_callable
873
+
874
+
875
+ def _run_from_cache(compiled_graph: CompiledFxGraph, inputs: List[Any]) -> Any:
876
+ # We can't really serialize callables that may be C++/Triton/etc.,
877
+ # so we serialize their disk cache location instead
878
+ # TODO: When making an API that can save compiled models e2e to disk
879
+ # this will need to be better
880
+ if compiled_graph.compiled_artifact is None:
881
+ from .codecache import PyCodeCache
882
+
883
+ assert compiled_graph.cache_key
884
+ assert compiled_graph.artifact_path
885
+ compiled_graph.compiled_artifact = PyCodeCache.load_by_key_path(
886
+ compiled_graph.cache_key,
887
+ compiled_graph.artifact_path,
888
+ compiled_graph.cache_linemap,
889
+ compiled_graph.constants,
890
+ ).call
891
+
892
+ return compiled_graph.compiled_artifact(inputs)
893
+
894
+
895
+ def cpp_compiler() -> str:
896
+ if config.is_fbcode():
897
+ return build_paths.cc()
898
+ if isinstance(config.cpp.cxx, (list, tuple)):
899
+ search = tuple(config.cpp.cxx)
900
+ else:
901
+ search = (config.cpp.cxx,)
902
+ return cpp_compiler_search(search)
903
+
904
+
905
+ @functools.lru_cache(1)
906
+ def cpp_compiler_search(search: str) -> str:
907
+ for cxx in search:
908
+ try:
909
+ if cxx is None:
910
+ # gxx package is only available for Linux
911
+ # according to https://anaconda.org/conda-forge/gxx/
912
+ if sys.platform != "linux":
913
+ continue
914
+ # Do not install GXX by default
915
+ if not os.getenv("TORCH_INDUCTOR_INSTALL_GXX"):
916
+ continue
917
+ from filelock import FileLock
918
+
919
+ lock_dir = get_lock_dir()
920
+ lock = FileLock(
921
+ os.path.join(lock_dir, "g++.lock"), timeout=LOCK_TIMEOUT
922
+ )
923
+ with lock:
924
+ cxx = install_gcc_via_conda()
925
+ subprocess.check_output([cxx, "--version"])
926
+ return cxx
927
+ except (subprocess.SubprocessError, FileNotFoundError, ImportError):
928
+ continue
929
+ raise exc.InvalidCxxCompiler()
930
+
931
+
932
+ def install_gcc_via_conda() -> str:
933
+ """On older systems, this is a quick way to get a modern compiler"""
934
+ prefix = os.path.join(cache_dir(), "gcc")
935
+ cxx_path = os.path.join(prefix, "bin", "g++")
936
+ if not os.path.exists(cxx_path):
937
+ log.info("Downloading GCC via conda")
938
+ conda = os.environ.get("CONDA_EXE", "conda")
939
+ if conda is None:
940
+ conda = shutil.which("conda")
941
+ if conda is not None:
942
+ subprocess.check_call(
943
+ [
944
+ conda,
945
+ "create",
946
+ f"--prefix={prefix}",
947
+ "--channel=conda-forge",
948
+ "--quiet",
949
+ "-y",
950
+ "python=3.8",
951
+ "gxx",
952
+ ],
953
+ stdout=subprocess.PIPE,
954
+ )
955
+ return cxx_path
956
+
957
+
958
+ def is_gcc() -> bool:
959
+ return bool(re.search(r"(gcc|g\+\+)", cpp_compiler()))
960
+
961
+
962
+ def is_clang() -> bool:
963
+ return bool(re.search(r"(clang|clang\+\+)", cpp_compiler()))
964
+
965
+
966
+ @functools.lru_cache(None)
967
+ def is_apple_clang() -> bool:
968
+ cxx = cpp_compiler()
969
+ version_string = subprocess.check_output([cxx, "--version"]).decode("utf8")
970
+ return "Apple" in version_string.splitlines()[0]
971
+
972
+
973
+ class VecISA:
974
+ _bit_width: int
975
+ _macro: str
976
+ _arch_flags: str
977
+ _dtype_nelements: Dict[torch.dtype, int]
978
+
979
+ # Note [Checking for Vectorized Support in Inductor]
980
+ # TorchInductor CPU vectorization reuses PyTorch vectorization utility functions
981
+ # Hence, TorchInductor depends on Sleef* to accelerate mathematical functions
982
+ # like exp, pow, sin, and cos.
983
+ # But PyTorch and TorchInductor might use different compilers to build code. If
984
+ # PyTorch uses gcc-7/g++-7 to build the release package, the libtorch_cpu.so
985
+ # will not expose the Sleef* AVX512 symbols since gcc-7/g++-7 cannot pass the
986
+ # avx512 check in CMake's FindAVX.cmake. But TorchInductor installs the latest
987
+ # gcc/g++ compiler by default, which can support AVX512 compilation.
988
+ # Therefore, there could be a Sleef version conflict between PyTorch and
989
+ # TorchInductor. Hence, we dry-compile the following code to check whether the
990
+ # current HW platform and PyTorch both support AVX512 or AVX2. We assume ARM
991
+ # needs the same logic.
992
+ # In fbcode however, we are using the same compiler for pytorch and for inductor codegen,
993
+ # making the runtime check unnecessary.
994
+ _avx_code = """
995
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
996
+ #include <ATen/cpu/vec/functional.h>
997
+ #include <ATen/cpu/vec/vec.h>
998
+ #endif
999
+
1000
+ __attribute__((aligned(64))) float in_out_ptr0[16] = {0.0};
1001
+
1002
+ extern "C" void __avx_chk_kernel() {
1003
+ auto tmp0 = at::vec::Vectorized<float>(1);
1004
+ auto tmp1 = tmp0.exp();
1005
+ tmp1.store(in_out_ptr0);
1006
+ }
1007
+ """
1008
+
1009
+ _avx_py_load = """
1010
+ import torch
1011
+ from ctypes import cdll
1012
+ cdll.LoadLibrary("__lib_path__")
1013
+ """
1014
+
1015
+ def bit_width(self) -> int:
1016
+ return self._bit_width
1017
+
1018
+ def nelements(self, dtype: torch.dtype = torch.float) -> int:
1019
+ return self._dtype_nelements[dtype]
1020
+
1021
+ def build_macro(self) -> str:
1022
+ return self._macro
1023
+
1024
+ def build_arch_flags(self) -> str:
1025
+ return self._arch_flags
1026
+
1027
+ def __hash__(self) -> int:
1028
+ return hash(str(self))
1029
+
1030
+ @functools.lru_cache(None)
1031
+ def __bool__(self) -> bool:
1032
+ if config.cpp.vec_isa_ok is not None:
1033
+ return config.cpp.vec_isa_ok
1034
+
1035
+ if config.is_fbcode():
1036
+ return True
1037
+
1038
+ key, input_path = write(VecISA._avx_code, "cpp")
1039
+ from filelock import FileLock
1040
+
1041
+ lock_dir = get_lock_dir()
1042
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
1043
+ with lock:
1044
+ output_path = input_path[:-3] + "so"
1045
+ build_cmd = shlex.split(
1046
+ cpp_compile_command(
1047
+ input_path, output_path, warning_all=False, vec_isa=self
1048
+ )
1049
+ )
1050
+ try:
1051
+ # Check build result
1052
+ compile_file(input_path, output_path, build_cmd)
1053
+ subprocess.check_call(
1054
+ [
1055
+ sys.executable,
1056
+ "-c",
1057
+ VecISA._avx_py_load.replace("__lib_path__", output_path),
1058
+ ],
1059
+ stderr=subprocess.DEVNULL,
1060
+ env={**os.environ, "PYTHONPATH": ":".join(sys.path)},
1061
+ )
1062
+ except Exception as e:
1063
+ return False
1064
+
1065
+ return True
1066
+
1067
+
1068
+ @dataclasses.dataclass
1069
+ class VecAVX512(VecISA):
1070
+ _bit_width = 512
1071
+ _macro = "-DCPU_CAPABILITY_AVX512"
1072
+ _arch_flags = "-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma"
1073
+ _dtype_nelements = {torch.float: 16, torch.bfloat16: 32, torch.float16: 32}
1074
+
1075
+ def __str__(self) -> str:
1076
+ return "avx512"
1077
+
1078
+ __hash__: Callable[[VecISA], Any] = VecISA.__hash__
1079
+
1080
+
1081
+ @dataclasses.dataclass
1082
+ class VecAVX2(VecISA):
1083
+ _bit_width = 256
1084
+ _macro = "-DCPU_CAPABILITY_AVX2"
1085
+ _arch_flags = "-mavx2 -mfma"
1086
+ _dtype_nelements = {torch.float: 8, torch.bfloat16: 16, torch.float16: 16}
1087
+
1088
+ def __str__(self) -> str:
1089
+ return "avx2"
1090
+
1091
+ __hash__: Callable[[VecISA], Any] = VecISA.__hash__
1092
+
1093
+
1094
+ @dataclasses.dataclass
1095
+ class VecZVECTOR(VecISA):
1096
+ _bit_width = 256
1097
+ _macro = "-DCPU_CAPABILITY_ZVECTOR -DCPU_CAPABILITY=ZVECTOR -DHAVE_ZVECTOR_CPU_DEFINITION"
1098
+ _arch_flags = "-mvx -mzvector"
1099
+ _dtype_nelements = {torch.float: 8, torch.bfloat16: 16, torch.float16: 16}
1100
+
1101
+ def __str__(self) -> str:
1102
+ return "zvector"
1103
+
1104
+ __hash__: Callable[[VecISA], Any] = VecISA.__hash__
1105
+
1106
+
1107
+ class InvalidVecISA(VecISA):
1108
+ _bit_width = 0
1109
+ _macro = ""
1110
+ _arch_flags = ""
1111
+ _dtype_nelements = {}
1112
+
1113
+ def __str__(self) -> str:
1114
+ return "INVALID_VEC_ISA"
1115
+
1116
+ def __bool__(self) -> bool: # type: ignore[override]
1117
+ return False
1118
+
1119
+ __hash__: Callable[[VecISA], Any] = VecISA.__hash__
1120
+
1121
+
1122
+ invalid_vec_isa = InvalidVecISA()
1123
+ supported_vec_isa_list = [VecAVX512(), VecAVX2()]
1124
+
1125
+
1126
+ # Cache the cpuinfo to avoid I/O overhead. Meanwhile, the cpuinfo content
1127
+ # might have too much redundant content that is useless for ISA check. Hence,
1128
+ # we only cache some key isa information.
1129
+ @functools.lru_cache(None)
1130
+ def valid_vec_isa_list() -> List[VecISA]:
1131
+ if sys.platform != "linux":
1132
+ return []
1133
+
1134
+ if platform.machine() == "s390x":
1135
+ return [VecZVECTOR()]
1136
+
1137
+ isa_list = []
1138
+ with open("/proc/cpuinfo") as _cpu_info:
1139
+ _cpu_info_content = _cpu_info.read()
1140
+ for isa in supported_vec_isa_list:
1141
+ if str(isa) in _cpu_info_content and isa:
1142
+ isa_list.append(isa)
1143
+ return isa_list
1144
+
1145
+
1146
+ def pick_vec_isa() -> VecISA:
1147
+ if config.is_fbcode():
1148
+ return VecAVX2()
1149
+
1150
+ _valid_vec_isa_list: List[VecISA] = valid_vec_isa_list()
1151
+ if not _valid_vec_isa_list:
1152
+ return invalid_vec_isa
1153
+
1154
+ # If simdlen is None, determine the vectorization length automatically
1155
+ if config.cpp.simdlen is None:
1156
+ assert _valid_vec_isa_list
1157
+ return _valid_vec_isa_list[0]
1158
+
1159
+ for isa in _valid_vec_isa_list:
1160
+ if config.cpp.simdlen == isa.bit_width():
1161
+ return isa
1162
+
1163
+ return invalid_vec_isa
1164
+
1165
+
1166
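# A minimal sketch of how config.cpp.simdlen steers the selection above (the
# 256-bit request is a hypothetical setting; availability still depends on
# what /proc/cpuinfo reports):
def _example_pick_vec_isa() -> None:
    config.cpp.simdlen = 256  # explicitly request a 256-bit ISA
    isa = pick_vec_isa()
    # Either no supported ISA was detected, or a 256-bit one (e.g. AVX2) was picked.
    assert isa is invalid_vec_isa or isa.bit_width() == 256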
+ def get_compile_only(compile_only: bool = True) -> str:
1167
+ return "-c" if compile_only else ""
1168
+
1169
+
1170
+ def get_shared(shared: bool = True) -> str:
1171
+ return "-shared -fPIC" if shared else ""
1172
+
1173
+
1174
+ def get_warning_all_flag(warning_all: bool = True) -> str:
1175
+ return "-Wall" if warning_all else ""
1176
+
1177
+
1178
+ def get_glibcxx_abi_build_flags() -> str:
1179
+ return "-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))
1180
+
1181
+
1182
+ def cpp_flags() -> str:
1183
+ flags = ["-std=c++17", "-Wno-unused-variable", "-Wno-unknown-pragmas"]
1184
+ if is_clang():
1185
+ flags.append("-Werror=ignored-optimization-argument")
1186
+ return " ".join(flags)
1187
+
1188
+
1189
+ def cpp_wrapper_flags() -> str:
1190
+ return "-DTORCH_INDUCTOR_CPP_WRAPPER"
1191
+
1192
+
1193
+ def optimization_flags() -> str:
1194
+ base_flags = "-O0 -g" if config.aot_inductor.debug_compile else "-O3 -DNDEBUG"
1195
+ base_flags += " -ffast-math -fno-finite-math-only"
1196
+ if not config.cpp.enable_unsafe_math_opt_flag:
1197
+ base_flags += " -fno-unsafe-math-optimizations"
1198
+
1199
+ if config.is_fbcode():
1200
+ # FIXME: passing `-fopenmp` adds libgomp.so to the generated shared library's dependencies.
1201
+ # This causes `ldopen` to fail in fbcode, because libgomp does not exist in the default paths.
1202
+ # We will fix it later by exposing the lib path.
1203
+ return base_flags
1204
+
1205
+ if sys.platform == "darwin":
1206
+ # Per https://mac.r-project.org/openmp/ the right way to pass `openmp` flags to macOS is via `-Xclang`
1207
+ # Also, `-march=native` is an unrecognized option on M1
1208
+ base_flags += " -Xclang"
1209
+ else:
1210
+ if platform.machine() == "ppc64le":
1211
+ base_flags += " -mcpu=native"
1212
+ else:
1213
+ base_flags += " -march=native"
1214
+
1215
+ # Internal cannot find libgomp.so
1216
+ if not config.is_fbcode():
1217
+ base_flags += " -fopenmp"
1218
+ return base_flags
1219
+
1220
+
1221
+ def use_custom_generated_macros() -> str:
1222
+ return "-D C10_USING_CUSTOM_GENERATED_MACROS"
1223
+
1224
+
1225
+ def use_fb_internal_macros() -> str:
1226
+ if config.is_fbcode():
1227
+ openmp_lib = build_paths.openmp_lib()
1228
+ preprocessor_flags = " ".join(
1229
+ (
1230
+ "-D C10_USE_GLOG",
1231
+ "-D C10_USE_MINIMAL_GLOG",
1232
+ "-D C10_DISABLE_TENSORIMPL_EXTENSIBILITY",
1233
+ )
1234
+ )
1235
+ return f"-Wp,-fopenmp {openmp_lib} {preprocessor_flags}"
1236
+ else:
1237
+ return ""
1238
+
1239
+
1240
+ def use_standard_sys_dir_headers() -> str:
1241
+ if config.is_fbcode():
1242
+ return "-nostdinc"
1243
+ else:
1244
+ return ""
1245
+
1246
+
1247
+ @functools.lru_cache(None)
1248
+ def is_conda_llvm_openmp_installed() -> bool:
1249
+ try:
1250
+ command = "conda list llvm-openmp --json"
1251
+ output = subprocess.check_output(command.split()).decode("utf8")
1252
+ return len(json.loads(output)) > 0
1253
+ except subprocess.SubprocessError:
1254
+ return False
1255
+
1256
+
1257
+ @functools.lru_cache(None)
1258
+ def homebrew_libomp() -> Tuple[bool, str]:
1259
+ try:
1260
+ # check if `brew` is installed
1261
+ subprocess.check_output(["which", "brew"])
1262
+ # get the location of `libomp` if it is installed
1263
+ # this is the location that `libomp` **would** be installed
1264
+ # see https://github.com/Homebrew/brew/issues/10261#issuecomment-756563567 for details
1265
+ libomp_path = (
1266
+ subprocess.check_output(["brew", "--prefix", "libomp"])
1267
+ .decode("utf8")
1268
+ .strip()
1269
+ )
1270
+ # check if `libomp` is installed
1271
+ omp_available = os.path.exists(libomp_path)
1272
+ return omp_available, libomp_path
1273
+ except subprocess.SubprocessError:
1274
+ return False, ""
1275
+
1276
+
1277
+ def get_include_and_linking_paths(
1278
+ include_pytorch: bool = False,
1279
+ vec_isa: VecISA = invalid_vec_isa,
1280
+ cuda: bool = False,
1281
+ aot_mode: bool = False,
1282
+ ) -> Tuple[List[str], str, str, str, str]:
1283
+ if (
1284
+ config.is_fbcode()
1285
+ and "CUDA_HOME" not in os.environ
1286
+ and "CUDA_PATH" not in os.environ
1287
+ ):
1288
+ os.environ["CUDA_HOME"] = os.path.dirname(build_paths.cuda())
1289
+ from torch.utils import cpp_extension
1290
+
1291
+ macros = ""
1292
+ build_arch_flags = ""
1293
+ if sys.platform == "linux" and (
1294
+ include_pytorch
1295
+ or vec_isa != invalid_vec_isa
1296
+ or cuda
1297
+ or config.cpp.enable_kernel_profile
1298
+ ):
1299
+ # Note - We include pytorch only on linux right now. There is more work
1300
+ # to do to enable OMP build on darwin where PyTorch is built with IOMP
1301
+ # and we need a way to link to what PyTorch links.
1302
+ ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
1303
+ lpaths = cpp_extension.library_paths(cuda) + [
1304
+ sysconfig.get_config_var("LIBDIR")
1305
+ ]
1306
+
1307
+ libs = []
1308
+
1309
+ # No need to manually specify libraries in fbcode.
1310
+ if not config.is_fbcode():
1311
+ libs += ["torch", "torch_cpu"]
1312
+ libs += ["gomp"]
1313
+ if not aot_mode:
1314
+ libs += ["torch_python"]
1315
+ else:
1316
+ # internal remote execution is able to find omp, but not gomp
1317
+ libs += ["omp"]
1318
+ if aot_mode:
1319
+ ipaths += [os.path.dirname(cpp_prefix_path())]
1320
+ if cuda:
1321
+ # This is a special treatment for Meta internal cuda-12 where all libs
1322
+ # are in lib/cuda-12 and lib/cuda-12/stubs
1323
+ for i, path in enumerate(lpaths):
1324
+ if path.startswith(
1325
+ os.environ["CUDA_HOME"]
1326
+ ) and not os.path.exists(f"{path}/libcudart_static.a"):
1327
+ for root, dirs, files in os.walk(path):
1328
+ if "libcudart_static.a" in files:
1329
+ lpaths[i] = os.path.join(path, root)
1330
+ lpaths.append(os.path.join(lpaths[i], "stubs"))
1331
+ break
1332
+ macros = vec_isa.build_macro()
1333
+ if macros:
1334
+ if config.is_fbcode() and vec_isa != invalid_vec_isa:
1335
+ cap = str(vec_isa).upper()
1336
+ macros = " ".join(
1337
+ [
1338
+ vec_isa.build_arch_flags(),
1339
+ f"-D CPU_CAPABILITY={cap}",
1340
+ f"-D CPU_CAPABILITY_{cap}",
1341
+ f"-D HAVE_{cap}_CPU_DEFINITION",
1342
+ ]
1343
+ )
1344
+
1345
+ if aot_mode and cuda:
1346
+ if macros is None:
1347
+ macros = ""
1348
+ macros += " -D USE_CUDA"
1349
+
1350
+ if cuda:
1351
+ if torch.version.hip is not None:
1352
+ libs += ["c10_hip", "torch_hip"]
1353
+ else:
1354
+ if config.is_fbcode():
1355
+ libs += ["cuda"]
1356
+ else:
1357
+ libs += ["c10_cuda", "cuda", "torch_cuda"]
1358
+ build_arch_flags = vec_isa.build_arch_flags()
1359
+ else:
1360
+ # Note - this is effectively a header only inclusion. Usage of some header files may result in
1361
+ # symbol not found, if those header files require a library.
1362
+ # For those cases, include the lpath and libs command as we do for pytorch above.
1363
+ # This approach allows us to only pay for what we use.
1364
+ ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
1365
+ if aot_mode:
1366
+ ipaths += [os.path.dirname(cpp_prefix_path())]
1367
+ lpaths = []
1368
+ if sys.platform == "darwin":
1369
+ # Apple's builtin compiler (Apple Clang++) does not bundle OpenMP, so it must be found separately
1370
+ omp_available = not is_apple_clang()
1371
+
1372
+ # check the `OMP_PREFIX` environment first
1373
+ if os.getenv("OMP_PREFIX") is not None:
1374
+ header_path = os.path.join(os.getenv("OMP_PREFIX"), "include", "omp.h")
1375
+ valid_env = os.path.exists(header_path)
1376
+ if valid_env:
1377
+ ipaths.append(os.path.join(os.getenv("OMP_PREFIX"), "include"))
1378
+ lpaths.append(os.path.join(os.getenv("OMP_PREFIX"), "lib"))
1379
+ else:
1380
+ warnings.warn("environment variable `OMP_PREFIX` is invalid.")
1381
+ omp_available = omp_available or valid_env
1382
+
1383
+ libs = [] if omp_available else ["omp"]
1384
+
1385
+ # prefer to use openmp from `conda install llvm-openmp`
1386
+ if not omp_available and os.getenv("CONDA_PREFIX") is not None:
1387
+ omp_available = is_conda_llvm_openmp_installed()
1388
+ if omp_available:
1389
+ conda_lib_path = os.path.join(os.getenv("CONDA_PREFIX"), "lib")
1390
+ ipaths.append(os.path.join(os.getenv("CONDA_PREFIX"), "include"))
1391
+ lpaths.append(conda_lib_path)
1392
+ # Prefer Intel OpenMP on x86 machine
1393
+ if os.uname().machine == "x86_64" and os.path.exists(
1394
+ os.path.join(conda_lib_path, "libiomp5.dylib")
1395
+ ):
1396
+ libs = ["iomp5"]
1397
+
1398
+ # next, try to use openmp from `brew install libomp`
1399
+ if not omp_available:
1400
+ omp_available, libomp_path = homebrew_libomp()
1401
+ if omp_available:
1402
+ ipaths.append(os.path.join(libomp_path, "include"))
1403
+ lpaths.append(os.path.join(libomp_path, "lib"))
1404
+
1405
+ # if openmp is still not available, we let the compiler have a try,
1406
+ # and raise an error together with instructions if compilation fails later
1407
+ else:
1408
+ libs = ["omp"] if config.is_fbcode() else ["gomp"]
1409
+
1410
+ # Unconditionally import c10 for non-abi-compatible mode to use TORCH_CHECK - See PyTorch #108690
1411
+ if not config.aot_inductor.abi_compatible:
1412
+ libs += ["c10"]
1413
+ lpaths += [cpp_extension.TORCH_LIB_PATH]
1414
+
1415
+ # third party libs
1416
+ if config.is_fbcode():
1417
+ ipaths.append(build_paths.sleef())
1418
+ ipaths.append(build_paths.openmp())
1419
+ ipaths.append(build_paths.cc_include())
1420
+ ipaths.append(build_paths.libgcc())
1421
+ ipaths.append(build_paths.libgcc_arch())
1422
+ ipaths.append(build_paths.libgcc_backward())
1423
+ ipaths.append(build_paths.glibc())
1424
+ ipaths.append(build_paths.linux_kernel())
1425
+ ipaths.append(build_paths.cuda())
1426
+ # We also need to bundle includes with absolute paths into a remote directory
1427
+ # (later on, we copy the include paths from cpp_extensions into our remote dir)
1428
+ ipaths.append("include")
1429
+
1430
+ static_link_libs = []
1431
+ if aot_mode and cuda and config.is_fbcode():
1432
+ # For Meta internal cuda-12, it is recommended to static link cudart
1433
+ static_link_libs = ["-Wl,-Bstatic", "-lcudart_static", "-Wl,-Bdynamic"]
1434
+
1435
+ lpaths_str = " ".join(["-L" + p for p in lpaths])
1436
+ libs_str = " ".join(static_link_libs + ["-l" + p for p in libs])
1437
+ return ipaths, lpaths_str, libs_str, macros, build_arch_flags
1438
+
1439
+
1440
+ def cpp_compile_command(
1441
+ input: Union[str, List[str]],
1442
+ output: str,
1443
+ warning_all: bool = True,
1444
+ shared: bool = True,
1445
+ include_pytorch: bool = False,
1446
+ vec_isa: VecISA = invalid_vec_isa,
1447
+ cuda: bool = False,
1448
+ aot_mode: bool = False,
1449
+ compile_only: bool = False,
1450
+ use_absolute_path: bool = False,
1451
+ ) -> str:
1452
+ ipaths, lpaths, libs, macros, build_arch_flags = get_include_and_linking_paths(
1453
+ include_pytorch, vec_isa, cuda, aot_mode
1454
+ )
1455
+ if isinstance(input, str):
1456
+ input = [input]
1457
+ ipaths_str = " ".join(["-I" + p for p in ipaths])
1458
+ clang_flags = ""
1459
+ if config.is_fbcode():
1460
+ if aot_mode and not use_absolute_path:
1461
+ inp_name = input
1462
+ out_name = output
1463
+ else:
1464
+ # We need to copy any absolute-path torch includes
1465
+ inp_name = [os.path.basename(i) for i in input]
1466
+ out_name = os.path.basename(output)
1467
+ assert is_clang()
1468
+ # Use clang runtime instead of libgcc
1469
+ clang_flags += " --rtlib=compiler-rt"
1470
+ clang_flags += " -fuse-ld=lld"
1471
+ linker_paths = "-B" + build_paths.glibc_lib()
1472
+ linker_paths += " -L" + build_paths.glibc_lib()
1473
+ else:
1474
+ inp_name = input
1475
+ out_name = output
1476
+ linker_paths = "" # let the compiler pick
1477
+ inp_name_str = " ".join(inp_name)
1478
+ return re.sub(
1479
+ r"[ \n]+",
1480
+ " ",
1481
+ f"""
1482
+ {cpp_compiler()} {inp_name_str} {get_shared(shared)}
1483
+ {get_warning_all_flag(warning_all)} {cpp_flags()}
1484
+ {get_glibcxx_abi_build_flags()}
1485
+ {ipaths_str} {lpaths} {libs} {build_arch_flags}
1486
+ {macros} {linker_paths} {clang_flags}
1487
+ {optimization_flags()}
1488
+ {use_custom_generated_macros()}
1489
+ {use_fb_internal_macros()}
1490
+ {use_standard_sys_dir_headers()}
1491
+ {get_compile_only(compile_only)}
1492
+ -o {out_name}
1493
+ """,
1494
+ ).strip()
1495
+
1496
+
1497
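# Rough shape of the command assembled above for a shared-object build on a
# typical Linux host (flags abridged and machine-dependent; paths hypothetical):
#   g++ /tmp/xyz/main.cpp -shared -fPIC -Wall -std=c++17 -Wno-unused-variable \
#       -D_GLIBCXX_USE_CXX11_ABI=1 -I<torch include dirs> -lgomp \
#       -O3 -DNDEBUG -ffast-math -fno-finite-math-only -march=native -fopenmp \
#       -o /tmp/xyz/main.so
# The resulting string is tokenized with shlex.split() before being executed.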
+ def run_command_and_check(cmd: str):
1498
+ cmd = shlex.split(cmd)
1499
+ try:
1500
+ subprocess.check_call(cmd)
1501
+ except subprocess.CalledProcessError as e:
1502
+ raise exc.CppCompileError(cmd, e.output) from e
1503
+
1504
+
1505
+ @functools.lru_cache(None)
1506
+ def split_aot_inductor_output_path(path: str) -> Tuple[str, str]:
1507
+ """Returns the path where the AOT Inductor compiled kernels are stored."""
1508
+ if path.endswith(".so"):
1509
+ return os.path.split(path)
1510
+ else:
1511
+ return path, ""
1512
+
1513
+
1514
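# Two worked examples of the split above (paths are hypothetical):
def _example_split_aot_inductor_output_path() -> None:
    assert split_aot_inductor_output_path("/tmp/out/model.so") == ("/tmp/out", "model.so")
    assert split_aot_inductor_output_path("/tmp/out") == ("/tmp/out", "")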
+ class CudaKernelParamCache:
1515
+ cache: Dict[str, Dict[str, str]] = dict()
1516
+ clear = staticmethod(cache.clear)
1517
+
1518
+ @classmethod
1519
+ def set(cls, key: str, params: Dict[str, str], cubin: str) -> None:
1520
+ bin_type = "cubin" if torch.version.hip is None else "hsaco"
1521
+ _, path = write(
1522
+ cubin,
1523
+ bin_type,
1524
+ hash_type=bin_type,
1525
+ specified_dir=split_aot_inductor_output_path(
1526
+ config.aot_inductor.output_path
1527
+ )[0],
1528
+ )
1529
+
1530
+ params[get_cpp_wrapper_cubin_path_name()] = path
1531
+
1532
+ cls.cache[key] = params
1533
+
1534
+ @classmethod
1535
+ def get(cls, key: str) -> Optional[Dict[str, str]]:
1536
+ return cls.cache.get(key, None)
1537
+
1538
+
1539
+ class AotCodeCache:
1540
+ cache: Dict[str, str] = dict()
1541
+ clear = staticmethod(cache.clear)
1542
+
1543
+ @classmethod
1544
+ def compile(
1545
+ cls,
1546
+ graph: GraphLowering,
1547
+ source_code: str,
1548
+ serialized_extern_kernel_nodes: Optional[str],
1549
+ cuda: bool,
1550
+ ) -> str:
1551
+ picked_vec_isa = pick_vec_isa()
1552
+ cpp_command = repr(
1553
+ cpp_compile_command(
1554
+ "i", "o", vec_isa=picked_vec_isa, cuda=cuda, aot_mode=graph.aot_mode
1555
+ )
1556
+ )
1557
+ fbcode_aot_cpu_re = False
1558
+ use_absolute_path = False
1559
+ if config.is_fbcode():
1560
+ ld_command = build_paths.ld()
1561
+ if not cuda and graph.aot_mode: # Meta internal AOTInductor CPU
1562
+ objcopy_command = build_paths.objcopy_fallback()
1563
+ fbcode_aot_cpu_re = True
1564
+ use_absolute_path = True
1565
+ else:
1566
+ objcopy_command = build_paths.objcopy()
1567
+ else:
1568
+ ld_command = "ld"
1569
+ objcopy_command = "objcopy"
1570
+
1571
+ (
1572
+ specified_output_path,
1573
+ specified_so_name,
1574
+ ) = split_aot_inductor_output_path(config.aot_inductor.output_path)
1575
+ key, input_path = write(
1576
+ source_code,
1577
+ "cpp",
1578
+ extra=cpp_command,
1579
+ specified_dir=specified_output_path,
1580
+ )
1581
+
1582
+ if key not in cls.cache or (
1583
+ specified_output_path
1584
+ and os.path.dirname(cls.cache[key]) != specified_output_path
1585
+ or specified_so_name
1586
+ and os.path.basename(cls.cache[key]) != specified_so_name
1587
+ ):
1588
+ from filelock import FileLock
1589
+
1590
+ lock_dir = get_lock_dir()
1591
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
1592
+ with lock:
1593
+ # Currently, this only supports serializing extern nodes in fbcode
1594
+ # Eventually, we should also have a serializer for OSS.
1595
+ if config.is_fbcode() and serialized_extern_kernel_nodes:
1596
+ output_json = os.path.splitext(input_path)[0] + ".json"
1597
+ with open(output_json, "w") as f:
1598
+ f.write(serialized_extern_kernel_nodes)
1599
+
1600
+ output_so = (
1601
+ config.aot_inductor.output_path
1602
+ if specified_so_name
1603
+ else os.path.splitext(input_path)[0] + ".so"
1604
+ )
1605
+
1606
+ if not os.path.exists(output_so):
1607
+ output_o = os.path.splitext(input_path)[0] + ".o"
1608
+ cmd = cpp_compile_command(
1609
+ input=input_path,
1610
+ output=output_o,
1611
+ vec_isa=picked_vec_isa,
1612
+ cuda=cuda,
1613
+ aot_mode=graph.aot_mode,
1614
+ compile_only=True,
1615
+ use_absolute_path=use_absolute_path,
1616
+ )
1617
+ log.debug("aot compilation command: %s", cmd)
1618
+ if fbcode_aot_cpu_re:
1619
+ compile_file(input_path, output_o, cmd.split())
1620
+ os.chmod(output_o, 0o644)
1621
+ else:
1622
+ run_command_and_check(cmd)
1623
+
1624
+ def _to_bytes(t: torch.Tensor) -> bytes:
1625
+ # This serializes the tensor's untyped_storage to bytes by accessing
1626
+ # the raw data of the underlying structure.
1627
+ import ctypes
1628
+
1629
+ if t.numel() == 0:
1630
+ return b""
1631
+
1632
+ t_cpu = t.untyped_storage().cpu()
1633
+ raw_array = ctypes.cast(
1634
+ t_cpu.data_ptr(),
1635
+ ctypes.POINTER(ctypes.c_ubyte * t_cpu.nbytes()),
1636
+ )
1637
+
1638
+ return bytes(raw_array.contents)
1639
+
1640
+ aot_constants = b"".join(
1641
+ _to_bytes(tensor) for tensor in graph.constants.values()
1642
+ )
1643
+
1644
+ consts_key, consts_path = write(
1645
+ aot_constants,
1646
+ "bin",
1647
+ specified_dir=specified_output_path,
1648
+ )
1649
+
1650
+ consts_o = os.path.splitext(consts_path)[0] + ".o"
1651
+ if fbcode_aot_cpu_re:
1652
+ cmd = f"{ld_command} -r -b binary -o {os.path.basename(consts_o)} {os.path.basename(consts_path)}"
1653
+ compile_file(consts_path, consts_o, cmd.split())
1654
+ os.chmod(consts_o, 0o644)
1655
+ else:
1656
+ cmd = f"{ld_command} -r -b binary -o {consts_o} {consts_path}"
1657
+ run_command_and_check(cmd)
1658
+ log.debug("aot constant binary command: %s", cmd)
1659
+
1660
+ cmd = (
1661
+ f"{objcopy_command} --rename-section"
1662
+ " .data=.lrodata,alloc,load,readonly,data,contents"
1663
+ f" {consts_o} {consts_o}"
1664
+ )
1665
+ log.debug("aot constant obj command: %s", cmd)
1666
+ run_command_and_check(cmd)
1667
+
1668
+ cmd = f"rm {consts_path}"
1669
+ log.debug("aot constant bin removal command: %s", cmd)
1670
+ run_command_and_check(cmd)
1671
+
1672
+ if fbcode_aot_cpu_re:
1673
+ body = re.sub(r"[\W]", "_", os.path.basename(consts_path))
1674
+ else:
1675
+ body = re.sub(r"[\W]", "_", consts_path)
1676
+
1677
+ symbol_list = []
1678
+ symbol_list.append(
1679
+ f"{objcopy_command} --redefine-sym _binary_{body}_start=_binary_constants_bin_start {consts_o}"
1680
+ )
1681
+ symbol_list.append(
1682
+ f"{objcopy_command} --redefine-sym _binary_{body}_size=_binary_constants_bin_size {consts_o}"
1683
+ )
1684
+ symbol_list.append(
1685
+ f"{objcopy_command} --redefine-sym _binary_{body}_end=_binary_constants_bin_end {consts_o}"
1686
+ )
1687
+ log.debug(
1688
+ "aot constant binary redefine symbol: %s", " ".join(symbol_list)
1689
+ )
1690
+ for cmd in symbol_list:
1691
+ run_command_and_check(cmd)
1692
+
1693
+ cmd = cpp_compile_command(
1694
+ input=[output_o, consts_o],
1695
+ output=output_so,
1696
+ vec_isa=picked_vec_isa,
1697
+ cuda=cuda,
1698
+ aot_mode=graph.aot_mode,
1699
+ use_absolute_path=use_absolute_path,
1700
+ )
1701
+ log.debug("aot linkage command: %s", cmd)
1702
+ if fbcode_aot_cpu_re:
1703
+ compile_file([output_o, consts_o], output_so, cmd.split())
1704
+ os.chmod(output_so, 0o755)
1705
+ else:
1706
+ run_command_and_check(cmd)
1707
+ else:
1708
+ log.debug(
1709
+ "aot_inductor dynamic library already exist: %s", output_so
1710
+ )
1711
+
1712
+ cls.cache[key] = output_so
1713
+
1714
+ return cls.cache[key]
1715
+
1716
+
1717
+ # Putting this fn in cpp.py (unfortunately) causes a deadlock, which is why it's in codecache.py.
1718
+ # Why? importing from cpp.py invokes codecache.pick_vec_isa(), which takes out a lock.
1719
+ # Cycle goes:
1720
+ # - CppCodeCache.load()
1721
+ # - pick_vec_isa()
1722
+ # - valid_vec_isa_list()
1723
+ # - VecISA.__bool__() <-- takes out a lock
1724
+ # - compile_file() <-- imports cpp_prefix_path from cpp, which causes us to try to take out the same lock.
1725
+ @functools.lru_cache
1726
+ def cpp_prefix_path() -> str:
1727
+ path = Path(__file__).parent / "codegen/cpp_prefix.h"
1728
+ with path.open() as f:
1729
+ content = f.read()
1730
+ _, filename = write(
1731
+ content,
1732
+ "h",
1733
+ )
1734
+ return filename
1735
+
1736
+
1737
+ def cpp_prefix() -> str:
1738
+ filename = cpp_prefix_path()
1739
+ if config.is_fbcode():
1740
+ # We need relative paths, since we bundle up
1741
+ # everything that we compile into a folder for remote compilation.
1742
+ return f'#include "{os.path.basename(filename)}"'
1743
+ else:
1744
+ return f'#include "{filename}"'
1745
+
1746
+
1747
+ # Given a path to an input cpp file and an output path,
1748
+ # Attempts to compile the file, storing the output in "output_path"
1749
+ def compile_file(
1750
+ input_path: Union[str, List[str]], output_path: str, cmd: List[str]
1751
+ ) -> None:
1752
+ input_paths = [input_path] if isinstance(input_path, str) else input_path
1753
+ input_files = [
1754
+ os.path.basename(ip) if config.is_fbcode() else ip for ip in input_paths
1755
+ ]
1756
+ try:
1757
+ if config.is_fbcode():
1758
+ # Need to copy our header into the same folder as the sourcecode.
1759
+ header_path = cpp_prefix_path()
1760
+ header_name = os.path.basename(header_path)
1761
+ output_name = os.path.basename(output_path)
1762
+ # When we build remotely, we need to make sure to carefully copy any files
1763
+ # that are required during the compilation process into our build directly.
1764
+ # This is where all of the ATen/c10/Torch includes come from.
1765
+ torch_includes_path = os.path.join(
1766
+ torch.utils.cpp_extension._TORCH_PATH, "include"
1767
+ )
1768
+ with tempfile.TemporaryDirectory() as tmp_dir:
1769
+ # Copy everything to tmp compilation folder
1770
+ shutil.copy(header_path, os.path.join(tmp_dir, header_name))
1771
+ for p, f in zip(input_paths, input_files):
1772
+ shutil.copy(p, os.path.join(tmp_dir, f))
1773
+ dest_include_path = os.path.join(tmp_dir, "include")
1774
+ shutil.copytree(torch_includes_path, dest_include_path)
1775
+ # Run the build
1776
+ output_file_path = _run_build_command(cmd, tmp_dir, output_name)
1777
+ # Copy output from the build
1778
+ if os.path.exists(output_path):
1779
+ os.remove(output_path)
1780
+ shutil.copy(output_file_path, output_path)
1781
+ else:
1782
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
1783
+ except subprocess.CalledProcessError as e:
1784
+ output = e.output.decode("utf-8")
1785
+ openmp_problem = "'omp.h' file not found" in output or "libomp" in output
1786
+ if openmp_problem and sys.platform == "darwin":
1787
+ instruction = (
1788
+ "\n\nOpenMP support not found. Please try one of the following solutions:\n"
1789
+ "(1) Set the `CXX` environment variable to a compiler other than Apple clang++/g++ "
1790
+ "that has builtin OpenMP support;\n"
1791
+ "(2) install OpenMP via conda: `conda install llvm-openmp`;\n"
1792
+ "(3) install libomp via brew: `brew install libomp`;\n"
1793
+ "(4) manually setup OpenMP and set the `OMP_PREFIX` environment variable to point to a path"
1794
+ " with `include/omp.h` under it."
1795
+ )
1796
+ output += instruction
1797
+ raise exc.CppCompileError(cmd, output) from e
1798
+
1799
+
1800
+ _libgomp: Optional[CDLL] = None
1801
+
1802
+
1803
+ class CppCodeCache:
1804
+ cache: Dict[str, CDLL] = dict()
1805
+ clear = staticmethod(cache.clear)
1806
+
1807
+ @staticmethod
1808
+ def _load_library(path: str) -> CDLL:
1809
+ try:
1810
+ return cdll.LoadLibrary(path)
1811
+ except OSError as e:
1812
+ if "gomp" in str(e) and os.path.exists("/usr/lib64/libgomp.so.1"):
1813
+ # hacky workaround for fbcode/buck
1814
+ global _libgomp
1815
+ _libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
1816
+ return cdll.LoadLibrary(path)
1817
+ if "failed to map segment from shared object" in str(e):
1818
+ raise OSError(
1819
+ f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
1820
+ "is mounted with noexec (e.g., by default Docker mounts tmp file systems "
1821
+ f"as noexec). Please remount {tempfile.gettempdir()} with exec enabled, or set another "
1822
+ "temporary directory with TORCHINDUCTOR_CACHE_DIR environment variable."
1823
+ ) from e
1824
+ raise
1825
+
1826
+ @classmethod
1827
+ def load(cls, source_code: str) -> CDLL:
1828
+ picked_vec_isa = pick_vec_isa()
1829
+ cpp_command = repr(cpp_compile_command("i", "o", vec_isa=picked_vec_isa))
1830
+ key, input_path = write(source_code, "cpp", extra=cpp_command)
1831
+ if key not in cls.cache:
1832
+ from filelock import FileLock
1833
+
1834
+ lock_dir = get_lock_dir()
1835
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
1836
+ with lock:
1837
+ output_path = input_path[:-3] + "so"
1838
+ if not os.path.exists(output_path):
1839
+ cmd = shlex.split(
1840
+ cpp_compile_command(
1841
+ input=input_path, output=output_path, vec_isa=picked_vec_isa
1842
+ )
1843
+ )
1844
+ compile_file(input_path, output_path, cmd)
1845
+ cls.cache[key] = cls._load_library(output_path)
1846
+ cls.cache[key].key = key # type: ignore[attr-defined]
1847
+
1848
+ return cls.cache[key]
1849
+
1850
+
1851
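# A minimal sketch of the cache above, assuming a working host C++ toolchain;
# the kernel body and symbol name are hypothetical.
def _example_cpp_code_cache() -> None:
    lib = CppCodeCache.load('extern "C" int forty_two() { return 42; }')
    assert lib.forty_two() == 42  # ctypes defaults to an int return type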
+ class PyCodeCache:
1852
+ cache: Dict[str, ModuleType] = dict()
1853
+ linemaps: Dict[str, List[Tuple[Any, ...]]] = dict()
1854
+ clear = staticmethod(cache.clear)
1855
+
1856
+ @classmethod
1857
+ def write(cls, source_code: str, extra: str = "") -> Tuple[str, str]:
1858
+ return write(source_code, "py", extra=extra)
1859
+
1860
+ @classmethod
1861
+ def load(
1862
+ cls,
1863
+ source_code: str,
1864
+ extra: str = "",
1865
+ linemap: Optional[List[Tuple[int, str]]] = None,
1866
+ attrs: Optional[Dict[str, Any]] = None,
1867
+ ) -> ModuleType:
1868
+ key, path = write(source_code, "py", extra=extra)
1869
+ return cls.load_by_key_path(key, path, linemap, attrs)
1870
+
1871
+ @classmethod
1872
+ def load_by_key_path(
1873
+ cls,
1874
+ key: str,
1875
+ path: str,
1876
+ linemap: Optional[List[Tuple[int, str]]] = None,
1877
+ attrs: Optional[Dict[str, Any]] = None,
1878
+ ) -> ModuleType:
1879
+ if linemap is None:
1880
+ linemap = []
1881
+ if key not in cls.cache:
1882
+ with open(path) as f:
1883
+ try:
1884
+ code = compile(f.read(), path, "exec")
1885
+ except Exception as e:
1886
+ raise RuntimeError(
1887
+ f"Failed to import {path}\n{type(e).__name__}: {e}"
1888
+ ) from None
1889
+ mod = ModuleType(f"{__name__}.{key}")
1890
+ mod.__file__ = path
1891
+ mod.key = key # type: ignore[attr-defined]
1892
+ exec(code, mod.__dict__, mod.__dict__)
1893
+ sys.modules[mod.__name__] = mod
1894
+ # another thread might set this first
1895
+ cls.cache.setdefault(key, mod)
1896
+ # unzip into separate lines/nodes lists
1897
+ cls.linemaps[path] = list(zip(*linemap))
1898
+
1899
+ if attrs is not None:
1900
+ for k, v in attrs.items():
1901
+ setattr(mod, k, v)
1902
+
1903
+ return cls.cache[key]
1904
+
1905
+ @classmethod
1906
+ @functools.lru_cache(None)
1907
+ def stack_frames_for_code(
1908
+ cls, path: str, lineno: int
1909
+ ) -> Optional[List[Dict[str, Any]]]:
1910
+ if path not in cls.linemaps:
1911
+ return None
1912
+ # [(starting_line, <fx node>), ...]
1913
+ lines, nodes = cls.linemaps[path]
1914
+ p = bisect_right(lines, lineno)
1915
+ if p == 0:
1916
+ return None
1917
+ entry = nodes[p - 1]
1918
+ if not entry:
1919
+ return None
1920
+
1921
+ def parse_stack_trace(stack_trace: str) -> List[Dict[str, Any]]:
1922
+ # ideally fx would store stack traces as structured data rather than a string,
1923
+ # but this is not on a performance-critical path
1924
+ regex = r'File "(.+)", line (\d+), in (.+)\n'
1925
+ matches = re.findall(regex, stack_trace)
1926
+ return [
1927
+ {"filename": f, "line": int(l), "name": n}
1928
+ for f, l, n in reversed(matches)
1929
+ ]
1930
+
1931
+ return parse_stack_trace(entry)
1932
+
1933
+
1934
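# A minimal sketch of loading generated Python source through the cache above;
# the one-line source string is a stand-in for Inductor's generated wrapper code.
def _example_py_code_cache() -> None:
    mod = PyCodeCache.load("def call(args):\n    return args\n")
    assert mod.call([1, 2]) == [1, 2]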
+ class CppWrapperCodeCache:
1935
+ cache: Dict[str, CDLL] = dict()
1936
+ clear = staticmethod(cache.clear)
1937
+
1938
+ @classmethod
1939
+ def load(cls, source_code: str, func_name: str, key: str, cuda: bool) -> CDLL:
1940
+ name = f"inline_extension_{key}"
1941
+ cpp_wrapper_dir = cpp_wrapper_cache_dir(name)
1942
+ if not os.path.exists(cpp_wrapper_dir):
1943
+ os.makedirs(cpp_wrapper_dir)
1944
+
1945
+ ext = "so"
1946
+ filepath = os.path.join(cpp_wrapper_dir, f"{name}.{ext}")
1947
+ log.debug("Cpp wrapper code path %s", filepath)
1948
+
1949
+ if key not in cls.cache:
1950
+ log.debug("Cpp wrapper cache miss for %s", filepath)
1951
+ from filelock import FileLock
1952
+
1953
+ lock_dir = get_lock_dir()
1954
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
1955
+ with lock:
1956
+ if not os.path.exists(filepath):
1957
+ log.debug("Cpp wrapper building %s", filepath)
1958
+
1959
+ _cpp_flags = cpp_flags()
1960
+ _opt_flags = optimization_flags()
1961
+ _shared = get_shared()
1962
+ _warning_all_flag = get_warning_all_flag()
1963
+ (
1964
+ _ipaths,
1965
+ _lpaths,
1966
+ _libs,
1967
+ _macros,
1968
+ _build_arch_flags,
1969
+ ) = get_include_and_linking_paths(
1970
+ vec_isa=pick_vec_isa(),
1971
+ cuda=cuda,
1972
+ )
1973
+ _use_custom_generated_macros = use_custom_generated_macros()
1974
+ _cpp_wrapper_flags = cpp_wrapper_flags()
1975
+
1976
+ extra_cflags = f"{_cpp_flags} {_opt_flags} {_warning_all_flag} {_build_arch_flags} {_macros} \
1977
+ {_cpp_wrapper_flags} {_use_custom_generated_macros}"
1978
+ # For CPP wrapper, add -ffast-math during linking to make CPU flush denormals.
1979
+ # CPP wrapper leverages cpp_extension which will do the compilation and linking in two stages.
1980
+ # We need to explicitly add -ffast-math as a linking flag.
1981
+ # For the default python wrapper, the compilation and linking are done in one command thus -ffast-math
1982
+ # will take effect in both compilation and linking.
1983
+ extra_ldflags = f"{_shared} {_lpaths} {_libs} -ffast-math"
1984
+
1985
+ mod = torch.utils.cpp_extension.load_inline(
1986
+ name=name,
1987
+ build_directory=cpp_wrapper_dir,
1988
+ cpp_sources=[source_code],
1989
+ functions=[func_name],
1990
+ extra_cflags=[extra_cflags],
1991
+ extra_ldflags=[extra_ldflags],
1992
+ extra_include_paths=_ipaths,
1993
+ use_pch=True,
1994
+ )
1995
+ log.debug("Cpp wrapper done building %s", filepath)
1996
+ else:
1997
+ log.debug("Found target .so, cpp wrapper loading %s", filepath)
1998
+ spec = importlib.util.spec_from_file_location(name, filepath) # type: ignore[attr-defined]
1999
+ assert spec is not None
2000
+ mod = importlib.util.module_from_spec(spec) # type: ignore[attr-defined]
2001
+ assert isinstance(spec.loader, abc.Loader)
2002
+ spec.loader.exec_module(mod)
2003
+ log.debug("Cpp wrapper done loading %s", filepath)
2004
+
2005
+ cls.cache[key] = mod
2006
+
2007
+ return cls.cache[key]
2008
+
2009
+
2010
+ class TritonCodeCache:
2011
+ @classmethod
2012
+ def load(cls, kernel_name: str, source_code: str) -> ModuleType:
2013
+ mod = PyCodeCache.load(source_code)
2014
+ return getattr(mod, kernel_name)
2015
+
2016
+
2017
+ def _cuda_compiler() -> Optional[str]:
2018
+ if cuda_env.nvcc_exist(config.cuda.cuda_cxx):
2019
+ return config.cuda.cuda_cxx
2020
+ if cuda_env.nvcc_exist(os.getenv("CUDACXX")):
2021
+ return os.getenv("CUDACXX", "")
2022
+ if cuda_env.nvcc_exist(os.getenv("CUDA_HOME")):
2023
+ return os.path.join(os.getenv("CUDA_HOME", ""), "bin/nvcc")
2024
+ return "nvcc"
2025
+
2026
+
2027
+ def _cutlass_include_paths() -> List[str]:
2028
+ cutlass_path = config.cuda.cutlass_dir
2029
+ return [
2030
+ os.path.join(cutlass_path, "include"),
2031
+ os.path.join(cutlass_path, "tools/library/include"),
2032
+ os.path.join(cutlass_path, "tools/library/src"),
2033
+ os.path.join(cutlass_path, "tools/util/include"),
2034
+ ]
2035
+
2036
+
2037
+ def _cuda_lib_options() -> List[str]:
2038
+ from torch.utils import cpp_extension
2039
+
2040
+ extra_ldflags: List[str] = []
2041
+ if is_linux():
2042
+ extra_lib_dir = "lib64"
2043
+ if not os.path.exists(
2044
+ cpp_extension._join_cuda_home(extra_lib_dir)
2045
+ ) and os.path.exists(cpp_extension._join_cuda_home("lib")):
2046
+ # 64-bit CUDA may be installed in "lib"
2047
+ # Note that it's also possible both don't exist (see _find_cuda_home) - in that case we stay with "lib64"
2048
+ extra_lib_dir = "lib"
2049
+ extra_ldflags.append(f"-L{cpp_extension._join_cuda_home(extra_lib_dir)}")
2050
+ extra_ldflags.append(
2051
+ f'-L{cpp_extension._join_cuda_home(extra_lib_dir, "stubs")}'
2052
+ )
2053
+ extra_ldflags.append("-lcuda")
2054
+ extra_ldflags.append("-lcudart")
2055
+ else:
2056
+ raise NotImplementedError(
2057
+ "Unsupported env, failed to find cuda libs! Currently only Linux is supported."
2058
+ )
2059
+ return extra_ldflags
2060
+
2061
+
2062
+ def _nvcc_host_compiler_options() -> List[str]:
2063
+ return [
2064
+ "-fPIC",
2065
+ "-fno-strict-aliasing",
2066
+ "-fvisibility=hidden",
2067
+ "-Wconversion",
2068
+ ]
2069
+
2070
+
2071
+ def _nvcc_compiler_options() -> List[str]:
2072
+ arch = cuda_env.get_cuda_arch()
2073
+ if arch == "90":
2074
+ # Required by cutlass compilation.
2075
+ arch = "90a"
2076
+ code = [f"sm_{arch}", f"compute_{arch}"]
2077
+ if config.cuda.enable_cuda_lto:
2078
+ code += [f"lto_{arch}"]
2079
+ options = [
2080
+ "-t=0",
2081
+ "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1",
2082
+ "-w",
2083
+ f"-gencode=arch=compute_{arch},code=[{','.join(code)}]",
2084
+ config.cuda.compile_opt_level,
2085
+ "-std=c++17",
2086
+ "--expt-relaxed-constexpr",
2087
+ ]
2088
+ if config.cuda.enable_debug_info:
2089
+ options.extend(["-lineinfo", "-g", "-DCUTLASS_DEBUG_TRACE_LEVEL=1"])
2090
+ if config.cuda.enable_ptxas_info:
2091
+ options.extend(
2092
+ [
2093
+ "--keep", # Keep the intermediate files for debugging (including ptx, sass, cubin etc.)
2094
+ "--ptxas-options=--warn-on-local-memory-usage", # warn us if local memory is used in CUDA Kernels
2095
+ "--ptxas-options=--warn-on-spills", # warn us if register spilling happens in CUDA Kernels
2096
+ "--resource-usage", # Report on CUDA resource usage (shared mem, registers etc.)
2097
+ "--source-in-ptx",
2098
+ ]
2099
+ ) # Annotate the ptx file with source information
2100
+ if config.cuda.use_fast_math:
2101
+ options.extend(
2102
+ [
2103
+ "--use_fast_math",
2104
+ "-DCUTLASS_USE_TANH_FOR_SIGMOID=1",
2105
+ ]
2106
+ )
2107
+ return options
2108
+
2109
+
2110
+ def cuda_compile_command(
2111
+ src_files: List[str],
2112
+ dst_file: str,
2113
+ dst_file_ext: str,
2114
+ ) -> str:
2115
+ include_paths = _cutlass_include_paths()
2116
+ cuda_lib_options = _cuda_lib_options()
2117
+ nvcc_host_compiler_options = _nvcc_host_compiler_options()
2118
+ nvcc_compiler_options = _nvcc_compiler_options()
2119
+ options = (
2120
+ nvcc_compiler_options
2121
+ + [
2122
+ f"-Xcompiler {opt}" if "=" in opt else f"-Xcompiler={opt}"
2123
+ for opt in nvcc_host_compiler_options
2124
+ ]
2125
+ + ["-I" + path for path in include_paths]
2126
+ + cuda_lib_options
2127
+ )
2128
+ src_file = " ".join(src_files)
2129
+ res = ""
2130
+ if dst_file_ext == "o":
2131
+ res = f"{_cuda_compiler()} {' '.join(options)} -c -o {dst_file} {src_file}"
2132
+ elif dst_file_ext == "so":
2133
+ options.append("-shared")
2134
+ res = f"{_cuda_compiler()} {' '.join(options)} -o {dst_file} {src_file}"
2135
+ else:
2136
+ raise NotImplementedError(f"Unsupported output file suffix {dst_file_ext}!")
2137
+ log.debug("CUDA command: %s", res)
2138
+ return res
2139
+
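# A small self-contained sketch (hypothetical flags) of the -Xcompiler forwarding rule
# used by cuda_compile_command above: host-compiler flags containing "=" are passed with
# a space, all others with the "-Xcompiler=<opt>" form.
def _example_xcompiler_forwarding() -> list:
    host_opts = ["-fPIC", "-fvisibility=hidden"]
    forwarded = [
        f"-Xcompiler {opt}" if "=" in opt else f"-Xcompiler={opt}" for opt in host_opts
    ]
    assert forwarded == ["-Xcompiler=-fPIC", "-Xcompiler -fvisibility=hidden"]
    return forwarded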
2140
+
2141
+ class DLLWrapper:
2142
+ """A wrapper for a dynamic library."""
2143
+
2144
+ def __init__(
2145
+ self,
2146
+ lib_path: str,
2147
+ ):
2148
+ self.lib_path = lib_path
2149
+ self.DLL = cdll.LoadLibrary(lib_path)
2150
+ self.is_open = True
2151
+
2152
+ def close(self):
2153
+ if self.is_open:
2154
+ self._dlclose()
2155
+ self.is_open = False
2156
+
2157
+ def _dlclose(self):
2158
+ f_dlclose = None
2159
+
2160
+ if is_linux():
2161
+ syms = CDLL(None)
2162
+ if not hasattr(syms, "dlclose"):
2163
+ # Alpine Linux
2164
+ syms = CDLL("libc.so")
2165
+
2166
+ if hasattr(syms, "dlclose"):
2167
+ f_dlclose = syms.dlclose
2168
+ else:
2169
+ raise NotImplementedError("Unsupported env, failed to do dlclose!")
2170
+
2171
+ if f_dlclose is not None:
2172
+ f_dlclose.argtypes = [c_void_p]
2173
+ f_dlclose(self.DLL._handle)
2174
+ else:
2175
+ log.warning(
2176
+ "dll unloading function was not found, library may not be unloaded properly!"
2177
+ )
2178
+
2179
+ def __getattr__(self, name):
2180
+ if not self.is_open:
2181
+ raise RuntimeError(f"Cannot use closed DLL library: {self.lib_path}")
2182
+
2183
+ method = getattr(self.DLL, name)
2184
+
2185
+ def _wrapped_func(*args):
2186
+ err = method(*args)
2187
+ if err:
2188
+ raise RuntimeError(f"Error in function: {method.__name__}")
2189
+
2190
+ return _wrapped_func
2191
+
2192
+ def __enter__(self):
2193
+ return self
2194
+
2195
+ def __exit__(self, *args):
2196
+ self.close()
2197
+
2198
+ def __del__(self):
2199
+ self.close()
2200
+
2201
+
2202
+ class CUDACodeCache:
2203
+ @dataclasses.dataclass
2204
+ class CacheEntry:
2205
+ input_path: str
2206
+ output_path: str
2207
+
2208
+ cache: Dict[str, CacheEntry] = dict()
2209
+ clear = staticmethod(cache.clear)
2210
+ _SOURCE_CODE_SUFFIX = "cu"
2211
+
2212
+ @classmethod
2213
+ def write(cls, source_code, dst_file_ext) -> Tuple[str, str]:
2214
+ """
2215
+ Writes source code into a file with dst_file_ext as the file extension.
2216
+ Returns the hash key of source code, and the path to the file.
2217
+ """
2218
+
2219
+ cuda_command = repr(
2220
+ cuda_compile_command(["dummy_input"], "dummy_output", dst_file_ext)
2221
+ )
2222
+ key, input_path = write(
2223
+ source_code, cls._SOURCE_CODE_SUFFIX, extra=cuda_command
2224
+ )
2225
+ return key, input_path
2226
+
2227
+ @classmethod
2228
+ def compile(cls, source_code, dst_file_ext) -> Tuple[str, str, str]:
2229
+ """
2230
+ Compiles CUDA source_code into a file with dst_file_ext extension.
2231
+ Returns a tuple of dst_file_path, hash_key, source_code_path
2232
+ """
2233
+
2234
+ key, input_path = cls.write(source_code, dst_file_ext)
2235
+ if key not in cls.cache:
2236
+ from filelock import FileLock
2237
+
2238
+ lock_dir = get_lock_dir()
2239
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
2240
+ with lock:
2241
+ output_path = input_path[: -len(cls._SOURCE_CODE_SUFFIX)] + dst_file_ext
2242
+ if not os.path.exists(output_path):
2243
+ cmd = cuda_compile_command(
2244
+ [input_path], output_path, dst_file_ext
2245
+ ).split(" ")
2246
+ try:
2247
+ subprocess.check_output(
2248
+ cmd, stderr=subprocess.STDOUT, env=os.environ
2249
+ )
2250
+ except subprocess.CalledProcessError as error:
2251
+ raise exc.CUDACompileError(cmd, error.output) from error
2252
+ cls.cache[key] = CUDACodeCache.CacheEntry(input_path, output_path)
2253
+
2254
+ return (cls.cache[key].output_path, key, input_path)
2255
+
2256
+ @classmethod
2257
+ def load(cls, source_code, dst_file_ext) -> Tuple[DLLWrapper, str, str]:
2258
+ """
2259
+ Compiles source code and loads the generated .so file.
2260
+ Returns a tuple of DLLWrapper, hash_key, source_code_path
2261
+ """
2262
+
2263
+ if dst_file_ext != "so":
2264
+ raise RuntimeError(
2265
+ f"Only support loading a .so file for now. "
2266
+ f"Requested file extension: {dst_file_ext}. Source code: {source_code}"
2267
+ )
2268
+ dst_file_path, hash_key, source_code_path = cls.compile(
2269
+ source_code, dst_file_ext
2270
+ )
2271
+ return (DLLWrapper(dst_file_path), hash_key, source_code_path)
2272
+
2273
+
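# A hedged usage sketch: "my_kernel.cu" and "my_entry_point" are hypothetical names, and
# running this requires a working nvcc/CUDA toolchain. It shows the intended flow:
# compile (or reuse the cached .so), call an exported symbol, then close the handle.
def _example_cuda_codecache_usage(cu_path: str = "my_kernel.cu") -> None:
    with open(cu_path) as f:
        source = f.read()
    dll, hash_key, src_path = CUDACodeCache.load(source, "so")
    try:
        dll.my_entry_point()  # a non-zero return code is raised as RuntimeError by DLLWrapper
    finally:
        dll.close()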
2274
+ def caching_device_properties():
2275
+ for _, device_interface in get_registered_device_interfaces():
2276
+ if device_interface.is_available():
2277
+ device_interface.Worker.get_device_properties()
2278
+
2279
+
2280
+ def _set_triton_ptxas_path() -> None:
2281
+ if os.environ.get("TRITON_PTXAS_PATH") is not None:
2282
+ return
2283
+ ptxas_path = os.path.abspath(
2284
+ os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
2285
+ )
2286
+ if not os.path.exists(ptxas_path):
2287
+ return
2288
+ if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
2289
+ os.environ["TRITON_PTXAS_PATH"] = ptxas_path
2290
+ else:
2291
+ warnings.warn(f"{ptxas_path} exists but is not an executable")
2292
+
2293
+
2294
+ def _worker_compile(
2295
+ kernel_name: str, source_code: str, cc: int, device: torch.device
2296
+ ) -> None:
2297
+ device_interface = get_interface_for_device(device.type)
2298
+ device_interface.Worker.set_device(device.index)
2299
+ kernel = TritonCodeCache.load(kernel_name, source_code)
2300
+ kernel.precompile(warm_cache_only_with_cc=cc)
2301
+
2302
+
2303
+ def _load_kernel(kernel_name: str, source_code: str) -> ModuleType:
2304
+ _set_triton_ptxas_path()
2305
+ kernel = TritonCodeCache.load(kernel_name, source_code)
2306
+ kernel.precompile()
2307
+ return kernel
2308
+
2309
+
2310
+ class TritonFuture:
2311
+ kernel: ModuleType
2312
+
2313
+ def __init__(
2314
+ self,
2315
+ kernel_name: str,
2316
+ source_code: str,
2317
+ future: Future[Any],
2318
+ ) -> None:
2319
+ self.kernel_name = kernel_name
2320
+ self.source_code = source_code
2321
+ self.future = future
2322
+
2323
+ # @dynamo_utils.dynamo_timed
2324
+ def result(self) -> ModuleType:
2325
+ t0 = time()
2326
+ if hasattr(self, "kernel"):
2327
+ return self.kernel
2328
+ # If the worker failed this will throw an exception.
2329
+ self.future.result()
2330
+ kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
2331
+ latency = time() - t0
2332
+ if latency > 50:
2333
+ developer_warning(
2334
+ f"Detected long compilation time of {latency} seconds for kernel name {self.kernel_name}"
2335
+ )
2336
+ developer_warning(self.source_code)
2337
+ del self.kernel_name, self.source_code, self.future
2338
+ return kernel
2339
+
2340
+
2341
+ # If this process dies abnormally (e.g. segfault)
2342
+ # it will not shut down the workers. Instead
2343
+ # the workers will have their parent reassigned to the
2344
+ # init process. This launches a separate thread to
2345
+ # watch for the worker getting reassigned,
2346
+ # and cleans it up in this case.
2347
+ #
2348
+ # This function cannot be an inner function since otherwise mp_context="spawn" would
2349
+ # not work for ProcessPoolExecutor since inner functions cannot be pickled.
2350
+ def _async_compile_initializer(orig_ppid) -> None:
2351
+ def run() -> None:
2352
+ while True:
2353
+ sleep(1)
2354
+ if orig_ppid != os.getppid():
2355
+ os.kill(os.getpid(), signal.SIGKILL)
2356
+
2357
+ global _watchdog_thread
2358
+ _watchdog_thread = Thread(target=run, daemon=True)
2359
+ _watchdog_thread.start()
2360
+
2361
+
2362
+ _watchdog_thread: Optional[Thread] = None
2363
+
2364
+
2365
+ class AsyncCompile:
2366
+ def __init__(self) -> None:
2367
+ pass
2368
+
2369
+ @staticmethod
2370
+ @functools.lru_cache(1)
2371
+ def pool() -> ThreadPoolExecutor:
2372
+ assert config.compile_threads > 1
2373
+ return ThreadPoolExecutor(config.compile_threads)
2374
+
2375
+ @staticmethod
2376
+ @functools.lru_cache(1)
2377
+ def process_pool() -> ProcessPoolExecutor:
2378
+ # ensure properties have been calculated before processes
2379
+ # are forked
2380
+ caching_device_properties()
2381
+ assert config.compile_threads > 1
2382
+ orig_ppid = os.getpid()
2383
+
2384
+ ctx = multiprocessing.get_context(config.worker_start_method)
2385
+ pool = ProcessPoolExecutor(
2386
+ config.compile_threads,
2387
+ mp_context=ctx,
2388
+ initializer=partial(_async_compile_initializer, orig_ppid),
2389
+ )
2390
+ # when this pool is created in a subprocess object, the normal exit handler
2391
+ # doesn't run, and we need to register our own handler.
2392
+ # exitpriority has to be high, because another one of the finalizers will
2393
+ # kill the worker thread that sends the shutdown message to the workers...
2394
+ multiprocessing.util.Finalize(None, pool.shutdown, exitpriority=sys.maxsize)
2395
+ return pool
2396
+
2397
+ @classmethod
2398
+ def warm_pool(cls) -> None:
2399
+ if config.compile_threads <= 1:
2400
+ return
2401
+ _compile_start()
2402
+ pool = cls.process_pool()
2403
+
2404
+ # We have to fork processes for compiler workers, but the more memory and other resources that are loaded, the
2405
+ # slower the os.fork time is, quite drastically. It also holds the GIL so we can't put it on another thread.
2406
+
2407
+ # Examples:
2408
+ # A simple x + x + x script: 10ms in the middle of the program, 2ms at startup
2409
+ # tf_efficientnet_b0 benchmark: 50ms! in the middle of the program, 3ms at startup
2410
+
2411
+ # So we want to start the workers early when it is still cheap, and also to allow the workers to get
2412
+ # ready before we have work for them.
2413
+
2414
+ # ProcessPoolExecutor also does not launch the workers until it finds a point when all the workers are idle.
2415
+ # But if we waited until then, the fork time would be long and we would be waiting for the processes to initialize.
2416
+
2417
+ # We force them to start here with some YOLOing of the internal methods.
2418
+ if hasattr(pool, "_start_queue_management_thread"):
2419
+ pool._start_queue_management_thread()
2420
+ else:
2421
+ for _ in range(config.compile_threads):
2422
+ pool._adjust_process_count()
2423
+ if hasattr(pool, "_start_executor_manager_thread"):
2424
+ pool._start_executor_manager_thread()
2425
+ _compile_end()
2426
+
2427
+ @classmethod
2428
+ def submit(cls, task: Callable[..., Any]) -> Any:
2429
+ if config.compile_threads <= 1:
2430
+ return task()
2431
+ return cls.pool().submit(task)
2432
+
2433
+ @classmethod
2434
+ def map(cls, fn: Callable[..., Any], seq: List[Any]) -> List[Any]:
2435
+ if config.compile_threads <= 1 or len(seq) <= 1:
2436
+ return list(map(fn, seq))
2437
+ return [t.result() for t in [cls.pool().submit(fn, x) for x in seq]]
2438
+
2439
+ def triton(
2440
+ self, kernel_name: str, source_code: str, device_str: str = "cuda"
2441
+ ) -> Union[TritonFuture, ModuleType]:
2442
+ _compile_start()
2443
+
2444
+ if config.compile_threads > 1:
2445
+ device_interface = get_interface_for_device(device_str)
2446
+ device = torch.device(device_str, device_interface.current_device())
2447
+ cc = device_interface.get_compute_capability(device)
2448
+ future = self.process_pool().submit(
2449
+ _worker_compile, kernel_name, source_code, cc, device
2450
+ )
2451
+ return TritonFuture(kernel_name, source_code, future)
2452
+ else:
2453
+ return _load_kernel(kernel_name, source_code)
2454
+
2455
+ def cpp(self, source_code: str) -> ModuleType:
2456
+ def task():
2457
+ return CppCodeCache.load(source_code).kernel
2458
+
2459
+ return self.submit(task)
2460
+
2461
+ def cuda(self, source_code, dst_file_ext):
2462
+ def task():
2463
+ return CUDACodeCache.load(source_code, dst_file_ext)[0]
2464
+
2465
+ return self.submit(task)
2466
+
2467
+ def wait(self, scope: Dict[str, Any]) -> None:
2468
+ num_kernels = len(
2469
+ [
2470
+ value
2471
+ for key, value in scope.items()
2472
+ if isinstance(value, (Future, TritonFuture))
2473
+ ]
2474
+ )
2475
+ pbar = tqdm(
2476
+ total=num_kernels,
2477
+ desc="Inductor Compilation",
2478
+ disable=config.disable_progress,
2479
+ delay=0,
2480
+ )
2481
+ if config.compile_threads > 1:
2482
+ for key, result in scope.items():
2483
+ if config.verbose_progress and not isinstance(pbar, _Faketqdm):
2484
+ pbar.set_postfix_str(key)
2485
+ if isinstance(result, (Future, TritonFuture)):
2486
+ scope[key] = result.result()
2487
+ pbar.update(1)
2488
+
2489
+ _compile_end()
2490
+
2491
+
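# A hedged sketch of how generated wrapper code typically drives this class: kernels are
# submitted early (possibly returning a TritonFuture) and resolved in one final wait().
# kernel_name/kernel_src are placeholders, not values defined in this file.
def _example_async_compile_usage(kernel_name: str, kernel_src: str, scope: Dict[str, Any]) -> None:
    async_compile = AsyncCompile()
    scope[kernel_name] = async_compile.triton(kernel_name, kernel_src)
    async_compile.wait(scope)  # replaces any Future/TritonFuture entries with loaded kernels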
2492
+ AsyncCompile.warm_pool()
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cuda_combined_scheduling.cpython-310.pyc ADDED
Binary file (3.36 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/memory_planning.cpython-310.pyc ADDED
Binary file (26.5 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc ADDED
Binary file (2.93 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc ADDED
Binary file (83.6 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/aoti_runtime/interface.cpp ADDED
@@ -0,0 +1,239 @@
1
+ #include <torch/csrc/inductor/aoti_runtime/interface.h>
2
+ #include <torch/csrc/inductor/aoti_runtime/model_container.h>
3
+
4
+ #include <iostream>
5
+ #include <stdexcept>
6
+ #include <vector>
7
+
8
+ #define CONVERT_EXCEPTION_TO_ERROR_CODE(...) \
9
+ try { \
10
+ __VA_ARGS__ \
11
+ } catch (const std::exception& e) { \
12
+ std::cerr << "Error: " << e.what() << std::endl; \
13
+ return AOTI_RUNTIME_FAILURE; \
14
+ } catch (...) { \
15
+ std::cerr << "Unknown exception occurred." << std::endl; \
16
+ return AOTI_RUNTIME_FAILURE; \
17
+ } \
18
+ return AOTI_RUNTIME_SUCCESS;
19
+
20
+ #define AOTI_VECTOR_SIZE_CHECK(actual_size, expected_size, name) \
21
+ do { \
22
+ AOTI_RUNTIME_CHECK( \
23
+ actual_size == expected_size, \
24
+ "expected " + std::string(name) + " vector size to be " + \
25
+ std::to_string(expected_size) + ", but got " + \
26
+ std::to_string(actual_size)); \
27
+ } while (0)
28
+
29
+ // AOTInductor uses at::addmm_out, which doesn't support
30
+ // arguments that require gradient. For this reason, we
31
+ // enforce no_grad context for run APIs.
32
+ //
33
+ // A RAII, thread local (!) guard that enables or disables grad mode upon
34
+ // construction, and sets it back to the original value upon destruction.
35
+ struct AOTINoGradGuard {
36
+ AOTINoGradGuard() : prev_mode(aoti_torch_grad_mode_is_enabled()) {
37
+ aoti_torch_grad_mode_set_enabled(false);
38
+ }
39
+ ~AOTINoGradGuard() {
40
+ aoti_torch_grad_mode_set_enabled(prev_mode);
41
+ }
42
+ bool prev_mode;
43
+ };
44
+
45
+ extern "C" {
46
+
47
+ AOTIRuntimeError AOTInductorModelContainerCreate(
48
+ AOTInductorModelContainerHandle* container_handle,
49
+ size_t num_models,
50
+ bool is_cpu,
51
+ const char* cubin_dir) {
52
+ if (num_models == 0) {
53
+ std::cerr << "Error: num_models must be positive, but got 0" << std::endl;
54
+ return AOTI_RUNTIME_FAILURE;
55
+ }
56
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
57
+ std::optional<std::string> cubin_dir_opt;
58
+ if (cubin_dir != nullptr) {
59
+ cubin_dir_opt.emplace(cubin_dir);
60
+ }
61
+ auto* container = new torch::aot_inductor::AOTInductorModelContainer(
62
+ num_models, is_cpu, cubin_dir_opt);
63
+ *container_handle =
64
+ reinterpret_cast<AOTInductorModelContainerHandle>(container);
65
+ })
66
+ }
67
+
68
+ AOTIRuntimeError AOTInductorModelContainerDelete(
69
+ AOTInductorModelContainerHandle container_handle) {
70
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
71
+ auto* container =
72
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
73
+ container_handle);
74
+ delete container;
75
+ });
76
+ }
77
+
78
+ AOTIRuntimeError AOTInductorModelContainerRun(
79
+ AOTInductorModelContainerHandle container_handle,
80
+ AtenTensorHandle* input_handles, // array of input AtenTensorHandle; handles
81
+ // are stolen; the array itself is borrowed
82
+ size_t num_inputs,
83
+ AtenTensorHandle*
84
+ output_handles, // array for writing output AtenTensorHandle; handles
85
+ // will be stolen by the caller; the array itself is
86
+ // borrowed
87
+ size_t num_outputs,
88
+ AOTInductorStreamHandle stream_handle,
89
+ AOTIProxyExecutorHandle proxy_executor_handle) {
90
+ auto* container =
91
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
92
+ container_handle);
93
+ AOTI_VECTOR_SIZE_CHECK(num_inputs, container->num_inputs(), "inputs");
94
+ AOTI_VECTOR_SIZE_CHECK(num_outputs, container->num_outputs(), "outputs");
95
+
96
+ auto stream = reinterpret_cast<torch::aot_inductor::DeviceStreamType>(stream_handle);
97
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
98
+ AOTINoGradGuard guard;
99
+ container->run(
100
+ input_handles,
101
+ output_handles,
102
+ stream,
103
+ proxy_executor_handle);
104
+ })
105
+ }
106
+
107
+ AOTIRuntimeError AOTInductorModelContainerGetNumInputs(
108
+ AOTInductorModelContainerHandle container_handle,
109
+ size_t* ret_num_inputs) {
110
+ auto* container =
111
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
112
+ container_handle);
113
+ CONVERT_EXCEPTION_TO_ERROR_CODE(
114
+ { *ret_num_inputs = container->num_inputs(); })
115
+ }
116
+
117
+ AOTIRuntimeError AOTInductorModelContainerGetInputName(
118
+ AOTInductorModelContainerHandle container_handle,
119
+ size_t input_idx,
120
+ const char** ret_input_names) {
121
+ auto* container =
122
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
123
+ container_handle);
124
+ CONVERT_EXCEPTION_TO_ERROR_CODE(
125
+ { *ret_input_names = container->input_name(input_idx); })
126
+ }
127
+
128
+ AOTIRuntimeError AOTInductorModelContainerGetNumOutputs(
129
+ AOTInductorModelContainerHandle container_handle,
130
+ size_t* ret_num_outputs) {
131
+ auto* container =
132
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
133
+ container_handle);
134
+ CONVERT_EXCEPTION_TO_ERROR_CODE(
135
+ { *ret_num_outputs = container->num_outputs(); })
136
+ }
137
+
138
+ AOTIRuntimeError AOTInductorModelContainerGetOutputName(
139
+ AOTInductorModelContainerHandle container_handle,
140
+ size_t output_idx,
141
+ const char** ret_output_names) {
142
+ auto* container =
143
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
144
+ container_handle);
145
+ CONVERT_EXCEPTION_TO_ERROR_CODE(
146
+ { *ret_output_names = container->output_name(output_idx); })
147
+ }
148
+
149
+ AOTIRuntimeError AOTInductorModelContainerGetCallSpec(
150
+ AOTInductorModelContainerHandle container_handle,
151
+ const char** in_spec,
152
+ const char** out_spec) {
153
+ auto* container =
154
+ reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
155
+ container_handle);
156
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
157
+ *in_spec = container->get_in_spec();
158
+ *out_spec = container->get_out_spec();
159
+ })
160
+ }
161
+
162
+ AOTIRuntimeError AOTInductorModelCreate(
163
+ AOTInductorModelHandle* model_handle,
164
+ AOTInductorConstantMapHandle constant_map_handle) {
165
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
166
+ auto constant_map = std::make_shared<torch::aot_inductor::ConstantMap>();
167
+ auto input_map = reinterpret_cast<std::unordered_map<std::string, AtenTensorHandle>*>(constant_map_handle);
168
+
169
+ auto model = new torch::aot_inductor::AOTInductorModel(
170
+ constant_map,
171
+ ""
172
+ );
173
+
174
+ if (input_map) {
175
+ for (auto const& kv : *input_map) {
176
+ constant_map->emplace(kv.first, kv.second);
177
+ }
178
+ } else {
179
+ model->load_constants(/*is_cpu*/true);
180
+ }
181
+
182
+ *model_handle = reinterpret_cast<AOTInductorModelHandle>(model);
183
+ })
184
+ }
185
+
186
+ AOTIRuntimeError AOTInductorModelRun(
187
+ AOTInductorModelHandle model_handle,
188
+ AtenTensorHandle* input_handles,
189
+ AtenTensorHandle* output_handles) {
190
+ auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
191
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
192
+ AOTINoGradGuard guard;
193
+ model->run_impl(
194
+ input_handles,
195
+ output_handles,
196
+ (torch::aot_inductor::DeviceStreamType)nullptr,
197
+ nullptr);
198
+ })
199
+ }
200
+
201
+
202
+ AOTIRuntimeError AOTInductorModelDelete(
203
+ AOTInductorModelHandle model_handle
204
+ ) {
205
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
206
+ auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
207
+ delete model;
208
+ })
209
+ }
210
+
211
+ AOTIRuntimeError AOTInductorModelGetNumOutputs(
212
+ AOTInductorModelHandle model_handle,
213
+ size_t* ret_num_outputs) {
214
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
215
+ auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
216
+ *ret_num_outputs = model->num_outputs();
217
+ })
218
+ }
219
+
220
+ AOTIRuntimeError AOTInductorModelUpdateConstantsMap(
221
+ AOTInductorModelHandle model_handle,
222
+ AOTInductorConstantMapHandle constant_map_handle) {
223
+ auto model = reinterpret_cast<torch::aot_inductor::AOTInductorModel*>(model_handle);
224
+ CONVERT_EXCEPTION_TO_ERROR_CODE({
225
+ auto constant_map = std::make_shared<torch::aot_inductor::ConstantMap>();
226
+ auto input_map = reinterpret_cast<std::unordered_map<std::string, AtenTensorHandle>*>(constant_map_handle);
227
+
228
+ for (auto const& kv : *input_map) {
229
+ constant_map->emplace(kv.first, kv.second);
230
+ }
231
+ model->update_constants_map(std::move(constant_map));
232
+ })
233
+ }
234
+
235
+ #define CACHE_TORCH_DTYPE(typename) static auto cached_torch_dtype_##typename = aoti_torch_dtype_##typename()
236
+
237
+ static auto cached_torch_device_type_cpu = aoti_torch_device_type_cpu();
238
+ static auto cached_torch_device_type_cuda = aoti_torch_device_type_cuda();
239
+ } // extern "C"
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/common.py ADDED
@@ -0,0 +1,1295 @@
1
+ import contextlib
2
+ import dataclasses
3
+ import functools
4
+ import itertools
5
+ import logging
6
+ import operator
7
+ import re
8
+ from collections import namedtuple
9
+ from itertools import chain
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ ClassVar,
14
+ Dict,
15
+ List,
16
+ NamedTuple,
17
+ Optional,
18
+ Set,
19
+ Tuple,
20
+ Union,
21
+ )
22
+
23
+ import sympy
24
+ from sympy.printing.printer import Printer
25
+
26
+ import torch
27
+ import torch.fx
28
+ from torch.utils._sympy.value_ranges import ValueRanges
29
+
30
+ from .. import config, metrics
31
+ from ..utils import (
32
+ DeferredLineBase,
33
+ do_bench,
34
+ free_symbol_startswith,
35
+ IndentedBuffer,
36
+ sympy_dot,
37
+ sympy_subs,
38
+ sympy_symbol,
39
+ unique,
40
+ )
41
+ from ..virtualized import ops, OpsValue, V
42
+
43
+ schedule_log = torch._logging.getArtifactLogger(__name__, "schedule")
44
+
45
+
46
+ def data_type_logger(msg):
47
+ if schedule_log.isEnabledFor(logging.DEBUG):
48
+ schedule_log.debug("Data type propagation: %s", msg)
49
+
50
+
51
+ TensorArg = namedtuple("TensorArg", ["name", "buffer", "dtype", "check_alignment"])
52
+ SizeArg = namedtuple("SizeArg", ["name", "expr"])
53
+
54
+ DeviceCodegen = namedtuple("DeviceCodegen", ["scheduling", "wrapper_codegen"])
55
+ device_codegens: Dict[str, DeviceCodegen] = {}
56
+
57
+
58
+ # The code generated by Inductor consists of two main parts: kernel code and wrapper code.
59
+ # For any new backend looking to integrate with Inductor, customization of these two main
60
+ # parts is necessary to generate its specific code.
61
+ #
62
+ # Kernel code generation is determined by different Scheduling. Consequently, a new
63
+ # backend needs to provide a custom Scheduling for its unique kernel code generation. Currently,
64
+ # CppScheduling and TritonScheduling serve the C++/OpenMP and Triton backends, respectively.
65
+ #
66
+ # For the Wrapper, Inductor provides a WrapperCodeGen class to generate the Python wrapper code
67
+ # that bridges kernels. This allows out-of-tree backends to inherit from WrapperCodeGen,
68
+ # and override specific member functions to create backend-specific Python wrapper code.
69
+ #
70
+ # Other classes, such as CppKernel and TritonKernel, used for code generation, typically form part
71
+ # of the logic for either Scheduling or WrapperCodeGen. So the Scheduling and WrapperCodeGen interfaces
72
+ # provide flexibility to the backend. A backend can choose to implement these classes from scratch,
73
+ # or reuse them by extending and overriding as necessary. And Inductor provides the registration API,
74
+ # register_backend_for_device, to equip a new backend at runtime.
75
+ #
76
+ # Intel has developed a new backend on top of Triton to support Intel GPUs, leveraging these interfaces.
77
+ # This backend can be used as a reference:
78
+ # https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9
79
+ def register_backend_for_device(
80
+ device: str, device_scheduling: type, device_wrapper_codegen: type
81
+ ):
82
+ device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen)
83
+
84
+
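# A hedged sketch of the registration API above: "xpu" and the two classes are placeholders
# standing in for a real out-of-tree backend's Scheduling and WrapperCodeGen implementations.
def _example_register_backend() -> None:
    class ExampleScheduling:  # stand-in for a backend-specific Scheduling class
        pass

    class ExampleWrapperCodeGen:  # stand-in for a WrapperCodeGen subclass
        pass

    register_backend_for_device("xpu", ExampleScheduling, ExampleWrapperCodeGen)
    assert get_scheduling_for_device("xpu") is ExampleScheduling
    assert get_wrapper_codegen_for_device("xpu") is ExampleWrapperCodeGen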
85
+ def get_scheduling_for_device(device: str):
86
+ return device_codegens[device].scheduling if device in device_codegens else None
87
+
88
+
89
+ def get_wrapper_codegen_for_device(device: str):
90
+ return (
91
+ device_codegens[device].wrapper_codegen if device in device_codegens else None
92
+ )
93
+
94
+
95
+ def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes):
96
+ from ..ir import FlexibleLayout
97
+
98
+ # added contiguous index prevents reordering
99
+ return [*index, sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))]
100
+
101
+
102
+ @functools.lru_cache(None)
103
+ def boolean_ops():
104
+ return (
105
+ "is_inf",
106
+ "is_nan",
107
+ "bitwise_xor",
108
+ "logical_not",
109
+ "signbit",
110
+ "le",
111
+ "lt",
112
+ "ge",
113
+ "gt",
114
+ "eq",
115
+ "ne",
116
+ )
117
+
118
+
119
+ DTYPE_TO_COMPUTATION_DTYPE = {
120
+ torch.bfloat16: torch.float,
121
+ torch.float16: torch.float,
122
+ **{
123
+ dtype: dtype
124
+ for dtype in [
125
+ torch.bool,
126
+ torch.float32,
127
+ torch.float64,
128
+ torch.int8,
129
+ torch.int16,
130
+ torch.int32,
131
+ torch.int64,
132
+ torch.uint8,
133
+ ]
134
+ },
135
+ }
136
+
137
+
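# A small illustrative check of the table above: reduced-precision float dtypes are
# promoted to float32 for computation, while every other listed dtype maps to itself.
def _example_computation_dtype() -> None:
    assert DTYPE_TO_COMPUTATION_DTYPE[torch.bfloat16] is torch.float32
    assert DTYPE_TO_COMPUTATION_DTYPE[torch.int32] is torch.int32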
138
+ class DataTypePropagation:
139
+ def __init__(self, body) -> None:
140
+ self.body = body
141
+ self.graphs: Dict[Union[Callable[..., Any], str], Any] = {
142
+ "root": body.root_block.graph
143
+ }
144
+ for k, v in body.subblocks.items():
145
+ self.graphs[k] = v.graph
146
+
147
+ def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
148
+ inputs = node.all_input_nodes
149
+ input_nodes = [
150
+ n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder"
151
+ ]
152
+ if len(input_nodes) == 0:
153
+ return None
154
+
155
+ all_input_nodes_propagated = all(
156
+ OptimizationContext.key in n.meta
157
+ and n.meta[OptimizationContext.key].dtype is not None
158
+ for n in input_nodes
159
+ )
160
+ if not all_input_nodes_propagated:
161
+ return None
162
+
163
+ return functools.reduce(
164
+ torch.promote_types,
165
+ [n.meta[OptimizationContext.key].dtype for n in input_nodes],
166
+ )
167
+
168
+ def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node):
169
+ sub_graph = self.graphs[node.target]
170
+ dtype = self.propagate_graph(sub_graph)
171
+ assert dtype
172
+ return dtype
173
+
174
+ def deduce_node_dtype(self, node: torch.fx.Node):
175
+ if node.target in boolean_ops():
176
+ return torch.bool
177
+
178
+ if node.op == "placeholder":
179
+ return None
180
+
181
+ if node.target == "output":
182
+ # we can infer the output node's dtype if it only has 1 arg
183
+ if len(node.args) != 1:
184
+ return None
185
+
186
+ if node.target in (
187
+ "to_dtype",
188
+ "index_expr",
189
+ ):
190
+ return node.args[-1]
191
+
192
+ if node.target in (
193
+ "rand",
194
+ "randn",
195
+ ):
196
+ return torch.float
197
+
198
+ if node.target in (
199
+ "get_index",
200
+ "index_expr",
201
+ ):
202
+ return torch.int64
203
+
204
+ if node.target in (
205
+ "load",
206
+ "store",
207
+ "store_reduction",
208
+ ):
209
+ buf_name = node.args[1]
210
+ return V.graph.get_dtype(buf_name)
211
+
212
+ if node.target == operator.getitem:
213
+ return self.deduce_node_dtype(node.args[0])
214
+
215
+ assert isinstance(node.target, str)
216
+
217
+ if node.target == "reduction":
218
+ return node.args[1]
219
+
220
+ if node.target == "constant":
221
+ return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]]
222
+
223
+ if node.target.startswith("masked_subblock"):
224
+ return self.deduce_node_dtype_by_subgraph(node)
225
+
226
+ return self.deduce_node_dtype_by_inputs(node)
227
+
228
+ def propagate_graph(self, graph: torch.fx.Graph):
229
+ assert graph.nodes
230
+ graph_dtype = None
231
+ # For masked_subblock, we use output's dtype to represent
232
+ # the dtype of this subgraph. For other cases, graph_dtype
233
+ # might be None
234
+ for node in graph.nodes:
235
+ if OptimizationContext.key in node.meta:
236
+ opt_ctx = node.meta[OptimizationContext.key]
237
+ else:
238
+ opt_ctx = OptimizationContext()
239
+
240
+ opt_ctx.dtype = self.deduce_node_dtype(node)
241
+ node.meta[OptimizationContext.key] = opt_ctx
242
+ if node.target == "output":
243
+ graph_dtype = opt_ctx.dtype
244
+ return graph_dtype
245
+
246
+ def propagate(self):
247
+ self.propagate_graph(self.graphs["root"])
248
+
249
+ @classmethod
250
+ def propagate_loopbody(cls, body):
251
+ return cls(body).propagate()
252
+
253
+ @classmethod
254
+ def propagate_scheduler_node(cls, node):
255
+ from ..ir import LoopBody
256
+ from ..scheduler import SchedulerNode
257
+
258
+ assert isinstance(node, SchedulerNode)
259
+ assert isinstance(node._body, LoopBody)
260
+ DataTypePropagation.propagate_loopbody(node._body)
261
+
262
+
263
+ class ExprPrinter(Printer):
264
+ @staticmethod
265
+ def paren(string):
266
+ def all_in_parens(string):
267
+ if string[0] != "(" or len(string) < 2:
268
+ return False
269
+ count = 1
270
+ for i, char in enumerate(string[1:]):
271
+ if char == "(":
272
+ count += 1
273
+ elif char == ")":
274
+ count -= 1
275
+ if count == 0 and i != len(string) - 2:
276
+ return False
277
+ assert count == 0
278
+ return True
279
+
280
+ if (
281
+ isinstance(string, CSEVariable)
282
+ or re.match(r"^[a-z0-9_.]+$", string, re.I)
283
+ or re.match(r"^\([^)]*\)$", string, re.I)
284
+ or string == ""
285
+ ):
286
+ return string
287
+ # don't put extra parens for strings that are already wrapped in parens
288
+ if all_in_parens(string):
289
+ return string
290
+ return f"({string})"
291
+
292
+ def _print_Infinity(self, expr):
293
+ return "math.inf"
294
+
295
+ def _print_NegativeInfinity(self, expr):
296
+ return "-math.inf"
297
+
298
+ def _print_Relational(self, expr):
299
+ return f" {expr.rel_op} ".join(map(self.paren, map(self._print, expr.args)))
300
+
301
+ def _print_Mul(self, expr):
302
+ return "*".join(map(self.paren, map(self._print, expr.args)))
303
+
304
+ def _print_Add(self, expr):
305
+ return " + ".join(map(self.paren, map(self._print, expr.args)))
306
+
307
+ def _print_Mod(self, expr):
308
+ return " % ".join(map(self.paren, map(self._print, expr.args)))
309
+
310
+ def _print_FloorDiv(self, expr):
311
+ raise NotImplementedError(f"_print_FloorDiv not implemented for {type(self)}")
312
+
313
+ def _print_CleanDiv(self, expr):
314
+ return self._print_FloorDiv(expr)
315
+
316
+ def _print_GreaterThan(self, expr):
317
+ # GreaterThan: >=
318
+ # StrictlyGreaterThan: >
319
+ # Go figure...
320
+ return " >= ".join(map(self.paren, map(self._print, expr.args)))
321
+
322
+ def _print_align(self, expr):
323
+ assert len(expr.args) == 1
324
+ return f"align({self._print(expr.args[0])})"
325
+
326
+
327
+ class PythonPrinter(ExprPrinter):
328
+ def _print_ModularIndexing(self, expr):
329
+ x, div, mod = expr.args
330
+ x = self.paren(self.doprint(x))
331
+ div = self.paren(self.doprint(div))
332
+ mod = self.paren(self.doprint(mod))
333
+ if div != "1":
334
+ x = f"({x} // {div})"
335
+ return f"{x} % {mod}"
336
+
337
+ def _print_FloorDiv(self, expr):
338
+ x, div = expr.args
339
+ x = self.paren(self.doprint(x))
340
+ div = self.paren(self.doprint(div))
341
+ return f"({x} // {div})"
342
+
343
+ def _helper_sqrt(self, expr):
344
+ return f"math.sqrt({self._print(expr)})"
345
+
346
+ def _print_Pow(self, expr):
347
+ # Pow() confuses triton
348
+ base, exp = expr.args
349
+ # NB: Remember this is sizevar computation! You don't typically
350
+ # expect to have to do floating point computation including exponents
351
+ # in sizevar compute. Instead of adding support for floating
352
+ # point pow, you should make upstream retranslate the Sympy expression
353
+ # into Tensor expressions earlier and do that instead.
354
+ if exp == 0.5:
355
+ return self._helper_sqrt(base)
356
+ elif exp == -0.5:
357
+ return "1/" + self._helper_sqrt(base)
358
+ base = self._print(base)
359
+ assert exp == int(exp), exp
360
+ exp = int(exp)
361
+ if exp > 0:
362
+ return "*".join([self.paren(base)] * exp)
363
+ elif exp < 0:
364
+ return "1/" + self.paren("*".join([self.paren(base)] * abs(exp)))
365
+ else: # exp == 0
366
+ return "1"
367
+
368
+ def _print_floor(self, expr):
369
+ assert len(expr.args) == 1
370
+ return f"math.floor({self._print(expr.args[0])})"
371
+
372
+ def _print_ceiling(self, expr):
373
+ assert len(expr.args) == 1
374
+ return f"math.ceil({self._print(expr.args[0])})"
375
+
376
+ def _print_Abs(self, expr):
377
+ assert len(expr.args) == 1
378
+ return f"abs({self._print(expr.args[0])})"
379
+
380
+ def _print_Max(self, expr):
381
+ assert len(expr.args) >= 2
382
+ return f"max({', '.join(map(self._print, expr.args))})"
383
+
384
+ def _print_Min(self, expr):
385
+ assert len(expr.args) >= 2
386
+ return f"min({', '.join(map(self._print, expr.args))})"
387
+
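# A small illustrative check (assumes only sympy, which is imported above) of how
# PythonPrinter._print_Pow renders sizevar powers: integer exponents become repeated
# multiplication and exponent 1/2 becomes math.sqrt.
def _example_python_printer() -> None:
    s0 = sympy.Symbol("s0")
    printer = PythonPrinter()
    assert printer.doprint(s0**3) == "s0*s0*s0"
    assert printer.doprint(sympy.sqrt(s0)) == "math.sqrt(s0)"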
388
+
389
+ class OpOverrides:
390
+ def __init__(self, parent):
391
+ super().__init__()
392
+ self._parent = parent
393
+
394
+ def __getattr__(self, item):
395
+ return getattr(self._parent, item)
396
+
397
+ @staticmethod
398
+ def identity(value):
399
+ # used to trigger cse
400
+ return value
401
+
402
+ @staticmethod
403
+ def constant(value, dtype):
404
+ return repr(value)
405
+
406
+ @staticmethod
407
+ def reciprocal(x):
408
+ return ops.truediv("1", x)
409
+
410
+ @staticmethod
411
+ def square(x):
412
+ return ops.mul(x, x)
413
+
414
+ @staticmethod
415
+ def bitwise_not(x):
416
+ return f"~{ExprPrinter.paren(x)}"
417
+
418
+ @staticmethod
419
+ def logical_not(a):
420
+ return f"{ExprPrinter.paren(a)} == 0"
421
+
422
+ @staticmethod
423
+ def bitwise_and(x, y):
424
+ return f"{ExprPrinter.paren(x)} & {ExprPrinter.paren(y)}"
425
+
426
+ @staticmethod
427
+ def bitwise_or(x, y):
428
+ return f"{ExprPrinter.paren(x)} | {ExprPrinter.paren(y)}"
429
+
430
+ @staticmethod
431
+ def bitwise_xor(x, y):
432
+ return f"{ExprPrinter.paren(x)} ^ {ExprPrinter.paren(y)}"
433
+
434
+ @staticmethod
435
+ def bitwise_left_shift(x, y):
436
+ return f"{ExprPrinter.paren(x)} << {ExprPrinter.paren(y)}"
437
+
438
+ # TODO(fdrocha): this is currently not being used anywhere,
439
+ # pending on moving triton pin past 972b761
440
+ @staticmethod
441
+ def bitwise_right_shift(x, y):
442
+ return f"{ExprPrinter.paren(x)} >> {ExprPrinter.paren(y)}"
443
+
444
+ @staticmethod
445
+ def remainder(a, b):
446
+ r = ops.mod(a, b)
447
+ return ops.where(f"(({r} != 0) & (({r} < 0) != ({b} < 0)))", ops.add(r, b), r)
448
+
449
+ @staticmethod
450
+ def load_seed(name, offset):
451
+ return ops.load(name, sympy.Integer(offset))
452
+
453
+
454
+ class DeferredLine(DeferredLineBase):
455
+ """A line that can be 'unwritten' by adding its name to V.graph.removed_buffers"""
456
+
457
+ def __init__(self, name, line):
458
+ super().__init__(line)
459
+ self.name = name
460
+
461
+ def __call__(self):
462
+ if all(
463
+ self.name not in x
464
+ for x in (
465
+ V.graph.removed_buffers,
466
+ V.kernel.removed_buffers,
467
+ V.graph.inplaced_to_remove,
468
+ V.kernel.inplaced_to_remove,
469
+ )
470
+ ):
471
+ return self.line
472
+ return None
473
+
474
+ def _new_line(self, line):
475
+ return DeferredLine(self.name, line)
476
+
477
+
478
+ class BracesBuffer(IndentedBuffer):
479
+ def indent(self, offset=1):
480
+ @contextlib.contextmanager
481
+ def ctx():
482
+ for _ in range(offset):
483
+ self.writeline("{")
484
+ self._indent += 1
485
+ for _ in range(-offset):
486
+ self._indent -= 1
487
+ self.writeline("}")
488
+ yield
489
+ for _ in range(-offset):
490
+ self.writeline("{")
491
+ self._indent += 1
492
+ for _ in range(offset):
493
+ self._indent -= 1
494
+ self.writeline("}")
495
+
496
+ return ctx()
497
+
498
+
499
+ class InplacedBuffer(NamedTuple):
500
+ inner_name: str
501
+ other_names: List[str]
502
+
503
+
504
+ class KernelArgs:
505
+ @staticmethod
506
+ def _lookup(prefix, odict, name):
507
+ assert isinstance(name, (str, sympy.Symbol))
508
+ if name not in odict:
509
+ odict[name] = f"{prefix}{len(odict)}"
510
+ return odict[name]
511
+
512
+ def __init__(self, sizevars=None):
513
+ self.input_buffers = dict()
514
+ self.output_buffers = dict()
515
+ self.inplace_buffers = dict()
516
+ self.sizevars = sizevars or dict()
517
+
518
+ def __repr__(self):
519
+ return "KernelArgs({})".format(
520
+ ", ".join(
521
+ map(
522
+ repr,
523
+ [
524
+ self.input_buffers,
525
+ self.output_buffers,
526
+ self.inplace_buffers,
527
+ self.sizevars,
528
+ ],
529
+ )
530
+ )
531
+ )
532
+
533
+ def _buffer_is_marked_removed(self, name):
534
+ return isinstance(name, str) and name.startswith("REMOVED")
535
+
536
+ def input(self, name):
537
+ if V.graph.scheduler:
538
+ name = V.graph.scheduler.mutation_real_name.get(name, name)
539
+ assert name not in V.graph.removed_buffers, name
540
+ if name in self.output_buffers:
541
+ return self.output_buffers[name]
542
+ if name in self.inplace_buffers:
543
+ return self.inplace_buffers[name].inner_name
544
+ if name.startswith("seed"):
545
+ return self._lookup("seed", self.input_buffers, name)
546
+ return self._lookup("in_ptr", self.input_buffers, name)
547
+
548
+ def output(self, name):
549
+ if V.graph.scheduler:
550
+ name = V.graph.scheduler.mutation_real_name.get(name, name)
551
+ assert name not in V.graph.removed_buffers, name
552
+ if name in self.inplace_buffers:
553
+ return self.inplace_buffers[name].inner_name
554
+ return self._lookup("out_ptr", self.output_buffers, name)
555
+
556
+ def make_inplace(self, input_name, output_name):
557
+ assert output_name not in self.inplace_buffers
558
+ if input_name in self.inplace_buffers:
559
+ buf = self.inplace_buffers[input_name]
560
+ buf.other_names.append(output_name)
561
+ self.inplace_buffers[output_name] = buf
562
+ else:
563
+ buf = InplacedBuffer(
564
+ f"in_out_ptr{len(unique(self.inplace_buffers.values()))}",
565
+ [input_name, output_name],
566
+ )
567
+ self.inplace_buffers[input_name] = buf
568
+ self.inplace_buffers[output_name] = buf
569
+
570
+ def seed_offset(self, name, value):
571
+ if value in self.sizevars:
572
+ return self.sizevars[value]
573
+ if name in self.sizevars.values():
574
+ name = (
575
+ f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}"
576
+ )
577
+ self.sizevars[value] = name
578
+ return name
579
+
580
+ def size(self, name):
581
+ if str(name) == "seed":
582
+ self.sizevars["seed"] = "seed"
583
+ return "seed"
584
+ return self._lookup("ks", self.sizevars, name)
585
+
586
+ def call_names(self):
587
+ return chain(
588
+ self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys()
589
+ )
590
+
591
+ def wrap_ptr_arg(self, buf, dtype):
592
+ return f"c_void_p({buf}.data_ptr())"
593
+
594
+ def wrap_size_arg(self, size):
595
+ return f"c_long({size})"
596
+
597
+ def cpp_argdefs(self):
598
+ from .cpp import DTYPE_TO_CPP, INDEX_TYPE
599
+
600
+ call_args = []
601
+ arg_defs = []
602
+ arg_types = []
603
+ for inplaced in unique(self.inplace_buffers.values()):
604
+ if self._buffer_is_marked_removed(inplaced):
605
+ continue
606
+ outer = inplaced.other_names[-1]
607
+ inner = inplaced.inner_name
608
+ dtype = V.graph.get_dtype(outer)
609
+ cpp_dtype = DTYPE_TO_CPP[dtype]
610
+ arg_defs.append(f"{cpp_dtype}* {inner}")
611
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
612
+ arg_types.append(f"{cpp_dtype}*")
613
+ for outer, inner in self.input_buffers.items():
614
+ if outer in self.inplace_buffers:
615
+ continue
616
+ dtype = V.graph.get_dtype(outer)
617
+ cpp_dtype = DTYPE_TO_CPP[dtype]
618
+ arg_defs.append(f"const {cpp_dtype}* {inner}")
619
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
620
+ arg_types.append(f"const {cpp_dtype}*")
621
+ for outer, inner in self.output_buffers.items():
622
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
623
+ continue
624
+ dtype = V.graph.get_dtype(outer)
625
+ cpp_dtype = DTYPE_TO_CPP[dtype]
626
+ arg_defs.append(f"{cpp_dtype}* {inner}")
627
+ call_args.append(self.wrap_ptr_arg(outer, dtype))
628
+ arg_types.append(f"{cpp_dtype}*")
629
+ for outer, inner in self.sizevars.items():
630
+ arg_defs.append(f"const {INDEX_TYPE} {inner}")
631
+ call_args.append(self.wrap_size_arg(outer))
632
+ arg_types.append(f"const {INDEX_TYPE}")
633
+ return arg_defs, call_args, arg_types
634
+
635
+ def python_argdefs(self):
636
+ arg_defs = []
637
+ call_args = []
638
+ precompile_args: List[Union[TensorArg, SizeArg]] = []
639
+ for inplaced in unique(self.inplace_buffers.values()):
640
+ if self._buffer_is_marked_removed(inplaced):
641
+ continue
642
+ arg_defs.append(inplaced.inner_name)
643
+ call_args.append(inplaced.other_names[-1])
644
+ precompile_args.append(
645
+ TensorArg(
646
+ inplaced.inner_name,
647
+ inplaced.other_names[-1],
648
+ V.graph.get_dtype(inplaced.other_names[-1]),
649
+ True,
650
+ )
651
+ )
652
+ for outer, inner in chain(
653
+ self.input_buffers.items(), self.output_buffers.items()
654
+ ):
655
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
656
+ continue
657
+ arg_defs.append(inner)
658
+ call_args.append(outer)
659
+ precompile_args.append(
660
+ TensorArg(inner, outer, V.graph.get_dtype(outer), True)
661
+ )
662
+ for outer, inner in self.sizevars.items():
663
+ arg_defs.append(inner)
664
+ call_args.append(outer)
665
+ precompile_args.append(SizeArg(inner, outer))
666
+
667
+ return arg_defs, call_args, precompile_args
668
+
669
+ def aliases(self):
670
+ for inplaced in unique(self.inplace_buffers.values()):
671
+ if self._buffer_is_marked_removed(inplaced):
672
+ continue
673
+ for other in inplaced.other_names:
674
+ if (
675
+ other in V.graph.inplaced_to_remove
676
+ or other in V.kernel.inplaced_to_remove
677
+ ):
678
+ continue
679
+ if other in self.input_buffers:
680
+ yield self.input_buffers[other], inplaced.inner_name
681
+ if other in self.output_buffers:
682
+ yield self.output_buffers[other], inplaced.inner_name
683
+
684
+ def is_removed(self, name):
685
+ def _is_removed(name, buffers):
686
+ return name not in buffers or self._buffer_is_marked_removed(buffers[name])
687
+
688
+ return _is_removed(name, self.output_buffers) and _is_removed(
689
+ name, self.inplace_buffers
690
+ )
691
+
692
+ # Includes inplace buffers, excludes removed buffers. Essentially,
693
+ # after you do a call into this kernel, which buffers actually contain
694
+ # updated data? Modeled off of python_argdefs.
695
+ def live_output_buffers(self):
696
+ live_outs = set()
697
+ for inplaced in unique(self.inplace_buffers.values()):
698
+ if self._buffer_is_marked_removed(inplaced):
699
+ continue
700
+ live_outs.add(inplaced.other_names[-1])
701
+ for outer, inner in self.output_buffers.items():
702
+ if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
703
+ continue
704
+ live_outs.add(outer)
705
+ return live_outs
706
+
707
+
708
+ class CSEVariable:
709
+ """A CSEVariable is just a name for an expression, but it is useful to be able to annotate them on a backend-dependent basis.
710
+ To do so, the backends can simply overload `Kernel.create_cse_var`
711
+ The "CSEVariable.update_on_args" method gives you a hook for annotations
712
+ See example of TritonCSEVariable in triton.py
713
+ """
714
+
715
+ def __init__(self, name, bounds: ValueRanges):
716
+ assert isinstance(bounds, ValueRanges)
717
+ self.name = name
718
+ self.bounds = bounds
719
+
720
+ def __str__(self):
721
+ return self.name
722
+
723
+ def __hash__(self) -> int:
724
+ return hash(self.name)
725
+
726
+ def __eq__(self, other) -> bool:
727
+ return type(other) == type(self) and other.name == self.name
728
+
729
+ def update_on_args(self, name, args, kwargs):
730
+ pass
731
+
732
+
733
+ class CppWrapperKernelArgs(KernelArgs):
734
+ def wrap_ptr_arg(self, buf, dtype):
735
+ from .cpp import DTYPE_TO_CPP
736
+
737
+ if config.aot_inductor.abi_compatible:
738
+ # In the abi_compatible model, we just return the buf here.
739
+ # We will form correct call args later in wrapper.generate_kernel_all.
740
+ return buf
741
+ else:
742
+ return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())"
743
+
744
+ def wrap_size_arg(self, size):
745
+ return f"{size}"
746
+
747
+
748
+ class CSE:
749
+ """Common subexpression elimination"""
750
+
751
+ def __init__(
752
+ self,
753
+ prefix="",
754
+ suffix="",
755
+ name_prefix="tmp",
756
+ iter_buffers=None,
757
+ store_cache=None,
758
+ reduction_cache=None,
759
+ varname_map=None,
760
+ ):
761
+ self.prefix = prefix
762
+ self.suffix = suffix
763
+ self.cache = {}
764
+ self.name_prefix = name_prefix
765
+ self.store_cache = store_cache or {}
766
+ self.reduction_cache = reduction_cache or {}
767
+ self.iter_buffer_ids = iter_buffers or itertools.count()
768
+ self.invalidated_stores = set()
769
+ self.varname_map = varname_map or {}
770
+
771
+ def invalidate(self, keep_vars: Set[str]):
772
+ for name, tmp in list(self.store_cache.items()):
773
+ if tmp not in keep_vars:
774
+ del self.store_cache[name]
775
+ self.invalidated_stores.add(name)
776
+ self.cache = {k: v for k, v in self.cache.items() if v in keep_vars}
777
+
778
+ def clone(self):
779
+ # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional
780
+ return CSE(
781
+ prefix=self.prefix,
782
+ suffix=self.suffix,
783
+ name_prefix=self.name_prefix,
784
+ iter_buffers=self.iter_buffer_ids,
785
+ store_cache=self.store_cache,
786
+ varname_map=self.varname_map,
787
+ )
788
+
789
+ def generate(
790
+ self,
791
+ buffer: IndentedBuffer,
792
+ expr: Union[str, CSEVariable, OpsValue],
793
+ *,
794
+ bounds: ValueRanges = ValueRanges.unknown(),
795
+ write=True,
796
+ assignment=True,
797
+ ) -> CSEVariable:
798
+ if isinstance(expr, OpsValue):
799
+ expr = expr.value
800
+
801
+ assert isinstance(expr, (str, CSEVariable)), type(expr)
802
+ assert write or assignment
803
+ if isinstance(expr, CSEVariable):
804
+ # If the expressions were always created with all the information, we could
805
+ # assert expr.bounds == bounds, but sometimes the expression is created
806
+ # with the loose ValueRanges.unknown(), so we need to tighten the bounds
807
+ expr.bounds = expr.bounds.tighten(bounds)
808
+ return expr
809
+ cache_key = expr
810
+ var = self.cache.get(cache_key, None)
811
+ if not var:
812
+ var = self.newvar(bounds) if assignment else None
813
+ self.cache[cache_key] = var
814
+ if write:
815
+ if V.kernel.current_node:
816
+ V.kernel.current_node.codegen_originating_info(
817
+ buffer, only_once=True
818
+ )
819
+ if assignment:
820
+ line = f"{self.prefix}{var} = {expr}{self.suffix}"
821
+ else:
822
+ line = f"{expr}{self.suffix}"
823
+ buffer.writeline(line)
824
+ else:
825
+ var.bounds = var.bounds.tighten(bounds)
826
+
827
+ return var
828
+
829
+ def newvar(self, bounds: ValueRanges = ValueRanges.unknown()) -> CSEVariable:
830
+ var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}"
831
+ var = V.kernel.create_cse_var(var_name, bounds)
832
+ self.varname_map[var_name] = var
833
+ return var
834
+
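# A hedged, non-runnable sketch of the caching behaviour above (allocating CSEVariables
# needs an active V.kernel handler, so this is illustrative only; the Triton-style
# expression text is a placeholder):
#
#   buf = IndentedBuffer()
#   cse = CSE(prefix="", suffix="")
#   a = cse.generate(buf, "tl.load(in_ptr0 + x0)")  # writes "tmp0 = tl.load(in_ptr0 + x0)"
#   b = cse.generate(buf, "tl.load(in_ptr0 + x0)")  # cache hit: nothing written, same variable
#   # a is b -> True; cse.invalidate(keep_vars) later drops cached entries not kept alive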
835
+
836
+ class IndirectAssertLine(DeferredLineBase):
837
+ def __init__(self, line, assert_fn, var, mask, size_map):
838
+ self.var = var
839
+ self.mask = mask
840
+ self.line = line
841
+ self.assert_fn = assert_fn
842
+ self.size_map = size_map
843
+
844
+ def __call__(self):
845
+ size, size_str = self.size_map[(self.var, self.mask)]
846
+
847
+ # We assert if we've not been able to prove the bound
848
+ assert_min = (self.var.bounds.lower >= 0) != sympy.true
849
+ assert_max = (self.var.bounds.upper < size) != sympy.true
850
+
851
+ # FooBar interview question
852
+ if not (assert_min or assert_max):
853
+ return None
854
+ elif assert_min and assert_max:
855
+ # The conditions need to be in parens because of Python's operator precedence.
856
+ # It'd be less error-prone to use and/or/not, which is supported by triton
857
+ cond = f"(0 <= {self.var}) & ({self.var} < {size_str})"
858
+ cond_print = f"0 <= {self.var} < {size_str}"
859
+ elif assert_min:
860
+ cond = f"0 <= {self.var}"
861
+ cond_print = cond
862
+ else:
863
+ assert assert_max
864
+ cond = f"{self.var} < {size_str}"
865
+ cond_print = cond
866
+
867
+ if self.mask:
868
+ cond = f"({cond}) | ~{self.mask}"
869
+ return self.line.format(
870
+ assert_fn=self.assert_fn, cond=cond, cond_print=cond_print
871
+ )
872
+
873
+ def _new_line(self, line):
874
+ return IndirectAssertLine(
875
+ line, self.assert_fn, self.var, self.mask, self.size_map
876
+ )
877
+
878
+
879
+ class CodeGen:
880
+ def __init__(self):
881
+ super().__init__()
882
+ self.exit_stack = contextlib.ExitStack()
883
+
884
+ def __enter__(self):
885
+ self.exit_stack.__enter__()
886
+ return self
887
+
888
+ def __exit__(self, exc_type, exc_val, exc_tb):
889
+ self.exit_stack.__exit__(exc_type, exc_val, exc_tb)
890
+
891
+
892
+ class Kernel(CodeGen):
893
+ newvar_prefix = ""
894
+ suffix = ""
895
+ overrides = None
896
+ load_format = None
897
+ store_format = None
898
+
899
+ def __init__(self, args=None, increase_kernel_count=True):
900
+ super().__init__()
901
+ if increase_kernel_count:
902
+ metrics.generated_kernel_count += 1
903
+ self.args = args or KernelArgs()
904
+ self.loads = IndentedBuffer()
905
+ self.compute = IndentedBuffer()
906
+ self.stores = IndentedBuffer()
907
+ self.cse: CSE = CSE(self.newvar_prefix, self.suffix)
908
+ self.must_keep_buffers = set()
909
+ self.store_buffer_names = set()
910
+ self._load_mask = None
911
+ # set in set_current_node
912
+ self.current_node = None
913
+ self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = None
914
+ # Upper bounds for indirect_indexing and their str representation
915
+ self.indirect_max_sizes: Dict[Tuple[str, str], Tuple[sympy.Expr, str]] = {}
916
+
917
+ self.removed_buffers = set()
918
+ self.inplaced_to_remove = set()
919
+
920
+ # key: the buffer to write
921
+ # value: the buffer to read and whose memory can be reused for
922
+ # the buffer specified by key
923
+ self.inplace_update_buffers = dict()
924
+ # Set minimum number of elements processed per thread.
925
+ self.min_elem_per_thread = 1
926
+
927
+ @contextlib.contextmanager
928
+ def set_current_node(self, node):
929
+ prior = self.current_node
930
+ self.current_node = node
931
+ self.node_to_bounds = node._body.bounds().get_bounds()
932
+ try:
933
+ yield
934
+ finally:
935
+ self.current_node = prior
936
+
937
+ @contextlib.contextmanager
938
+ def swap_buffers(self, lb, cb=None, sb=None):
939
+ if cb is None:
940
+ cb = lb
941
+ loads = self.loads
942
+ compute = self.compute
943
+ stores = self.stores
944
+ cse = self.cse
945
+ self.loads = lb
946
+ self.compute = cb
947
+ self.stores = sb
948
+ self.cse = cse.clone()
949
+ try:
950
+ yield
951
+ finally:
952
+ self.loads = loads
953
+ self.compute = compute
954
+ self.stores = stores
955
+ self.cse = cse
956
+
957
+ def load(self, name: str, index: sympy.Expr):
958
+ raise NotImplementedError()
959
+
960
+ def indirect_load(self, name: str, index: sympy.Expr):
961
+ """A load that depends on an index we have read"""
962
+ prior = self.loads
963
+ try:
964
+ # put the load in the compute section as it might have deps
965
+ self.loads = self.compute
966
+ return self.load(name, index)
967
+ finally:
968
+ self.loads = prior
969
+
970
+ def store_reduction(self, name, index, value):
971
+ raise NotImplementedError()
972
+
973
+ def store(self, name, index, value, mode=None):
974
+ raise NotImplementedError()
975
+
976
+ def reduction(self, dtype, src_dtype, reduction_type, value):
977
+ raise NotImplementedError()
978
+
979
+ def bucketize(
980
+ self,
981
+ values,
982
+ offsets_name: str,
983
+ offsets_size: sympy.Expr,
984
+ indexing_dtype: torch.dtype,
985
+ right: bool,
986
+ ):
987
+ """
988
+ See [Note: Inductor bucketize op]
989
+ """
990
+ raise NotImplementedError()
991
+
992
+ @property
993
+ def assert_function(self) -> str:
994
+ raise NotImplementedError()
995
+
996
+ def index_to_str(self, index: sympy.Expr) -> str:
997
+ raise NotImplementedError()
998
+
999
+ def __enter__(self):
1000
+ class CSEProxy:
1001
+ self.name = "CSEProxy"
1002
+
1003
+ @staticmethod
1004
+ def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc]
1005
+ def inner(*args, **kwargs):
1006
+ # TritonTemplateKernel has no current_node
1007
+ buf_bounds = ValueRanges.unknown()
1008
+ if hasattr(V.interpreter, "current_node"):
1009
+ fx_node = V.interpreter.current_node
1010
+ assert isinstance(self.node_to_bounds, dict)
1011
+ buf_bounds = self.node_to_bounds.get(
1012
+ fx_node, ValueRanges.unknown()
1013
+ )
1014
+
1015
+ csevar = self.cse.generate(
1016
+ self.compute,
1017
+ getattr(parent_handler, name)(*args, **kwargs), # type: ignore[has-type]
1018
+ bounds=buf_bounds,
1019
+ )
1020
+ csevar.update_on_args(name, args, kwargs)
1021
+ return csevar
1022
+
1023
+ return inner
1024
+
1025
+ @staticmethod
1026
+ def indirect_indexing(var, size, check=True):
1027
+ # Skip CSE since this doesn't return an expression
1028
+
1029
+ if var.bounds.lower < 0:
1030
+ new_bounds = ValueRanges.unknown()
1031
+ if var.bounds != ValueRanges.unknown() and isinstance(
1032
+ size, sympy.Number
1033
+ ):
1034
+ # Take the negative part of the bound and add size to it
1035
+ # Then take union of that and the positive part
1036
+ # This is a tighter bound than that of a generic ops.where, as we have info on the cond
1037
+ neg = var.bounds & ValueRanges(-sympy.oo, -1)
1038
+ new_bounds = ValueRanges(neg.lower + size, neg.upper + size)
1039
+ # We don't have a good way of representing the empty range
1040
+ if var.bounds.upper >= 0:
1041
+ pos = var.bounds & ValueRanges(0, sympy.oo)
1042
+ new_bounds = new_bounds | pos
1043
+
1044
+ stm = ops.add(var, self.rename_indexing(size))
1045
+ # Mixed negative and non-negative
1046
+ if var.bounds.upper >= 0:
1047
+ lt = ops.lt(var, "0")
1048
+ stm = ops.where(lt, stm, var)
1049
+ new_var = self.cse.generate(self.compute, stm, bounds=new_bounds)
1050
+
1051
+ new_var.update_on_args("index_wrap", (var,), {})
1052
+ var = new_var
1053
+
1054
+ if self.generate_assert(check):
1055
+ mask = self.load_mask(var)
1056
+
1057
+ # An assertion line may have been written already, if so just
1058
+ # update the max size.
1059
+ map_key = (var, mask)
1060
+ existing_size, _ = self.indirect_max_sizes.get(
1061
+ map_key, (None, None)
1062
+ )
1063
+ if existing_size is not None:
1064
+ size = sympy.Min(size, existing_size)
1065
+ else:
1066
+ line = (
1067
+ '{assert_fn}({cond}, "index out of bounds: {cond_print}")'
1068
+ )
1069
+ self.compute.writeline(
1070
+ IndirectAssertLine(
1071
+ line,
1072
+ self.assert_function,
1073
+ var,
1074
+ mask,
1075
+ self.indirect_max_sizes,
1076
+ )
1077
+ )
1078
+
1079
+ self.indirect_max_sizes[map_key] = (size, self.index_to_str(size))
1080
+ return sympy_symbol(str(var))
1081
+
1082
+ @staticmethod
1083
+ def load(name: str, index: sympy.Expr):
1084
+ if name in self.cse.invalidated_stores:
1085
+ # A load from an invalidated store requires us to
1086
+ # keep the actual buffer around
1087
+ V.kernel.must_keep_buffers.add(name)
1088
+ if free_symbol_startswith(index, "tmp"):
1089
+ return self.indirect_load(name, index)
1090
+ store_cache = self.cse.store_cache
1091
+ if name in store_cache:
1092
+ return store_cache[name]
1093
+ return self.load(name, index)
1094
+
1095
+ @staticmethod
1096
+ def store(name, index, value, mode=None):
1097
+ self.store_buffer_names.add(name)
1098
+ if mode is None:
1099
+ self.cse.store_cache[name] = value
1100
+ if self.current_node:
1101
+ for other_name in self.current_node.get_mutations():
1102
+ self.cse.store_cache[other_name] = value
1103
+ if name not in V.graph.removed_buffers:
1104
+ return self.store(name, index, value, mode=mode)
1105
+
1106
+ @staticmethod
1107
+ def store_reduction(name, index, value):
1108
+ self.store_buffer_names.add(name)
1109
+ self.cse.store_cache[name] = value
1110
+ if self.current_node:
1111
+ for other_name in self.current_node.get_mutations():
1112
+ self.cse.store_cache[other_name] = value
1113
+
1114
+ if name not in V.graph.removed_buffers:
1115
+ return self.store_reduction(name, index, value)
1116
+
1117
+ @staticmethod
1118
+ def reduction(dtype, src_dtype, reduction_type, value):
1119
+ return self.reduction(dtype, src_dtype, reduction_type, value)
1120
+
1121
+ @staticmethod
1122
+ def bucketize(
1123
+ values,
1124
+ offsets_name: str,
1125
+ offsets_size: sympy.Expr,
1126
+ indexing_dtype: torch.dtype,
1127
+ right: bool,
1128
+ ):
1129
+ """
1130
+ [Note: Inductor bucketize op]
1131
+
1132
+ Given values (tensor) and offsets_name (reference to the name of a 1D
1133
+ tensor), calculate the bucket that each value belongs to.
1134
+
1135
+ e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True
1136
+ return = [ 0, 1, 1, 1, 1, 3, 3, 4].
1137
+
1138
+ When right == False, bucket i refers to range (offsets[i], offsets[i+1]].
1139
+ When right == True, bucket i refers to range [offsets[i], offsets[i+1]).
1140
+
1141
+ Offsets must be non-decreasing or the result is undefined.
1142
+ """
1143
+ return self.bucketize(
1144
+ values, offsets_name, offsets_size, indexing_dtype, right
1145
+ )
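# Quick check of the [Note: Inductor bucketize op] example above, using the
# standard-library bisect module (an illustration only; bisect is not used in
# this file). With right=True, bucket i covers [offsets[i], offsets[i+1]), so
# the result is the number of offsets <= value, i.e. bisect.bisect_right:
import bisect
_offsets = [0, 4, 4, 8]
_values = [-1, 0, 1, 2, 3, 4, 5, 9]
assert [bisect.bisect_right(_offsets, v) for v in _values] == [0, 1, 1, 1, 1, 3, 3, 4]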
1146
+
1147
+ super().__enter__()
1148
+ assert self.overrides
1149
+ parent_handler = self.overrides(V.get_ops_handler())
1150
+ self.exit_stack.enter_context(V.set_ops_handler(CSEProxy()))
1151
+ self.exit_stack.enter_context(V.set_kernel_handler(self))
1152
+ return self
1153
+
1154
+ def __exit__(self, exc_type, exc_val, exc_tb):
1155
+ """
1156
+ Note that V.graph.scheduler can be None when codegening triton template
1157
+ kernels.
1158
+ """
1159
+ if V.graph.scheduler:
1160
+ V.graph.scheduler.remove_kernel_local_buffers()
1161
+ super().__exit__(exc_type, exc_val, exc_tb)
1162
+
1163
+ def generate_assert(self, check):
1164
+ return (check or config.debug_index_asserts) and config.assert_indirect_indexing
1165
+
1166
+ def load_mask(self, var):
1167
+ # only the triton kernel requires mask
1168
+ return ""
1169
+
1170
+ def rename_indexing(self, index) -> sympy.Expr:
1171
+ # adds the necessary kernel args for index expressions
1172
+ # and renames variables in index expressions to kernel arg names
1173
+ if isinstance(index, (list, tuple)):
1174
+ return [self.rename_indexing(x) for x in index]
1175
+ index = V.graph.sizevars.simplify(index)
1176
+ sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name)
1177
+ replacements = {
1178
+ x: self.args.size(x)
1179
+ for x in sorted_symbols
1180
+ if x.name.startswith("s")
1181
+ or x.name.startswith("ps")
1182
+ or (x.name.startswith("i") and not x.name.startswith("idx"))
1183
+ }
1184
+ return sympy_subs(index, replacements)
1185
+
1186
+ def create_cse_var(self, *args, **kwargs):
1187
+ return CSEVariable(*args, **kwargs)
1188
+
1189
+
1190
+ @dataclasses.dataclass
1191
+ class OptimizationContext:
1192
+ key: ClassVar[str] = "opt_ctx"
1193
+
1194
+ # Load value as mask
1195
+ is_load_as_mask: bool = False
1196
+
1197
+ dtype: Optional[torch.dtype] = None
1198
+ ops_name: str = ""
1199
+ is_most_inner_loop_irrevelant: bool = False
1200
+
1201
+ # Load uint8 value as float32
1202
+ is_load_uint8_as_float: bool = False
1203
+
1204
+
1205
+ @functools.lru_cache(None)
1206
+ def jinja2_env():
1207
+ try:
1208
+ import jinja2
1209
+
1210
+ return jinja2.Environment(
1211
+ undefined=jinja2.StrictUndefined,
1212
+ )
1213
+ except ImportError:
1214
+ return None
1215
+
1216
+
1217
+ class ChoiceCaller:
1218
+ """
1219
+ Represents a possible choice used in autotune_process.py.
1220
+ During autotuning, self.benchmark() is first called to get benchmark result,
1221
+ and if this choice is selected, self.output_node() is called to get the output_node.
1222
+
1223
+ Children classes: TritonTemplateCaller, CUDATemplateCaller.
1224
+ """
1225
+
1226
+ def __init__(self, name, input_nodes, layout):
1227
+ super().__init__()
1228
+ self.name = name
1229
+ self.layout = layout
1230
+ self.input_nodes = input_nodes
1231
+
1232
+ def benchmark(self, *args, out) -> float:
1233
+ algo = self.to_callable()
1234
+ return do_bench(lambda: algo(*args, out=out))
1235
+
1236
+ def call_name(self) -> str:
1237
+ raise NotImplementedError()
1238
+
1239
+ def to_callable(self):
1240
+ raise NotImplementedError()
1241
+
1242
+ def hash_key(self) -> str:
1243
+ raise NotImplementedError()
1244
+
1245
+ def output_node(self) -> "TensorBox": # type: ignore[name-defined]
1246
+ raise NotImplementedError()
1247
+
1248
+
1249
+ class KernelTemplate:
1250
+ """
1251
+ Base class for defining kernel templates.
1252
+
1253
+ Children classes: TritonTemplate, CUDATemplate
1254
+ """
1255
+
1256
+ @staticmethod
1257
+ def _template_from_string(source):
1258
+ env = jinja2_env()
1259
+ if env is not None:
1260
+ return env.from_string(source)
1261
+ return None
1262
+
1263
+ @staticmethod
1264
+ def _fake_get_dtype(fake_out):
1265
+ _get_dtype_real = V.graph.get_dtype
1266
+
1267
+ def get_dtype(name):
1268
+ if name == fake_out.get_name():
1269
+ return fake_out.get_dtype()
1270
+ return _get_dtype_real(name)
1271
+
1272
+ return get_dtype
1273
+
1274
+ def __init__(self, name: str):
1275
+ self.name = name
1276
+
1277
+ def maybe_append_choice(self, choices, **kwargs):
1278
+ """
1279
+ Maybe generates a new ChoiceCaller and appends it into existing choices.
1280
+
1281
+ choices: A list of ChoiceCallers.
1282
+ kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller.
1283
+ """
1284
+
1285
+ try:
1286
+ choices.append(self.generate(**kwargs))
1287
+ except NotImplementedError:
1288
+ pass
1289
+
1290
+ def generate(self, **kwargs) -> ChoiceCaller:
1291
+ """
1292
+ Generates a ChoiceCaller instance from the given arguments.
1293
+ """
1294
+
1295
+ raise NotImplementedError()
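The ChoiceCaller and KernelTemplate docstrings above describe the autotuning handshake: a template's maybe_append_choice() tries to generate() a ChoiceCaller and silently skips templates that raise NotImplementedError, each surviving caller is timed via benchmark(), and the winner's output_node() is used. The sketch below is a minimal, self-contained illustration of that contract; SquareTemplate and _SquareCaller are hypothetical stand-ins and the timing is simplified, so this shows the pattern rather than the real Inductor classes.

import time

class _SquareCaller:
    """Hypothetical ChoiceCaller-like object: benchmark() first, then use the result."""
    def __init__(self, name):
        self.name = name

    def to_callable(self):
        return lambda xs, out: out.extend(x * x for x in xs)

    def benchmark(self, *args, out) -> float:
        algo = self.to_callable()
        start = time.perf_counter()
        algo(*args, out=out)
        return time.perf_counter() - start

class SquareTemplate:
    """Hypothetical KernelTemplate-like object."""
    def maybe_append_choice(self, choices, **kwargs):
        try:
            choices.append(self.generate(**kwargs))
        except NotImplementedError:
            pass  # a template that cannot handle the inputs is simply skipped

    def generate(self, **kwargs):
        return _SquareCaller("square")

choices = []
SquareTemplate().maybe_append_choice(choices)
out = []
best = min(choices, key=lambda c: c.benchmark([1, 2, 3], out=out))
print(best.name, out)  # square [1, 4, 9]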
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h ADDED
@@ -0,0 +1,410 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <atomic>
5
+ #include <cmath>
6
+ #include <cstdlib>
7
+ #include <limits>
8
+ #include <omp.h>
9
+
10
+ #include <ATen/NumericUtils.h>
11
+ #include <ATen/core/PhiloxRNGEngine.h>
12
+ #include <ATen/native/Math.h>
13
+
14
+ #include <c10/util/BFloat16.h>
15
+ #include <c10/util/BFloat16-math.h>
16
+ #include <c10/util/generic_math.h>
17
+ #include <c10/util/Half.h>
18
+ #include <c10/util/TypeCast.h>
19
+
20
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
21
+ #define INDUCTOR_USE_VECTOR_TYPES() 1
22
+ #else
23
+ #define INDUCTOR_USE_VECTOR_TYPES() 0
24
+ #endif
25
+
26
+ #if INDUCTOR_USE_VECTOR_TYPES()
27
+ #include <ATen/cpu/vec/functional.h>
28
+ #include <ATen/cpu/vec/vec.h>
29
+ #endif
30
+
31
+ typedef at::Half half;
32
+ typedef at::BFloat16 bfloat16;
33
+
34
+ template <typename T>
35
+ struct Welford {
36
+ T mean = T(0);
37
+ T m2 = T(0);
38
+ T weight = T(0);
39
+ };
40
+
41
+
42
+ template <typename T>
43
+ struct IsVecType: std::false_type {};
44
+
45
+ #if INDUCTOR_USE_VECTOR_TYPES()
46
+ template <typename T>
47
+ struct IsVecType<at::vec::Vectorized<T>>: std::true_type {};
48
+ #endif
49
+
50
+ template <typename T>
51
+ Welford<T> welford_combine(const Welford<T> &a, const Welford<T> &b) {
52
+ if constexpr (!IsVecType<T>::value) {
53
+ if (a.weight == 0) {
54
+ return b;
55
+ }
56
+ if (b.weight == 0) {
57
+ return a;
58
+ }
59
+ }
60
+ auto delta = b.mean - a.mean;
61
+ auto new_weight = a.weight + b.weight;
62
+ auto wb_over_w = b.weight / new_weight;
63
+ if constexpr (IsVecType<T>::value) {
64
+ // Guard against division by zero
65
+ wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0));
66
+ }
67
+ auto result = Welford<T>{
68
+ a.mean + delta * wb_over_w,
69
+ a.m2 + b.m2 + delta * delta * a.weight * wb_over_w,
70
+ new_weight
71
+ };
72
+ return result;
73
+ }
74
+
75
+ template <typename T>
76
+ Welford<T> welford_combine(const Welford<T> &acc, T data) {
77
+ // Add a single data point
78
+ auto delta = data - acc.mean;
79
+ auto new_weight = acc.weight + T(1);
80
+ auto new_mean = acc.mean + delta / new_weight;
81
+ auto new_delta = data - new_mean;
82
+ auto result = Welford<T>{
83
+ new_mean,
84
+ acc.m2 + delta * new_delta,
85
+ new_weight
86
+ };
87
+ return result;
88
+ }
89
+
90
+
91
+ #if INDUCTOR_USE_VECTOR_TYPES()
92
+ template <typename scalar_t>
93
+ inline at::vec::Vectorized<scalar_t> vec_shuffle_down(at::vec::Vectorized<scalar_t> x, size_t n) {
94
+ using Vec = at::vec::Vectorized<scalar_t>;
95
+ alignas(alignof(Vec)) scalar_t array[Vec::size()];
96
+ x.store(array);
97
+ for (size_t i = 0; i + n < Vec::size(); i += 2 * n) {
98
+ array[i] = array[i + n];
99
+ }
100
+ return Vec::loadu(array);
101
+ }
102
+
103
+ #ifdef CPU_CAPABILITY_AVX2
104
+ inline at::vec::Vectorized<float> vec_shuffle_down(at::vec::Vectorized<float> x, size_t n) {
105
+ using vec_t = at::vec::Vectorized<float>;
106
+ #define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w)
107
+ switch (n) {
108
+ case 1:
109
+ return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3)));
110
+ case 2:
111
+ return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2)));
112
+ case 4:
113
+ return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1)));
114
+ }
115
+ TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n);
116
+ }
117
+ #endif
118
+
119
+ template <typename scalar_t>
120
+ Welford<scalar_t> welford_vec_reduce_all(Welford<at::vec::Vectorized<scalar_t>> acc) {
121
+ using Vec = at::vec::Vectorized<scalar_t>;
122
+ for (size_t n = 1; n < Vec::size(); n *= 2) {
123
+ auto shuffled = Welford<Vec>{
124
+ vec_shuffle_down(acc.mean, n),
125
+ vec_shuffle_down(acc.m2, n),
126
+ vec_shuffle_down(acc.weight, n)
127
+ };
128
+ acc = welford_combine(acc, shuffled);
129
+ }
130
+
131
+ Welford<scalar_t> result;
132
+ alignas(alignof(Vec)) scalar_t array[Vec::size()];
133
+ acc.mean.store(array);
134
+ result.mean = array[0];
135
+
136
+ acc.m2.store(array);
137
+ result.m2 = array[0];
138
+
139
+ acc.weight.store(array);
140
+ result.weight = array[0];
141
+
142
+ return result;
143
+ }
144
+ #endif
145
+
146
+
147
+ template <typename T> inline T mod(T a, T b) { return a % b; }
148
+ template <> inline float mod(float a, float b) { return std::fmod(a, b); }
149
+ template <> inline double mod(double a, double b) { return std::fmod(a, b); }
150
+
151
+ template <typename scalar_t>
152
+ inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
153
+ if (at::_isnan(a)) {
154
+ return a;
155
+ }
156
+ return a > b ? a : b;
157
+ }
158
+
159
+ template <typename scalar_t>
160
+ inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
161
+ if (at::_isnan(a)) {
162
+ return a;
163
+ }
164
+ return a < b ? a : b;
165
+ }
166
+
167
+ constexpr float uint32_to_uniform_float(uint32_t value) {
168
+ // maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
169
+ constexpr float scale = 4.6566127342e-10;
170
+ return static_cast<float>(value & 0x7FFFFFFF) * scale;
171
+ }
172
+
173
+ float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
174
+ return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)());
175
+ }
176
+
177
+ float randn_cpu(uint32_t seed, uint32_t offset) {
178
+ at::Philox4_32 engine(seed, 0, offset);
179
+ return engine.randn(10);
180
+ }
181
+
182
+ uint64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
183
+ auto gen = at::Philox4_32(seed, 0, offset);
184
+ uint64_t r0 = gen();
185
+ uint64_t r1 = gen();
186
+ uint64_t result = r0 | (r1 << 32);
187
+ return (result % static_cast<uint64_t>(high - low)) + low;
188
+ }
189
+
190
+ template <typename T> struct AsIntegerType { typedef T type; };
191
+ template <> struct AsIntegerType<float> { typedef uint32_t type; };
192
+ template <> struct AsIntegerType<double> { typedef uint64_t type; };
193
+ template <> struct AsIntegerType<bfloat16> { typedef uint16_t type; };
194
+
195
+ template <typename T>
196
+ typename std::enable_if<!std::is_reduced_floating_point<T>::value, T>::type
197
+ inline fetch_value(volatile T *addr) {
198
+ return *addr;
199
+ }
200
+
201
+ template <typename T>
202
+ typename std::enable_if<std::is_reduced_floating_point<T>::value, T>::type
203
+ inline fetch_value(volatile T *addr) {
204
+ return T(addr->x, T::from_bits());
205
+ }
206
+
207
+ template <typename T>
208
+ typename std::enable_if<!std::is_integral<T>::value>::type
209
+ atomic_add(volatile T *addr, T offset) {
210
+ typedef typename AsIntegerType<T>::type alt_type;
211
+
212
+ static_assert(sizeof(std::atomic<alt_type>) == sizeof(T),
213
+ "std::atomic issue");
214
+
215
+ alt_type expected;
216
+
217
+ alt_type desired;
218
+
219
+ std::atomic<alt_type> *atomic_addr = (std::atomic<alt_type> *)addr;
220
+ do {
221
+ T val = fetch_value(addr);
222
+ reinterpret_cast<T *>(&expected)[0] = val;
223
+ reinterpret_cast<T *>(&desired)[0] = val + offset;
224
+ } while (!atomic_addr->compare_exchange_weak(expected, desired,
225
+ std::memory_order_relaxed));
226
+ }
227
+
228
+ // Since C++20 float is supported by fetch_add, but the performance may not
229
+ // better than compare_exchange_weak, which can be checked by microbenchmark
230
+ // inductor_cpu_atomic.py
231
+ template <typename T>
232
+ typename std::enable_if<std::is_integral<T>::value>::type
233
+ atomic_add(volatile T *addr, T offset) {
234
+ static_assert(sizeof(std::atomic<T>) == sizeof(T),
235
+ "std::atomic issue");
236
+ std::atomic<T> *atomic_addr = (std::atomic<T> *)addr;
237
+ atomic_addr->fetch_add(offset, std::memory_order_relaxed);
238
+ }
239
+
240
+ // This function is used to convert bool or uint8 to float mask for
241
+ // vectorization. The caller needs to make sure the src represents TRUE/FALSE
242
+ // correctly.
243
+ template <typename T>
244
+ inline float flag_to_float_scalar(T src) {
245
+ float ret;
246
+ *(uint32_t*)(&ret) = src ? 0xFFFFFFFF : 0;
247
+ return ret;
248
+ }
249
+
250
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR)
251
+
252
+ inline at::vec::Vectorized<float> masked_load(const float* src, at::vec::Vectorized<float> mask) {
253
+ # if defined(CPU_CAPABILITY_AVX512)
254
+ at::vec::Vectorized<float> zero_vec(0);
255
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
256
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
257
+ return _mm512_mask_loadu_ps(zero_vec, mmask, src);
258
+ # elif defined(CPU_CAPABILITY_AVX2)
259
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
260
+ auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
261
+ return _mm256_maskload_ps(src, mmask);
262
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
263
+ auto result = at::vec::Vectorized<float>::loadu(src);
264
+ return (result & mask);
265
+ # else
266
+ # error Unsupported vectorization CPU capability
267
+ # endif
268
+ }
269
+
270
+ template <typename T>
271
+ typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, half>::value, at::vec::Vectorized<T>>::type
272
+ inline masked_load(const T* src, at::vec::Vectorized<float> mask) {
273
+ # if defined(CPU_CAPABILITY_AVX512)
274
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
275
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
276
+ auto zero = _mm256_set1_epi16(0);
277
+ auto temp = _mm256_mask_loadu_epi16(zero, mmask, src);
278
+ return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1);
279
+ # elif defined(CPU_CAPABILITY_AVX2)
280
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
281
+ auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
282
+ __at_align__ uint32_t mmask[8];
283
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
284
+ __at_align__ uint16_t result[16];
285
+ for (auto i = 0; i < 8; i++) {
286
+ result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0);
287
+ }
288
+ return at::vec::Vectorized<T>::loadu(result);
289
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
290
+ auto result = at::vec::Vectorized<T>::loadu(src, 8);
291
+ uint32_t maskdata[8] = { 0 };
292
+ uint16_t maskdata_dest[16] = { 0 };
293
+ mask.store(maskdata);
294
+ for (auto i = 0; i < 8; i++) {
295
+ maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFFFF: 0;
296
+ }
297
+ auto maskvector = at::vec::Vectorized<T>::loadu(maskdata_dest);
298
+ return (result & maskvector);
299
+ # else
300
+ # error Unsupported vectorization CPU capability
301
+ # endif
302
+ }
303
+
304
+ inline at::vec::Vectorized<uint8_t> masked_load(const uint8_t* src, at::vec::Vectorized<float> mask) {
305
+ # if defined(CPU_CAPABILITY_AVX512)
306
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
307
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
308
+ auto zero = _mm_set1_epi8(0);
309
+ auto temp = _mm_mask_loadu_epi8(zero, mmask, src);
310
+ return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0);
311
+ # elif defined(CPU_CAPABILITY_AVX2)
312
+ auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
313
+ auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
314
+ __at_align__ uint32_t mmask[8];
315
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
316
+ __at_align__ uint8_t result[32];
317
+ for (auto i = 0; i < 8; i++) {
318
+ result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: uint8_t(0);
319
+ }
320
+ return at::vec::Vectorized<uint8_t>::loadu(result);
321
+ # elif defined(CPU_CAPABILITY_ZVECTOR)
322
+ auto result = at::vec::Vectorized<uint8_t>::loadu(src, 8);
323
+ uint32_t maskdata[8];
324
+ uint8_t maskdata_dest[32] = { 0 };
325
+ mask.store(maskdata);
326
+ for (auto i = 0; i < 8; i++) {
327
+ maskdata_dest[i] = (maskdata[i] == 0xFFFFFFFF) ? 0xFF: 0;
328
+ }
329
+ auto maskvector = at::vec::Vectorized<uint8_t>::loadu(maskdata_dest);
330
+ return (result & maskvector);
331
+ # else
332
+ # error Unsupported vectorization CPU capability
333
+ # endif
334
+ }
335
+
336
+ template <typename T>
337
+ inline at::vec::Vectorized<float> flag_to_float_vec(const T* src) {
338
+ __at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
339
+ #pragma unroll
340
+ for (int64_t i = 0; i < at::vec::Vectorized<float>::size(); i++) {
341
+ dst_tmp[i] = flag_to_float_scalar(src[i]);
342
+ }
343
+ return at::vec::Vectorized<float>::loadu(dst_tmp);
344
+ }
345
+
346
+ template <typename scalar_t>
347
+ inline at::vec::Vectorized<float> cvt_lowp_fp_to_fp32(
348
+ at::vec::Vectorized<scalar_t> src) {
349
+ at::vec::Vectorized<float> res_vec1(0);
350
+ at::vec::Vectorized<float> res_vec2(0);
351
+ std::tie(res_vec1, res_vec2) = at::vec::convert_to_float<scalar_t>(src);
352
+ return res_vec1;
353
+ }
354
+
355
+ template <typename scalar_t>
356
+ inline at::vec::Vectorized<scalar_t> cvt_fp32_to_lowp_fp(
357
+ at::vec::Vectorized<float> src) {
358
+ return at::vec::convert_from_float<scalar_t>(src, src);
359
+ }
360
+
361
+ inline at::vec::Vectorized<float> mask_convert_to_float(at::vec::Vectorized<float> src) {
362
+ auto zeros = at::vec::Vectorized<float>(0);
363
+ auto ones = at::vec::Vectorized<float>(1);
364
+ return at::vec::Vectorized<float>::blendv(zeros, ones, src);
365
+ }
366
+
367
+ template <typename SRC>
368
+ inline at::vec::Vectorized<float> vec_convert_to_mask(at::vec::Vectorized<SRC> src) {
369
+ assert(
370
+ at::vec::Vectorized<float>::size() == at::vec::Vectorized<SRC>::size());
371
+ at::vec::Vectorized<float> res_vec(0);
372
+ __at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
373
+ __at_align__ SRC src_tmp[at::vec::Vectorized<SRC>::size()];
374
+ src.store(src_tmp);
375
+
376
+ #pragma unroll
377
+ for (int i = 0; i < at::vec::Vectorized<float>::size(); i++) {
378
+ *(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0;
379
+ }
380
+
381
+ return res_vec.loadu(dst_tmp);
382
+ }
383
+
384
+ template <typename SRC>
385
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<SRC> src) {
386
+ return vec_convert_to_mask(src);
387
+ }
388
+
389
+ #if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
390
+ template <>
391
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<int> src) {
392
+ #if defined(CPU_CAPABILITY_AVX2)
393
+ return at::vec::Vectorized<float>(_mm256_castsi256_ps(src));
394
+ #else
395
+ return at::vec::Vectorized<float>(_mm512_castsi512_ps(src));
396
+ #endif
397
+ }
398
+ #endif
399
+
400
+ template <>
401
+ inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<float> src) {
402
+ return src;
403
+ }
404
+
405
+ inline at::vec::Vectorized<float> to_float_mask(int src) {
406
+ float mask;
407
+ *(uint32_t*)&mask = src ? 0xFFFFFFFF : 0;
408
+ return at::vec::Vectorized<float>(mask);
409
+ }
410
+ #endif
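The Welford helpers in this header merge partial (mean, m2, weight) triples with the standard parallel update: delta = mean_b - mean_a, mean = mean_a + delta * w_b / w, m2 = m2_a + m2_b + delta^2 * w_a * w_b / w, with w = w_a + w_b. The Python sketch below (an illustration, not part of this header) checks that scalar merge path against two-pass results from the statistics module.

from dataclasses import dataclass
import statistics

@dataclass
class Welford:
    mean: float = 0.0
    m2: float = 0.0
    weight: float = 0.0

def welford_combine(a: Welford, b: Welford) -> Welford:
    # merge two partial aggregates, mirroring the scalar branch above
    if a.weight == 0:
        return b
    if b.weight == 0:
        return a
    delta = b.mean - a.mean
    new_weight = a.weight + b.weight
    wb_over_w = b.weight / new_weight
    return Welford(
        mean=a.mean + delta * wb_over_w,
        m2=a.m2 + b.m2 + delta * delta * a.weight * wb_over_w,
        weight=new_weight,
    )

def welford_add(acc: Welford, x: float) -> Welford:
    # add a single data point, mirroring welford_combine(acc, data) above
    delta = x - acc.mean
    new_weight = acc.weight + 1.0
    new_mean = acc.mean + delta / new_weight
    return Welford(new_mean, acc.m2 + delta * (x - new_mean), new_weight)

data = [1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
left = Welford()
right = Welford()
for x in data[:3]:
    left = welford_add(left, x)
for x in data[3:]:
    right = welford_add(right, x)
merged = welford_combine(left, right)
assert abs(merged.mean - statistics.fmean(data)) < 1e-9
assert abs(merged.m2 / merged.weight - statistics.pvariance(data)) < 1e-9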
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_kernel.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_template.cpython-310.pyc ADDED
Binary file (8.47 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_epilogue_gen.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cutlass_utils.cpython-310.pyc ADDED
Binary file (7.02 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/gemm_template.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py ADDED
@@ -0,0 +1,360 @@
1
+ from typing import Dict, List
2
+ from unittest.mock import patch
3
+
4
+ import sympy
5
+
6
+ import torch._inductor.virtualized as virtualized
7
+ from torch._inductor.ir import ComputedBuffer, FlexibleLayout, IRNode, Pointwise
8
+ from torch._inductor.utils import IndentedBuffer, sympy_str
9
+
10
+
11
+ # Used as a magic string to indicate an unsupported sympy expression
12
+ # became part of generated C++ code.
13
+ _MAGIC_SYMPY_ERROR_STRING = "[!sympy: unsupported expr!]"
14
+
15
+
16
+ def _arg_str(a):
17
+ if isinstance(a, sympy.Expr):
18
+ # If this return value containing the _MAGIC_SYMPY_ERROR_STRING
19
+ # is used as part of the final generated C++ code,
20
+ # a CUTLASSEVTOpNotImplementedError is raised to indicate that
21
+ # the op could not be converted to a valid EVT expression.
22
+ return f"{_MAGIC_SYMPY_ERROR_STRING}('{sympy_str(a)}')"
23
+ return str(a)
24
+
25
+
26
+ class CUTLASSEVTOpNotImplementedError(NotImplementedError):
27
+ pass
28
+
29
+
30
+ class CutlassEVTEpilogueTypeFormatter:
31
+ """
32
+ Codegen class, which provides an entry point to generate
33
+ Cutlass "Epilogue Visitor Tree" (EVT) functor declarations.
34
+
35
+ See https://github.com/NVIDIA/cutlass/tree/main/examples/49_hopper_gemm_with_collective_builder
36
+ for more about EVTs and how they are declared and used to generate code.
37
+
38
+ Notes:
39
+ * Used by CUTLASSGemmTemplate.
40
+ * This class should not be instantiated by users, it is intended to be used
41
+ by calling CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(...)
42
+ which instantiates this class as an ops handler for virtualized.V.ops.[op-name]
43
+ * Extend this with more _op_<whatever> nodes to add support for new pointwise operations.
44
+
45
+
46
+ """
47
+
48
+ def __init__(self, accumulator_node_name, evt_type_name):
49
+ """
50
+
51
+ Initialize an instance of CutlassEVTEpilogueTypeFormatter.
52
+
53
+ Parameters:
54
+ - accumulator_node_name (str): The name of the output Buffer for the GEMM operation in the original (unfused)
55
+ IR graph.
56
+ - evt_type_name (str): The output name of the EVT type we are generating.
57
+
58
+ """
59
+ self.accumulator_node_name = accumulator_node_name
60
+ self.output = IndentedBuffer(0)
61
+ self.var_counter = 0
62
+ self.evt_type_name = evt_type_name
63
+ self.aliases = dict()
64
+
65
+ @staticmethod
66
+ def ir_to_evt_string(
67
+ template_output_node_name: str,
68
+ evt_type_name: str,
69
+ epilogue_nodes: List[IRNode],
70
+ ):
71
+ """
72
+ Formats IR nodes into a string representation compatible with Cutlass EVT format.
73
+
74
+ Args:
75
+ template_output_node_name (str): The name of the template output node.
76
+ evt_type_name (str): The name of the EVT type.
77
+ epilogue_nodes (List[IRNode]): A list of IR nodes representing the epilogue nodes. As of now, these must be
78
+ ComputedBuffer nodes wrapping Pointwise nodes.
79
+
80
+ Returns:
81
+ A string representation of the IR nodes formatted according to the Cutlass EVT format.
82
+ """
83
+ formatter = CutlassEVTEpilogueTypeFormatter(
84
+ template_output_node_name, evt_type_name
85
+ )
86
+
87
+ with virtualized.V.set_ops_handler(formatter), patch.object(
88
+ FlexibleLayout, "allow_indexing", True
89
+ ):
90
+ for node in epilogue_nodes:
91
+ if isinstance(node, ComputedBuffer):
92
+ pnode = node.data
93
+ else:
94
+ raise RuntimeError(
95
+ "Epilogue nodes must be Pointwise nodes, wrapped in a named ComputedBuffer"
96
+ )
97
+ assert isinstance(pnode, Pointwise)
98
+ index = pnode._index(pnode.ranges)
99
+ result = pnode.inner_fn(index)
100
+ # each epilogue node results in a single "using" statement and may refer to the previous steps by name
101
+ formatter.aliases[node.name] = result
102
+ res = formatter.getvalue(result)
103
+ if _MAGIC_SYMPY_ERROR_STRING in res:
104
+ raise CUTLASSEVTOpNotImplementedError(
105
+ "sympy / indexing expressions not yet supported in EVT fusion"
106
+ )
107
+ else:
108
+ return res
109
+
110
+ def __getattr__(self, name):
111
+ """
112
+ Resolve V.ops.<whatever> calls, after this instance has been installed as V.ops handler.
113
+ """
114
+
115
+ def inner(*args, **kwargs):
116
+ fargs = [_arg_str(a) for a in args]
117
+ fkwargs = {key: _arg_str(a) for key, a in kwargs.items()}
118
+ fn = getattr(self, f"_op_{name}")
119
+ line = fn(*fargs, **fkwargs)
120
+ self.var_counter += 1
121
+ varname = f"EVT_expr_{self.var_counter}"
122
+ # replace line with a new variable name
123
+ self.output.writeline(f"using {varname} = {line};")
124
+ return varname
125
+
126
+ if name.startswith("_"):
127
+ raise CUTLASSEVTOpNotImplementedError(name)
128
+ if hasattr(self, f"_op_{name}"):
129
+ return inner
130
+ else:
131
+ raise CUTLASSEVTOpNotImplementedError(name)
132
+
133
+ def _op_load(self, name, index_expr):
134
+ # Load an input to an operation. Might be the output of the matmul, the result
135
+ # of a previous epilogue node, a constant or (TODO) an auxiliary input.
136
+ if name == self.accumulator_node_name:
137
+ return f"cutlass::epilogue::fusion::Sm90AccFetch /* :={name} (matmul output in accumulator) */"
138
+ elif name in self.aliases:
139
+ return self.aliases[name]
140
+ else:
141
+ # return f"cutlass::epilogue::fusion::Sm90SrcFetch /* :={name} */"
142
+ raise CUTLASSEVTOpNotImplementedError(
143
+ f"Operand {name} not found. Auxiliary inputs not supported yet."
144
+ )
145
+
146
+ def _op_constant(self, value, dtype):
147
+ # Load a constant
148
+ if str(dtype) in ("torch.float16", "torch.float32"):
149
+ return f"cutlass::epilogue::fusion::Sm90ScalarBroadcast<ElementAcc> /* value={value}, dtype={dtype} */"
150
+ else:
151
+ raise CUTLASSEVTOpNotImplementedError(
152
+ f"Unsupported dtype for constant: {dtype}"
153
+ )
154
+
155
+ def _cutlass_binary_functional_op(self, op, a, b):
156
+ # Perform a named operation on two inputs
157
+ # see https://github.com/NVIDIA/cutlass/blob/6407bcdf0a24097b7b016ee105937693c62f9923/include/cutlass/functional.h for ops
158
+ return f"cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<cutlass::{op}, ElementAcc, ElementAcc, RoundStyle>,{a},{b}>" # noqa: B950
159
+
160
+ def _convert_to_output_dtype(self, a):
161
+ # Convert the final output to the dtype of the output buffer
162
+ return f"cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<identity_op, ElementD, ElementAcc, RoundStyle>,{a}>" # noqa: B950
163
+
164
+ def _op_to_dtype(self, a, *args, **kwargs):
165
+ # no-op in our case, since we convert to the output dtype at the end and convert everything to the accumulator
166
+ # dtype.
167
+ # It is asserted (and ascertained during the can_fuse decision) that the dtype remains compatible
168
+ # throughout the fusion chain.
169
+ return a # noqa: B950
170
+
171
+ def _op_mul(self, a, b):
172
+ return self._cutlass_binary_functional_op("multiplies", a, b)
173
+
174
+ def _op_div(self, a, b):
175
+ return self._cutlass_binary_functional_op("divides", a, b)
176
+
177
+ def _op_truediv(self, a, b):
178
+ return self._cutlass_binary_functional_op("divides", a, b)
179
+
180
+ def _op_ge(self, a, b):
181
+ return self._cutlass_binary_functional_op("greater_equal", a, b)
182
+
183
+ def _op_add(self, a, b):
184
+ return self._cutlass_binary_functional_op("plus", a, b)
185
+
186
+ def _op_sub(self, a, b):
187
+ return self._cutlass_binary_functional_op("minus", a, b)
188
+
189
+ def _op_minimum(self, a, b):
190
+ return self._cutlass_binary_functional_op("minimum", a, b)
191
+
192
+ def _op_maximum(self, a, b):
193
+ return self._cutlass_binary_functional_op("maximum", a, b)
194
+
195
+ def _op_relu(self, a):
196
+ const_zero = self._op_constant(0.0, "torch.float32")
197
+ return f"cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<cutlass::maximum, ElementAcc, ElementAcc, RoundStyle>,{a}, {const_zero}>" # noqa: B950
198
+
199
+ def reduction(self, dtype, src_dtype, reduction_type, value):
200
+ raise CUTLASSEVTOpNotImplementedError()
201
+
202
+ # Add more ops here...
203
+ def getvalue(self, result) -> str:
204
+ # Return final result
205
+ dtype_converted_expr = self._convert_to_output_dtype(
206
+ f"EVT_expr_{self.var_counter}"
207
+ )
208
+ self.output.writeline(f"using {self.evt_type_name} = {dtype_converted_expr};")
209
+ return self.output.getvalue()
210
+
211
+
212
+ class CutlassEVTEpilogueArgumentFormatter:
213
+ """
214
+ Codegen class, which provides an entry point to generate
215
+ Cutlass "Epilogue Visitor Tree" (EVT) Argument initializers
216
+
217
+ See https://github.com/NVIDIA/cutlass/tree/main/examples/49_hopper_gemm_with_collective_builder
218
+ for more about EVTs and how they are declared and used to generate code.
219
+
220
+ Notes:
221
+ * Used by CUTLASSGemmTemplate.
222
+ * This class should not be instantiated by users, it is intended to be used
223
+ by calling CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(...)
224
+ which instantiates this class as an ops handler for virtualized.V.ops.[op-name]
225
+ * Extend this with more _op_<whatever> nodes to add support for new pointwise operations.
226
+
227
+
228
+ """
229
+
230
+ def __init__(self, accumulator_node_name: str):
231
+ """
232
+
233
+ Initializes a CutlassEVTEpilogueArgumentFormatter object. Do not instantiate directly.
234
+ Use the CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string static method.
235
+
236
+ Args:
237
+ accumulator_node_name (str): The name of the accumulator node which should contain
238
+ the Matmul result before fusion according to the IR graph.
239
+ """
240
+ self.accumulator_node_name: str = accumulator_node_name #
241
+ self.output: IndentedBuffer = IndentedBuffer(0) # The output buffer for codegen
242
+ self.var_counter: int = (
243
+ 0 # used to generate variable names, incremented for each new variable
244
+ )
245
+ self.aliases: Dict[str, str] = dict() # Aliases for subexpression functors
246
+
247
+ @staticmethod
248
+ def ir_to_evt_argument_string(
249
+ template_output_node_name: str,
250
+ epilogue_nodes: List[IRNode],
251
+ ) -> str:
252
+ formatter = CutlassEVTEpilogueArgumentFormatter(
253
+ template_output_node_name,
254
+ )
255
+
256
+ with virtualized.V.set_ops_handler(formatter), patch.object(
257
+ FlexibleLayout, "allow_indexing", True
258
+ ):
259
+ for node in epilogue_nodes:
260
+ assert isinstance(node, ComputedBuffer)
261
+ pnode = node.data
262
+ assert isinstance(pnode, Pointwise)
263
+ index = pnode._index(pnode.ranges)
264
+ result = pnode.inner_fn(index)
265
+ # each epilogue node results in a single "using" statement and may refer to the previous steps by name
266
+ if node.name is not None:
267
+ formatter.aliases[node.name] = result
268
+
269
+ res: str = formatter.getvalue(result)
270
+ if _MAGIC_SYMPY_ERROR_STRING in res:
271
+ raise CUTLASSEVTOpNotImplementedError(
272
+ "sympy / indexing expressions not yet supported in EVT fusion"
273
+ )
274
+ else:
275
+ return res
276
+
277
+ def __getattr__(self, name):
278
+ def inner(*args, **kwargs):
279
+ fargs = [_arg_str(a) for a in args]
280
+ fkwargs = {key: _arg_str(a) for key, a in kwargs.items()}
281
+ fn = getattr(self, f"_op_{name}")
282
+ line = fn(*fargs, **fkwargs)
283
+ return line
284
+
285
+ if name.startswith("_"):
286
+ raise CUTLASSEVTOpNotImplementedError(name)
287
+
288
+ if hasattr(self, f"_op_{name}"):
289
+ return inner
290
+ else:
291
+ raise CUTLASSEVTOpNotImplementedError(name)
292
+
293
+ def _op_load(self, name, index_expr):
294
+ if name == self.accumulator_node_name:
295
+ return "{}"
296
+ elif name in self.aliases:
297
+ return self.aliases[name]
298
+ else:
299
+ raise CUTLASSEVTOpNotImplementedError(
300
+ f"Operand {name} not found. Auxiliary inputs not supported yet."
301
+ )
302
+
303
+ def _op_constant(self, value, dtype):
304
+ if str(dtype) in ("torch.float16", "torch.float32"):
305
+ return "{ static_cast<ElementAcc>(" + str(value) + ") }"
306
+ else:
307
+ raise CUTLASSEVTOpNotImplementedError(
308
+ f"Unsupported dtype for constant: {dtype}"
309
+ )
310
+
311
+ def _cutlass_binary_functional_op(self, op, a, b):
312
+ return f"{{ /*{op}: */ {a}, {b} }}"
313
+
314
+ def _op_mul(self, a, b):
315
+ return self._cutlass_binary_functional_op("multiplies", a, b)
316
+
317
+ def _op_div(self, a, b):
318
+ return self._cutlass_binary_functional_op("divides", a, b)
319
+
320
+ def _op_truediv(self, a, b):
321
+ return self._cutlass_binary_functional_op("divides", a, b)
322
+
323
+ def _op_ge(self, a, b):
324
+ return self._cutlass_binary_functional_op("greater_equal", a, b)
325
+
326
+ def _op_add(self, a, b):
327
+ return self._cutlass_binary_functional_op("plus", a, b)
328
+
329
+ def _op_sub(self, a, b):
330
+ return self._cutlass_binary_functional_op("minus", a, b)
331
+
332
+ def _op_minimum(self, a, b):
333
+ return self._cutlass_binary_functional_op("minimum", a, b)
334
+
335
+ def _op_maximum(self, a, b):
336
+ return self._cutlass_binary_functional_op("maximum", a, b)
337
+
338
+ def _op_relu(self, a):
339
+ const_zero = self._op_constant(0.0, "torch.float32")
340
+ return "{" + str(a) + ", " + const_zero + "}"
341
+
342
+ def _op_to_dtype(self, a, dtype, src_dtype=None):
343
+ # Is is asserted ( and ascertained during can_fuse decision ) that the dtype remains compatible
344
+ # throughout the fusion chain.
345
+ assert dtype in (
346
+ "torch.float32",
347
+ "torch.float16",
348
+ ), f"Unsupported dtype: {dtype}"
349
+ assert src_dtype in (
350
+ None,
351
+ "torch.float32",
352
+ "torch.float16",
353
+ ), f"Unsupported source dtype: {src_dtype}"
354
+ return a
355
+
356
+ def reduction(self, dtype, src_dtype, reduction_type, value):
357
+ raise CUTLASSEVTOpNotImplementedError()
358
+
359
+ def getvalue(self, result) -> str:
360
+ return "{" + str(result) + "}"
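Both formatter classes above rely on the same trick: install the instance as the V.ops handler, let __getattr__ forward each ops call to a matching _op_<name> method, and raise CUTLASSEVTOpNotImplementedError for anything unsupported so the caller can fall back. The toy formatter below (a made-up stand-in, not part of Inductor) shows just that dispatch shape.

class TinyFormatter:
    """Made-up example of the __getattr__ -> _op_<name> dispatch used above."""
    def __init__(self):
        self.lines = []
        self.var_counter = 0

    def __getattr__(self, name):
        if name.startswith("_") or not hasattr(self, f"_op_{name}"):
            raise NotImplementedError(name)

        def inner(*args):
            # every op call becomes one named "using"-style line
            self.var_counter += 1
            varname = f"EVT_expr_{self.var_counter}"
            expr = getattr(self, f"_op_{name}")(*args)
            self.lines.append(f"using {varname} = {expr};")
            return varname

        return inner

    def _op_load(self, name):
        return f"Fetch/* {name} */"

    def _op_relu(self, a):
        return f"Compute<maximum, {a}, 0>"

fmt = TinyFormatter()
result = fmt.relu(fmt.load("acc"))  # both calls go through __getattr__
print(result)                       # EVT_expr_2
print("\n".join(fmt.lines))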
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (216 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/__pycache__/gemm_operation_extensions.cpython-310.pyc ADDED
Binary file (6.63 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_lib_extensions/gemm_operation_extensions.py ADDED
@@ -0,0 +1,186 @@
1
+ from ..cutlass_utils import try_import_cutlass
2
+
3
+ if try_import_cutlass():
4
+ import enum
5
+
6
+ from cutlass_library.library import * # noqa: F401, F403
7
+ from cutlass_library.gemm_operation import * # noqa: F401, F403
8
+
9
+ # copied / modified from original at
10
+ # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/tools/library/scripts/gemm_operation.py#L658
11
+ # to support EVT similar to
12
+ # https://github.com/NVIDIA/cutlass/blob/8783c41851cd3582490e04e69e0cd756a8c1db7f/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L315C69-L315C69 # noqa: B950
13
+ class EmitGemmUniversal3xInstanceWithEVT:
14
+ """Responsible for emitting a CUTLASS 3.x template definition"""
15
+
16
+ def __init__(self, operation_suffix=""):
17
+ self.operation_suffix = operation_suffix
18
+ self.includes = [
19
+ "cutlass/cutlass.h",
20
+ "cutlass/gemm/gemm.h",
21
+ "cutlass/numeric_types.h",
22
+ "cutlass/gemm/kernel/gemm_universal.hpp",
23
+ "cutlass/gemm/collective/collective_builder.hpp",
24
+ "cutlass/epilogue/collective/collective_builder.hpp",
25
+ ]
26
+ self.builtin_epilogue_functor_template = """
27
+ ${epilogue_functor}<
28
+ ${element_c},
29
+ ${epilogue_vector_length},
30
+ ${element_accumulator},
31
+ ${element_epilogue}
32
+ >
33
+ """
34
+ self.gemm_template = """
35
+ using EpilogueScheduleType = ${epilogue_schedule};
36
+ static_assert(cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecialized> ||
37
+ cute::is_same_v<EpilogueScheduleType, cutlass::epilogue::TmaWarpSpecializedCooperative>,
38
+ "Epilogue visitor trees are currently only supported by the TMA warp-specialized epilogue");
39
+ static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
40
+ using ElementAcc = ${element_accumulator};
41
+ using ElementD = ${element_d};
42
+ ${epilogue_functor};
43
+ using ${operation_name}_epilogue =
44
+ typename cutlass::epilogue::collective::CollectiveBuilder<
45
+ ${arch}, ${opcode_class},
46
+ cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
47
+ cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
48
+ cutlass::epilogue::collective::EpilogueTileAuto,
49
+ ${element_accumulator}, ${element_epilogue},
50
+ ${element_c}, ${layout_c}, ${align_c},
51
+ ${element_d}, ${layout_d}, ${align_d},
52
+ EpilogueScheduleType,
53
+ ${operation_name}_epilogue_functor
54
+ >::CollectiveOp;
55
+
56
+ using ${operation_name}_mainloop =
57
+ typename cutlass::gemm::collective::CollectiveBuilder<
58
+ ${arch}, ${opcode_class},
59
+ ${element_a}, ${layout_a}, ${align_a},
60
+ ${element_b}, ${layout_b}, ${align_b},
61
+ ${element_accumulator},
62
+ cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
63
+ cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
64
+ ${stages},
65
+ ${kernel_schedule}
66
+ >::CollectiveOp;
67
+
68
+ // Gemm operator ${operation_name}
69
+ using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
70
+ cute::Shape<int,int,int,int>,
71
+ ${operation_name}_mainloop,
72
+ ${operation_name}_epilogue,
73
+ ${tile_scheduler}>;
74
+
75
+ // Define named type
76
+ struct ${operation_name} :
77
+ public ${operation_name}_base { };
78
+
79
+ """
80
+
81
+ #
82
+ def instance_template(self):
83
+ return """
84
+ ${compile_guard_start}
85
+ using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>;
86
+ manifest.append(
87
+ new ${gemm_kind}<GemmKernel>("${operation_name}"));
88
+ ${compile_guard_end}
89
+ """
90
+
91
+ #
92
+ def emit(self, operation):
93
+ tile_shape = operation.tile_description.tile_shape
94
+ warp_count = operation.tile_description.warp_count
95
+ # stage count set to zero indicates builder automatic stage selection
96
+ if operation.tile_description.stages > 0:
97
+ stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>"
98
+ else:
99
+ stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename {str(operation.procedural_name())}_epilogue::SharedStorage)>" # noqa: B950
100
+ warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)]
101
+
102
+ (
103
+ instance_layout_A,
104
+ instance_layout_B,
105
+ instance_layout_C,
106
+ instance_layout_D,
107
+ ) = (
108
+ operation.A.layout,
109
+ operation.B.layout,
110
+ operation.C.layout,
111
+ operation.D.layout,
112
+ )
113
+
114
+ # 3.0 profiler integration only supports trivial epilogues for now
115
+ epilogue_vector_length = 1
116
+
117
+ # Support built-in epilogue functors or user-defined functions
118
+ if isinstance(operation.epilogue_functor, enum.Enum):
119
+ values = {
120
+ "epilogue_vector_length": str(epilogue_vector_length),
121
+ "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined]
122
+ "epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor], # type: ignore[name-defined]
123
+ }
124
+ epilogue_functor = SubstituteTemplate( # type: ignore[name-defined]
125
+ self.builtin_epilogue_functor_template, values
126
+ )
127
+
128
+ elif callable(operation.epilogue_functor):
129
+ epilogue_functor = operation.epilogue_functor(
130
+ operation.procedural_name() + "_epilogue_functor"
131
+ )
132
+ else:
133
+ epilogue_functor = str(operation.epilogue_functor)
134
+ #
135
+
136
+ values = {
137
+ "operation_name": operation.procedural_name(),
138
+ "operation_suffix": self.operation_suffix,
139
+ "element_a": DataTypeTag[operation.A.element], # type: ignore[name-defined]
140
+ "layout_a": LayoutTag[instance_layout_A], # type: ignore[name-defined]
141
+ "element_b": DataTypeTag[operation.B.element], # type: ignore[name-defined]
142
+ "layout_b": LayoutTag[instance_layout_B], # type: ignore[name-defined]
143
+ "element_c": DataTypeTag[operation.C.element], # type: ignore[name-defined]
144
+ "layout_c": LayoutTag[instance_layout_C], # type: ignore[name-defined]
145
+ "element_d": DataTypeTag[operation.D.element], # type: ignore[name-defined]
146
+ "layout_d": LayoutTag[instance_layout_D], # type: ignore[name-defined]
147
+ "element_accumulator": DataTypeTag[operation.accumulator_type()], # type: ignore[name-defined]
148
+ "opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], # type: ignore[name-defined] # noqa: B950
149
+ "arch": "cutlass::arch::Sm%d" % operation.arch,
150
+ "tile_shape_m": str(operation.tile_description.tile_shape[0]),
151
+ "tile_shape_n": str(operation.tile_description.tile_shape[1]),
152
+ "tile_shape_k": str(operation.tile_description.tile_shape[2]),
153
+ "cluster_m": str(operation.tile_description.cluster_shape[0]),
154
+ "cluster_n": str(operation.tile_description.cluster_shape[1]),
155
+ "cluster_k": str(operation.tile_description.cluster_shape[2]),
156
+ "warp_shape_m": str(warp_shape[0]),
157
+ "warp_shape_n": str(warp_shape[1]),
158
+ "warp_shape_k": str(warp_shape[2]),
159
+ "instruction_shape_m": str(
160
+ operation.tile_description.math_instruction.instruction_shape[0]
161
+ ),
162
+ "instruction_shape_n": str(
163
+ operation.tile_description.math_instruction.instruction_shape[1]
164
+ ),
165
+ "instruction_shape_k": str(
166
+ operation.tile_description.math_instruction.instruction_shape[2]
167
+ ),
168
+ "kernel_schedule": str(KernelScheduleTag[operation.kernel_schedule]), # type: ignore[name-defined]
169
+ "epilogue_schedule": str(EpilogueScheduleTag[operation.epilogue_schedule]), # type: ignore[name-defined]
170
+ "epilogue_functor": epilogue_functor,
171
+ "stages": stage_count_string,
172
+ "align_a": str(operation.A.alignment),
173
+ "align_b": str(operation.B.alignment),
174
+ "align_c": str(operation.C.alignment),
175
+ "align_d": str(operation.C.alignment),
176
+ "transform_a": ComplexTransformTag[operation.A.complex_transform], # type: ignore[name-defined]
177
+ "transform_b": ComplexTransformTag[operation.B.complex_transform], # type: ignore[name-defined]
178
+ "math_operation": MathOperationTag[ # type: ignore[name-defined]
179
+ operation.tile_description.math_instruction.math_operation
180
+ ],
181
+ "epilogue_vector_length": str(epilogue_vector_length),
182
+ "element_epilogue": str(DataTypeTag[operation.element_epilogue]), # type: ignore[name-defined]
183
+ "tile_scheduler": str(TileSchedulerTag[operation.tile_scheduler]), # type: ignore[name-defined]
184
+ }
185
+
186
+ return SubstituteTemplate(self.gemm_template, values) # type: ignore[name-defined]
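The emitter above fills its ${...} placeholders with SubstituteTemplate(), a helper that comes from cutlass_library rather than this file. As a rough stand-in, the snippet below uses Python's string.Template on a trimmed fragment of the gemm_template string; the operation name and scheduler values are hypothetical and only illustrate how the substitution step behaves.

from string import Template

gemm_template = """
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
    cute::Shape<int,int,int,int>,
    ${operation_name}_mainloop,
    ${operation_name}_epilogue,
    ${tile_scheduler}>;
"""

values = {
    "operation_name": "cutlass3x_sm90_tensorop_gemm_example",  # hypothetical name
    "tile_scheduler": "cutlass::gemm::PersistentScheduler",
}

# safe_substitute leaves any unknown ${...} keys untouched, similar in spirit
# to the repeated string replacement done by the CUTLASS helper.
print(Template(gemm_template).safe_substitute(values))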
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cutlass_utils.py ADDED
@@ -0,0 +1,257 @@
1
+ import functools
2
+ import logging
3
+ import os
4
+ import sys
5
+ from dataclasses import dataclass
6
+ from typing import Any, List, Optional
7
+
8
+ import sympy
9
+
10
+ import torch
11
+
12
+ from ...codecache import cache_dir
13
+ from ...config import cuda as inductor_cuda_config
14
+ from ...ir import Layout
15
+ from .cuda_env import get_cuda_arch, get_cuda_version
16
+
17
+ log = logging.getLogger(__name__)
18
+
19
+
20
+ def _rename_cutlass_import(content: str, cutlass_modules: List[str]) -> str:
21
+ for cutlass_module in cutlass_modules:
22
+ content = content.replace(
23
+ f"from {cutlass_module} import ",
24
+ f"from cutlass_library.{cutlass_module} import ",
25
+ )
26
+ return content
27
+
28
+
29
+ def _gen_cutlass_file(
30
+ file_name: str, cutlass_modules: List[str], src_dir: str, dst_dir: str
31
+ ) -> None:
32
+ orig_full_path = os.path.abspath(os.path.join(src_dir, file_name))
33
+ text = ""
34
+ with open(orig_full_path) as f:
35
+ text = f.read()
36
+ text = _rename_cutlass_import(text, cutlass_modules)
37
+ dst_full_path = os.path.abspath(
38
+ os.path.join(
39
+ dst_dir,
40
+ file_name,
41
+ )
42
+ )
43
+ with open(dst_full_path, "w") as f:
44
+ f.write(text)
45
+
46
+
47
+ @functools.lru_cache(None)
48
+ def try_import_cutlass() -> bool:
49
+ # Copy CUTLASS python scripts to a temp dir and add the temp dir to Python search path.
50
+ # This is a temporary hack to avoid CUTLASS module naming conflicts.
51
+ # TODO(ipiszy): remove this hack when CUTLASS solves Python scripts packaging structure issues.
52
+
53
+ cutlass_py_full_path = os.path.abspath(
54
+ os.path.join(inductor_cuda_config.cutlass_dir, "python/cutlass_library")
55
+ )
56
+ tmp_cutlass_py_full_path = os.path.abspath(
57
+ os.path.join(cache_dir(), "torch_cutlass_library")
58
+ )
59
+ dst_link = os.path.join(tmp_cutlass_py_full_path, "cutlass_library")
60
+
61
+ if os.path.isdir(cutlass_py_full_path):
62
+ if tmp_cutlass_py_full_path not in sys.path:
63
+ if os.path.exists(dst_link):
64
+ assert os.path.islink(
65
+ dst_link
66
+ ), f"{dst_link} is not a symlink. Try to remove {dst_link} manually and try again."
67
+ assert os.path.realpath(os.readlink(dst_link)) == os.path.realpath(
68
+ cutlass_py_full_path
69
+ ), f"Symlink at {dst_link} does not point to {cutlass_py_full_path}"
70
+ else:
71
+ os.makedirs(tmp_cutlass_py_full_path, exist_ok=True)
72
+ os.symlink(cutlass_py_full_path, dst_link)
73
+ sys.path.append(tmp_cutlass_py_full_path)
74
+ try:
75
+ import cutlass_library.generator # noqa: F401
76
+ import cutlass_library.library # noqa: F401
77
+ import cutlass_library.manifest # noqa: F401
78
+
79
+ return True
80
+
81
+ except ImportError as e:
82
+ log.debug(
83
+ "Failed to import CUTLASS packages: %s, ignoring the CUTLASS backend.",
84
+ str(e),
85
+ )
86
+ else:
87
+ log.debug(
88
+ "Failed to import CUTLASS packages: CUTLASS repo does not exist: %s",
89
+ cutlass_py_full_path,
90
+ )
91
+ return False
92
+
93
+
94
+ def _normalize_cuda_arch(arch: str) -> str:
95
+ if int(arch) >= 90:
96
+ return "90"
97
+ elif int(arch) >= 80:
98
+ return "80"
99
+ elif int(arch) >= 75:
100
+ return "75"
101
+ elif int(arch) >= 70:
102
+ return "70"
103
+ else:
104
+ raise NotImplementedError(f"Unsupported cuda arch: {arch}")
105
+
106
+
107
+ @dataclass
108
+ class CUTLASSArgs:
109
+ """
110
+ CUTLASS args used to initialize a CUTLASS Manifest.
111
+ """
112
+
113
+ architectures: Optional[str] = None
114
+ cuda_version: Optional[str] = None
115
+
116
+ operations = "all"
117
+ build_dir = ""
118
+ curr_build_dir = ""
119
+ generator_target = ""
120
+ kernels = "all"
121
+ ignore_kernels = ""
122
+ kernel_filter_file = None
123
+ selected_kernel_list = None
124
+ interface_dir = None
125
+ filter_by_cc = True
126
+ disable_full_archs_compilation = False
127
+
128
+ def __post_init__(self):
129
+ if self.architectures is None or self.cuda_version is None:
130
+ raise RuntimeError(
131
+ f"{self.architectures=} or {self.cuda_version=} is None!"
132
+ )
133
+ self.architectures = _normalize_cuda_arch(self.architectures)
134
+
135
+
136
+ @functools.lru_cache(None)
137
+ def _gen_ops_cached(arch, version) -> List[Any]:
138
+ # Note: Cache needs to be specific for cuda architecture and version
139
+
140
+ # Import cutlass python scripts.
141
+ assert try_import_cutlass()
142
+ import cutlass_library.generator as cutlass_generator
143
+ import cutlass_library.manifest as cutlass_manifest
144
+
145
+ if arch is None or version is None:
146
+ log.error(
147
+ "Cannot detect cuda arch %s or cuda version %s. "
148
+ "Will discard all cutlass ops. "
149
+ "Please consider setting _inductor.cuda.arch and _inductor.cuda.version configs.",
150
+ arch,
151
+ version,
152
+ )
153
+ return list()
154
+ arch = _normalize_cuda_arch(arch)
155
+ args = CUTLASSArgs(architectures=arch, cuda_version=version)
156
+ manifest = cutlass_manifest.Manifest(args)
157
+
158
+ if arch == "90":
159
+ cutlass_generator.GenerateSM90(manifest, args.cuda_version)
160
+ cutlass_generator.GenerateSM80(manifest, args.cuda_version)
161
+ else:
162
+ try:
163
+ func = getattr(cutlass_generator, "GenerateSM" + arch)
164
+ func(manifest, args.cuda_version)
165
+ except AttributeError as e:
166
+ raise NotImplementedError(
167
+ "Arch " + arch + " is not supported by current cutlass lib."
168
+ ) from e
169
+ return manifest.operations
170
+
171
+
172
+ def gen_ops() -> List[Any]:
173
+ """
174
+ Generates all supported CUTLASS operations.
175
+ """
176
+ arch = get_cuda_arch()
177
+ version = get_cuda_version()
178
+ return _gen_ops_cached(arch, version)
179
+
180
+
181
+ def dtype_match(
182
+ torch_dtype: Optional[torch.dtype],
183
+ cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined]
184
+ ) -> bool:
185
+ # Import cutlass python scripts.
186
+ assert try_import_cutlass()
187
+ import cutlass_library
188
+
189
+ if torch_dtype == torch.float:
190
+ return (
191
+ cutlass_dtype == cutlass_library.library.DataType.f32
192
+ or cutlass_dtype == cutlass_library.library.DataType.tf32
193
+ )
194
+ elif torch_dtype == torch.half:
195
+ return cutlass_dtype == cutlass_library.library.DataType.f16
196
+ elif torch_dtype == torch.bfloat16:
197
+ return cutlass_dtype == cutlass_library.library.DataType.bf16
198
+ else:
199
+ return False
200
+
201
+
202
+ def get_accumulator_dtype(
203
+ input_torch_dtypes: List[torch.dtype],
204
+ ) -> Optional[torch.dtype]:
205
+ """
206
+ Given a list of input torch dtypes, returns the inferred accumulator torch dtype.
207
+ """
208
+
209
+ if len(input_torch_dtypes) == 0:
210
+ return None
211
+ torch_dtype = input_torch_dtypes[0]
212
+ for dtype in input_torch_dtypes[1:]:
213
+ if torch_dtype != dtype:
214
+ raise RuntimeError(f"Unmatched input dtypes: {torch_dtype=}, {dtype=}")
215
+ if torch_dtype == torch.half:
216
+ if torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction:
217
+ return torch_dtype
218
+ else:
219
+ return torch.float
220
+ if torch_dtype in {torch.bfloat16, torch.float}:
221
+ return torch.float
222
+ raise NotImplementedError(f"Unsupported data type: {input_torch_dtypes=}")
223
+
224
+
225
+ def get_alignments(torch_dtype: torch.dtype) -> List[int]:
226
+ """
227
+ Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype.
228
+ CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment.
229
+ """
230
+
231
+ if torch_dtype in (torch.half, torch.bfloat16):
232
+ return [8, 4, 2, 1]
233
+ elif torch_dtype == torch.float:
234
+ return [4, 2, 1]
235
+ else:
236
+ raise NotImplementedError(f"unsupported {torch_dtype=} for alignments")
237
+
238
+
239
+ def get_max_alignment(inductor_layout: Layout) -> int:
240
+ """
241
+ Returns the max alignment (in terms of number of elements) for a given Inductor Layout.
242
+ """
243
+
244
+ dtype = inductor_layout.dtype
245
+ size = inductor_layout.size
246
+ offset = inductor_layout.offset
247
+
248
+ def is_static_int(number):
249
+ return isinstance(number, (int, sympy.Integer))
250
+
251
+ if is_static_int(size[-1]) and is_static_int(offset):
252
+ alignments = get_alignments(dtype)
253
+ for alignment in alignments:
254
+ if int(size[-1]) % alignment == 0 and int(offset) % alignment == 0:
255
+ return alignment
256
+
257
+ return 1
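The alignment helpers above pick the largest CUTLASS-supported alignment (in elements) that evenly divides both the innermost dimension and the storage offset. A minimal, self-contained sketch of that selection using plain integers; the helper names below are illustrative, not part of torch._inductor:

def candidate_alignments(itemsize: int) -> list:
    # CUTLASS SM80 GEMM/conv APIs support at most 16-byte vectorized access,
    # so candidate alignments (in elements) are powers of two up to 16 // itemsize.
    max_elems = 16 // itemsize
    return [a for a in (8, 4, 2, 1) if a <= max_elems]

def pick_max_alignment(last_dim_size: int, storage_offset: int, itemsize: int) -> int:
    # Mirror get_max_alignment: take the largest candidate that divides both
    # the innermost (contiguous) dimension and the storage offset.
    for alignment in candidate_alignments(itemsize):
        if last_dim_size % alignment == 0 and storage_offset % alignment == 0:
            return alignment
    return 1

# fp16 tensor (itemsize=2), innermost dim 1024, offset 0 -> alignment 8
assert pick_max_alignment(1024, 0, 2) == 8
# fp32 tensor (itemsize=4), innermost dim 6 -> alignment 2
assert pick_max_alignment(6, 0, 4) == 2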
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/gemm_template.py ADDED
@@ -0,0 +1,706 @@
1
+ import copy
2
+ import logging
3
+ import re
4
+ from typing import cast, Dict, List, Optional, Tuple
5
+
6
+ from ...config import cuda as inductor_cuda_config
7
+ from ...ir import Buffer, CUDATemplateBuffer, FixedLayout, IRNode, Layout
8
+ from ..common import IndentedBuffer
9
+
10
+ from . import cutlass_utils
11
+ from .cuda_kernel import CUDATemplateKernel
12
+ from .cuda_template import CUTLASSTemplate
13
+ from .cutlass_epilogue_gen import (
14
+ CutlassEVTEpilogueArgumentFormatter,
15
+ CutlassEVTEpilogueTypeFormatter,
16
+ )
17
+
18
+ log = logging.getLogger(__name__)
19
+
20
+ GEMM_TEMPLATE = r"""
21
+ {{template.header().getvalue()}}
22
+ {{template.globals().getvalue()}}
23
+ {{instance_definition}}
24
+ // When workspace_size is not a nullptr, populates requested workspace_size and returns.
25
+ // Otherwise, computes the Gemm kernel using the given workspace ptr.
26
+ extern "C" {
27
+ {{kernel.def_kernel(inputs=[X, W, Bias], outputs=[Y], names_str="X, W, Bias, Y", input_reorder=input_reorder)}} {
28
+ try {
29
+ {{kernel.check_not_null(X)}}
30
+ {{kernel.check_not_null(W)}}
31
+ {{kernel.check_not_null(Bias)}}
32
+ {{kernel.check_not_null(Y)}}
33
+ int64_t B = {{kernel.size(Y, 0, -3, default_value=1)}};
34
+ int64_t M = {{kernel.size(X, -2)}};
35
+ int64_t K = {{kernel.size(X, -1)}};
36
+ int64_t N = {{kernel.size(W, -1)}};
37
+ using ElementComputeEpilogue = {{instance_type}}::ElementAccumulator;
38
+ using coord_t = cutlass::gemm::GemmCoord::Index;
39
+ {{instance_type}}::Arguments arguments;
40
+ {{template.render_gemm_arguments(argument_template, epilogue_template, should_swap_xw,
41
+ X, W, Bias, Y, alpha, beta, kernel, epilogue_args)}}
42
+ {{instance_type}} gemm_op;
43
+ if (workspace_size) {
44
+ *workspace_size = gemm_op.get_workspace_size(arguments);
45
+ return 0;
46
+ }
47
+ {
48
+ auto status = gemm_op.can_implement(arguments);
49
+ CUTLASS_CHECK(status);
50
+ }
51
+ {
52
+ auto status = gemm_op.initialize(arguments, workspace, stream);
53
+ CUTLASS_CHECK(status);
54
+ }
55
+ {
56
+ auto status = gemm_op(stream);
57
+ CUTLASS_CHECK(status);
58
+ }
59
+ }
60
+ catch (std::exception& e) {
61
+ std::cerr << "Runtime error: " << e.what() << std::endl;
62
+ return -1;
63
+ }
64
+ catch (...) {
65
+ return -1;
66
+ }
67
+ return 0;
68
+ }
69
+ }
70
+ """
71
+
72
+
73
+ GEMM_ARGS_CUTLASS_2X = r"""
74
+ int64_t batch_stride_x = {{kernel.stride(X, -3)}};
75
+ int64_t row_stride_x = {{kernel.row_or_column_stride(X)}};
76
+ int64_t batch_stride_w = {{kernel.stride(W, -3)}};
77
+ int64_t row_stride_w = {{kernel.row_or_column_stride(W)}};
78
+ int64_t batch_stride_bias = {{kernel.stride(Bias, -3)}};
79
+ int64_t row_stride_bias = {{kernel.row_or_column_stride(Bias)}};
80
+ int64_t batch_stride_y = {{kernel.stride(Y, -3)}};
81
+ int64_t row_stride_y = {{kernel.row_or_column_stride(Y)}};
82
+ // Initialize GemmUniversalInstance arguments.
83
+ arguments = {
84
+ {{template.gemm_mode()}}, // GemmUniversalMode mode
85
+ {
86
+ static_cast<coord_t>(M),
87
+ static_cast<coord_t>(N),
88
+ static_cast<coord_t>(K)
89
+ }, // GemmCoord problem_size
90
+ {{split_k if split_k > 1 else 'B'}}, // int batch_count
91
+ {ElementComputeEpilogue({{alpha}}), ElementComputeEpilogue({{beta}})}, // typename EpilogueOutputOp::Params epilogue
92
+ {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // void const * ptr_A
93
+ {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // void const * ptr_B
94
+ {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // void const * ptr_C
95
+ {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // void * ptr_D
96
+ batch_stride_x, // int64_t batch_stride_A
97
+ batch_stride_w, // int64_t batch_stride_B
98
+ batch_stride_bias, // int64_t batch_stride_C
99
+ batch_stride_y, // int64_t batch_stride_D
100
+ row_stride_x, // typename LayoutA::Stride::LongIndex lda
101
+ row_stride_w, // typename LayoutB::Stride::LongIndex ldb
102
+ row_stride_bias, // typename LayoutC::Stride::LongIndex ldc
103
+ row_stride_y, // typename LayoutC::Stride::LongIndex ldd
104
+ };
105
+ """
106
+
107
+
108
+ GEMM_ARGS_CUTLASS_3X = r"""
109
+ // Initialize GemmUniversal3xInstance arguments.
110
+ arguments = {
111
+ {{template.gemm_mode()}}, // GemmUniversalMode mode
112
+ {
113
+ static_cast<coord_t>({{M}}),
114
+ static_cast<coord_t>({{N}}),
115
+ static_cast<coord_t>(K),
116
+ static_cast<coord_t>(B)
117
+ }, // ProblemShape problem_shape
118
+ {
119
+ {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // ElementA const* ptr_A
120
+ {
121
+ {{template.cute_int(kernel.stride(X, -2), "stride_x0")}},
122
+ {{template.cute_int(kernel.stride(X, -1), "stride_x1")}},
123
+ {{template.cute_int(kernel.stride(X, -3), "batch_stride_x")}}
124
+ }, // StrideA dA
125
+ {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // ElementB const* ptr_B
126
+ {
127
+ {{template.cute_int(kernel.stride(W, -1), "stride_w1")}},
128
+ {{template.cute_int(kernel.stride(W, -2), "stride_w0")}},
129
+ {{template.cute_int(kernel.stride(W, -3), "batch_stride_w")}}
130
+ }, // StrideB dB
131
+ }, // MainloopArguments mainloop
132
+ {{epilogue_arguments}}
133
+ };
134
+ """
135
+
136
+ GEMM_ARGS_CUTLASS_3X_EPILOGUE = r"""
137
+ // see https://tinyurl.com/4rk89z48
138
+ {
139
+ {{epilogue_args}}, // thread, typename FusionCallbacks::Arguments ( EVT ) or ThreadEpilogueOp::Params (non-EVT )
140
+ {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // ElementC const* ptr_C
141
+ {
142
+ {{template.cute_int(kernel.stride(Bias, -2, 1), "stride_bias0")}},
143
+ {{template.cute_int(kernel.stride(Bias, -1, 1), "stride_bias1")}},
144
+ {{template.cute_int(kernel.stride(Bias, -3), "batch_stride_bias")}}
145
+ }, // StrideC dC
146
+ {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // ElementD const* ptr_D
147
+ {
148
+ {{template.cute_int(kernel.stride(Y, -2), "stride_y0")}},
149
+ {{template.cute_int(kernel.stride(Y, -1), "stride_y1")}},
150
+ {{template.cute_int(kernel.stride(Y, -3), "batch_stride_y")}}
151
+ }, // StrideD dD
152
+ }, // EpilogueArguments epilogue
153
+ """
154
+
155
+
156
+ class CUTLASSGemmTemplate(CUTLASSTemplate):
157
+ """
158
+ CUTLASS GEMM template, which is used to generate CUTLASS GEMM kernels
159
+ including those which allow flexible fusions with epilogues.
160
+ """
161
+
162
+ def __init__(
163
+ self,
164
+ input_nodes: List[Buffer],
165
+ layout: Layout,
166
+ alpha: float,
167
+ beta: float,
168
+ input_reorder: Optional[List[int]] = None,
169
+ can_fuse_epilogue: Optional[bool] = None,
170
+ ):
171
+ """
172
+ Args:
173
+ input_nodes: input nodes of the kernel
174
+ layout: layout of the output node
175
+ alpha: alpha value of the GEMM operation
176
+ beta: beta value of the GEMM operation
177
+ input_reorder: reorder of the input nodes
178
+ can_fuse_epilogue: If set to True, will only list and use operators capable of flexible epilogue fusions.
179
+ If False, it will not use those. If None, both may be listed, but it will not allow fusions.
180
+ Defaults to None
181
+ """
182
+ super().__init__("cutlass_gemm", input_nodes, layout, input_reorder)
183
+ self.alpha = alpha
184
+ self.beta = beta
185
+ self.can_fuse_epilogue = can_fuse_epilogue
186
+
187
+ @staticmethod
188
+ def add_cutlass_gemm_choices(
189
+ choices,
190
+ layout,
191
+ input_nodes,
192
+ alpha=1,
193
+ beta=0,
194
+ input_reorder=None,
195
+ fuseable=True,
196
+ non_fuseable=True,
197
+ ):
198
+ if non_fuseable:
199
+ if fuseable:
200
+ # list both fuseable and non-fuseable ops, and treat them all as non-fuseable
201
+ can_fuse_epilogue = False
202
+ else:
203
+ can_fuse_epilogue = None
204
+
205
+ cutlass_template = CUTLASSGemmTemplate(
206
+ input_nodes,
207
+ layout,
208
+ alpha=alpha,
209
+ beta=beta,
210
+ input_reorder=input_reorder,
211
+ can_fuse_epilogue=can_fuse_epilogue,
212
+ )
213
+ ops = cutlass_template.gen_ops()
214
+ for op in ops:
215
+ cutlass_template.maybe_append_choice(
216
+ choices,
217
+ op=op,
218
+ )
219
+ else:
220
+ ops = []
221
+ if fuseable:
222
+ cutlass_template_evt = CUTLASSGemmTemplate(
223
+ input_nodes,
224
+ layout,
225
+ alpha=alpha,
226
+ beta=beta,
227
+ input_reorder=input_reorder,
228
+ can_fuse_epilogue=True,
229
+ )
230
+ # This will list only ops capable of EVT fusion
231
+ ops_evt = cutlass_template_evt.gen_ops()
232
+ for op in ops_evt:
233
+ cutlass_template_evt.maybe_append_choice(
234
+ choices,
235
+ op=op,
236
+ )
237
+ else:
238
+ ops_evt = []
239
+ log.debug(
240
+ "Added %d cutlass gemm configs and %d fuseable gemm configs.",
241
+ len(ops),
242
+ len(ops_evt),
243
+ )
244
+
245
+ def header(self) -> IndentedBuffer:
246
+ res = super().header()
247
+ res.splice(
248
+ """
249
+ #include "cutlass/gemm/gemm.h"
250
+ #include "cutlass/gemm/device/gemm_universal.h"
251
+ #include "cutlass/gemm/device/gemm_universal_adapter.h"
252
+ #include "cutlass/gemm/kernel/gemm_universal.hpp"
253
+ #include "cutlass/gemm/collective/collective_builder.hpp"
254
+ #include "cutlass/epilogue/collective/collective_builder.hpp"
255
+ #include "cutlass/epilogue/collective/default_epilogue.hpp"
256
+ #include "cutlass/epilogue/thread/linear_combination.h"
257
+ #include "cutlass/gemm/dispatch_policy.hpp"
258
+ #include "cutlass/gemm/kernel/tile_scheduler.hpp"
259
+ #include "cutlass/util/distribution.h"
260
+ #include "cutlass/util/packed_stride.hpp"
261
+ #include "cutlass/util/tensor_view_io.h"
262
+ """
263
+ )
264
+ return res
265
+
266
+ @staticmethod
267
+ def cutlass_layout(torch_layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined]
268
+ assert cutlass_utils.try_import_cutlass()
269
+ import cutlass_library.library as cutlass_lib
270
+
271
+ if torch_layout.stride[-1] == 1:
272
+ return cutlass_lib.LayoutType.RowMajor
273
+ elif torch_layout.stride[-2] == 1:
274
+ return cutlass_lib.LayoutType.ColumnMajor
275
+ else:
276
+ return None
277
+
278
+ @staticmethod
279
+ def flip_cutlass_layout(
280
+ cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined]
281
+ ) -> "cutlass_lib.LayoutType": # type: ignore[name-defined]
282
+ assert cutlass_utils.try_import_cutlass()
283
+ import cutlass_library.library as cutlass_lib
284
+
285
+ if cutlass_layout == cutlass_lib.LayoutType.RowMajor:
286
+ return cutlass_lib.LayoutType.ColumnMajor
287
+ else:
288
+ return cutlass_lib.LayoutType.RowMajor
289
+
290
+ @staticmethod
291
+ def layout_match(torch_layout, cutlass_layout) -> bool:
292
+ return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout
293
+
294
+ @staticmethod
295
+ def set_alignment(torch_layout, op_element) -> bool:
296
+ alignment = cutlass_utils.get_max_alignment(torch_layout)
297
+ if alignment < op_element.alignment:
298
+ return False
299
+ else:
300
+ op_element.alignment = alignment
301
+ return True
302
+
303
+ @staticmethod
304
+ def has_tma_epilogue(op) -> bool:
305
+ assert cutlass_utils.try_import_cutlass()
306
+ import cutlass_library.library as cutlass_lib
307
+
308
+ result = False
309
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
310
+ epilogue_schedule_str = str(op.epilogue_schedule).split(".")[-1]
311
+ result = epilogue_schedule_str.lower().startswith("tma")
312
+ return result
313
+
314
+ @staticmethod
315
+ def supports_evt(op: "cutlass_library.gemm_op.GemmOperation") -> bool: # type: ignore[name-defined]
316
+ """
317
+ returns True if the op is capable of flexible epilogue fusions
318
+ using epilogue visitor trees.
319
+
320
+ See https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L283-L285 # noqa: B950
321
+ """
322
+ assert cutlass_utils.try_import_cutlass()
323
+ import cutlass_library.library as cutlass_lib
324
+
325
+ if op.gemm_kind != cutlass_lib.GemmKind.Universal3x:
326
+ return False
327
+ if op.epilogue_schedule not in (
328
+ cutlass_lib.EpilogueScheduleType.TmaWarpSpecialized,
329
+ cutlass_lib.EpilogueScheduleType.TmaWarpSpecializedCooperative,
330
+ ):
331
+ return False
332
+
333
+ return True
334
+
335
+ def render_evt_epilogue_declaration(
336
+ self,
337
+ template_output_node_name: str,
338
+ evt_type_name: str,
339
+ epilogue_nodes: List[IRNode],
340
+ ) -> str:
341
+ """Generates the epilogue for the EVT epilogue fusion"""
342
+ return CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
343
+ template_output_node_name, evt_type_name, epilogue_nodes
344
+ )
345
+
346
+ def define_gemm_instance(
347
+ self,
348
+ op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
349
+ output_buffer_name: str,
350
+ epilogue_nodes: Optional[List[IRNode]] = None,
351
+ ) -> Tuple[str, str]:
352
+ assert cutlass_utils.try_import_cutlass()
353
+ import cutlass_library.gemm_operation as cutlass_gemm_op
354
+ import cutlass_library.library as cutlass_lib
355
+
356
+ from torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions import (
357
+ EmitGemmUniversal3xInstanceWithEVT,
358
+ )
359
+
360
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
361
+ if epilogue_nodes is not None and len(epilogue_nodes) > 0:
362
+ emitter = EmitGemmUniversal3xInstanceWithEVT()
363
+ op.epilogue_functor = lambda epilogue_functor_type_name: self.render_evt_epilogue_declaration(
364
+ output_buffer_name, epilogue_functor_type_name, epilogue_nodes
365
+ )
366
+ else:
367
+ emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance()
368
+ op_def = emitter.emit(op)
369
+ pattern = re.compile(r"\s*struct\s(.*?)\s:")
370
+ decl = [line for line in op_def.split("\n") if "struct " in line][-1]
371
+ else:
372
+ if epilogue_nodes is not None and len(epilogue_nodes) > 0:
373
+ raise RuntimeError(
374
+ "EVT epilogue fusion is not supported for Cutlass 2.x ops."
375
+ )
376
+ emitter = cutlass_gemm_op.EmitGemmInstance()
377
+ op_def = emitter.emit(op)
378
+ op_def = op_def.replace(
379
+ "cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal"
380
+ )
381
+ op_def = op_def.replace("false,", "")
382
+ pattern = re.compile(r"\s*using\s(.*?)\s=")
383
+ decl = op_def.split("\n")[2]
384
+ match = pattern.match(decl)
385
+ if match is None:
386
+ raise RuntimeError("Invalid Gemm config: \n" + op_def)
387
+ op_type = match.groups()[0]
388
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
389
+ op_def += f"\n using {op_type}_device_type = cutlass::gemm::device::GemmUniversalAdapter<{op_type}>;\n"
390
+ op_type = f"{op_type}_device_type"
391
+ return op_def, op_type
392
+
393
+ @staticmethod
394
+ def should_swap_XW(
395
+ bias: IRNode,
396
+ beta: float,
397
+ ) -> bool:
398
+ return True
399
+
400
+ # TODO(ipiszy): Check whether it's necessary to swap X/W.
401
+ # strides = bias.get_stride()
402
+ # if strides[-1] != 1:
403
+ # return True
404
+ # for stride in strides[:-1]:
405
+ # if stride != 0:
406
+ # return True
407
+ # return False
408
+
409
+ @staticmethod
410
+ def swap_XW(
411
+ op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
412
+ ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined]
413
+ # Swap X and W in GemmOperation.
414
+ new_op = copy.deepcopy(op)
415
+ new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout)
416
+ new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout)
417
+ new_op.A, new_op.B = new_op.B, new_op.A
418
+ new_op.C.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.C.layout)
419
+ new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout)
420
+ return new_op
421
+
422
+ def filter_op(
423
+ self,
424
+ op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined]
425
+ ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined]
426
+ assert cutlass_utils.try_import_cutlass()
427
+ import cutlass_library.library as cutlass_lib
428
+
429
+ # Skip simt kernels
430
+ if (
431
+ op.tile_description.math_instruction.opcode_class
432
+ == cutlass_lib.OpcodeClass.Simt
433
+ ):
434
+ return None
435
+
436
+ # Only keep GemmUniversal kernels
437
+ if op.gemm_kind not in {
438
+ cutlass_lib.GemmKind.Universal,
439
+ cutlass_lib.GemmKind.Universal3x,
440
+ }:
441
+ return None
442
+ # Filter ops by dtypes.
443
+ X = self.input_nodes[0]
444
+ W = self.input_nodes[1]
445
+ accumulator_torch_dtype = cutlass_utils.get_accumulator_dtype(
446
+ [X.get_dtype(), W.get_dtype()],
447
+ )
448
+ if not (
449
+ cutlass_utils.dtype_match(X.get_dtype(), op.A.element)
450
+ and cutlass_utils.dtype_match(W.get_dtype(), op.B.element)
451
+ and cutlass_utils.dtype_match(
452
+ self.output_node.get_layout().dtype, op.C.element
453
+ )
454
+ and cutlass_utils.dtype_match(
455
+ accumulator_torch_dtype, op.accumulator_type()
456
+ )
457
+ ):
458
+ return None
459
+
460
+ # Filter ops by input layouts.
461
+ if not (
462
+ self.layout_match(X.get_layout(), op.A.layout)
463
+ and self.layout_match(W.get_layout(), op.B.layout)
464
+ ):
465
+ return None
466
+
467
+ # Update op.
468
+ op = copy.deepcopy(op)
469
+
470
+ # Set output layout.
471
+ op.D.layout = CUTLASSGemmTemplate.cutlass_layout(self.output_node.get_layout())
472
+
473
+ # Filter ops by alignments and set alignments.
474
+ if not (
475
+ self.set_alignment(X.get_layout(), op.A)
476
+ and self.set_alignment(W.get_layout(), op.B)
477
+ and self.set_alignment(self.output_node.get_layout(), op.D)
478
+ ):
479
+ return None
480
+
481
+ # Set epilogue.
482
+ # TODO: update epilogue functor according to epilogues.
483
+ op.element_epilogue = op.accumulator_type()
484
+
485
+ # Set bias layout and alignment.
486
+ if len(self.input_nodes) >= 3 and self.input_nodes[2] is not None:
487
+ Bias = self.input_nodes[2]
488
+ bias_layout = CUTLASSGemmTemplate.cutlass_layout(Bias.get_layout())
489
+ if op.gemm_kind != cutlass_lib.GemmKind.Universal3x:
490
+ if bias_layout != op.D.layout:
491
+ # For cutlass2, bias and output layout must match
492
+ return None
493
+ else:
494
+ op.C.layout = bias_layout
495
+ if not self.set_alignment(Bias.get_layout(), op.C):
496
+ return None
497
+ else:
498
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
499
+ op.C.element = cutlass_lib.DataType.void
500
+ else:
501
+ op.C.layout = op.D.layout
502
+ supports_evt: bool = self.supports_evt(op)
503
+ if (self.can_fuse_epilogue is not None) and (
504
+ self.can_fuse_epilogue != supports_evt
505
+ ):
506
+ return None
507
+ if inductor_cuda_config.cutlass_only_evt_capable_ops and not supports_evt:
508
+ return None
509
+ return op
510
+
511
+ def gen_ops(self) -> "List[cutlass_gemm_op.GemmOperation]": # type: ignore[name-defined]
512
+ assert cutlass_utils.try_import_cutlass()
513
+ import cutlass_library.gemm_operation as cutlass_gemm_op
514
+ import cutlass_library.library as cutlass_lib
515
+
516
+ ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm]
517
+ res: Dict[str, cutlass_gemm_op.GemmOperation] = dict()
518
+ num_3x_ops = 0
519
+ num_2x_ops = 0
520
+ for op_dict in ops.values():
521
+ for op_list in op_dict.values():
522
+ for op in op_list:
523
+ assert isinstance(op, cutlass_gemm_op.GemmOperation)
524
+ filter_res = self.filter_op(op)
525
+ if (
526
+ filter_res is not None
527
+ and res.get(filter_res.configuration_name(), None) is None
528
+ ):
529
+ res[filter_res.configuration_name()] = filter_res
530
+ for op in res.values():
531
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
532
+ num_3x_ops += 1
533
+ else:
534
+ num_2x_ops += 1
535
+ log.debug(
536
+ "Got cutlass configs: total number of ops: %d, "
537
+ "total number of 3x ops: %d, total number of 2x ops: %d",
538
+ len(res),
539
+ num_3x_ops,
540
+ num_2x_ops,
541
+ )
542
+ return list(res.values())[: inductor_cuda_config.cutlass_max_profiling_configs]
543
+
544
+ def gemm_mode(self) -> str:
545
+ sizes = self.output_node.get_size()
546
+ if len(sizes) > 2:
547
+ return "cutlass::gemm::GemmUniversalMode::kBatched"
548
+ else:
549
+ return "cutlass::gemm::GemmUniversalMode::kGemm"
550
+
551
+ def render_gemm_arguments(
552
+ self,
553
+ argument_template: str,
554
+ epilogue_template: str,
555
+ should_swap_xw: bool,
556
+ X: IRNode,
557
+ W: IRNode,
558
+ Bias: IRNode,
559
+ Y: IRNode,
560
+ alpha: float,
561
+ beta: float,
562
+ kernel: CUDATemplateKernel,
563
+ epilogue_args,
564
+ ) -> str:
565
+ options = dict(
566
+ alpha=self.alpha,
567
+ beta=self.beta,
568
+ X=X,
569
+ W=W,
570
+ Y=Y,
571
+ Bias=Bias,
572
+ template=self,
573
+ kernel=kernel,
574
+ M="M",
575
+ N="N",
576
+ epilogue_args=epilogue_args,
577
+ )
578
+
579
+ if epilogue_template is not None:
580
+ if should_swap_xw:
581
+ # Swap
582
+ def clone_with_transposed_stride(node: IRNode) -> IRNode:
583
+ old_layout = node.get_layout()
584
+ new_stride = list(old_layout.stride)
585
+ new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2]
586
+ new_layout = FixedLayout(
587
+ old_layout.device,
588
+ old_layout.dtype,
589
+ list(old_layout.size),
590
+ new_stride,
591
+ old_layout.offset,
592
+ )
593
+ return Buffer(node.get_name(), new_layout)
594
+
595
+ new_X = clone_with_transposed_stride(X)
596
+ new_W = clone_with_transposed_stride(W)
597
+ new_Bias = clone_with_transposed_stride(Bias)
598
+ new_Y = clone_with_transposed_stride(Y)
599
+ options["X"], options["W"], options["Bias"], options["Y"] = (
600
+ new_W,
601
+ new_X,
602
+ new_Bias,
603
+ new_Y,
604
+ )
605
+ options["M"], options["N"] = "N", "M"
606
+
607
+ epilogue_arguments = self._template_from_string(epilogue_template).render(
608
+ **options
609
+ )
610
+ arguments = self._template_from_string(argument_template).render(
611
+ epilogue_arguments=epilogue_arguments, **options
612
+ )
613
+ else:
614
+ arguments = self._template_from_string(GEMM_ARGS_CUTLASS_2X).render(
615
+ split_k=1, **options
616
+ )
617
+ return arguments
618
+
619
+ def render( # type: ignore[override]
620
+ self,
621
+ kernel: CUDATemplateKernel,
622
+ op: "cutlass_gemm_op.GemmOperation" = None, # type: ignore[name-defined]
623
+ template_buffer_node: Optional[CUDATemplateBuffer] = None,
624
+ epilogue_nodes: Optional[List[IRNode]] = None,
625
+ **kwargs,
626
+ ) -> str:
627
+ if epilogue_nodes is not None and len(epilogue_nodes) > 0:
628
+ assert self.can_fuse_epilogue and CUTLASSGemmTemplate.supports_evt(
629
+ op
630
+ ), "op does not support EVT epilogue fusion"
631
+ assert (
632
+ template_buffer_node is not None
633
+ ), "Template node is required for epilogue fusion"
634
+ assert isinstance(
635
+ template_buffer_node, CUDATemplateBuffer
636
+ ), f"Template node has to be a CUDATemplateBuffer, is type {type(template_buffer_node)}"
637
+ assert (
638
+ template_buffer_node.name is not None
639
+ ), "Output node has to be a Buffer with a name"
640
+ # This is the name of the output of the Matmul, before epilogues are applied.
641
+ # it is not necessarily materialized in global memory if we have an epilogue
642
+
643
+ template_output_node_name = (
644
+ template_buffer_node.name if template_buffer_node is not None else None
645
+ )
646
+
647
+ assert cutlass_utils.try_import_cutlass()
648
+ import cutlass_library.gemm_operation as cutlass_gemm_op
649
+ import cutlass_library.library as cutlass_lib
650
+
651
+ assert isinstance(
652
+ op, cutlass_gemm_op.GemmOperation
653
+ ), "op argument is required and has to be an instance of GemmOperation"
654
+ if template_buffer_node is not None:
655
+ self.output_node = template_buffer_node
656
+ if epilogue_nodes is not None and len(epilogue_nodes) > 0:
657
+ self.output_node = cast(Buffer, epilogue_nodes[-1])
658
+
659
+ assert len(self.input_nodes) >= 2 and self.output_node is not None
660
+ X, W = self.input_nodes[0], self.input_nodes[1]
661
+ Y = self.output_node
662
+ Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2]
663
+
664
+ epilogue_template: Optional[str] = None
665
+ should_swap_xw: bool = False
666
+ epilogue_args = f"{{ElementComputeEpilogue({self.alpha}), ElementComputeEpilogue({self.beta})}}"
667
+ if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
668
+ if Bias is not None and self.has_tma_epilogue(op):
669
+ if self.should_swap_XW(Bias, self.beta):
670
+ # TMA epilogue requires bias vector in column major to get best perf.
671
+ op = self.swap_XW(op)
672
+ should_swap_xw = True
673
+ if epilogue_nodes is not None and len(epilogue_nodes) > 0:
674
+ epilogue_args = (
675
+ CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
676
+ cast(str, template_output_node_name), epilogue_nodes
677
+ )
678
+ )
679
+ epilogue_template = GEMM_ARGS_CUTLASS_3X_EPILOGUE
680
+ argument_template = GEMM_ARGS_CUTLASS_3X
681
+ else:
682
+ # TODO: Support split_k.
683
+ argument_template = GEMM_ARGS_CUTLASS_2X
684
+
685
+ instance_definition, instance_type = self.define_gemm_instance(
686
+ op, cast(str, template_output_node_name), epilogue_nodes
687
+ )
688
+ options = dict(
689
+ alpha=self.alpha,
690
+ beta=self.beta,
691
+ X=X,
692
+ W=W,
693
+ Y=Y,
694
+ Bias=Bias,
695
+ epilogue_template=epilogue_template,
696
+ argument_template=argument_template,
697
+ should_swap_xw=should_swap_xw,
698
+ template=self,
699
+ kernel=kernel,
700
+ instance_definition=instance_definition,
701
+ instance_type=instance_type,
702
+ input_reorder=self.input_reorder,
703
+ epilogue_args=epilogue_args,
704
+ )
705
+ res = self._template_from_string(GEMM_TEMPLATE).render(**options)
706
+ return res
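define_gemm_instance recovers the generated C++ type name by regex-matching the definition emitted by cutlass_library. A small standalone sketch of the CUTLASS 2.x extraction path, using a hand-written stand-in for the emitted code since the real emitter needs the cutlass_library package installed:

import re

# Stand-in for what a CUTLASS 2.x emitter might produce; the exact text is
# illustrative, only the "using <name> = ..." shape on the third line matters.
op_def = """
// Gemm operator cutlass_tensorop_f16_s16816gemm_f16_256x128_32x3_tt_align8
  using Operation_cutlass_tensorop_f16_s16816gemm = cutlass::gemm::device::GemmUniversal<
    cutlass::half_t, cutlass::layout::RowMajor,
    cutlass::half_t, cutlass::layout::RowMajor,
    cutlass::half_t, cutlass::layout::RowMajor,
    float>;
"""

# Same pattern as the 2.x branch above: the declaration sits on line index 2
# of the emitted definition and has the form "using <type> = ...".
pattern = re.compile(r"\s*using\s(.*?)\s=")
decl = op_def.split("\n")[2]
match = pattern.match(decl)
assert match is not None, "Invalid Gemm config:\n" + op_def
op_type = match.groups()[0]
print(op_type)  # Operation_cutlass_tensorop_f16_s16816gemm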
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda_combined_scheduling.py ADDED
@@ -0,0 +1,75 @@
1
+ from typing import List
2
+
3
+ from ..scheduler import BaseSchedulerNode, BaseScheduling, Scheduler, SchedulerNode
4
+ from .cuda.cuda_cpp_scheduling import CUDACPPScheduling
5
+
6
+ from .triton import TritonScheduling
7
+
8
+
9
+ class CUDACombinedScheduling(BaseScheduling):
10
+ """
11
+ Scheduler for CUDA Kernels, which delegates calls as appropriate
12
+ to the CUDA-C++ and Triton Schedulers, which both work for CUDA devices
13
+ and use a unified-wrapper for codegen.
14
+
15
+ If Scheduling code needs to be specialized for the case of mixed Triton / CUDA C++ code,
16
+ this would also be the place to do it.
17
+ """
18
+
19
+ def __init__(self, scheduler: Scheduler):
20
+ super().__init__()
21
+ self._scheduler = scheduler
22
+ self._triton_scheduling = TritonScheduling(scheduler)
23
+ self._cuda_cpp_scheduling = CUDACPPScheduling(scheduler)
24
+
25
+ def choose_node_backend(self, node: BaseSchedulerNode) -> BaseScheduling:
26
+ if self._cuda_cpp_scheduling.is_cuda_cpp_template(
27
+ node
28
+ ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node):
29
+ return self._cuda_cpp_scheduling
30
+ return self._triton_scheduling
31
+
32
+ def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
33
+ if self._cuda_cpp_scheduling.can_fuse_vertical(node1, node2):
34
+ return True
35
+ return self._triton_scheduling.can_fuse_vertical(node1, node2)
36
+
37
+ def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
38
+ for node in (node1, node2):
39
+ if self._cuda_cpp_scheduling.is_cuda_cpp_template(
40
+ node
41
+ ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node):
42
+ return self._cuda_cpp_scheduling.can_fuse_horizontal(
43
+ node1, node2
44
+ ) # always False at the moment
45
+ return self._triton_scheduling.can_fuse_horizontal(node1, node2)
46
+
47
+ def group_fn(self, sizes):
48
+ return self._triton_scheduling.group_fn(sizes)
49
+
50
+ def codegen_template(
51
+ self, template_node: SchedulerNode, epilogue_nodes: List[SchedulerNode]
52
+ ):
53
+ if self._cuda_cpp_scheduling.is_cuda_cpp_template(template_node):
54
+ return self._cuda_cpp_scheduling.codegen_template(
55
+ template_node, epilogue_nodes
56
+ )
57
+ else:
58
+ return self._triton_scheduling.codegen_template(
59
+ template_node, epilogue_nodes
60
+ )
61
+
62
+ def codegen_nodes(self, nodes: List[BaseSchedulerNode]):
63
+ return self._triton_scheduling.codegen_nodes(nodes)
64
+
65
+ def codegen_sync(self):
66
+ return self._triton_scheduling.codegen_sync()
67
+
68
+ def flush(self):
69
+ return self._triton_scheduling.flush()
70
+
71
+ def codegen_foreach(self, *args, **kwargs):
72
+ return self._triton_scheduling.codegen_foreach(*args, **kwargs)
73
+
74
+ def benchmark_fused_nodes(self, nodes):
75
+ return self._triton_scheduling.benchmark_fused_nodes(nodes)
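CUDACombinedScheduling is a thin dispatcher: CUDA C++ template nodes are routed to CUDACPPScheduling and everything else to TritonScheduling. A toy sketch of the same routing with stub backends; the stub classes below are illustrative and not Inductor types:

class _StubTritonBackend:
    def codegen_template(self, node, epilogue_nodes):
        return f"triton<{node}>"

class _StubCudaCppBackend:
    def is_cuda_cpp_template(self, node) -> bool:
        return str(node).startswith("cutlass_")

    def codegen_template(self, node, epilogue_nodes):
        return f"cuda_cpp<{node}>"

class _StubCombined:
    # Same shape as CUDACombinedScheduling.codegen_template: check the CUDA C++
    # backend first, otherwise fall back to Triton.
    def __init__(self):
        self._triton = _StubTritonBackend()
        self._cuda_cpp = _StubCudaCppBackend()

    def codegen_template(self, node, epilogue_nodes):
        if self._cuda_cpp.is_cuda_cpp_template(node):
            return self._cuda_cpp.codegen_template(node, epilogue_nodes)
        return self._triton.codegen_template(node, epilogue_nodes)

combined = _StubCombined()
print(combined.codegen_template("cutlass_gemm_0", []))  # cuda_cpp<cutlass_gemm_0>
print(combined.codegen_template("pointwise_add", []))   # triton<pointwise_add>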
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/memory_planning.py ADDED
@@ -0,0 +1,799 @@
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import dataclasses
5
+ import itertools
6
+ import pprint
7
+ from typing import Any, Dict, Iterable, List, Optional, Protocol
8
+
9
+ import sympy
10
+
11
+ import torch
12
+ from .. import config, ir
13
+ from ..utils import cache_on_self, CachedMethod, IndentedBuffer
14
+ from ..virtualized import V
15
+
16
+ from .wrapper import (
17
+ AllocateLine,
18
+ FreeIfNotReusedLine,
19
+ MemoryPlanningLine,
20
+ NullLine,
21
+ ReuseLine,
22
+ )
23
+
24
+
25
+ ALIGN_BYTES = 64
26
+ assert (ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0 and ALIGN_BYTES >= 8, "must be power of 2"
27
+
28
+
29
+ def _align(nbytes):
30
+ """Round up to the nearest multiple of ALIGN_BYTES"""
31
+ return (nbytes + ALIGN_BYTES - 1) & -ALIGN_BYTES
32
+
33
+
34
+ def _is_aligned(v: sympy.Expr):
35
+ """v can be statically proven to be a multiple of ALIGN_BYTES"""
36
+ if isinstance(v, (sympy.Add, sympy.Max)):
37
+ return all(map(_is_aligned, v.args))
38
+ return isinstance(v, align) or sympy.gcd(v, ALIGN_BYTES) == ALIGN_BYTES
39
+
40
+
41
+ class align(sympy.Function):
42
+ """Symbolically round up to the nearest multiple of ALIGN_BYTES"""
43
+
44
+ nargs = (1,)
45
+ is_integer = True
46
+
47
+ @classmethod
48
+ def eval(cls, value):
49
+ if isinstance(value, (int, sympy.Integer)):
50
+ return _align(int(value))
51
+ if _is_aligned(value):
52
+ return value
53
+
54
+
55
+ @dataclasses.dataclass
56
+ class LiveRange:
57
+ """
58
+ A range where a given tensor is live. Begin and end are both counters
59
+ representing points in the program of grouped memory operations.
60
+ Begin is inclusive, end is exclusive.
61
+
62
+ Invariant: begin <= end
63
+ """
64
+
65
+ begin: float # int | ±inf
66
+ end: float # int | ±inf
67
+
68
+ def contains(self, other: LiveRange):
69
+ """Is other entirely within self"""
70
+ return self.begin <= other.begin and other.end <= self.end
71
+
72
+ def join(self, other: LiveRange):
73
+ """Combine two ranges using a union operation"""
74
+ return LiveRange(min(self.begin, other.begin), max(self.end, other.end))
75
+
76
+ def __len__(self):
77
+ return self.end - self.begin
78
+
79
+
80
+ class LiveRanges:
81
+ """
82
+ A collection of LiveRange regions, allowing for non-contiguous
83
+ live regions.
84
+
85
+ Invariant: LiveRanges.ranges is in sorted order and non-overlapping
86
+ """
87
+
88
+ def __init__(self, ranges: Iterable[LiveRange]):
89
+ ranges = [*sorted(ranges, key=lambda x: x.begin)]
90
+ self.ranges = ranges[:1]
91
+ for r in ranges[1:]:
92
+ assert self.ranges[-1].begin <= r.begin
93
+ if self.ranges[-1].end >= r.begin:
94
+ self.ranges[-1] = LiveRange.join(self.ranges[-1], r)
95
+ else:
96
+ self.ranges.append(r)
97
+
98
+ def overlaps(self, other: LiveRanges):
99
+ """Check if any pair of ranges in self and other overlap"""
100
+ left = collections.deque(self.ranges)
101
+ right = collections.deque(other.ranges)
102
+ while left and right:
103
+ if left[0].begin > right[0].begin:
104
+ left, right = right, left
105
+ assert left[0].begin <= right[0].begin
106
+ if left[0].end > right[0].begin:
107
+ return True
108
+ left.popleft()
109
+ return False
110
+
111
+ @property
112
+ def begin(self):
113
+ return self.ranges[0].begin
114
+
115
+ @property
116
+ def end(self):
117
+ return self.ranges[-1].end
118
+
119
+ def __repr__(self):
120
+ return f"{self.__class__.__name__}([{', '.join(map(repr, self.ranges))}])"
121
+
122
+
123
+ class AllocationTreeNode:
124
+ """
125
+ Abstract base class for nodes in allocation pool.
126
+ """
127
+
128
+ def allocate(self, block: Allocation, is_last: bool) -> bool:
129
+ """
130
+ Try to assign block to a memory location in this bool. Return True if
131
+ an assignment was made.
132
+ """
133
+ return False
134
+
135
+ def get_live_ranges(self) -> LiveRanges:
136
+ """Aggregate LiveRanges for all objects below this in tree"""
137
+ raise NotImplementedError()
138
+
139
+ def get_size_hint(self) -> int:
140
+ """Number of bytes used for example inputs"""
141
+ raise NotImplementedError()
142
+
143
+ def get_symbolic_size(self) -> sympy.Expr:
144
+ """Number of bytes needed at runtime"""
145
+ raise NotImplementedError()
146
+
147
+ def finalize(self, pool, offset) -> AllocationTreeNode:
148
+ """Called after all allocations have been made"""
149
+ return self
150
+
151
+ def is_empty(self):
152
+ return False
153
+
154
+
155
+ @dataclasses.dataclass
156
+ class Allocation(AllocationTreeNode):
157
+ """
158
+ Represents memory allocated to a given node in the allocation pool.
159
+ """
160
+
161
+ node: ir.Buffer
162
+ live_range: LiveRange
163
+ size_hint: int
164
+ symbolic_size: sympy.Expr
165
+ allocated: bool = False
166
+ pool: Optional[AllocationPool] = None
167
+ offset: Optional[sympy.Expr] = None
168
+
169
+ @property
170
+ def device(self):
171
+ return self.node.get_device()
172
+
173
+ def get_live_ranges(self):
174
+ return LiveRanges([self.live_range])
175
+
176
+ def get_size_hint(self):
177
+ return self.size_hint
178
+
179
+ def get_symbolic_size(self):
180
+ return self.symbolic_size
181
+
182
+ def mark_allocated(self):
183
+ assert not self.allocated
184
+ self.allocated = True
185
+
186
+ def finalize(self, pool, offset):
187
+ assert self.pool is None and self.offset is None
188
+ self.pool = pool
189
+ self.offset = offset
190
+ return self
191
+
192
+ def codegen_alloc_from_pool(self, wrapper):
193
+ assert self.pool
194
+ node = self.node
195
+ shape = tuple(node.get_size())
196
+ stride = tuple(node.get_stride())
197
+ return wrapper.codegen_alloc_from_pool(
198
+ self.pool.name, self.offset, node.get_dtype(), shape, stride
199
+ )
200
+
201
+ def __repr__(self):
202
+ return (
203
+ f"{self.__class__.__name__}("
204
+ f"node={self.node.get_name()}, "
205
+ f"live_range={self.live_range}, "
206
+ f"size_hint={self.size_hint}, "
207
+ f"symbolic_size={self.symbolic_size}, "
208
+ f"pool={self.pool.name if self.pool else None}, "
209
+ f"offset={self.offset})"
210
+ )
211
+
212
+
213
+ @dataclasses.dataclass
214
+ class Empty(AllocationTreeNode):
215
+ """
216
+ Placeholder to represent empty space in the allocation pool.
217
+ Only exists to get the size_hint correct in parent nodes.
218
+ """
219
+
220
+ size_hint: int
221
+
222
+ def get_live_ranges(self):
223
+ return LiveRanges([])
224
+
225
+ def get_size_hint(self):
226
+ return self.size_hint
227
+
228
+ def get_symbolic_size(self):
229
+ return 0
230
+
231
+ def is_empty(self):
232
+ return True
233
+
234
+
235
+ class MemorySplitProtocol(Protocol):
236
+ get_live_ranges: CachedMethod[[], LiveRanges]
237
+ get_size_hint: CachedMethod[[], int]
238
+ get_symbolic_size: CachedMethod[[], sympy.Expr]
239
+
240
+ def _allocate(self, block: Allocation, is_last: bool) -> bool:
241
+ ...
242
+
243
+
244
+ class ClearCacheOnAllocateMixin(MemorySplitProtocol):
245
+ """
246
+ Helper to assist in caching get_live_ranges, get_size_hint, and
247
+ get_symbolic_size.
248
+ """
249
+
250
+ def allocate(self, block: Allocation, is_last: bool):
251
+ is_allocated = self._allocate(block, is_last)
252
+ if is_allocated:
253
+ self.clear_cache()
254
+ return is_allocated
255
+
256
+ def clear_cache(self):
257
+ self.get_live_ranges.clear_cache(self)
258
+ self.get_size_hint.clear_cache(self)
259
+ self.get_symbolic_size.clear_cache(self)
260
+
261
+
262
+ @dataclasses.dataclass
263
+ class TemporalSplit(ClearCacheOnAllocateMixin, AllocationTreeNode):
264
+ """
265
+ Contains a list of allocations not overlapping in LiveRanges.
266
+
267
+ Invariant: no pair (a,b) in self.allocations will have:
268
+ a.get_live_ranges().overlaps(b.get_live_ranges())
269
+ """
270
+
271
+ allocations: List[AllocationTreeNode]
272
+
273
+ def _allocate(self, block: Allocation, is_last: bool):
274
+ slot_size = self.get_size_hint()
275
+ block_size = block.get_size_hint()
276
+ if not is_last and block_size > slot_size:
277
+ return False # doesn't fit
278
+
279
+ block_live = block.get_live_ranges()
280
+ overlapping = [
281
+ s for s in self.allocations if s.get_live_ranges().overlaps(block_live)
282
+ ]
283
+ if len(overlapping) > 1:
284
+ # TODO(jansel): we could try harder here by merging overlapping in space
285
+ return False
286
+ elif len(overlapping) == 1:
287
+ return overlapping[0].allocate(block, is_last)
288
+ else:
289
+ block.mark_allocated()
290
+
291
+ if len(self.allocations) == 1 and isinstance(self.allocations[-1], Empty):
292
+ self.allocations.pop()
293
+
294
+ if slot_size == block_size:
295
+ # perfect fit
296
+ self.allocations.append(block)
297
+ elif slot_size > block_size:
298
+ self.allocations.append(
299
+ SpatialSplit.create(block, slot_size - block_size)
300
+ )
301
+ else: # grow this allocation
302
+ assert is_last
303
+ self.allocations = [
304
+ *(
305
+ SpatialSplit.create(a, block_size - slot_size)
306
+ for a in self.allocations
307
+ ),
308
+ block,
309
+ ]
310
+ return True
311
+
312
+ @cache_on_self
313
+ def get_live_ranges(self) -> LiveRanges:
314
+ return LiveRanges(
315
+ itertools.chain.from_iterable(
316
+ x.get_live_ranges().ranges for x in self.allocations
317
+ )
318
+ )
319
+
320
+ @cache_on_self
321
+ def get_size_hint(self) -> int:
322
+ if not self.allocations:
323
+ return 0
324
+ return max(x.get_size_hint() for x in self.allocations)
325
+
326
+ @cache_on_self
327
+ def get_symbolic_size(self) -> sympy.Expr:
328
+ if not self.allocations:
329
+ return 0
330
+ return sympy.Max(*[x.get_symbolic_size() for x in self.allocations])
331
+
332
+ def is_empty(self):
333
+ return len(self.allocations) == 1 and self.allocations[0].is_empty()
334
+
335
+ def finalize(self, pool, offset):
336
+ self.allocations = [block.finalize(pool, offset) for block in self.allocations]
337
+ self.clear_cache()
338
+ if len(self.allocations) == 1:
339
+ return self.allocations[0]
340
+ return self
341
+
342
+
343
+ @dataclasses.dataclass
344
+ class SpatialSplit(ClearCacheOnAllocateMixin, AllocationTreeNode):
345
+ """
346
+ Contains two allocations, left and right, that do not overlap in space.
347
+ Right will be allocated immediately after left in memory.
348
+ """
349
+
350
+ left: TemporalSplit
351
+ right: TemporalSplit
352
+
353
+ @staticmethod
354
+ def create(left, extra_space):
355
+ assert isinstance(left, AllocationTreeNode)
356
+ assert isinstance(extra_space, int) and extra_space >= 1
357
+ return SpatialSplit(TemporalSplit([left]), TemporalSplit([Empty(extra_space)]))
358
+
359
+ def _allocate(self, block: Allocation, is_last: bool):
360
+ return self.left.allocate(block, False) or self.right.allocate(block, is_last)
361
+
362
+ @cache_on_self
363
+ def get_live_ranges(self):
364
+ return LiveRanges(
365
+ itertools.chain(
366
+ self.left.get_live_ranges().ranges, self.right.get_live_ranges().ranges
367
+ )
368
+ )
369
+
370
+ @cache_on_self
371
+ def get_size_hint(self) -> int:
372
+ return _align(self.left.get_size_hint()) + self.right.get_size_hint()
373
+
374
+ @cache_on_self
375
+ def get_symbolic_size(self) -> sympy.Expr:
376
+ return align(self.left.get_symbolic_size()) + self.right.get_symbolic_size()
377
+
378
+ def finalize(self, pool, offset):
379
+ self.left = self.left.finalize(pool, offset)
380
+ self.right = self.right.finalize(
381
+ pool, offset + align(self.left.get_symbolic_size())
382
+ )
383
+ self.clear_cache()
384
+ if self.right.is_empty():
385
+ return self.left
386
+ return self
387
+
388
+
389
+ @dataclasses.dataclass
390
+ class AllocationPool:
391
+ """
392
+ Represents a pool of allocations that will be generated by a single
393
+ call to torch.empty.
394
+ """
395
+
396
+ device: torch.device
397
+ root: TemporalSplit
398
+ can_expand: bool = True
399
+ restrict_live_range: Optional[LiveRange] = None
400
+ name: Optional[str] = None
401
+ names_to_del: List[str] = dataclasses.field(default_factory=list)
402
+ creation_cache: Dict[str, str] = dataclasses.field(default_factory=dict)
403
+
404
+ def allocate(self, block: Allocation, is_last: bool):
405
+ if self.restrict_live_range and not self.restrict_live_range.contains(
406
+ block.live_range
407
+ ):
408
+ return False
409
+
410
+ is_last = self.can_expand and is_last
411
+ if self.root.allocate(block, is_last):
412
+ return True
413
+
414
+ if is_last:
415
+ return self.allocate_at_end(block)
416
+
417
+ return False
418
+
419
+ def allocate_at_end(self, block):
420
+ block.mark_allocated()
421
+ self.root = TemporalSplit([SpatialSplit(self.root, TemporalSplit([block]))])
422
+ return True
423
+
424
+ def finalize(self, name):
425
+ assert not self.name
426
+ self.name = name
427
+ self.names_to_del.append(name)
428
+ self.root.finalize(self, 0)
429
+
430
+ def codegen_create(self, wrapper, code: IndentedBuffer):
431
+ assert self.name
432
+ nbytes = self.root.get_symbolic_size()
433
+ for block in self.root.allocations:
434
+ if isinstance(block, Allocation) and nbytes == block.get_symbolic_size():
435
+ # optimization: fuse first allocation and pool creation
436
+ node = block.node
437
+ code.writeline(
438
+ wrapper.make_allocation(
439
+ self.name,
440
+ device=self.device,
441
+ dtype=node.get_dtype(),
442
+ shape=tuple(node.get_size()),
443
+ stride=tuple(node.get_stride()),
444
+ )
445
+ )
446
+ self.creation_cache[block.codegen_alloc_from_pool(wrapper)] = self.name
447
+ return
448
+ else:
449
+ code.writeline(
450
+ wrapper.make_allocation(
451
+ self.name,
452
+ device=self.device,
453
+ dtype=torch.uint8,
454
+ shape=(nbytes,),
455
+ stride=(1,),
456
+ )
457
+ )
458
+
459
+ def codegen_destroy(self, wrapper, code: IndentedBuffer):
460
+ code.writeline(wrapper.make_free_by_names(self.names_to_del))
461
+
462
+ def __eq__(self, other):
463
+ return self is other
464
+
465
+ def __hash__(self):
466
+ return id(self)
467
+
468
+
469
+ @dataclasses.dataclass
470
+ class AllocationPools:
471
+ """
472
+ Collection of many AllocationPool objects grouped by device.
473
+ """
474
+
475
+ device_to_pools: Dict[torch.device, List[AllocationPool]] = dataclasses.field(
476
+ default_factory=dict
477
+ )
478
+
479
+ def get_pools(self, block):
480
+ if block.device not in self.device_to_pools:
481
+ self.device_to_pools[block.device] = []
482
+ return self.device_to_pools[block.device]
483
+
484
+ def allocate(self, block: Allocation):
485
+ pools = self.get_pools(block)
486
+
487
+ for pool in pools:
488
+ if pool.allocate(block, is_last=pool is pools[-1]):
489
+ return
490
+
491
+ # everything is full, make a new pool
492
+ pools.append(
493
+ AllocationPool(
494
+ block.device,
495
+ TemporalSplit([block]),
496
+ can_expand=config.memory_pool != "none",
497
+ )
498
+ )
499
+ block.mark_allocated()
500
+
501
+ def allocate_output(self, block: Allocation):
502
+ """Outputs get different pools so memory gets freed properly"""
503
+ pools = self.get_pools(block)
504
+ if pools and config.memory_pool in ("outputs", "combined"):
505
+ pools[-1].allocate_at_end(block)
506
+ else:
507
+ # create a new pool
508
+ block.mark_allocated()
509
+ pools.append(
510
+ AllocationPool(
511
+ block.device,
512
+ TemporalSplit([block]),
513
+ can_expand=config.memory_pool == "combined",
514
+ )
515
+ )
516
+
517
+ def finalize(self):
518
+ """Called at the end of allocation process"""
519
+ for i, pool in enumerate(
520
+ itertools.chain.from_iterable(self.device_to_pools.values())
521
+ ):
522
+ pool.finalize(f"pool{i}")
523
+
524
+ def pprint(self):
525
+ for pool in itertools.chain.from_iterable(self.device_to_pools.values()):
526
+ print()
527
+ print(pool.name)
528
+ print(pool.root.get_live_ranges())
529
+ pprint.pprint(pool.root)
530
+
531
+
532
+ class BufferGroup:
533
+ """
534
+ Due to inplace reuse an allocated buffer can have many names.
535
+ This tracks these collections of buffers sharing underlying memory.
536
+ """
537
+
538
+ def __init__(self, node: ir.Buffer):
539
+ self.node = node
540
+ self.names = [node.get_name()]
541
+ self.is_output = False
542
+ self.allocation: Optional[Allocation] = None
543
+ self.live_range = LiveRange(float("inf"), -float("inf"))
544
+
545
+ def update_usage(self, timestep: int):
546
+ """Expand self.live_range to include timestep"""
547
+ self.live_range = LiveRange(
548
+ min(timestep, self.live_range.begin),
549
+ max(timestep, self.live_range.end),
550
+ )
551
+
552
+ def sym_nbytes(self):
553
+ return self.node.get_layout().storage_size() * self.node.get_dtype().itemsize
554
+
555
+ def make_allocation(self):
556
+ assert not self.allocation, "multiple allocations"
557
+ assert isinstance(self.live_range.begin, int), "live ranges not computed"
558
+ nbytes = self.sym_nbytes()
559
+ # For now, fallback value will be used if we encounter an unbacked SymInt. The longer-term plan is to have
560
+ # size_hint() use better heuristics for unbackeds, at which point the fallback value will be ignored.
561
+ size_hint = V.graph.sizevars.size_hint(nbytes, fallback=64)
562
+ self.allocation = Allocation(
563
+ self.node,
564
+ self.live_range,
565
+ size_hint=size_hint,
566
+ symbolic_size=nbytes,
567
+ )
568
+
569
+ def __repr__(self):
570
+ return (
571
+ f"{self.__class__.__name__}({self.names!r}, is_output={self.is_output}, "
572
+ f"live_range={self.live_range}"
573
+ )
574
+
575
+
576
+ @dataclasses.dataclass
577
+ class PoolMemoryPlanningLine(MemoryPlanningLine):
578
+ """Abstract base class for {Alloc,Dealloc}FromPoolLine"""
579
+
580
+ group: BufferGroup
581
+ timestep: Optional[int] = None
582
+
583
+ @property
584
+ def node(self):
585
+ return self.group.node
586
+
587
+
588
+ @dataclasses.dataclass
589
+ class AllocFromPoolLine(PoolMemoryPlanningLine):
590
+ """Similar to AllocationLine, but takes memory from a pool"""
591
+
592
+ is_first_pool_usage: bool = False
593
+
594
+ def codegen(self, code: IndentedBuffer):
595
+ allocation = self.group.allocation
596
+ assert allocation and allocation.pool
597
+ pool = allocation.pool
598
+ name = self.node.get_name()
599
+
600
+ if self.is_first_pool_usage:
601
+ pool.codegen_create(self.wrapper, code)
602
+
603
+ pool.names_to_del.extend(self.group.names)
604
+ alloc_from_pool = allocation.codegen_alloc_from_pool(self.wrapper)
605
+ if alloc_from_pool in pool.creation_cache:
606
+ code.writeline(
607
+ self.wrapper.make_tensor_alias(
608
+ name, pool.creation_cache[alloc_from_pool], "alloc"
609
+ )
610
+ )
611
+ else:
612
+ pool.creation_cache[alloc_from_pool] = name
613
+ code.writeline(
614
+ f"{self.wrapper.declare}{name} = {alloc_from_pool}{self.wrapper.ending}"
615
+ )
616
+
617
+
618
+ @dataclasses.dataclass
619
+ class DeallocFromPoolLine(PoolMemoryPlanningLine):
620
+ """Similar to FreeIfNotReusedLine, but takes memory from a pool"""
621
+
622
+ is_last_pool_usage: bool = False
623
+
624
+ def codegen(self, code: IndentedBuffer):
625
+ if self.is_last_pool_usage:
626
+ assert self.group.allocation and self.group.allocation.pool
627
+ self.group.allocation.pool.codegen_destroy(self.wrapper, code)
628
+
629
+
630
+ @dataclasses.dataclass
631
+ class MemoryPlanner:
632
+ """
633
+ Coordination object to run memory planning passes during wrapper
634
+ codegen.
635
+ """
636
+
637
+ wrapper: Any
638
+ pools: AllocationPools = dataclasses.field(default_factory=AllocationPools)
639
+ buffer_groups: Optional[List[BufferGroup]] = None
640
+
641
+ def plan(self, lines: List[Any]) -> List[Any]:
642
+ """Call all the memory planning passes in sequence"""
643
+ lines = [*lines]
644
+ self.drop_removed_buffers(lines)
645
+ self.convert_to_pool_lines(lines)
646
+ self.compute_live_ranges(lines)
647
+ self.allocate_groups()
648
+ self.mark_first_last_usage(lines)
649
+ return lines
650
+
651
+ def drop_removed_buffers(self, lines):
652
+ """
653
+ Replace any memory planning lines in V.graph.removed_buffers with NullLine
654
+ """
655
+ # drop any removed buffers
656
+ for i, line in enumerate(lines):
657
+ if isinstance(line, (AllocateLine, FreeIfNotReusedLine, ReuseLine)):
658
+ if line.node.get_name() in V.graph.removed_buffers:
659
+ lines[i] = NullLine(self.wrapper)
660
+
661
+ def compute_buffer_groups(self, lines):
662
+ """
663
+ Populates self.buffer_groups with BufferGroup objects that join
664
+ allocations with common storage (due to inplace reuse) into a
665
+ single object.
666
+ """
667
+ name_to_group = {}
668
+ for line in lines:
669
+ if isinstance(line, AllocateLine):
670
+ name = line.node.get_name()
671
+ assert name not in name_to_group
672
+ name_to_group[name] = BufferGroup(line.node)
673
+ elif isinstance(line, ReuseLine):
674
+ old_name = line.node.get_name()
675
+ new_name = line.reused_as.get_name()
676
+ assert new_name not in name_to_group
677
+ # TODO(jansel): we should support reusing buffers created via ExternKernelAlloc
678
+ if old_name in name_to_group:
679
+ name_to_group[old_name].names.append(new_name)
680
+ name_to_group[new_name] = name_to_group[old_name]
681
+
682
+ outputs = set(V.graph.get_output_names())
683
+ unique_groups = [*{id(g): g for g in name_to_group.values()}.values()]
684
+ for group in unique_groups:
685
+ group.is_output = any(x in outputs for x in group.names)
686
+
687
+ assert self.buffer_groups is None
688
+ self.buffer_groups = unique_groups
689
+ return name_to_group
690
+
691
+ def convert_to_pool_lines(self, lines):
692
+ """
693
+ Convert AllocateLine/FreeIfNotReusedLine/ReuseLine into their
694
+ pool-based counterparts.
695
+ """
696
+ name_to_group = self.compute_buffer_groups(lines)
697
+ for i, line in enumerate(lines):
698
+ if isinstance(line, AllocateLine):
699
+ if line.node.get_name() in name_to_group:
700
+ lines[i] = AllocFromPoolLine(
701
+ self.wrapper, name_to_group[line.node.get_name()]
702
+ )
703
+ elif isinstance(line, FreeIfNotReusedLine):
704
+ assert not line.is_reused
705
+ if line.node.get_name() in name_to_group:
706
+ lines[i] = DeallocFromPoolLine(
707
+ self.wrapper, name_to_group[line.node.get_name()]
708
+ )
709
+ elif isinstance(line, ReuseLine):
710
+ if line.node.get_name() in name_to_group:
711
+ line.delete_old = False
712
+
713
+ def compute_live_ranges(self, lines):
714
+ """Populate every BufferGroup.live_ranges field based on first/last usage"""
715
+ timestep = 0
716
+ worklist = collections.deque(lines)
717
+ while worklist:
718
+ if isinstance(worklist[0], MemoryPlanningLine):
719
+ timestep += 1
720
+ while worklist and isinstance(worklist[0], MemoryPlanningLine):
721
+ line = worklist.popleft()
722
+ if isinstance(line, PoolMemoryPlanningLine):
723
+ line.group.update_usage(timestep)
724
+ line.timestep = timestep
725
+ else:
726
+ worklist.popleft()
727
+
728
+ timestep += 1
729
+ assert self.buffer_groups is not None
730
+ for group in self.buffer_groups:
731
+ if group.is_output:
732
+ group.update_usage(timestep)
733
+
734
+ def allocate_groups(self):
735
+ """
736
+ Assign every allocation to a specific location in a specific AllocationPool.
737
+ """
738
+ assert config.memory_pool in ("none", "intermediates", "outputs", "combined")
739
+ assert self.buffer_groups is not None
740
+
741
+ for group in self.buffer_groups:
742
+ group.make_allocation()
743
+
744
+ outputs: List[Allocation] = []
745
+ intermediates: List[Allocation] = []
746
+ for group in self.buffer_groups:
747
+ assert group.allocation
748
+ if group.is_output and config.memory_pool != "combined":
749
+ outputs.append(group.allocation)
750
+ else:
751
+ intermediates.append(group.allocation)
752
+
753
+ for block in sorted(
754
+ outputs,
755
+ key=lambda x: (
756
+ x.size_hint,
757
+ -len(x.live_range),
758
+ ),
759
+ ):
760
+ self.pools.allocate_output(block)
761
+
762
+ for block in sorted(
763
+ intermediates,
764
+ key=lambda x: (
765
+ -x.size_hint,
766
+ -len(x.live_range),
767
+ ),
768
+ ):
769
+ self.pools.allocate(block)
770
+
771
+ self.pools.finalize()
772
+
773
+ def mark_first_last_usage(self, lines):
774
+ """
775
+ Populate the AllocFromPoolLine.is_first_pool_usage and
776
+ DeallocFromPoolLine.is_last_pool_usage fields so that pools
777
+ are created/destroyed.
778
+ """
779
+ seen = set()
780
+ for line in lines:
781
+ if isinstance(line, AllocFromPoolLine):
782
+ assert line.group.allocation
783
+ pool = line.group.allocation.pool
784
+ assert pool is not None
785
+ if pool not in seen:
786
+ line.is_first_pool_usage = True
787
+ seen.add(pool)
788
+
789
+ seen = set()
790
+ for line in reversed(lines):
791
+ if isinstance(line, DeallocFromPoolLine):
792
+ assert line.group.allocation
793
+ pool = line.group.allocation.pool
794
+ assert pool is not None
795
+ if pool not in seen:
796
+ line.is_last_pool_usage = (
797
+ pool.root.get_live_ranges().end <= line.timestep
798
+ )
799
+ seen.add(pool)
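
The MemoryPlanner above packs buffer allocations into shared pools based on non-overlapping live ranges. Below is a minimal standalone sketch of that greedy packing idea, for illustration only; the `Alloc` class and `plan_pool` helper are hypothetical and are not part of torch._inductor.

from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Alloc:
    name: str
    size: int
    live: Tuple[int, int]   # (first_use, last_use) timesteps
    offset: int = -1        # byte offset inside the pool, filled in by plan_pool

def overlaps(a: Tuple[int, int], b: Tuple[int, int]) -> bool:
    return not (a[1] < b[0] or b[1] < a[0])

def plan_pool(allocs: List[Alloc]) -> int:
    """Greedy planner: place largest blocks first, reuse space freed by dead buffers."""
    placed: List[Alloc] = []
    for a in sorted(allocs, key=lambda x: -x.size):
        offset = 0
        # bump past every already-placed block whose live range overlaps ours
        for p in sorted((p for p in placed if overlaps(a.live, p.live)),
                        key=lambda p: p.offset):
            if offset + a.size <= p.offset:
                break
            offset = max(offset, p.offset + p.size)
        a.offset = offset
        placed.append(a)
    return max((p.offset + p.size for p in placed), default=0)

if __name__ == "__main__":
    pool_bytes = plan_pool([
        Alloc("buf0", 1024, (0, 2)),
        Alloc("buf1", 512, (1, 3)),
        Alloc("buf2", 1024, (3, 5)),  # live range does not overlap buf0, so it reuses buf0's slot
    ])
    print(pool_bytes)  # 1536 rather than 2560, because buf2 shares buf0's space
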
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py ADDED
@@ -0,0 +1,249 @@
1
+ import itertools
2
+ from collections import defaultdict
3
+ from dataclasses import dataclass
4
+ from typing import Dict, List, Tuple
5
+
6
+ from sympy import Integer
7
+
8
+ from .. import metrics
9
+ from ..scheduler import SchedulerNode
10
+ from ..utils import ceildiv, Placeholder
11
+ from ..virtualized import V
12
+ from .common import IndentedBuffer, Kernel
13
+ from .triton import TritonKernel
14
+ from .triton_utils import config_of, signature_to_meta
15
+
16
+
17
+ @dataclass
18
+ class PartitionState:
19
+ partitions: List[
20
+ List[Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]]
21
+ ]
22
+ cur_partition: List[
23
+ Tuple[List[SchedulerNode], Tuple[Integer, ...], Integer, Integer]
24
+ ]
25
+ cur_count: int
26
+
27
+ def finalize(self):
28
+ if self.cur_partition:
29
+ self.partitions.append(self.cur_partition)
30
+
31
+
32
+ class ForeachKernel(Kernel):
33
+ MAX_NUM_ARGS = 250  # empirically chosen limit; larger argument counts start producing Triton errors
34
+
35
+ @staticmethod
36
+ def _update_partition(partition_state, node_rw_count, node_info):
37
+ if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS:
38
+ partition_state.partitions.append(partition_state.cur_partition)
39
+ partition_state.cur_partition = [node_info]
40
+ partition_state.cur_count = node_rw_count
41
+ else:
42
+ partition_state.cur_count += node_rw_count
43
+ partition_state.cur_partition.append(node_info)
44
+
45
+ @staticmethod
46
+ def horizontal_partition(subkernel_nodes, triton_scheduling):
47
+ """Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel)
48
+ for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args
49
+ (read/writes) and to have the same 2D or 1D blocking strategy."""
50
+ assert len(subkernel_nodes) >= 1
51
+
52
+ partition_state_1d = PartitionState([], [], 0)
53
+ yelem_to_partition_state_2d: Dict[Integer, PartitionState] = defaultdict(
54
+ lambda: PartitionState([], [], 0)
55
+ )
56
+
57
+ for node in subkernel_nodes:
58
+ fused_nodes = node.get_nodes()
59
+ _, (numel, rnumel) = max(
60
+ fused_nodes, key=lambda x: int(x.is_reduction())
61
+ ).group
62
+ tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel)
63
+ node_info = fused_nodes, tiled_groups, numel, rnumel
64
+
65
+ read_writes = node.read_writes
66
+ read_write_count = len(read_writes.reads) + len(read_writes.writes)
67
+
68
+ if tiled_groups[1] == 1:
69
+ ForeachKernel._update_partition(
70
+ partition_state_1d, read_write_count, node_info
71
+ )
72
+ else:
73
+ y_elem = tiled_groups[0]
74
+ partition_state_2d = yelem_to_partition_state_2d[y_elem]
75
+ ForeachKernel._update_partition(
76
+ partition_state_2d, read_write_count, node_info
77
+ )
78
+
79
+ partition_state_1d.finalize()
80
+ all_partitions = partition_state_1d.partitions
81
+ for partition_state_2d in yelem_to_partition_state_2d.values():
82
+ partition_state_2d.finalize()
83
+ all_partitions.extend(partition_state_2d.partitions)
84
+
85
+ return all_partitions
86
+
87
+ def __init__(self):
88
+ super().__init__()
89
+ self.blocking_2d = False
90
+ self.block_size_1d = 1024 # Try tuning this value
91
+ self.block_size_2d = 32
92
+ self.num_warps = 8
93
+ self.sub_kernels = []
94
+ self.iter_vars_count = itertools.count()
95
+ self.x_block_count = 0
96
+ self.y_block_count = 0
97
+
98
+ def get_block_size(self):
99
+ if self.blocking_2d:
100
+ return self.block_size_2d
101
+ else:
102
+ return self.block_size_1d
103
+
104
+ @staticmethod
105
+ def codegen_pid_offsets(code, block_count, lower_bound, prefix):
106
+ if block_count == 0:
107
+ code.splice(f"{prefix}pid_offset = {prefix}pid")
108
+ else:
109
+ code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}")
110
+
111
+ def codegen_pid_range(self, code, x_elems):
112
+ num_x_blocks = ceildiv(x_elems, self.get_block_size())
113
+ upper_bound_x_pid = self.x_block_count + num_x_blocks
114
+ lower_bound_x_pid = self.x_block_count
115
+
116
+ if self.x_block_count == 0:
117
+ cond = "if"
118
+ else:
119
+ cond = "elif"
120
+
121
+ x_pid_bounds_check = (
122
+ f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}"
123
+ )
124
+ code.splice(f"{cond} {x_pid_bounds_check}:")
125
+
126
+ with code.indent():
127
+ ForeachKernel.codegen_pid_offsets(
128
+ code, num_x_blocks, lower_bound_x_pid, "x"
129
+ )
130
+ self.x_block_count += num_x_blocks
131
+
132
+ def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint):
133
+ sub_kernel = TritonKernel(
134
+ *groups,
135
+ index_dtype=index_dtype,
136
+ mutations=mutations,
137
+ pid_cache={
138
+ "tl.program_id(0)": "xpid_offset",
139
+ "tl.program_id(1)": "ypid",
140
+ },
141
+ reduction_hint=reduction_hint,
142
+ )
143
+ if self.blocking_2d:
144
+ assert len(groups) == 3
145
+
146
+ self.blocking_2d |= groups[1] != 1 and len(groups) == 3
147
+ metrics.generated_kernel_count -= 1
148
+ sub_kernel.args = self.args
149
+ sub_kernel.iter_vars_count = self.iter_vars_count
150
+ sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids
151
+ self.sub_kernels.append(sub_kernel)
152
+ return sub_kernel
153
+
154
+ def jit_line(self):
155
+ can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels)
156
+ size_dtype = "tl.int32" if can_use_32bit else "tl.int64"
157
+ _, _, signature = self.args.python_argdefs()
158
+ triton_meta = {
159
+ "signature": signature_to_meta(signature, size_dtype=size_dtype),
160
+ "device": V.graph.scheduler.current_device.index,
161
+ "device_type": V.graph.scheduler.current_device.type,
162
+ "constants": {},
163
+ }
164
+ triton_meta["configs"] = [config_of(signature)]
165
+ inductor_meta = {"kernel_name": str(Placeholder.DESCRIPTIVE_NAME)}
166
+ return (
167
+ f"@foreach(num_warps={self.num_warps}, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r})\n"
168
+ + "@triton.jit"
169
+ )
170
+
171
+ def grid(self):
172
+ return (
173
+ self.x_block_count,
174
+ ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d)
175
+ if self.blocking_2d
176
+ else 1,
177
+ 1,
178
+ )
179
+
180
+ def codegen_kernel(self, name=None):
181
+ code = IndentedBuffer()
182
+
183
+ code.splice(
184
+ """
185
+ import triton
186
+ import triton.language as tl
187
+ from torch._inductor.triton_heuristics import foreach
188
+ from torch._inductor.utils import instance_descriptor
189
+ from torch._inductor import triton_helpers
190
+ """
191
+ )
192
+ argdefs, _, _ = self.args.python_argdefs()
193
+ code.writeline(self.jit_line())
194
+ code.writeline(
195
+ f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):"
196
+ )
197
+
198
+ with code.indent():
199
+ code.splice("xpid = tl.program_id(0)")
200
+ if self.blocking_2d:
201
+ code.splice("ypid = tl.program_id(1)")
202
+ code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}")
203
+ code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}")
204
+ else:
205
+ code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}")
206
+
207
+ for sub_kernel in self.sub_kernels:
208
+ assert len(sub_kernel.numels) <= 3
209
+ # TODO mlazos: support dynamic shapes
210
+ numel_ind = 0 if not self.blocking_2d else 1
211
+ self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind]))
212
+ with code.indent():
213
+ if self.blocking_2d:
214
+ code.splice(f"ynumel = {sub_kernel.numels[0]}")
215
+ code.splice(f"xnumel = {sub_kernel.numels[1]}")
216
+ else:
217
+ code.splice(f"xnumel = {sub_kernel.numels[0]}")
218
+
219
+ sub_kernel.codegen_body()
220
+ code.splice(sub_kernel.body)
221
+
222
+ code.splice("else:")
223
+ with code.indent():
224
+ code.splice("pass")
225
+
226
+ return code.getvalue()
227
+
228
+ def call_kernel(self, code, name: str):
229
+ _, call_args, _ = self.args.python_argdefs()
230
+ # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
231
+ for i in range(len(call_args)):
232
+ if V.graph.is_unspec_arg(call_args[i]):
233
+ call_args[i] = call_args[i] + ".item()"
234
+ if V.graph.cpp_wrapper:
235
+ V.graph.wrapper_code.generate_kernel_call(
236
+ name,
237
+ call_args,
238
+ device_index=V.graph.scheduler.current_device.index,
239
+ grid=self.grid(),
240
+ )
241
+ else:
242
+ # TODO: refactor generate_kernel_call
243
+ call_args_str = ", ".join(call_args)
244
+ stream_name = code.write_get_raw_stream(
245
+ V.graph.scheduler.current_device.index
246
+ )
247
+ code.writeline(
248
+ f"{name}.run({call_args_str}, grid=({self.grid()}), stream={stream_name})"
249
+ )
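
ForeachKernel.horizontal_partition above is essentially greedy bin packing of subkernels by their read/write count, so that no generated kernel exceeds the CUDA argument limit. A simplified standalone sketch of that packing, using toy (name, rw_count) pairs instead of SchedulerNodes (the 250 limit mirrors MAX_NUM_ARGS above; the helper name is made up for illustration):

from typing import List, Tuple

MAX_NUM_ARGS = 250

def pack_by_arg_count(nodes: List[Tuple[str, int]]) -> List[List[str]]:
    """Greedily group nodes so the total read/write count per group stays under the limit."""
    partitions: List[List[str]] = []
    current: List[str] = []
    count = 0
    for name, rw_count in nodes:
        if current and count + rw_count > MAX_NUM_ARGS:
            partitions.append(current)
            current, count = [], 0
        current.append(name)
        count += rw_count
    if current:
        partitions.append(current)
    return partitions

# 100 pointwise ops reading/writing 3 tensors each are split into kernels of at most 83 ops:
print([len(p) for p in pack_by_arg_count([(f"op{i}", 3) for i in range(100)])])  # [83, 17]
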
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py ADDED
@@ -0,0 +1,96 @@
1
+ from typing import Dict, List, Union
2
+
3
+ import torch
4
+
5
+ from .. import config
6
+ from ..utils import instance_descriptor
7
+ from ..virtualized import V
8
+ from .common import SizeArg, TensorArg
9
+
10
+
11
+ def signature_of(arg: Union[TensorArg, SizeArg], *, size_dtype: str) -> str:
12
+ from triton.runtime.jit import JITFunction
13
+
14
+ if isinstance(arg, TensorArg):
15
+ # TODO: Remove fp8 special handling when Triton supports PyTorch fp8 dtypes.
16
+ # Related PR: https://github.com/openai/triton/pull/2279/
17
+ if arg.dtype == torch.float8_e4m3fn:
18
+ tye = "*fp8e4nv"
19
+ elif arg.dtype == torch.float8_e5m2:
20
+ tye = "*fp8e5"
21
+ else:
22
+ tye = JITFunction._type_of(arg.dtype)
23
+ if V.graph.is_unspec_arg(arg.buffer):
24
+ # had unwrapped 0d tensor as scalar
25
+ new_tye = tye.lstrip("*")
26
+ if new_tye in ["fp16", "bf16"]:
27
+ return "fp32"
28
+ else:
29
+ return new_tye
30
+ else:
31
+ return tye
32
+ if isinstance(arg, SizeArg):
33
+ if arg.expr is None:
34
+ # From triton/runtime/jit.py
35
+ # `None` is nullptr. Implicitly convert to *i8.
36
+ return "*i8"
37
+ elif isinstance(arg.expr, float):
38
+ return "fp32"
39
+ if size_dtype == "tl.int32":
40
+ return "i32"
41
+ elif size_dtype == "tl.int64":
42
+ return "i64"
43
+ else:
44
+ raise NotImplementedError(f"unhandled size_dtype {size_dtype}")
45
+ raise NotImplementedError(f"unhandled {type(arg)}: {arg}")
46
+
47
+
48
+ def signature_to_meta(
49
+ signature: List[Union[TensorArg, SizeArg]], *, size_dtype: str
50
+ ) -> Dict[int, str]:
51
+ return {
52
+ i: signature_of(arg, size_dtype=size_dtype) for i, arg in enumerate(signature)
53
+ }
54
+
55
+
56
+ def config_of(args: List[Union[TensorArg, SizeArg]]) -> instance_descriptor:
57
+ def is_aligned(
58
+ x: Union[TensorArg, SizeArg], alignment: int, include_tensor: bool
59
+ ) -> bool:
60
+ """
61
+ Roughly follow triton code here:
62
+ https://github.com/openai/triton/blob/5282ed890d453e10b9ee30076ef89115dd197761/python/triton/runtime/jit.py#L208-L222
63
+ """
64
+ if isinstance(x, TensorArg):
65
+ if not x.check_alignment:
66
+ return False
67
+ if include_tensor:
68
+ return not V.graph.scheduler.is_unaligned_buffer(x.buffer)
69
+ else:
70
+ return False
71
+ if isinstance(x, SizeArg):
72
+ # TODO(voz): These are kinda redundant, if we can solve out statically_known_multiple_of with
73
+ # _maybe_evaluate_static...
74
+ if x.name.startswith("load_seed_offset"):
75
+ return False
76
+ if x.expr is None:
77
+ return False
78
+ if isinstance(x.expr, float):
79
+ return False
80
+ return V.graph.sizevars.statically_known_multiple_of(x.expr, alignment)
81
+ raise NotImplementedError(f"unhandled {type(x)}: {x}")
82
+
83
+ if config.triton.divisible_by_16:
84
+ divisible_by_16 = tuple(
85
+ i
86
+ for i, arg in enumerate(args)
87
+ if is_aligned(arg, alignment=16, include_tensor=True)
88
+ )
89
+ else:
90
+ divisible_by_16 = ()
91
+ divisible_by_8 = tuple(
92
+ i
93
+ for i, arg in enumerate(args)
94
+ if is_aligned(arg, alignment=8, include_tensor=False)
95
+ )
96
+ return instance_descriptor(divisible_by_16, (), (), divisible_by_8)
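
signature_of and config_of above drive Triton's kernel specialization: each argument becomes a type string ("*fp16", "i32", ...) and pointer arguments that are provably 16-byte aligned are recorded as divisible_by_16 hints. A rough standalone sketch of the dtype-to-type-string step, using a hand-written table (the real code above defers to triton.runtime.jit.JITFunction._type_of, and `toy_signature` is an illustrative name, not an Inductor API):

import torch

_TRITON_TYPE = {
    torch.float32: "*fp32",
    torch.float16: "*fp16",
    torch.bfloat16: "*bf16",
    torch.int32: "*i32",
    torch.int64: "*i64",
}

def toy_signature(dtypes, size_dtype="tl.int32"):
    """Map tensor dtypes plus one trailing size arg to Triton signature strings."""
    sig = {i: _TRITON_TYPE[dt] for i, dt in enumerate(dtypes)}
    sig[len(dtypes)] = "i32" if size_dtype == "tl.int32" else "i64"
    return sig

print(toy_signature([torch.float16, torch.float16]))
# {0: '*fp16', 1: '*fp16', 2: 'i32'}
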
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py ADDED
@@ -0,0 +1,250 @@
1
+ import math
2
+ from enum import IntEnum
3
+
4
+ from typing import TYPE_CHECKING
5
+
6
+ import torch
7
+ from . import ir
8
+
9
+ from .utils import get_dtype_size, sympy_product
10
+ from .virtualized import V
11
+
12
+ if TYPE_CHECKING:
13
+ from torch._inductor.scheduler import BaseSchedulerNode
14
+
15
+
16
+ class NCCL_COLL(IntEnum):
17
+ ALL_REDUCE = 0
18
+ ALL_GATHER = 1
19
+ REDUCE_SCATTER = 2
20
+
21
+
22
+ class NVIDIA_GPU_TYPE(IntEnum):
23
+ VOLTA = 0
24
+ AMPERE = 1
25
+ HOPPER = 2
26
+
27
+
28
+ def get_gpu_type() -> NVIDIA_GPU_TYPE:
29
+ gpu_info = torch.utils.collect_env.get_gpu_info(torch.utils.collect_env.run)
30
+ if "V100" in gpu_info:
31
+ return NVIDIA_GPU_TYPE.VOLTA
32
+ elif "A100" in gpu_info:
33
+ return NVIDIA_GPU_TYPE.AMPERE
34
+ elif "H100" in gpu_info:
35
+ return NVIDIA_GPU_TYPE.HOPPER
36
+ else:
37
+ # for other gpu types, assume Ampere
38
+ return NVIDIA_GPU_TYPE.AMPERE
39
+
40
+
41
+ def get_collective_type(snode: "BaseSchedulerNode") -> NCCL_COLL:
42
+ if isinstance(snode.node, (ir.AllReduce, ir.AllReduceCoalesced)):
43
+ return NCCL_COLL.ALL_REDUCE
44
+ elif isinstance(
45
+ snode.node, (ir.AllGatherIntoTensor, ir.AllGatherIntoTensorCoalesced)
46
+ ):
47
+ return NCCL_COLL.ALL_GATHER
48
+ elif isinstance(
49
+ snode.node, (ir.ReduceScatterTensor, ir.ReduceScatterTensorCoalesced)
50
+ ):
51
+ return NCCL_COLL.REDUCE_SCATTER
52
+ else:
53
+ raise Exception(f"Unsupported collective type: {snode.node}")
54
+
55
+
56
+ ####################################################################################################################
57
+ # The following code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
58
+ ####################################################################################################################
59
+
60
+
61
+ class NCCL_HW(IntEnum):
62
+ NVLINK = 0
63
+ PCI = 1
64
+ NET = 2
65
+
66
+
67
+ class NCCL_ALGO(IntEnum):
68
+ TREE = 0
69
+ RING = 1
70
+
71
+
72
+ class NCCL_PROTO(IntEnum):
73
+ # The ordering and enum values here matches original in
74
+ # https://github.com/NVIDIA/nccl/blob/0b083e52096c387bad7a5c5c65b26a9dca54de8c/src/include/devcomm.h#L28
75
+ # For difference between these protocols, see https://github.com/NVIDIA/nccl/issues/281#issuecomment-571816990
76
+ LL = 0 # Low-latency
77
+ # LL128 = 1 # Low-latency 128-byte
78
+ # SIMPLE = 2
79
+
80
+
81
+ # Latencies in us
82
+ # len(NCCL_ALGO) x len(NCCL_PROTO)
83
+ baseLat = torch.tensor(
84
+ [
85
+ # Tree
86
+ [
87
+ 6.8, # LL
88
+ ],
89
+ # Ring
90
+ [
91
+ 6.6, # LL
92
+ ],
93
+ ]
94
+ )
95
+
96
+ # Latencies in us
97
+ # len(NCCL_HW) x len(NCCL_ALGO) x len(NCCL_PROTO)
98
+ hwLat = torch.tensor(
99
+ [
100
+ # NVLINK
101
+ [
102
+ [0.6], # Tree (LL)
103
+ [0.6], # Ring (LL)
104
+ ],
105
+ # PCI
106
+ [
107
+ [1.0], # Tree (LL)
108
+ [1.0], # Ring (LL)
109
+ ],
110
+ # NET
111
+ [
112
+ [5.0], # Tree (LL)
113
+ [2.7], # Ring (LL)
114
+ ],
115
+ ]
116
+ )
117
+
118
+
119
+ # LL128 max BW per channel
120
+ llMaxBws = torch.tensor(
121
+ [
122
+ # Volta-N1/Intel-N2/Intel-N4
123
+ [
124
+ 39.0,
125
+ 39.0,
126
+ 20.4,
127
+ ],
128
+ # Ampere-N1/AMD-N2/AMD-N4
129
+ [
130
+ 87.7,
131
+ 22.5, # avg of ring & tree
132
+ 19.0,
133
+ ],
134
+ # Hopper-N1/AMD-N2/AMD-N4
135
+ [
136
+ 87.7,
137
+ 22.5, # avg of ring & tree
138
+ 19.0,
139
+ ],
140
+ ]
141
+ )
142
+
143
+
144
+ def estimate_nccl_collective_runtime(snode: "BaseSchedulerNode") -> float:
145
+ """
146
+ Returns estimated NCCL collective runtime in nanoseconds (ns).
147
+
148
+ The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
149
+ We aim to estimate the runtime as accurately as possible.
150
+
151
+ Assumptions:
152
+ - only ring algorithm (NCCL_ALGO_RING) is used
153
+ - only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
154
+ - 8 gpus per node # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
155
+ - collective is one of: allreduce, reducescatter, allgather
156
+ """
157
+ tensor_numel = V.graph.sizevars.size_hint(sympy_product(snode.node.layout.size))
158
+ tensor_dtype = snode.node.layout.dtype
159
+ tensor_storage_size_bytes = tensor_numel * get_dtype_size(tensor_dtype)
160
+ # Convert bytes to GB
161
+ tensor_storage_size_GB = tensor_storage_size_bytes / 1024 / 1024 / 1024
162
+
163
+ # Currently assumes each node has 8 gpus. And when >1 node is used, assumes each node uses all 8 gpus.
164
+ # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
165
+ num_gpus_per_node = 8
166
+ _, _, group_size = snode.node.constant_args # type: ignore[attr-defined]
167
+ nNodes = math.ceil(group_size / num_gpus_per_node)
168
+ nRanks = group_size # this is total # of gpus globally that participate in this collective op
169
+
170
+ if nRanks <= 1:
171
+ return 0
172
+
173
+ # Assumes ring algorithm
174
+ nccl_algo = NCCL_ALGO.RING
175
+ nccl_proto = NCCL_PROTO.LL
176
+ coll = get_collective_type(snode)
177
+
178
+ # =============== bandwidth computation ===============
179
+ # First compute bandwidth in GB/s; then at the end, convert it to GB/ns
180
+
181
+ bwIntra = torch._inductor.config.intra_node_bw
182
+ bwInter = torch._inductor.config.inter_node_bw
183
+
184
+ compCapIndex = get_gpu_type()
185
+ index2 = nNodes - 1 if nNodes <= 2 else 2
186
+ # LL: for single node, we look at GPU type; for multi-node, we look at CPU type
187
+ index1 = compCapIndex if nNodes == 1 else 0
188
+ llMaxBw = llMaxBws[index1][index2].item()
189
+
190
+ # NOTE: each step of ring algorithm is synchronized,
191
+ # and is bottlenecked by the slowest link which is the inter-node interconnect.
192
+ # hence when nNodes >= 2, bw is inter-node bandwidth.
193
+ # NOTE: the original code in https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc
194
+ # has this as `if nNodes <= 2`, which seems wrong. Corrected it here.
195
+ bw = bwIntra if nNodes == 1 else bwInter
196
+ nChannels = 2 # Assume # channels is 2
197
+ busBw = nChannels * bw
198
+
199
+ # Various model refinements
200
+ busBw = min(
201
+ llMaxBw,
202
+ busBw
203
+ * (1.0 / 4.0 if (nNodes > 1 or coll == NCCL_COLL.ALL_REDUCE) else 1.0 / 3.0),
204
+ )
205
+
206
+ if coll == NCCL_COLL.ALL_REDUCE:
207
+ nsteps = 2 * (nRanks - 1)
208
+ elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
209
+ nsteps = nRanks - 1
210
+
211
+ # Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
212
+ ratio = (1.0 * nRanks) / nsteps
213
+ bandwidth = busBw * ratio
214
+ # Convert GB/s to GB/ns
215
+ bandwidth_GB_per_ns = bandwidth / 1e9
216
+
217
+ # =============== latency computation ===============
218
+ intraHw = NCCL_HW.NVLINK
219
+ hw = intraHw if nNodes == 1 else NCCL_HW.NET
220
+
221
+ if coll == NCCL_COLL.ALL_REDUCE:
222
+ if nNodes > 1:
223
+ nInterSteps = 2 * nNodes
224
+ else:
225
+ nInterSteps = 0
226
+ elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
227
+ nInterSteps = nNodes - 1
228
+
229
+ # First compute latency in us; then at the end, convert it to ns
230
+ latency = baseLat[nccl_algo][nccl_proto].item()
231
+ intraLat = hwLat[intraHw][nccl_algo][nccl_proto].item()
232
+ interLat = hwLat[NCCL_HW.NET][nccl_algo][nccl_proto].item()
233
+
234
+ # Inter-node rings still have to launch nsteps * net overhead.
235
+ netOverhead = 0.0
236
+ if nNodes > 1:
237
+ netOverhead = 1.0 # getNetOverhead(comm);
238
+ intraLat = max(intraLat, netOverhead)
239
+ latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat
240
+ # Convert us to ns
241
+ latency_ns = latency * 1e3
242
+
243
+ # =============== final result ===============
244
+ transport_ns = tensor_storage_size_GB / bandwidth_GB_per_ns
245
+ return transport_ns + latency_ns
246
+
247
+
248
+ ################################################################################################################
249
+ # The above code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
250
+ ################################################################################################################
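
As a back-of-the-envelope check of estimate_nccl_collective_runtime above, the sketch below re-derives the single-node all_gather case with the bandwidths passed in explicitly. The 300 GB/s intra-node figure is an assumed example value (the real code reads torch._inductor.config.intra_node_bw / inter_node_bw), while 87.7, 6.6 and 0.6 come from the llMaxBws, baseLat and hwLat tables above; `toy_allgather_estimate_ns` is an illustrative helper, not a call into the function above.

def toy_allgather_estimate_ns(tensor_gb, n_ranks, bw_intra_gb_s,
                              ll_max_bw=87.7, base_lat_us=6.6, intra_lat_us=0.6):
    n_channels = 2
    bus_bw = n_channels * bw_intra_gb_s
    bus_bw = min(ll_max_bw, bus_bw / 3.0)   # single-node, non-allreduce refinement factor
    nsteps = n_ranks - 1                    # ring all_gather takes nRanks - 1 steps
    algo_bw_gb_s = bus_bw * n_ranks / nsteps
    transport_ns = tensor_gb / (algo_bw_gb_s / 1e9)
    latency_ns = (base_lat_us + nsteps * intra_lat_us) * 1e3
    return transport_ns + latency_ns

# 1 GB gathered across 8 GPUs at an assumed 300 GB/s intra-node bandwidth:
print(round(toy_allgather_estimate_ns(1.0, 8, 300.0)))  # roughly 1e7 ns, i.e. about 10 ms
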
env-llmeval/lib/python3.10/site-packages/torch/_inductor/comms.py ADDED
@@ -0,0 +1,365 @@
1
+ # pyre-strict
2
+
3
+ from typing import List
4
+
5
+ import torch
6
+
7
+ from . import config, ir, scheduler
8
+ from .dependencies import WeakDep
9
+ from .utils import tuple_sorted
10
+
11
+ overlap_log = torch._logging.getArtifactLogger(__name__, "overlap")
12
+
13
+
14
+ def sink_waits(
15
+ snodes: List["scheduler.BaseSchedulerNode"],
16
+ ) -> List["scheduler.BaseSchedulerNode"]:
17
+ """
18
+ Greedily moves waits as late as possible (i.e. until we reach a use). Optimal in terms of
19
+ communication overlap.
20
+ """
21
+ new_order = []
22
+ cur_waits = set()
23
+ for snode in snodes:
24
+ if isinstance(snode.node, ir.Wait):
25
+ cur_waits.add(snode)
26
+ else:
27
+ for wait in tuple_sorted(cur_waits):
28
+ if snode in wait.node_users:
29
+ new_order.append(wait)
30
+ cur_waits.remove(wait)
31
+ new_order.append(snode)
32
+ for snode in tuple_sorted(cur_waits):
33
+ new_order.append(snode)
34
+ return new_order
35
+
36
+
37
+ def raise_comms(
38
+ snodes: List["scheduler.BaseSchedulerNode"],
39
+ ) -> List["scheduler.BaseSchedulerNode"]:
40
+ """
41
+ Greedily moves comms as early as possible (i.e. until we reach an input).
42
+ Optimal in terms of communication overlap.
43
+
44
+ TODO: We might want to adjust this in the future to account for memory limitations.
45
+ e.g. when we are compiling FSDP, this heuristic will cause the all-gathers to be prefetched as soon as possible,
46
+ which is the beginning of the forwards pass. We'll have to either do a special pass for FSDP,
47
+ or we'll want to redo this pass with memory considerations so we handle the FSDP case in a general way.
48
+ """
49
+ new_order_reversed: List["scheduler.BaseSchedulerNode"] = []
50
+ cur_comms: List["scheduler.BaseSchedulerNode"] = []
51
+ for snode in reversed(snodes):
52
+ if isinstance(snode.node, ir.CollectiveKernel):
53
+ cur_comms.append(snode)
54
+ else:
55
+ for comm in cur_comms:
56
+ assert len(comm.inverse_users) > 0
57
+ while len(cur_comms) > 0 and any(
58
+ snode in comm.inverse_users for comm in cur_comms
59
+ ):
60
+ comm = cur_comms.pop(0)
61
+ new_order_reversed.append(comm)
62
+ new_order_reversed.append(snode)
63
+ assert len(cur_comms) <= 1
64
+ for snode in tuple_sorted(cur_comms):
65
+ new_order_reversed.append(snode)
66
+ return new_order_reversed[::-1]
67
+
68
+
69
+ def get_ancestors(node):
70
+ ancestors = set()
71
+ cur_nodes = [node]
72
+ while len(cur_nodes) > 0:
73
+ new_nodes = []
74
+ for node in cur_nodes:
75
+ for inp in node.inverse_users:
76
+ if inp not in ancestors:
77
+ ancestors.add(inp)
78
+ new_nodes.append(inp)
79
+ cur_nodes = new_nodes
80
+ return ancestors
81
+
82
+
83
+ def get_descendants(node):
84
+ descendants = set()
85
+ cur_nodes = [node]
86
+ while len(cur_nodes) > 0:
87
+ new_nodes = []
88
+ for node in cur_nodes:
89
+ for inp in node.node_users:
90
+ if inp not in descendants:
91
+ descendants.add(inp)
92
+ new_nodes.append(inp)
93
+ cur_nodes = new_nodes
94
+ return descendants
95
+
96
+
97
+ def decide_global_ordering_of_comms(nodes: List["scheduler.BaseSchedulerNode"]):
98
+ """
99
+ Decide global ordering of comms, by just enforcing the ordering that's in the input graph
100
+ (might not be the same ordering as the eager mode program).
101
+ TODO: Come up with a better approach
102
+ """
103
+ comm_nodes = [n for n in nodes if isinstance(n.node, ir.CollectiveKernel)]
104
+ for i in range(1, len(comm_nodes)):
105
+ # Enforce ordering by making previous comm a `WeakDep` dependency of the next comm
106
+ comm_nodes[i].add_fake_dep(WeakDep(comm_nodes[i - 1].get_name()))
107
+
108
+
109
+ def assert_no_comm_nodes(snodes: List["scheduler.BaseSchedulerNode"]) -> None:
110
+ assert not any(isinstance(snode.node, ir.CollectiveKernel) for snode in snodes)
111
+
112
+
113
+ def estimate_op_runtime(snode: "scheduler.BaseSchedulerNode") -> float:
114
+ """
115
+ Returns estimated op runtime in nanoseconds (ns)
116
+ """
117
+ if config.estimate_op_runtime == "default":
118
+ runtime = snode.get_estimated_runtime()
119
+ else:
120
+ assert callable(config.estimate_op_runtime)
121
+ runtime = config.estimate_op_runtime(snode)
122
+ return runtime
123
+
124
+
125
+ def reorder_compute_for_overlap(
126
+ snodes: List["scheduler.BaseSchedulerNode"],
127
+ ) -> List["scheduler.BaseSchedulerNode"]:
128
+ """
129
+ Decides a global ordering of all compute and communication nodes,
130
+ assuming that we already have a global ordering of communication nodes.
131
+
132
+ Overall scheduling procedure is:
133
+ Step 1: Given that we've currently scheduled comm N, we now schedule all compute nodes
134
+ that are required for comm N + 1 but do not depend on comm N, to run at the same time with comm N.
135
+ Step 2: If all those compute nodes are sufficient to overlap comm N, we're done.
136
+ Otherwise, we now need to look elsewhere to find compute that overlaps with comm N.
137
+ We prioritize compute nodes that are needed sooner.
138
+ Step 3: We schedule the compute nodes dependent on comm N and required for comm N + 1.
139
+ Step 4: We schedule comm N + 1.
140
+ Repeat this for subsequent comm nodes.
141
+ """
142
+ final_order = []
143
+
144
+ comm_nodes = []
145
+ for snode in snodes:
146
+ if isinstance(snode.node, ir.CollectiveKernel):
147
+ comm_nodes.append(snode)
148
+ if len(comm_nodes) == 0:
149
+ # if there is no comm nodes, return the current order
150
+ return snodes
151
+
152
+ comm_ancestors = {node: get_ancestors(node) for node in comm_nodes}
153
+ comm_descendants = {node: get_descendants(node) for node in comm_nodes}
154
+
155
+ indeg = {k: 0 for k in snodes}
156
+ for snode in snodes:
157
+ for user in snode.node_users:
158
+ if user in indeg:
159
+ indeg[user] += 1
160
+ ready_to_schedule_nodes = {node for node in snodes if indeg[node] == 0}
161
+
162
+ unscheduled_nodes = set()
163
+ unscheduled_nodes = set(snodes)
164
+
165
+ def schedule_node(snode):
166
+ """
167
+ Schedule a single node.
168
+ """
169
+ assert snode in unscheduled_nodes
170
+ assert snode in ready_to_schedule_nodes
171
+ ready_to_schedule_nodes.remove(snode)
172
+ unscheduled_nodes.remove(snode)
173
+ final_order.append(snode)
174
+ for user in tuple_sorted(snode.node_users):
175
+ if user in indeg:
176
+ indeg[user] -= 1
177
+ if indeg[user] == 0:
178
+ ready_to_schedule_nodes.add(user)
179
+
180
+ def schedule_nodes(snodes):
181
+ """
182
+ Schedules all nodes in `snodes` in an arbitrary topologically valid order.
183
+ """
184
+ all_nodes = set(snodes)
185
+ assert all(node in unscheduled_nodes for node in all_nodes)
186
+ while len(all_nodes) > 0:
187
+ # NOTE: since model graph is always a DAG and does not have circular dependency inside,
188
+ # there should be at least one node that is a "free node" (i.e. indeg == 0),
189
+ # hence infinite loop is not possible. But we check here just to be safe.
190
+ progress = False
191
+ for node in tuple_sorted(all_nodes):
192
+ if node in ready_to_schedule_nodes:
193
+ schedule_node(node)
194
+ all_nodes.remove(node)
195
+ progress = True
196
+ if not progress:
197
+ raise Exception(
198
+ "Unable to find a free node (indeg == 0). This is an impossible state to reach. "
199
+ "Please report a bug to PyTorch."
200
+ )
201
+
202
+ # First, schedule all compute nodes that are required by first comm node,
203
+ # as well as the first comm node itself.
204
+ assert len(comm_nodes) > 0
205
+ schedule_nodes(
206
+ list(comm_ancestors[comm_nodes[0]]) + [comm_nodes[0]],
207
+ )
208
+
209
+ rolled_over_compute_cost = 0
210
+ for idx in range(1, len(comm_ancestors)):
211
+ # Step 1: Given that we've currently scheduled comm `idx-1`, we now schedule
212
+ # all compute nodes that are required for comm `idx` but do not depend on comm `idx-1`,
213
+ # to run at the same time with comm `idx-1`.
214
+ needed_by_next_comm_and_ready_compute_nodes = unscheduled_nodes & (
215
+ comm_ancestors[comm_nodes[idx]] - comm_descendants[comm_nodes[idx - 1]]
216
+ )
217
+ assert_no_comm_nodes(needed_by_next_comm_and_ready_compute_nodes)
218
+
219
+ total_compute_runtime_cost = rolled_over_compute_cost + sum(
220
+ [
221
+ estimate_op_runtime(node)
222
+ for node in needed_by_next_comm_and_ready_compute_nodes
223
+ ]
224
+ )
225
+ prev_comm_runtime_cost = estimate_op_runtime(comm_nodes[idx - 1])
226
+ schedule_nodes(tuple_sorted(needed_by_next_comm_and_ready_compute_nodes))
227
+
228
+ # Step 2: If all those compute nodes are sufficient to overlap comm `idx-1`, we're done.
229
+ # Otherwise, we now need to look elsewhere to find compute that overlaps with comm `idx`.
230
+ # We prioritize compute nodes that are needed sooner.
231
+ step1_runtime_cost = total_compute_runtime_cost
232
+ if step1_runtime_cost >= prev_comm_runtime_cost:
233
+ pass
234
+ else:
235
+ # Find all ready to schedule compute nodes that do not depend on comm `idx-1`.
236
+ ready_to_schedule_compute_nodes = tuple_sorted(
237
+ ready_to_schedule_nodes - comm_descendants[comm_nodes[idx - 1]]
238
+ )
239
+ assert_no_comm_nodes(ready_to_schedule_compute_nodes)
240
+
241
+ def earliest_comm_descendant(node):
242
+ for idx in range(len(comm_nodes)):
243
+ if node in comm_ancestors[comm_nodes[idx]]:
244
+ return idx
245
+ return len(comm_nodes)
246
+
247
+ # Prioritize compute nodes that are needed sooner.
248
+ ready_to_schedule_compute_nodes = sorted(
249
+ ready_to_schedule_compute_nodes, key=earliest_comm_descendant
250
+ )
251
+
252
+ for snode in ready_to_schedule_compute_nodes:
253
+ if total_compute_runtime_cost >= prev_comm_runtime_cost:
254
+ # If accumulated compute runtime cost is greater than comm `idx-1` runtime cost,
255
+ # it means we have maximized overlap for comm `idx-1`, and hence we stop looking
256
+ # for more compute to schedule.
257
+ break
258
+ compute_runtime_cost = estimate_op_runtime(snode)
259
+ # If we're not able to leverage more than half of this
260
+ # node's compute to overlap, we skip it.
261
+ # TODO: Smarter heuristics here
262
+ if (
263
+ prev_comm_runtime_cost - total_compute_runtime_cost
264
+ ) <= compute_runtime_cost / 2:
265
+ continue
266
+ schedule_node(snode)
267
+ total_compute_runtime_cost += compute_runtime_cost
268
+ rollable_compute_cost = total_compute_runtime_cost - step1_runtime_cost
269
+
270
+ # Step 3: We schedule the compute nodes dependent on comm `idx-1` and required for comm `idx`.
271
+ needed_by_next_comm_nodes = unscheduled_nodes & comm_ancestors[comm_nodes[idx]]
272
+ schedule_nodes(list(needed_by_next_comm_nodes))
273
+
274
+ # Step 4: We schedule comm `idx`.
275
+ schedule_nodes([comm_nodes[idx]])
276
+
277
+ is_prev_comm_blocking_next_comm = len(needed_by_next_comm_nodes) > 0
278
+ # The idea here is that if there are no compute nodes from Step 3
279
+ # (i.e. if prev comm is not blocking next comm), we can roll over the compute nodes
280
+ # in Step 2 to overlap with the next comm, since they're not required to finish
281
+ # before the next comm starts.
282
+ if is_prev_comm_blocking_next_comm:
283
+ rolled_over_compute_cost = 0
284
+ else:
285
+ rolled_over_compute_cost = rollable_compute_cost # type: ignore[assignment]
286
+
287
+ schedule_nodes(unscheduled_nodes)
288
+ return final_order
289
+
290
+
291
+ def node_summary(snode):
292
+ detail = ""
293
+ if isinstance(snode.node, ir.ExternKernelOut):
294
+ detail = f" ({snode.node.kernel})"
295
+ out_tensor_info = ""
296
+ if (
297
+ hasattr(snode.node, "layout")
298
+ and hasattr(snode.node.layout, "size")
299
+ and hasattr(snode.node.layout, "stride")
300
+ ):
301
+ out_tensor_info = (
302
+ f" (size={snode.node.layout.size}, stride={snode.node.layout.stride})"
303
+ )
304
+ node_name = ""
305
+ if hasattr(snode.node, "name"):
306
+ node_name = snode.node.name
307
+ return f"{snode.node.__class__.__name__}{detail}{out_tensor_info} ({node_name})"
308
+
309
+
310
+ def visualize_overlap(order):
311
+ total_est_runtime: float = 0.0
312
+ cur_comm_node = None
313
+ for snode in order:
314
+ if cur_comm_node is None:
315
+ if isinstance(snode.node, ir.CollectiveKernel):
316
+ total_est_runtime += estimate_op_runtime(snode)
317
+ cur_comm_node = snode.node
318
+ elif isinstance(snode.node, ir.Wait):
319
+ raise Exception(
320
+ "Wait is not expected when there is no collective running"
321
+ )
322
+ else: # exposed compute op
323
+ total_est_runtime += estimate_op_runtime(snode)
324
+ overlap_log.debug(f"{node_summary(snode)}") # noqa: G004
325
+ else: # cur_comm_node is not None
326
+ if isinstance(snode.node, ir.CollectiveKernel):
327
+ raise Exception(
328
+ "Found two collectives running at the same time. "
329
+ "`visualize_overlap` needs to be updated to handle this case"
330
+ )
331
+ elif isinstance(snode.node, ir.Wait): # end of this comm op
332
+ overlap_log.debug(f"{node_summary(snode)}") # noqa: G004
333
+ cur_comm_node = None
334
+ else: # overlapped compute op
335
+ overlap_log.debug(f"| {node_summary(snode)}") # noqa: G004
336
+ overlap_log.debug(
337
+ f"Est. runtime (ms): {total_est_runtime / 1000 / 1000}" # noqa: G004
338
+ )
339
+
340
+
341
+ def reorder_compute_and_comm_for_overlap(
342
+ snodes: List["scheduler.BaseSchedulerNode"],
343
+ ) -> List["scheduler.BaseSchedulerNode"]:
344
+ order = snodes
345
+ for p in config.reorder_for_compute_comm_overlap_passes:
346
+ if isinstance(p, str) and p in globals():
347
+ p = globals()[p] # it is a builtin pass
348
+ if torch.distributed.get_rank() == 0:
349
+ overlap_log.debug(
350
+ f"==== Visualize overlap before reordering pass {p} ====" # noqa: G004
351
+ )
352
+ try:
353
+ visualize_overlap(order)
354
+ except Exception as e:
355
+ overlap_log.debug(str(e))
356
+ order = p(order) # type: ignore[operator]
357
+ if torch.distributed.get_rank() == 0:
358
+ overlap_log.debug(
359
+ f"==== Visualize overlap after reordering pass {p} ====" # noqa: G004
360
+ )
361
+ try:
362
+ visualize_overlap(order)
363
+ except Exception as e:
364
+ overlap_log.debug(str(e))
365
+ return order
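
To make the effect of sink_waits above concrete, here is a toy rendering that uses plain (kind, name, reads) tuples instead of SchedulerNodes and shows a wait sliding past compute that does not consume the collective's result; `toy_sink_waits` is an illustrative stand-in, not the Inductor pass itself.

from typing import List, Tuple

Op = Tuple[str, str, Tuple[str, ...]]   # (kind, name, names_it_reads)

def toy_sink_waits(ops: List[Op]) -> List[Op]:
    out: List[Op] = []
    pending: List[Op] = []              # waits not yet required by anything scheduled
    for op in ops:
        kind, _, reads = op
        if kind == "wait":
            pending.append(op)
            continue
        # flush any pending wait whose result this op actually reads
        still_pending = []
        for w in pending:
            if w[1] in reads:
                out.append(w)
            else:
                still_pending.append(w)
        pending = still_pending
        out.append(op)
    return out + pending

order = [
    ("comm",    "allreduce0", ()),
    ("wait",    "wait0",      ("allreduce0",)),
    ("compute", "mm1",        ("x",)),       # independent of the collective
    ("compute", "add2",       ("wait0",)),   # consumes the collective's result
]
print([name for _, name, _ in toy_sink_waits(order)])
# ['allreduce0', 'mm1', 'wait0', 'add2'] -- the wait slides past mm1
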
env-llmeval/lib/python3.10/site-packages/torch/_inductor/compile_fx.py ADDED
@@ -0,0 +1,1302 @@
1
+ import contextlib
2
+ import dataclasses
3
+ import functools
4
+ import logging
5
+ import os
6
+ import sys
7
+ import time
8
+ import warnings
9
+ from itertools import count
10
+
11
+ from typing import (
12
+ Any,
13
+ Callable,
14
+ Dict,
15
+ FrozenSet,
16
+ List,
17
+ Optional,
18
+ Sequence,
19
+ Tuple,
20
+ Union,
21
+ )
22
+ from unittest import mock
23
+
24
+ from functorch.compile import min_cut_rematerialization_partition
25
+
26
+ import torch._functorch.config as functorch_config
27
+
28
+ import torch.fx
29
+ import torch.utils._pytree as pytree
30
+ from torch._dynamo import (
31
+ compiled_autograd,
32
+ logging as dynamo_logging,
33
+ utils as dynamo_utils,
34
+ )
35
+ from torch._dynamo.utils import detect_fake_mode, lazy_format_graph_code
36
+ from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
37
+ from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
38
+
39
+ from torch._inductor.debug import save_args_for_compile_fx_inner
40
+ from torch._ops import OpOverload
41
+ from torch._subclasses.fake_tensor import FakeTensor
42
+ from torch.fx.passes.fake_tensor_prop import FakeTensorProp
43
+
44
+ from .._dynamo.backends.common import aot_autograd
45
+ from ..fx.graph import _PyTreeCodeGen
46
+ from . import config, metrics
47
+ from .debug import DebugContext
48
+ from .decomposition import select_decomp_table
49
+ from .fx_passes.joint_graph import joint_graph_passes
50
+ from .fx_passes.post_grad import post_grad_passes, view_to_reshape
51
+ from .fx_passes.pre_grad import pre_grad_passes
52
+ from .graph import GraphLowering
53
+ from .ir import ExternKernelNode
54
+ from .utils import get_dtype_size, has_incompatible_cudagraph_ops
55
+ from .virtualized import V
56
+
57
+ if config.is_fbcode():
58
+ from torch._inductor.fb.utils import time_and_log
59
+ else:
60
+ # no-op decorator
61
+ def time_and_log(attr: str):
62
+ return dynamo_utils.identity
63
+
64
+
65
+ log = logging.getLogger(__name__)
66
+ perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
67
+ post_grad_graphs_log = torch._logging.getArtifactLogger(__name__, "post_grad_graphs")
68
+ ALIGNMENT = 16
69
+
70
+
71
+ @dataclasses.dataclass
72
+ class BoxedBool:
73
+ value: bool
74
+
75
+ def __bool__(self):
76
+ return self.value
77
+
78
+ @staticmethod
79
+ def disable(obj):
80
+ if isinstance(obj, BoxedBool):
81
+ obj.value = False
82
+ return obj
83
+ return False
84
+
85
+
86
+ @dataclasses.dataclass
87
+ class BoxedDeviceIndex:
88
+ value: Optional[int]
89
+
90
+ def set(self, device_idx):
91
+ assert device_idx is None or isinstance(device_idx, int)
92
+ self.value = device_idx
93
+
94
+
95
+ # copy_ fails when trying to write to tensors with internal memory overlap.
96
+ # For expanded dimensions (a dimension whose size used to be 1), we can
97
+ # select one element from that dimension and write to it, which effectively
98
+ # writes to all values along that dimension of the input tensor.
99
+ def get_expanded_dims(t):
100
+ if not isinstance(t, torch.Tensor):
101
+ return None
102
+ return [i for i in range(t.ndim) if t.stride(i) == 0 and t.size(i) != 1]
103
+
104
+
105
+ def index_expanded_dims(t: torch.Tensor, expanded_dims: List[int]) -> torch.Tensor:
106
+ for expanded_dim in expanded_dims:
107
+ t = torch.ops.aten.slice(t, expanded_dim, 0, 1)
108
+ return t
109
+
110
+
111
+ def complex_memory_overlap(t: torch.Tensor) -> bool:
112
+ # if torch._debug_has_internal_overlap thinks this tensor potentially has
113
+ # memory overlap internally, let's dig deeper to find out whether it's true.
114
+ t = index_expanded_dims(t, get_expanded_dims(t))
115
+ if torch._debug_has_internal_overlap(t) != 0:
116
+ strides = t.stride()
117
+ sizes = t.shape
118
+ indices = list(range(len(strides)))
119
+ indices = [x for _, x in sorted(zip(strides, indices))]
120
+ for i in range(len(strides)):
121
+ prev_stride = 1 if i == 0 else strides[indices[i - 1]]
122
+ prev_size = 1 if i == 0 else sizes[indices[i - 1]]
123
+ if strides[indices[i]] < prev_stride * prev_size:
124
+ return True
125
+ return False
126
+
127
+
128
+ @functools.lru_cache(None)
129
+ def _step_logger():
130
+ return dynamo_logging.get_step_logger(log)
131
+
132
+
133
+ @functools.lru_cache(None)
134
+ def _warn_tf32_disabled():
135
+ if (
136
+ torch.cuda.is_available()
137
+ and not torch.backends.cuda.matmul.allow_tf32
138
+ and torch.cuda.get_device_capability() >= (8, 0)
139
+ ):
140
+ warnings.warn(
141
+ "TensorFloat32 tensor cores for float32 matrix multiplication available but not enabled. "
142
+ "Consider setting `torch.set_float32_matmul_precision('high')` for better performance."
143
+ )
144
+
145
+
146
+ def _unlift_graph(mod, gm, graph_signature):
147
+ state_dict = {}
148
+ for name, param in mod.named_parameters(remove_duplicate=False):
149
+ state_dict[name] = param
150
+ for name, param in mod.named_buffers(remove_duplicate=False):
151
+ state_dict[name] = param
152
+
153
+ from torch._export.exported_program import (
154
+ _construct_inp_pos_to_param_buffer_name,
155
+ _unlift,
156
+ )
157
+
158
+ inp_pos_to_param_buffer_name = _construct_inp_pos_to_param_buffer_name(
159
+ gm,
160
+ graph_signature,
161
+ state_dict,
162
+ {},
163
+ )
164
+ unlifted_gm = _unlift(
165
+ gm,
166
+ inp_pos_to_param_buffer_name,
167
+ pytree.LeafSpec(),
168
+ None,
169
+ state_dict,
170
+ {},
171
+ graph_signature.buffers_to_mutate,
172
+ )
173
+ return unlifted_gm
174
+
175
+
176
+ def is_tf32_warning_applicable(gm: torch.fx.GraphModule):
177
+ aten = torch.ops.aten
178
+ tf32_ops = {
179
+ aten.mm.default,
180
+ aten.addmm.default,
181
+ aten.bmm.default,
182
+ aten.baddbmm.default,
183
+ }
184
+ for node in gm.graph.nodes:
185
+ if (
186
+ node.op == "call_function"
187
+ and node.target in tf32_ops
188
+ and isinstance(node.meta.get("val", None), torch.Tensor)
189
+ and node.meta["val"].dtype == torch.float32
190
+ and node.meta["val"].device.type == "cuda"
191
+ ):
192
+ return True
193
+ return False
194
+
195
+
196
+ @DebugContext.wrap
197
+ def count_bytes_inner(
198
+ gm: torch.fx.GraphModule,
199
+ example_inputs: List[torch.Tensor],
200
+ num_fixed: int = 0,
201
+ **kwargs,
202
+ ):
203
+ shape_env = _shape_env_from_inputs(example_inputs)
204
+ fake_mode = fake_tensor_prop(gm, example_inputs)
205
+
206
+ with V.set_fake_mode(fake_mode):
207
+ post_grad_passes(gm, False)
208
+
209
+ graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)
210
+ with V.set_graph_handler(graph), V.set_real_inputs(example_inputs):
211
+ graph.run(*example_inputs)
212
+ num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes()
213
+ metrics.num_bytes_accessed += num_bytes
214
+ metrics.nodes_num_elem += nodes_num_elem
215
+ metrics.node_runtimes += node_runtimes
216
+ return make_boxed_func(gm.forward)
217
+
218
+
219
+ def fake_tensor_prop(
220
+ gm: torch.fx.GraphModule,
221
+ example_inputs: List[torch.Tensor],
222
+ force_allow_non_fake_inputs: bool = False,
223
+ ):
224
+ """
225
+ If we can not detect fake mode from the context of inputs, create one.
226
+
227
+ The created fake mode will be returned.
228
+ """
229
+ fake_mode = detect_fake_mode(example_inputs)
230
+ if not fake_mode:
231
+ fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
232
+ FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)
233
+ else:
234
+ ctx = (
235
+ contextlib.nullcontext()
236
+ if not force_allow_non_fake_inputs
237
+ else mock.patch.object(fake_mode, "allow_non_fake_inputs", True)
238
+ )
239
+ with ctx: # type: ignore[attr-defined]
240
+ FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(
241
+ *example_inputs
242
+ )
243
+
244
+ return fake_mode
245
+
246
+
247
+ @DebugContext.wrap
248
+ @torch.utils._python_dispatch._disable_current_modes()
249
+ @time_and_log(attr="compilation time (in seconds)")
250
+ def compile_fx_inner(
251
+ gm: torch.fx.GraphModule,
252
+ example_inputs: List[torch.Tensor],
253
+ cudagraphs: Optional[BoxedBool] = None,
254
+ num_fixed: int = 0,
255
+ is_backward: bool = False,
256
+ graph_id: Optional[int] = None,
257
+ cpp_wrapper: bool = False,
258
+ aot_mode: bool = False,
259
+ is_inference: bool = False,
260
+ boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
261
+ user_visible_outputs: FrozenSet[str] = frozenset(),
262
+ layout_opt: Optional[bool] = None,
263
+ extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
264
+ ) -> Union[CompiledFxGraph, str]:
265
+ """
266
+ Inductor API that compiles a single graph.
267
+
268
+ If you change the argument list for this function, make sure you
269
+ also update the call to save_args_for_compile_fx_inner below accordingly.
270
+ """
271
+ if dynamo_utils.count_calls(gm.graph) == 0 and not aot_mode:
272
+ return make_boxed_func(gm.forward)
273
+
274
+ assert isinstance(
275
+ next(iter(reversed(gm.graph.nodes))).args[0], (tuple, list)
276
+ ), f"inductor can only compile FX graphs which return a tuple/list, but got {gm.graph}"
277
+
278
+ if config.save_args:
279
+ save_args_for_compile_fx_inner(
280
+ gm,
281
+ example_inputs,
282
+ cudagraphs=cudagraphs,
283
+ num_fixed=num_fixed,
284
+ is_backward=is_backward,
285
+ graph_id=graph_id,
286
+ cpp_wrapper=cpp_wrapper,
287
+ aot_mode=aot_mode,
288
+ is_inference=is_inference,
289
+ boxed_forward_device_index=boxed_forward_device_index,
290
+ user_visible_outputs=user_visible_outputs,
291
+ layout_opt=layout_opt,
292
+ )
293
+
294
+ if cudagraphs is None:
295
+ cudagraphs = BoxedBool(config.triton.cudagraphs)
296
+
297
+ # Inputs to fx_codegen_and_compile
298
+ # Anything that affects codegen should go here, so if the signature
299
+ # of fx_codegen_and_compile changes, the dict should be updated accordingly
300
+ graph_kwargs = {
301
+ "cudagraphs": cudagraphs,
302
+ "num_fixed": num_fixed,
303
+ "is_backward": is_backward,
304
+ "graph_id": graph_id,
305
+ "cpp_wrapper": cpp_wrapper,
306
+ "aot_mode": aot_mode,
307
+ "is_inference": is_inference,
308
+ "user_visible_outputs": user_visible_outputs,
309
+ "layout_opt": layout_opt,
310
+ "extern_node_serializer": extern_node_serializer,
311
+ }
312
+
313
+ start = time.time()
314
+
315
+ if config.fx_graph_cache and not aot_mode:
316
+ compiled_graph = FxGraphCache.load(
317
+ fx_codegen_and_compile, gm, example_inputs, graph_kwargs
318
+ )
319
+ else:
320
+ compiled_graph = fx_codegen_and_compile(
321
+ gm, example_inputs, **graph_kwargs # type: ignore[arg-type]
322
+ )
323
+
324
+ log.debug("FX codegen and compilation took %.3fs", time.time() - start)
325
+
326
+ # Return the output strides to the caller via TracingContext
327
+ context = torch._guards.TracingContext.try_get()
328
+ if context is not None and context.output_strides is not None:
329
+ assert len(context.output_strides) == 0
330
+ context.output_strides.extend(compiled_graph.output_strides)
331
+
332
+ if aot_mode:
333
+ return compiled_graph
334
+
335
+ if cudagraphs:
336
+ # output args are tuple of first argument
337
+ output = list(gm.graph.nodes)[-1]
338
+ assert len(output.args) == 1
339
+ stack_traces = [
340
+ (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
341
+ for arg in output.args[0]
342
+ ]
343
+
344
+ complex_memory_overlap_inputs = any(
345
+ complex_memory_overlap(t)
346
+ for t in example_inputs
347
+ if isinstance(t, torch.Tensor)
348
+ )
349
+
350
+ # doesn't work for non-trees because the warmup run would apply mutation twice
351
+ if config.triton.cudagraph_trees:
352
+ # checking if mutation is only on parameters/static inputs
353
+ has_mutation = not all(
354
+ idx < num_fixed for idx in compiled_graph.mutated_input_idxs
355
+ )
356
+ else:
357
+ has_mutation = len(compiled_graph.mutated_inputs) != 0
358
+
359
+ cudagraph_tests = [
360
+ (set(compiled_graph.device_types) == {"cuda"}, "non-cuda device in graph"),
361
+ (not has_mutation, "mutated inputs"),
362
+ (not has_incompatible_cudagraph_ops(gm), "incompatible ops"),
363
+ (not complex_memory_overlap_inputs, "complex memory overlap"),
364
+ (
365
+ all(
366
+ isinstance(t, (torch.Tensor, torch.SymInt)) for t in example_inputs
367
+ ),
368
+ "non-Tensor inputs",
369
+ ),
370
+ (
371
+ (
372
+ len(compiled_graph.device_idxs) == 1
373
+ or not config.triton.cudagraph_trees
374
+ ),
375
+ "multiple device indices without cudagraph_trees",
376
+ ),
377
+ ]
378
+ cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]
379
+
380
+ if not cudagraph_fail_reasons:
381
+ if not config.triton.cudagraph_trees:
382
+ # Force specialize all inputs so that CUDA graphs will work
383
+ for t in example_inputs:
384
+ if isinstance(t, torch.SymInt):
385
+ int(t) # guard
386
+
387
+ if (
388
+ boxed_forward_device_index is not None
389
+ and not is_inference
390
+ and not is_backward
391
+ ):
392
+ boxed_forward_device_index.set(next(iter(compiled_graph.device_idxs)))
393
+
394
+ compiled_graph.current_callable = cudagraphify(
395
+ compiled_graph.get_current_callable(),
396
+ example_inputs,
397
+ static_input_idxs=range(num_fixed),
398
+ device_index=next(iter(compiled_graph.device_idxs)),
399
+ stack_traces=stack_traces,
400
+ is_backward=is_backward,
401
+ is_inference=is_inference,
402
+ constants=tuple(compiled_graph.constants.values()),
403
+ )
404
+ else:
405
+ BoxedBool.disable(cudagraphs)
406
+
407
+ # See [Backward Generation Handling]
408
+ # if cudagraph'd the forward and set the device, we need to let the cudagraph manager
409
+ # know we are running the backward even if we will not run it in cudagraphs
410
+ if is_backward and config.triton.cudagraph_trees:
411
+ assert boxed_forward_device_index is not None
412
+ assert boxed_forward_device_index.value is not None
413
+ compiled_graph_callable = compiled_graph.get_current_callable()
414
+
415
+ manager = torch._inductor.cudagraph_trees.get_manager(
416
+ boxed_forward_device_index.value, create_if_none_exists=False
417
+ )
418
+ # should already exist from forward
419
+ assert manager is not None
420
+
421
+ def compiled_artifact(new_inputs):
422
+ manager.set_to_running_backward()
423
+ return compiled_graph_callable(new_inputs)
424
+
425
+ compiled_graph.current_callable = compiled_artifact
426
+
427
+ if "cuda" in compiled_graph.device_types:
428
+ perf_hint_log.warning(
429
+ "skipping cudagraphs due to %s", cudagraph_fail_reasons
430
+ )
431
+
432
+ # cudagraphs does its own aligning of inputs
433
+ if not cudagraphs:
434
+ new_callable = align_inputs(
435
+ compiled_graph.get_current_callable(), example_inputs, range(num_fixed)
436
+ )
437
+ if new_callable is not compiled_graph.get_current_callable():
438
+ compiled_graph.current_callable = new_callable
439
+
440
+ _step_logger()(
441
+ logging.INFO,
442
+ "torchinductor done compiling "
443
+ f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
444
+ f"graph {graph_id}",
445
+ )
446
+
447
+ # aot autograd needs to know to pass in inputs as a list
448
+ compiled_graph._boxed_call = True
449
+ return compiled_graph
450
+
451
+
452
+ def fx_codegen_and_compile(
453
+ gm: torch.fx.GraphModule,
454
+ example_inputs: List[torch.Tensor],
455
+ cudagraphs: Optional[BoxedBool] = None,
456
+ num_fixed: int = 0,
457
+ is_backward: bool = False,
458
+ graph_id: Optional[int] = None,
459
+ cpp_wrapper: bool = False,
460
+ aot_mode: bool = False,
461
+ is_inference: bool = False,
462
+ user_visible_outputs: FrozenSet[str] = frozenset(),
463
+ layout_opt: Optional[bool] = None,
464
+ extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
465
+ ) -> Union[CompiledFxGraph, str]:
466
+ if is_tf32_warning_applicable(gm):
467
+ _warn_tf32_disabled()
468
+
469
+ # lift the maximum depth of the Python interpreter stack
470
+ # to adapt large/deep models
471
+ sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000))
472
+
473
+ _step_logger()(
474
+ logging.INFO,
475
+ "torchinductor compiling "
476
+ f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
477
+ f"graph {graph_id}",
478
+ )
479
+ V.debug.fx_graph(gm, example_inputs)
480
+
481
+ shape_env = _shape_env_from_inputs(example_inputs)
482
+
483
+ # Convert view to reshape in the graph. This is necessary primarily for
484
+ # layout optimization. Do it unconditionally for uniformity.
485
+ #
486
+ # It's needed because when we do layout optimization, a contiguous tensor
487
+ # in eager mode may become a channels-last tensor. A view op that could
488
+ # previously be applied to the contiguous tensor may no longer be applicable
489
+ # to the channels-last tensor. An error like
490
+ # RuntimeError: view size is not compatible with input tensor's size and stride
491
+ # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
492
+ # will be printed.
493
+ #
494
+ # Replace view op to reshape op in this case.
495
+ # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this.
496
+ #
497
+ # Also this has to be done before FakeTensorProp below to avoid the failed
498
+ # .view() call.
499
+ view_to_reshape(gm)
500
+
501
+ # It is safe to run FakeTensorProp under no_grad because by the time
502
+ # we're in inductor, we assume that AOTAutograd has already "taken care"
503
+ # of autograd, so there should be no more autograd-related API's in the
504
+ # graph.
505
+ with torch.no_grad():
506
+ fake_mode = fake_tensor_prop(gm, example_inputs)
507
+
508
+ # pattern matcher passes might not preserve striding information
509
+ # on node.meta["val"]. If in the future we rely on these being
510
+ # correct, we will need to fix this.
511
+
512
+ with V.set_fake_mode(fake_mode):
513
+ # has some issues with memory in training
514
+ post_grad_passes(gm, is_inference=is_inference)
515
+ V.debug.fx_graph_transformed(gm, example_inputs)
516
+ post_grad_graphs_log.info("%s", lazy_format_graph_code("AFTER POST GRAD", gm))
517
+
518
+ with V.set_fake_mode(fake_mode):
519
+ graph = GraphLowering(
520
+ gm,
521
+ # example_inputs will be used by AOTInductor to dry-run the generated code for Triton kernel tuning.
522
+ # For the forward pass, we have the real inputs to be used as example_inputs. For the backward pass,
523
+ # we currently use fake tensors and defake them later.
524
+ example_inputs=V.real_inputs if is_inference else example_inputs,
525
+ shape_env=shape_env,
526
+ num_static_inputs=num_fixed,
527
+ graph_id=graph_id,
528
+ cpp_wrapper=cpp_wrapper,
529
+ aot_mode=aot_mode,
530
+ user_visible_outputs=user_visible_outputs,
531
+ extern_node_serializer=extern_node_serializer,
532
+ is_inference=is_inference,
533
+ )
534
+ with V.set_graph_handler(graph):
535
+ graph.run(*example_inputs)
536
+ output_strides: List[Optional[Tuple[int, ...]]] = []
537
+ if graph.graph_outputs is not None:
538
+ # We'll put the output strides in the compiled graph so we
539
+ # can later return them to the caller via TracingContext
540
+ for out in graph.graph_outputs:
541
+ if hasattr(out, "layout"):
542
+ output_strides.append(
543
+ tuple(
544
+ V.graph.sizevars.size_hint(s) for s in out.layout.stride
545
+ )
546
+ )
547
+ else:
548
+ output_strides.append(None)
549
+
550
+ compiled_fn = graph.compile_to_fn()
551
+
552
+ if V.aot_compilation is True:
553
+ return compiled_fn
554
+
555
+ if graph.disable_cudagraphs:
556
+ perf_hint_log.warning(
557
+ "skipping cudagraphs due to %s", V.graph.disable_cudagraphs_reason
558
+ )
559
+ BoxedBool.disable(cudagraphs)
560
+
561
+ compiled_graph = CompiledFxGraph(compiled_fn, graph, output_strides)
562
+
563
+ return compiled_graph
564
+
565
+
566
+ def clone_preserve_strides(x: torch.Tensor):
567
+ needed_size = (
568
+ sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1
569
+ )
570
+ buffer = torch.as_strided(x, (needed_size,), (1,)).clone()
571
+ return torch.as_strided(buffer, x.size(), x.stride())
572
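A brief, hedged illustration (not part of the diff above) of what clone_preserve_strides guarantees: unlike a plain .clone().contiguous(), the copy keeps the original stride layout while allocating fresh, aligned storage. The import assumes this file is importable as torch._inductor.compile_fx, as the surrounding paths suggest.

    import torch
    from torch._inductor.compile_fx import clone_preserve_strides

    x = torch.empty_strided((4, 4), (1, 4))   # column-major layout
    y = clone_preserve_strides(x)
    assert y.stride() == x.stride() == (1, 4)  # layout preserved
    assert y.data_ptr() != x.data_ptr()        # but storage is fresh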
+
573
+
574
+ def copy_misaligned_inputs(
575
+ new_inputs: List[torch.Tensor], check_inputs_idxs: Sequence[int]
576
+ ) -> None:
577
+ for i in check_inputs_idxs:
578
+ if new_inputs[i].data_ptr() % ALIGNMENT:
579
+ new_inputs[i] = clone_preserve_strides(new_inputs[i])
580
+
581
+
582
+ def get_input_idxs_to_check(
583
+ inputs: Union[List[torch.Tensor], Sequence[int]],
584
+ static_input_idxs: Sequence[int],
585
+ ) -> Sequence[int]:
586
+ def is_aligned(storage_offset, dtype):
587
+ return (storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0
588
+
589
+ ids_to_check = []
590
+ for i, input in enumerate(inputs):
591
+ if (
592
+ isinstance(input, torch.Tensor)
593
+ and (
594
+ i not in static_input_idxs
595
+ or not is_aligned(input.storage_offset(), input.dtype)
596
+ )
597
+ and input.device.type == "cuda"
598
+ ):
599
+ ids_to_check.append(i)
600
+ return ids_to_check
601
+
602
+
603
+ def align_inputs_from_check_idxs(
604
+ model: Callable[[List[torch.Tensor]], Any], inputs_to_check: Sequence[int]
605
+ ):
606
+ if len(inputs_to_check) == 0:
607
+ return model
608
+
609
+ def run(new_inputs):
610
+ copy_misaligned_inputs(new_inputs, inputs_to_check)
611
+ return model(new_inputs)
612
+
613
+ return run
614
+
615
+
616
+ def align_inputs(
617
+ model: Callable[[List[torch.Tensor]], Any],
618
+ inputs: List[torch.Tensor],
619
+ static_input_idxs: Sequence[int] = (),
620
+ ):
621
+ inputs_to_check = get_input_idxs_to_check(inputs, static_input_idxs)
622
+ return align_inputs_from_check_idxs(model, inputs_to_check)
623
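A minimal sketch of how the alignment wrapper behaves. The toy_model and the hand-built misaligned tensor are hypothetical; in the real path the index list comes from get_input_idxs_to_check and contains only CUDA tensors, whereas here we pass it directly.

    import torch
    from torch._inductor.compile_fx import align_inputs_from_check_idxs

    def toy_model(inputs):                     # hypothetical stand-in for compiled code
        return [t.data_ptr() for t in inputs]

    wrapped = align_inputs_from_check_idxs(toy_model, inputs_to_check=[0])

    base = torch.arange(20, dtype=torch.float32)
    # storage offset of one element -> pointer is 4 bytes past an aligned address
    misaligned = torch.as_strided(base, (4, 4), (4, 1), storage_offset=1)
    ptrs = wrapped([misaligned])               # index 0 is cloned (strides preserved)
                                               # if its pointer is not ALIGNMENT-divisible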
+
624
+
625
+ @dynamo_utils.dynamo_timed
626
+ def cudagraphify(
627
+ model: torch.fx.GraphModule,
628
+ inputs: List[torch.Tensor],
629
+ static_input_idxs: Sequence[int] = (),
630
+ *,
631
+ device_index: int,
632
+ stack_traces: List[Optional[str]],
633
+ is_backward: bool,
634
+ is_inference: bool,
635
+ constants: Tuple[torch.Tensor, ...] = (),
636
+ ):
637
+ from torch._inductor.cudagraph_trees import (
638
+ cudagraphify_impl as new_cudagraphify_impl,
639
+ )
640
+
641
+ cudagraphify_fn: Callable[..., Any]
642
+ if config.triton.cudagraph_trees:
643
+ cudagraphify_fn = functools.partial(
644
+ new_cudagraphify_impl,
645
+ device_index=device_index,
646
+ stack_traces=stack_traces,
647
+ is_backward=is_backward,
648
+ is_inference=is_inference,
649
+ constants=constants,
650
+ )
651
+ else:
652
+ cudagraphify_fn = cudagraphify_impl
653
+
654
+ # if using fake tensors, defer cudagraphs until we get real inputs at runtime
655
+ if not any(isinstance(inp, FakeTensor) for inp in inputs):
656
+ return cudagraphify_fn(model, inputs, static_input_idxs)
657
+
658
+ compiled_fn = None
659
+
660
+ def run(new_inputs):
661
+ nonlocal compiled_fn
662
+ if compiled_fn is None:
663
+ with dynamo_utils.preserve_rng_state():
664
+ compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs)
665
+ return compiled_fn(new_inputs)
666
+
667
+ return run
668
+
669
+
670
+ def remove_unaligned_input_idxs(
671
+ inputs: Union[List[torch.Tensor], Sequence[int]],
672
+ static_input_idxs: Sequence[int],
673
+ ):
674
+ """
675
+ We require all inputs to be aligned, so introduce a copy for any
676
+ that aren't.
677
+ """
678
+ aligned_static_input_idxs = []
679
+ for idx, input in zip(static_input_idxs, inputs):
680
+ if isinstance(input, torch.Tensor) and (input.data_ptr() % ALIGNMENT) == 0:
681
+ aligned_static_input_idxs.append(idx)
682
+ if len(aligned_static_input_idxs) != len(static_input_idxs):
683
+ return aligned_static_input_idxs
684
+ return static_input_idxs
685
+
686
+
687
+ def static_input(x: torch.Tensor):
688
+ """
689
+ Copy an input while preserving strides
690
+ """
691
+ # TODO(jansel): figure out why this version doesn't work:
692
+ # return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)
693
+ needed_size = (
694
+ sum((shape - 1) * stride for shape, stride in zip(x.size(), x.stride())) + 1
695
+ )
696
+ buffer = torch.empty(needed_size, dtype=x.dtype, device=x.device)
697
+ return torch.as_strided(buffer, x.size(), x.stride())
698
+
699
+
700
+ def index_expanded_dims_and_copy_(
701
+ dst: torch.Tensor,
702
+ src: torch.Tensor,
703
+ expanded_dims: List[int],
704
+ ):
705
+ "Index into expanded dimensions of both dst and src then copy_"
706
+ dst = index_expanded_dims(dst, expanded_dims)
707
+ src = index_expanded_dims(src, expanded_dims)
708
+ dst.copy_(src)
709
+
710
+
711
+ def cudagraphify_impl(
712
+ model: torch.fx.GraphModule,
713
+ inputs: List[torch.Tensor],
714
+ static_input_idxs: Sequence[int] = (),
715
+ ):
716
+ """
717
+ Assumes inputs[static_input_idxs[i]] always have the same memory address
718
+ """
719
+ check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
720
+ static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
721
+ copy_misaligned_inputs(inputs, check_input_idxs)
722
+
723
+ assert isinstance(inputs, list)
724
+
725
+ inps_expanded_dims = [
726
+ get_expanded_dims(x) if idx not in static_input_idxs else []
727
+ for idx, x in enumerate(inputs)
728
+ ]
729
+
730
+ # allocate static tensor inputs
731
+ static_inputs = [
732
+ x
733
+ if not isinstance(x, torch.Tensor)
734
+ else static_input(x)
735
+ if idx not in static_input_idxs
736
+ else x.detach()
737
+ for idx, x in enumerate(inputs)
738
+ ]
739
+
740
+ # copy over input values for fresh allocations
741
+ for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)):
742
+ if isinstance(x, torch.Tensor) and idx not in static_input_idxs:
743
+ index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims)
744
+
745
+ # warmup
746
+ torch.cuda.synchronize()
747
+ stream = torch.cuda.Stream()
748
+ stream.wait_stream(torch.cuda.current_stream())
749
+ # copy static_inputs because it will be cleared in model
750
+ with torch.cuda.stream(stream):
751
+ model(list(static_inputs))
752
+ stream.synchronize()
753
+ torch.cuda.current_stream().wait_stream(stream)
754
+ torch.cuda.synchronize()
755
+
756
+ # record
757
+ graph = torch.cuda.CUDAGraph()
758
+ with torch.cuda.graph(graph, stream=stream, capture_error_mode="thread_local"):
759
+ static_outputs = model(list(static_inputs))
760
+ if not isinstance(static_outputs, (list, tuple)):
761
+ static_outputs = (static_outputs,)
762
+
763
+ if config.size_asserts:
764
+
765
+ def run(new_inputs):
766
+ assert len(static_inputs) == len(new_inputs)
767
+ for idx, (dst, src, expanded_dims) in enumerate(
768
+ zip(static_inputs, new_inputs, inps_expanded_dims)
769
+ ):
770
+ if not isinstance(dst, torch.Tensor):
771
+ pass
772
+ elif idx in static_input_idxs:
773
+ assert dst.data_ptr() == src.data_ptr()
774
+ else:
775
+ # TODO - could make one single op of multiple slices
776
+ # and avoid dispatch.
777
+ # Could also pre-index the `dst` tensors
778
+ index_expanded_dims_and_copy_(dst, src, expanded_dims)
779
+ new_inputs.clear()
780
+ graph.replay()
781
+ return static_outputs
782
+
783
+ else:
784
+ copy_indices = [
785
+ idx for idx in range(len(static_inputs)) if idx not in static_input_idxs
786
+ ]
787
+
788
+ def run(new_inputs):
789
+ for idx in copy_indices:
790
+ expanded_dims = inps_expanded_dims[idx]
791
+ index_expanded_dims_and_copy_(
792
+ static_inputs[idx], new_inputs[idx], expanded_dims
793
+ )
794
+ new_inputs.clear()
795
+ graph.replay()
796
+ return static_outputs
797
+
798
+ return align_inputs_from_check_idxs(run, check_input_idxs)
799
+
800
+
801
+ def count_tangents(fx_g: torch.fx.GraphModule):
802
+ """
803
+ Infers which inputs are static for a backwards graph
804
+ """
805
+
806
+ def is_saved_tensor(x):
807
+ return (
808
+ "tangents" not in x.name
809
+ and "bwd_seed" not in x.name
810
+ and "bwd_base_offset" not in x.name
811
+ )
812
+
813
+ arg_count = 0
814
+ static_arg_idxs = []
815
+ for n in fx_g.graph.nodes:
816
+ if n.op == "placeholder":
817
+ if is_saved_tensor(n):
818
+ static_arg_idxs.append(arg_count)
819
+ arg_count += 1
820
+
821
+ assert static_arg_idxs == list(range(len(static_arg_idxs)))
822
+ return len(static_arg_idxs)
823
+
824
+
825
+ def compile_fx_aot(
826
+ model_: torch.fx.GraphModule,
827
+ example_inputs_: List[torch.Tensor],
828
+ inner_compile: Callable[..., Any] = compile_fx_inner,
829
+ config_patches: Optional[Dict[str, Any]] = None,
830
+ ):
831
+ config_patches: Dict[str, Any] = (
832
+ {"cpp_wrapper": True}
833
+ if config_patches is None
834
+ else {**config_patches, "cpp_wrapper": True}
835
+ )
836
+ if (
837
+ "aot_inductor.output_path" not in config_patches
838
+ and not config.aot_inductor.output_path
839
+ ):
840
+ config_patches = {
841
+ **config_patches,
842
+ "aot_inductor.output_path": code_hash(model_.code),
843
+ }
844
+
845
+ extern_node_serializer = config_patches.pop("extern_node_serializer", None)
846
+ with V.set_aot_compilation(True):
847
+ compiled_lib_path = compile_fx(
848
+ model_,
849
+ example_inputs_,
850
+ inner_compile=functools.partial(
851
+ inner_compile,
852
+ aot_mode=True,
853
+ extern_node_serializer=extern_node_serializer,
854
+ ),
855
+ config_patches=config_patches,
856
+ )
857
+ assert os.path.exists(
858
+ compiled_lib_path
859
+ ), f"AOTInductor compiled library does not exist at {compiled_lib_path}"
860
+ return compiled_lib_path
861
+
862
+
863
+ _graph_counter = count(0)
864
+
865
+
866
+ def fw_compiler_freezing(
867
+ aot_autograd_model: torch.fx.GraphModule,
868
+ aot_example_inputs: List[torch.Tensor],
869
+ dynamo_model: torch.fx.GraphModule,
870
+ num_example_inputs: int,
871
+ inner_compile: Callable[..., Any],
872
+ cudagraphs: BoxedBool,
873
+ graph_id: int,
874
+ forward_device: BoxedDeviceIndex,
875
+ ):
876
+ from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze
877
+
878
+ # partition_fn won't be called
879
+ joint_graph_passes(aot_autograd_model)
880
+
881
+ layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model, is_inference=True)
882
+ if layout_opt:
883
+ # make sure meta['val'] is properly setup
884
+ fake_tensor_prop(aot_autograd_model, aot_example_inputs, True)
885
+ convert_conv_weights_to_channels_last(aot_autograd_model)
886
+
887
+ opt_model, preserved_arg_indices = freeze(
888
+ dynamo_model,
889
+ aot_autograd_model,
890
+ aot_example_inputs, # type: ignore[arg-type]
891
+ )
892
+
893
+ aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices]
894
+ num_fixed = len(preserved_arg_indices) - num_example_inputs
895
+
896
+ fake_mode = detect_fake_mode(aot_example_inputs)
897
+
898
+ # for freezing, all graph outputs should be user visible
899
+ *_, model_outputs_node = opt_model.graph.nodes
900
+ model_outputs = model_outputs_node.args[0]
901
+ user_visible_outputs = [
902
+ n.name for n in model_outputs if isinstance(n, torch.fx.Node)
903
+ ]
904
+
905
+ # constant params will be real tensors, not fake
906
+ tracing_context = torch._guards.TracingContext.try_get()
907
+ if tracing_context is not None:
908
+ params_flat = tracing_context.params_flat
909
+ assert params_flat is not None
910
+ for i in range(len(params_flat)):
911
+ if i not in preserved_arg_indices:
912
+ params_flat[i] = None
913
+
914
+ with mock.patch.object(fake_mode, "allow_non_fake_inputs", True):
915
+ optimized_function = inner_compile(
916
+ opt_model,
917
+ aot_example_inputs,
918
+ num_fixed=num_fixed,
919
+ cudagraphs=cudagraphs,
920
+ graph_id=graph_id,
921
+ is_inference=True,
922
+ boxed_forward_device_index=forward_device,
923
+ layout_opt=layout_opt,
924
+ user_visible_outputs=user_visible_outputs,
925
+ )
926
+
927
+ # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper
928
+ # that drops constant-ified params
929
+ if V.aot_compilation is True:
930
+ return optimized_function
931
+
932
+ def wrapper(args):
933
+ args_new = [args[i] for i in preserved_arg_indices]
934
+ args.clear()
935
+ return optimized_function(args_new)
936
+
937
+ wrapper._boxed_call = True # type: ignore[attr-defined]
938
+
939
+ return wrapper
940
+
941
+
942
+ def compile_fx(
943
+ model_: torch.fx.GraphModule,
944
+ example_inputs_: List[torch.Tensor],
945
+ inner_compile: Callable[..., Any] = compile_fx_inner,
946
+ config_patches: Optional[Dict[str, Any]] = None,
947
+ decompositions: Optional[Dict[OpOverload, Callable[..., Any]]] = None,
948
+ ):
949
+ """Main entrypoint to a compile given FX graph"""
950
+ if config_patches:
951
+ with config.patch(config_patches):
952
+ return compile_fx(
953
+ model_,
954
+ example_inputs_,
955
+ # need extra layer of patching as backwards is compiled out of scope
956
+ inner_compile=config.patch(config_patches)(inner_compile),
957
+ decompositions=decompositions,
958
+ )
959
+
960
+ if config.cpp_wrapper:
961
+ with config.patch(
962
+ {
963
+ "cpp_wrapper": False,
964
+ "triton.autotune_cublasLt": False,
965
+ "triton.cudagraphs": False,
966
+ "triton.store_cubin": True,
967
+ }
968
+ ), V.set_real_inputs(example_inputs_):
969
+ inputs_ = example_inputs_
970
+ if isinstance(model_, torch.fx.GraphModule):
971
+ fake_inputs = [
972
+ node.meta.get("val")
973
+ for node in model_.graph.nodes
974
+ if node.op == "placeholder"
975
+ ]
976
+ if all(v is not None for v in fake_inputs):
977
+ # Validate devices before switching to fake tensors.
978
+ for idx, fi, i in zip(count(), fake_inputs, inputs_):
979
+ if fi.device != i.device:
980
+ raise ValueError(
981
+ f"Device mismatch between fake input and example input at position #{idx}: "
982
+ f"{fi.device} vs {i.device}. If the model was exported via torch.export(), "
983
+ "make sure torch.export() and torch.aot_compile() run on the same device."
984
+ )
985
+ inputs_ = fake_inputs
986
+ return compile_fx(
987
+ model_,
988
+ inputs_,
989
+ inner_compile=functools.partial(inner_compile, cpp_wrapper=True),
990
+ decompositions=decompositions,
991
+ )
992
+
993
+ recursive_compile_fx = functools.partial(
994
+ compile_fx,
995
+ inner_compile=inner_compile,
996
+ decompositions=decompositions,
997
+ )
998
+
999
+ if not graph_returns_tuple(model_):
1000
+ return make_graph_return_tuple(
1001
+ model_,
1002
+ example_inputs_,
1003
+ recursive_compile_fx,
1004
+ )
1005
+
1006
+ if isinstance(model_, torch.fx.GraphModule):
1007
+ if isinstance(model_.graph._codegen, _PyTreeCodeGen):
1008
+ # this graph is the result of dynamo.export()
1009
+ return handle_dynamo_export_graph(
1010
+ model_,
1011
+ example_inputs_,
1012
+ recursive_compile_fx,
1013
+ )
1014
+
1015
+ model_ = pre_grad_passes(model_, example_inputs_)
1016
+
1017
+ if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):
1018
+ return flatten_graph_inputs(
1019
+ model_,
1020
+ example_inputs_,
1021
+ recursive_compile_fx,
1022
+ )
1023
+
1024
+ assert not config._raise_error_for_testing
1025
+ num_example_inputs = len(example_inputs_)
1026
+ cudagraphs = BoxedBool(config.triton.cudagraphs)
1027
+ forward_device = BoxedDeviceIndex(None)
1028
+
1029
+ graph_id = next(_graph_counter)
1030
+
1031
+ decompositions = (
1032
+ decompositions if decompositions is not None else select_decomp_table()
1033
+ )
1034
+
1035
+ @dynamo_utils.dynamo_timed
1036
+ def fw_compiler_base(
1037
+ model: torch.fx.GraphModule,
1038
+ example_inputs: List[torch.Tensor],
1039
+ is_inference: bool,
1040
+ ):
1041
+ if is_inference:
1042
+ # partition_fn won't be called
1043
+ joint_graph_passes(model)
1044
+
1045
+ num_rng_seed_offset_inputs = 2 if functorch_config.functionalize_rng_ops else 0
1046
+ fixed = len(example_inputs) - num_example_inputs - num_rng_seed_offset_inputs
1047
+ user_visible_outputs = set()
1048
+
1049
+ if config.keep_output_stride:
1050
+ *_, model_outputs_node = model.graph.nodes
1051
+ assert model_outputs_node.op == "output"
1052
+ model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
1053
+ num_model_outputs = len(model_outputs)
1054
+
1055
+ context = torch._guards.TracingContext.try_get()
1056
+ # See Note [User Outputs in the inductor graph]
1057
+ if context is not None and context.fw_metadata and not is_inference:
1058
+ original_output_start_index = (
1059
+ context.fw_metadata.num_mutated_inp_runtime_indices
1060
+ )
1061
+ else:
1062
+ original_output_start_index = 0
1063
+
1064
+ if isinstance(model_, torch.fx.GraphModule):
1065
+ *_, orig_model_outputs_node = model_.graph.nodes
1066
+ assert orig_model_outputs_node.op == "output"
1067
+ orig_model_outputs, _ = pytree.tree_flatten(
1068
+ orig_model_outputs_node.args
1069
+ )
1070
+ num_orig_model_outputs = len(orig_model_outputs)
1071
+ else:
1072
+ num_orig_model_outputs = num_model_outputs
1073
+
1074
+ assert num_orig_model_outputs <= num_model_outputs
1075
+
1076
+ # Note [User Outputs in the inductor graph]
1077
+ # We make the following assumptions:
1078
+ # For inference
1079
+ # len(orig_model_outputs) == len(model_outputs)
1080
+ # For training
1081
+ # len(orig_model_outputs) <= len(model_outputs)
1082
+ # During training, most of the time model_outputs starts with the
1083
+ # original module's outputs, followed by saved activations.
1084
+ # But this may not be true if the model has in-place updated tensors:
1085
+ # AOTAutograd will make those tensors be returned before the original
1086
+ # module's outputs.
1087
+ # To be safe, we use the original_output_start_index field
1088
+ # set by AOTAutograd to decide where the original module outputs start.
1089
+ orig_output_end_idx = original_output_start_index + num_orig_model_outputs
1090
+ # Sanity check: we are about to splice out the "user" outputs from the full set
1091
+ # of "graph" outputs. Make sure we're within bounds.
1092
+ assert orig_output_end_idx <= num_model_outputs
1093
+
1094
+ user_visible_outputs = {
1095
+ n.name
1096
+ for n in model_outputs[original_output_start_index:orig_output_end_idx]
1097
+ if isinstance(n, torch.fx.Node)
1098
+ }
1099
+
1100
+ return inner_compile(
1101
+ model,
1102
+ example_inputs,
1103
+ num_fixed=fixed,
1104
+ cudagraphs=cudagraphs,
1105
+ graph_id=graph_id,
1106
+ is_inference=is_inference,
1107
+ boxed_forward_device_index=forward_device,
1108
+ user_visible_outputs=user_visible_outputs,
1109
+ )
1110
+
1111
+ fw_compiler = functools.partial(fw_compiler_base, is_inference=False)
1112
+
1113
+ if config.freezing and not torch.is_grad_enabled():
1114
+ inference_compiler = functools.partial(
1115
+ fw_compiler_freezing,
1116
+ dynamo_model=model_,
1117
+ num_example_inputs=num_example_inputs,
1118
+ inner_compile=inner_compile,
1119
+ cudagraphs=cudagraphs,
1120
+ graph_id=graph_id,
1121
+ forward_device=forward_device,
1122
+ )
1123
+ else:
1124
+ inference_compiler = functools.partial(fw_compiler_base, is_inference=True)
1125
+
1126
+ def partition_fn(graph, joint_inputs, **kwargs):
1127
+ joint_graph_passes(graph)
1128
+ return min_cut_rematerialization_partition(
1129
+ graph, joint_inputs, **kwargs, compiler="inductor"
1130
+ )
1131
+
1132
+ @dynamo_utils.dynamo_timed
1133
+ def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
1134
+ fixed = count_tangents(model)
1135
+ return inner_compile(
1136
+ model,
1137
+ example_inputs,
1138
+ num_fixed=fixed,
1139
+ cudagraphs=cudagraphs,
1140
+ is_backward=True,
1141
+ graph_id=graph_id,
1142
+ boxed_forward_device_index=forward_device,
1143
+ )
1144
+
1145
+ # TODO: can add logging before/after the call to create_aot_dispatcher_function
1146
+ # in torch._functorch/aot_autograd.py::aot_module_simplified::aot_function_simplified::new_func
1147
+ # once torchdynamo is merged into pytorch
1148
+
1149
+ fake_mode = detect_fake_mode(example_inputs_) or torch._subclasses.FakeTensorMode(
1150
+ allow_non_fake_inputs=True
1151
+ )
1152
+ tracing_context = (
1153
+ torch._guards.TracingContext.try_get()
1154
+ or torch._guards.TracingContext(fake_mode)
1155
+ )
1156
+
1157
+ if V.aot_compilation is True:
1158
+ gm, graph_signature = aot_export_module(
1159
+ model_, example_inputs_, trace_joint=False, decompositions=decompositions
1160
+ )
1161
+ unlifted_gm = _unlift_graph(model_, gm, graph_signature)
1162
+ with V.set_fake_mode(fake_mode), compiled_autograd.disable():
1163
+ return inference_compiler(unlifted_gm, example_inputs_)
1164
+
1165
+ with V.set_fake_mode(fake_mode), torch._guards.tracing(
1166
+ tracing_context
1167
+ ), compiled_autograd.disable():
1168
+ return aot_autograd(
1169
+ fw_compiler=fw_compiler,
1170
+ bw_compiler=bw_compiler,
1171
+ inference_compiler=inference_compiler,
1172
+ decompositions=decompositions,
1173
+ partition_fn=partition_fn,
1174
+ keep_inference_input_mutations=True,
1175
+ )(model_, example_inputs_)
1176
+
1177
+
1178
+ # pass config dict back to user
1179
+ def get_patched_config_dict(config_patches=None):
1180
+ with config.patch(config_patches):
1181
+ return config.get_config_copy()
1182
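A hedged usage sketch for the helper above: it returns the effective Inductor config with some overrides applied, without permanently mutating the global config. The flat "max_autotune" key is an assumption about the dict returned by config.get_config_copy().

    from torch._inductor.compile_fx import get_patched_config_dict

    cfg = get_patched_config_dict(config_patches={"max_autotune": True})
    assert cfg["max_autotune"] is True   # override visible only in the returned copy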
+
1183
+
1184
+ def _shape_env_from_inputs(inputs: List[torch.Tensor]):
1185
+ shape_env = None
1186
+ fake_mode = detect_fake_mode(inputs)
1187
+
1188
+ # TODO(voz): It would be nice to enable this assert, but there are lots of tests that
1189
+ # pass in real inputs for now.
1190
+ # if len(inputs) > 0:
1191
+ # assert fake_mode is not None, breakpoint()
1192
+
1193
+ if fake_mode is not None:
1194
+ return fake_mode.shape_env
1195
+
1196
+ # When there are no tensor inputs, get shape_env from the first SymInt.
1197
+ for input in inputs:
1198
+ if isinstance(input, torch.SymInt):
1199
+ return input.node.shape_env
1200
+
1201
+ # TODO(voz): Should we always have one anyway?
1202
+ return None
1203
+
1204
+
1205
+ def output_node(gm: torch.fx.GraphModule):
1206
+ """Get the output node from an FX graph"""
1207
+ last_node = next(iter(reversed(gm.graph.nodes)))
1208
+ assert last_node.op == "output"
1209
+ return last_node
1210
+
1211
+
1212
+ def graph_returns_tuple(gm: torch.fx.GraphModule):
1213
+ """True if a FX graph returns a tuple"""
1214
+ if not isinstance(gm, torch.fx.GraphModule):
1215
+ return True # can't check this, assume true
1216
+ (rv,) = output_node(gm).args
1217
+ if isinstance(rv, (list, tuple)):
1218
+ return True
1219
+ if (
1220
+ isinstance(rv, torch.fx.node.Node)
1221
+ and hasattr(rv.target, "_schema")
1222
+ and len(rv.target._schema.returns) > 1
1223
+ and all(str(ret.type) == "Tensor" for ret in rv.target._schema.returns)
1224
+ ):
1225
+ # for graphs whose result is one node with multiple outputs
1226
+ return True
1227
+ return False
1228
+
1229
+
1230
+ def make_graph_return_tuple(
1231
+ gm: torch.fx.GraphModule,
1232
+ inputs: List[torch.Tensor],
1233
+ compile_gm: Callable[..., Any],
1234
+ ):
1235
+ """
1236
+ Mutate gm so it returns a tuple. This is only needed for graphs
1237
+ not created by torchdynamo that return non-tuples.
1238
+ """
1239
+ node = output_node(gm)
1240
+ (rv,) = node.args
1241
+ rv, spec = pytree.tree_flatten(rv)
1242
+ with gm.graph.inserting_before(node):
1243
+ gm.graph.output(rv)
1244
+ gm.graph.erase_node(node)
1245
+ assert graph_returns_tuple(gm)
1246
+
1247
+ compiled_fn = compile_gm(gm, inputs)
1248
+
1249
+ @functools.wraps(compiled_fn)
1250
+ def wrapper(*args, **kwargs):
1251
+ return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)
1252
+
1253
+ return wrapper
1254
+
1255
+
1256
+ def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
1257
+ """
1258
+ Mutate inputs so that they are flat and wrap gm such that it
1259
+ accepts those inputs. This is only needed for graphs not created
1260
+ by torchdynamo that take bumpy inputs.
1261
+ """
1262
+ inputs, spec = pytree.tree_flatten(inputs)
1263
+
1264
+ class GmWrapper(torch.nn.Module):
1265
+ def __init__(self):
1266
+ super().__init__()
1267
+ self.gm = gm
1268
+
1269
+ def forward(self, *args):
1270
+ args: List[Any] = list(args)
1271
+ return self.gm(*pytree.tree_unflatten(args, spec))
1272
+
1273
+ compiled_fn = compile_gm(GmWrapper(), inputs)
1274
+
1275
+ @functools.wraps(compiled_fn)
1276
+ def wrapper(*args):
1277
+ # note this doesn't check the spec, assuming it is the same
1278
+ return compiled_fn(*pytree.arg_tree_leaves(*args))
1279
+
1280
+ return wrapper
1281
+
1282
+
1283
+ def handle_dynamo_export_graph(
1284
+ gm: torch.fx.GraphModule,
1285
+ inputs: List[torch.Tensor],
1286
+ compile_gm: Callable[..., Any],
1287
+ ):
1288
+ """
1289
+ `torch._dynamo.export` embeds pytrees in the FX graph codegen object,
1290
+ convert that to a normal FX graph so inductor can compile it.
1291
+ """
1292
+ codegen = gm.graph._codegen
1293
+ gm.graph._codegen = torch.fx.graph.CodeGen()
1294
+ gm.recompile()
1295
+
1296
+ compiled_fn = compile_gm(gm, codegen.process_inputs(*inputs))
1297
+
1298
+ @functools.wraps(compiled_fn)
1299
+ def wrapper(*args):
1300
+ return codegen.process_outputs(compiled_fn(*codegen.process_inputs(*args)))
1301
+
1302
+ return wrapper
env-llmeval/lib/python3.10/site-packages/torch/_inductor/config.py ADDED
@@ -0,0 +1,664 @@
1
+ import os # noqa: C101
2
+ import sys
3
+ from typing import Any, Dict, TYPE_CHECKING
4
+
5
+ import torch
6
+
7
+ # add some debug printouts
8
+ debug = False
9
+
10
+ # add inf and NaN checkers
11
+ debug_check_inf_and_nan = False
12
+
13
+ # Whether to disable a progress bar for autotuning
14
+ disable_progress = True
15
+
16
+ # Whether to enable printing the source code for each future
17
+ verbose_progress = False
18
+
19
+ # use fx aot graph codegen cache
20
+ fx_graph_cache = os.environ.get("TORCHINDUCTOR_FX_GRAPH_CACHE") == "1"
21
+
22
+ # use cpp wrapper instead of python wrapper
23
+ cpp_wrapper = False
24
+
25
+ # dead code elimination
26
+ dce = False
27
+
28
+ # assume weight tensors are fixed size
29
+ static_weight_shapes = True
30
+
31
+ # put correctness assertions in generated code
32
+ size_asserts = os.environ.get("TORCHINDUCTOR_SIZE_ASSERTS", "1") == "1"
33
+ nan_asserts = os.environ.get("TORCHINDUCTOR_NAN_ASSERTS") == "1"
34
+
35
+ # enable loop reordering based on input orders
36
+ pick_loop_orders = True
37
+
38
+ # reuse a kernel input as the output
39
+ inplace_buffers = True
40
+
41
+ # reuse a buffer for an unrelated purpose
42
+ allow_buffer_reuse = True
43
+
44
+ # Enable pooled allocations for non-output tensors
45
+ memory_planning = os.environ.get("TORCHINDUCTOR_MEMORY_PLANNING", "0") == "1"
46
+
47
+ # How to organize memory under memory_planning=True:
48
+ # - "none": do not try to pool storage, just reuse
49
+ # - "intermediates": all non-outputs share storage, outputs each get unique storage
50
+ # - "outputs": two pools, one for intermediates (freed on return) and one for outputs
51
+ # - "combined": a single pool for both intermediates and outputs
52
+ memory_pool = os.environ.get("TORCHINDUCTOR_MEMORY_POOL", "intermediates")
53
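A minimal usage sketch for the two memory-planning knobs above; the non-default values and the tiny module passed to torch.compile are my own illustrative choices, not recommendations from this file.

    import torch
    import torch._inductor.config as inductor_config

    inductor_config.memory_planning = True    # enable pooled allocations
    inductor_config.memory_pool = "outputs"   # one pool for intermediates, one for outputs

    compiled = torch.compile(torch.nn.Linear(8, 8))   # hypothetical tiny module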
+
54
+ # codegen benchmark harness
55
+ benchmark_harness = True
56
+
57
+ # fuse pointwise into templates
58
+ epilogue_fusion = True
59
+
60
+ # do epilogue fusions before other fusions
61
+ epilogue_fusion_first = False
62
+
63
+ # enable pattern match+replace optimizations
64
+ pattern_matcher = True
65
+
66
+ # register custom graph optimization pass hook. so far, pre/post passes are
67
+ # only applied before/after pattern_matcher in post_grad_passes.
68
+ #
69
+ # def my_custom_pre_pass(graph: torch.fx.graph.Graph):
70
+ # # my custom graph optimization pass
71
+ # ...
72
+ #
73
+ # def my_custom_post_pass(graph: torch.fx.graph.Graph):
74
+ # # my custom graph optimization pass
75
+ # ...
76
+ #
77
+ # torch._inductor.config.post_grad_custom_pre_pass = my_custom_pre_pass
78
+ # torch._inductor.config.post_grad_custom_post_pass = my_custom_post_pass
79
+ post_grad_custom_pre_pass = None
80
+ post_grad_custom_post_pass = None
81
+
82
+ # Registers a custom pregrad pass. Note that the pre-grad IR is 1.
83
+ # non-functional, 2. non-normalized, and 3. prone to change. Ideally we should
84
+ # use post-grad passes.
85
+ pre_grad_custom_pass = None
86
+
87
+ # Optimize away split cat patterns (Experimental)
88
+ split_cat_fx_passes = True
89
+
90
+ # Optimize conv-batchnorm if batchnorm is in eval mode. Slightly reduces numerical stability.
91
+ efficient_conv_bn_eval_fx_passes = False
92
+
93
+ # Deprecated
94
+ group_fusion = False
95
+
96
+ # Deprecated
97
+ batch_fusion = True
98
+
99
+ # Pre grad group/batch fusion and options in order, set to empty dict to disable fusion.
100
+ # Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions()` to see available fusions.
101
+ pre_grad_fusion_options: Dict[str, Dict[str, Any]] = {
102
+ "batch_linear": {},
103
+ "batch_linear_lhs": {},
104
+ "batch_layernorm": {},
105
+ "batch_tanh": {},
106
+ "batch_relu": {},
107
+ "batch_sigmoid": {},
108
+ }
109
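A hedged sketch of how the dict above is meant to be overridden, per its own comment: keep a subset of the listed fusions, or set it to an empty dict to disable pre-grad group/batch fusion entirely. The particular subset chosen here is arbitrary.

    import torch._inductor.config as inductor_config

    # keep only two of the fusions listed above (illustrative choice)
    inductor_config.pre_grad_fusion_options = {"batch_linear": {}, "batch_layernorm": {}}
    # or disable pre-grad group/batch fusion entirely:
    # inductor_config.pre_grad_fusion_options = {}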
+
110
+ # Post grad group/batch fusion and options, set to empty dict to disable fusion.
111
+ # Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions(False)` to see available fusions.
112
+ post_grad_fusion_options: Dict[str, Dict[str, Any]] = {}
113
+
114
+ # enable reordering pass for improving memory locality
115
+ reorder_for_locality = True
116
+
117
+ # Scale down RBLOCK for better occupancy
118
+ dynamic_scale_rblock = os.environ.get("TORCHINDUCTOR_DYNAMIC_SCALE_RBLOCK", "1") == "1"
119
+
120
+ # this forces fusion for int_mm with mul. Needed when you want to avoid realizing the int32
121
+ # but the mul gets fused with other pointwise ops instead.
122
+ force_fuse_int_mm_with_mul = False
123
+
124
+ # for pattern torch.mm(a, b.to(dtype)) with cuda tensors,
125
+ # enable torch._inductor.kernel.mm.tuned_mixed_mm fused kernel.
126
+ # Autotune will compare perf with normal cast->then->mm option
127
+ use_mixed_mm = False
128
+
129
+ # for pattern torch.mm(a, b.to(dtype)) with cuda tensors, always use
130
+ # torch._inductor.kernel.mm.tuned_mixed_mm's fused kernel.
131
+ # Autotune will not compare with normal cast->then->mm option.
132
+ # (if force_mixed_mm is true, the use_mixed_mm flag will be ignored)
133
+ force_mixed_mm = False
134
+
135
+ # enable reordering pass for increasing overlap between compute and communication
136
+ reorder_for_compute_comm_overlap = False
137
+
138
+ # passes (in execution order) for increasing overlap between compute and communication
139
+ # for built-in passes, use string name; for user-defined passes, pass in the function handle
140
+ reorder_for_compute_comm_overlap_passes = [
141
+ "reorder_compute_for_overlap",
142
+ "sink_waits",
143
+ "raise_comms",
144
+ ]
145
+
146
+ # runtime estimation function for ops
147
+ # for built-in estimation function, pass in "default"; for user-defined estimation function, pass in the function handle
148
+ estimate_op_runtime = "default"
149
+
150
+ # unit: GB/s, uni-directional P2P bandwidth per card
151
+ # default value is NVLink
152
+ intra_node_bw = 300
153
+
154
+ # unit: GB/s, uni-directional P2P bandwidth per node
155
+ # default value is InfiniBand
156
+ inter_node_bw = 25
157
+
158
+ # enable slow autotuning passes to select algorithms
159
+ max_autotune = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE") == "1"
160
+
161
+ # enable slow autotuning passes to select pointwise/reductions algorithms
162
+ max_autotune_pointwise = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE") == "1"
163
+
164
+ # enable slow autotuning passes to select gemm algorithms
165
+ max_autotune_gemm = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_GEMM") == "1"
166
+
167
+ # Specify candidate backends for gemm autotune.
168
+ # Possible choices are combinations of: ATen, Triton, CUTLASS.
169
+ # ATen: default Pytorch ATen kernels.
170
+ # Triton: Triton templates defined in torch inductor.
171
+ # CUTLASS: Cutlass templates and kernels.
172
+ max_autotune_gemm_backends = os.environ.get(
173
+ "TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS", "ATEN,TRITON"
174
+ ).upper()
175
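A hedged usage sketch for the gemm autotune knobs above. The backend string is read from the environment at import time, so it can be exported before launching Python (e.g. TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS="ATEN,TRITON,CUTLASS" python train.py, where train.py is a hypothetical script), or overridden afterwards from Python:

    import torch._inductor.config as inductor_config

    inductor_config.max_autotune_gemm = True                        # turn on gemm autotuning
    inductor_config.max_autotune_gemm_backends = "ATEN,TRITON,CUTLASS"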
+
176
+ # the value used as a fallback for the unbacked SymInts
177
+ # that can appear in the input shapes (e.g., in autotuning)
178
+ unbacked_symint_fallback = 8192
179
+
180
+ # enable searching global and local cache regardless of `max_autotune`
181
+ search_autotune_cache = os.environ.get("TORCHINDUCTOR_SEARCH_AUTOTUNE_CACHE") == "1"
182
+
183
+ save_args = os.environ.get("TORCHINDUCTOR_SAVE_ARGS") == "1"
184
+
185
+ # We will disable creating subprocess for autotuning if this is False
186
+ autotune_in_subproc = os.environ.get("TORCHINDUCTOR_AUTOTUNE_IN_SUBPROC") == "1"
187
+
188
+ # If autotuning in subprocess, whether to use multiple devices
189
+ autotune_multi_device = os.environ.get("TORCHINDUCTOR_AUTOTUNE_MULTI_DEVICE") == "1"
190
+
191
+ coordinate_descent_tuning = (
192
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_TUNING") == "1"
193
+ )
194
+ coordinate_descent_check_all_directions = (
195
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_CHECK_ALL_DIRECTIONS") == "1"
196
+ )
197
+ coordinate_descent_search_radius = int(
198
+ os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_RADIUS", "1")
199
+ )
200
+
201
+ layout_optimization = os.environ.get("TORCHINDUCTOR_LAYOUT_OPTIMIZATION", "1") == "1"
202
+
203
+
204
+ force_layout_optimization = os.environ.get("TORCHINDUCTOR_FORCE_LAYOUT_OPT", "0") == "1"
205
+
206
+
207
+ # Whether to keep the output strides the same as eager after layout optimization.
208
+ keep_output_stride = os.environ.get("TORCHINDUCTOR_KEEP_OUTPUT_STRIDE", "1") == "1"
209
+
210
+ # Enabling this will let the compiler print warning messages if a generated triton
212
+ # kernel has inputs with mixed layouts. This is helpful for perf debugging
213
+ # since a kernel with mixed-layout inputs may run much slower than one whose inputs
213
+ # have uniform layouts.
214
+ warn_mix_layout = os.environ.get("TORCHINDUCTOR_WARN_MIX_LAYOUT") == "1"
215
+
216
+ # control store vs recompute heuristic
217
+ # For fanouts, rematerialization can lead to exponential blowup, so use a
218
+ # smaller threshold.
219
+ realize_reads_threshold = 4
220
+ realize_bytes_threshold = 2000
221
+
222
+ # Threshold to prevent excessive accumulation of ops in one buffer during lowering
223
+ realize_acc_reads_threshold = 8
224
+
225
+ # fallback to eager for random/dropout, this is slow but useful for debugging
226
+ fallback_random = False
227
+
228
+ # automatically create fallbacks when encountering an unhandled op
229
+ implicit_fallbacks = True
230
+
231
+ # fuse even in cases without common reads
232
+ aggressive_fusion = False
233
+
234
+ # For each fused kernel in the wrapper, comment with the nodes that get fused.
235
+ # Useful for debugging fusion.
236
+ debug_fusion = os.environ.get("TORCHINDUCTOR_DEBUG_FUSION") == "1"
237
+ benchmark_fusion = os.environ.get("TORCHINDUCTOR_BENCHMARK_FUSION") == "1"
238
+ enabled_metric_tables = os.environ.get("TORCHINDUCTOR_ENABLED_METRIC_TABLES", "")
239
+
240
+ # how many nodes to allow into a single fusion
241
+ max_fusion_size = 64
242
+
243
+ # max number of inputs to generate cat as a pointwise op with masked laods
244
+ max_pointwise_cat_inputs = 4
245
+
246
+ # replace small reductions with pointwise, disable with `= 1`
247
+ unroll_reductions_threshold = 8
248
+
249
+ # Add extra comments to output code (causes compile cache misses)
250
+ comment_origin = False
251
+
252
+ # Convert 1x1 convs into matmuls
253
+ conv_1x1_as_mm = False
254
+
255
+ # Enable split reductions for better utilization when the dimension
256
+ # being reduced over is large (by splitting it)
257
+ split_reductions = True
258
+
259
+ benchmark_kernel = os.environ.get("TORCHINDUCTOR_BENCHMARK_KERNEL", "0") == "1"
260
+
261
+ # Enable constant and index_expr folding
262
+ constant_and_index_propagation = True
263
+
264
+ # we always add constants into graph.constants without
265
+ # performing any constant-inlining optimization
266
+ always_keep_tensor_constants = False
267
+
268
+ # assert that indirect indexing does not read / write out of bounds
269
+ assert_indirect_indexing = True
270
+
271
+
272
+ def is_fbcode():
273
+ return not hasattr(torch.version, "git_version")
274
+
275
+
276
+ # constant folding on the joint graph
277
+ joint_graph_constant_folding = True
278
+
279
+ # Enable indirect_indexing asserts for decompositions and lowerings
280
+ debug_index_asserts = False
281
+
282
+ # warnings intended for PyTorch developers, disable for point releases
283
+ is_nightly_or_source = "dev" in torch.__version__ or "git" in torch.__version__
284
+ developer_warnings = is_fbcode() or is_nightly_or_source
285
+
286
+ # The multiprocessing start method to use for inductor workers in the codecache.
287
+ # TODO: fork is not safe in a multithreaded environment, we should evaluate changing
288
+ # the default to spawn.
289
+ worker_start_method = "fork"
290
+
291
+
292
+ def decide_compile_threads():
293
+ """
294
+ Here is the precedence for deciding compile_threads:
295
+ 1. The user can override it via TORCHINDUCTOR_COMPILE_THREADS. One may want to disable async compiling by
296
+ setting this to 1 to make pdb happy.
297
+ 2. Set to 1 on the win32 platform or in an fbcode build.
298
+ 3. Otherwise, decide by the number of CPU cores.
299
+ """
300
+ if "TORCHINDUCTOR_COMPILE_THREADS" in os.environ:
301
+ return int(os.environ["TORCHINDUCTOR_COMPILE_THREADS"])
302
+ elif sys.platform == "win32" or is_fbcode():
303
+ return 1
304
+ else:
305
+ cpu_count = (
306
+ len(os.sched_getaffinity(0))
307
+ if hasattr(os, "sched_getaffinity")
308
+ else os.cpu_count()
309
+ )
310
+ assert cpu_count
311
+ return min(32, cpu_count)
312
+
313
+
314
+ compile_threads = decide_compile_threads()
315
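A hedged sketch of point 1 in the docstring above: the override has to be in the environment before torch._inductor.config is imported, since compile_threads is computed at import time.

    import os
    os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1"   # serialize compiles (pdb-friendly)

    import torch._inductor.config as inductor_config
    print(inductor_config.compile_threads)              # -> 1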
+
316
+ # gemm autotuning global cache dir
317
+ if is_fbcode():
318
+ from libfb.py import parutil
319
+
320
+ try:
321
+ if __package__:
322
+ global_cache_dir = parutil.get_dir_path(
323
+ os.path.join(__package__.replace(".", os.sep), "fb/cache")
324
+ )
325
+ else:
326
+ global_cache_dir = parutil.get_dir_path("fb/cache")
327
+ except ValueError:
328
+ global_cache_dir = None
329
+ else:
330
+ global_cache_dir = None
331
+
332
+ # If kernel is fused, the name is generated from the origin node op names
333
+ # for larger kernels limit this
334
+ kernel_name_max_ops = 10
335
+
336
+ # Pad input tensors of matmul/bmm/addmm to leverage Tensor Cores in NVIDIA GPUs
337
+ shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"
338
+
339
+ # Fx-based linear/matmul/bmm + permute/transpose vertical fusion
340
+ permute_fusion = os.environ.get("TORCHINDUCTOR_PERMUTE_FUSION", "0") == "1"
341
+
342
+ # Mark the wrapper call in PyTorch profiler
343
+ profiler_mark_wrapper_call = False
344
+
345
+ # Generate hook calls to torch._inductor.hooks.run_intermediate_hooks for
346
+ # every intermediate for which we can correlate it with an intermediate
347
+ # from the original FX graph
348
+ generate_intermediate_hooks = False
349
+
350
+ # Populate traceback field on IRNode; good for debugging why origin_node is
351
+ # not populated, or finding out where an IRNode was constructed
352
+ debug_ir_traceback = False
353
+
354
+ # used for debugging to make sure config is properly set
355
+ _raise_error_for_testing = False
356
+
357
+ _profile_var = os.environ.get("TORCHINDUCTOR_PROFILE", "")
358
+ profile_bandwidth = _profile_var != ""
359
+ profile_bandwidth_regex = "" if _profile_var == "1" else _profile_var
360
+ # Specify a file where we print out the profiling results.
361
+ # None means we do not dump results to a file.
362
+ profile_bandwidth_output = os.environ.get("TORCHINDUCTOR_PROFILE_OUTPUT", None)
363
+
364
+ # TODO: remove later
365
+ disable_cpp_codegen = False
366
+
367
+
368
+ # Freezing will attempt to inline weights as constants in optimization
369
+ # and run constant folding and other optimizations on them. After freezing, weights
370
+ # can no longer be updated.
371
+ freezing: bool = os.environ.get("TORCHINDUCTOR_FREEZING", "0") == "1"
372
+
373
+ # Make freezing invalidate the eager Parameters of nn modules, to avoid memory overhead
374
+ # of potentially keeping multiple copies of weights.
375
+ freezing_discard_parameters: bool = False
376
+
377
+
378
+ # config specific to codegen/cpp.py
379
+ class cpp:
380
+ # set to torch.get_num_threads()
381
+ threads = -1
382
+
383
+ # Do not generate loops when the condition doesn't hold, like:
384
+ # for(long i0=4096; i0<4096; i0+=1)
385
+ no_redundant_loops = True
386
+
387
+ # Assume number of threads is dynamic, don't specialize thread number.
388
+ # Kernels don't recompile on thread number changes with this flag on.
389
+ # For single-threaded workload, turning it on would incur a slight
390
+ # performance degradation.
391
+ dynamic_threads = False
392
+
393
+ simdlen = None
394
+ min_chunk_size = 4096
395
+ cxx = (
396
+ None, # download gcc12 from conda-forge if conda is installed
397
+ # "g++-12",
398
+ # "g++-11",
399
+ # "g++-10",
400
+ # "clang++",
401
+ os.environ.get("CXX", "g++"),
402
+ # "g++.par",
403
+ )
404
+ # Allow kernel performance profiling via PyTorch profiler
405
+ enable_kernel_profile = False
406
+
407
+ # enable weight prepacking to get better performance; may lead to a large memory footprint
408
+ weight_prepack = True
409
+
410
+ # Inject a bug into our relu implementation; useful for testing our repro
411
+ # extraction and minification functionality.
412
+ # Valid values: "compile_error", "runtime_error", "accuracy"
413
+ inject_relu_bug_TESTING_ONLY = None
414
+ inject_log1p_bug_TESTING_ONLY = None
415
+
416
+ # If None, autodetect whether or not AVX512/AVX2 can be used. Otherwise,
417
+ # force usage as specified, without testing.
418
+ vec_isa_ok = None
419
+
420
+ # similar to config.triton.descriptive_names
421
+ descriptive_names = "original_aten"
422
+
423
+ # how many nodes to allow into a single horizontal fusion
424
+ max_horizontal_fusion_size = 16
425
+
426
+ # Make scatter_reduce fall back when reduce is sum, to avoid a performance
427
+ # regression from using atomic_add.
428
+ fallback_scatter_reduce_sum = True
429
+
430
+ # Use -funsafe-math-optimizations when compiling
431
+ enable_unsafe_math_opt_flag = False
432
+
433
+
434
+ # config specific to codegen/triton.py
435
+ class triton:
436
+ # Use cudagraphs on output code
437
+ cudagraphs = False
438
+
439
+ # Use cudagraph trees for memory pooling if `cudagraphs` is True
440
+ cudagraph_trees = True
441
+
442
+ # assertions not on the fast path, steady state
443
+ slow_path_cudagraph_asserts = True
444
+
445
+ # TODO - need to debug why this prevents cleanup
446
+ cudagraph_trees_history_recording = False
447
+
448
+ # assertions on the fast path
449
+ fast_path_cudagraph_asserts = False
450
+
451
+ # skip warmup for cudagraph trees
452
+ skip_cudagraph_warmup = False
453
+
454
+ # Synchronize before and after every compiled graph.
455
+ debug_sync_graph = False
456
+
457
+ # Synchronize after every kernel launch, to help pinpoint bugs
458
+ debug_sync_kernel = False
459
+
460
+ # Always load full blocks (rather than broadcasting inside the block)
461
+ dense_indexing = False
462
+
463
+ # limit tiling dimensions
464
+ max_tiles = 2
465
+
466
+ # use triton.autotune for pointwise ops with complex layouts
467
+ # this should only be disabled for debugging/testing
468
+ autotune_pointwise = True
469
+
470
+ # max autotune gemm with cublasLt
471
+ autotune_cublasLt = True
472
+
473
+ # should we stop a fusion to allow better tiling?
474
+ tiling_prevents_pointwise_fusion = True
475
+ tiling_prevents_reduction_fusion = True
476
+
477
+ # should we give different names to kernels
478
+ # Note: This is orthogonal to descriptive_names - this is deciding whether
479
+ # our triton kernel names should all be `triton_` (to maximize caching) or
480
+ # whether they should be unique.
481
+ unique_kernel_names = os.environ.get("TORCHINDUCTOR_UNIQUE_KERNEL_NAMES") == "1"
482
+
483
+ # should we put op names in kernel names
484
+ # False: No special names (just triton__1, triton__2, etc.)
485
+ # "torch": Maps to the fx op in the Dynamo graph (module name, method name, etc.)
486
+ # "original_aten": Maps to the highest-level aten op (i.e. pre-decompositions)
487
+ # "inductor_node": Maps to the node name in the FX graph passed to Inductor
488
+ descriptive_names = "original_aten"
489
+
490
+ # use alternate codegen for smaller reductions
491
+ persistent_reductions = (
492
+ os.environ.get("TORCHINDUCTOR_PERSISTENT_REDUCTIONS", "1") == "1"
493
+ )
494
+
495
+ # hint to Triton when arguments are divisible by 16
496
+ divisible_by_16 = True
497
+
498
+ # these are not enforced, but they are used by asserts in triton_heuristics.py
499
+ # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048
500
+ max_block = {"X": 2048, "Y": 1024, "Z": 1024}
501
+
502
+ # Store the generated cubin files for cpp wrapper code to load
503
+ store_cubin = False
504
+
505
+ # the max number of spills we allow for the configs we benchmark.
506
+ # Setting this to 0 means we skip a config if it spills even a single
507
+ # register.
508
+ # Setting it to a larger value allows a config spilling a small amount
509
+ # of registers being benchmarked.
510
+ #
511
+ # NOTE: triton will always report >0 register spills for kernels using sin/cos.
512
+ # (check this issue https://github.com/openai/triton/issues/1756 )
513
+ # So far we see a fixed 8 spilled registers for kernels using sin/cos.
514
+ # Raise the threshold to 16 to be safe.
515
+ # We should revisit this once we understand more of the source of register spills.
516
+ spill_threshold: int = 16
517
+
518
+ # Inject a bug into our relu implementation; useful for testing our repro
519
+ # extraction and minification functionality.
520
+ # Valid values: "compile_error", "runtime_error", "accuracy"
521
+ inject_relu_bug_TESTING_ONLY = None
522
+
523
+
524
+ class aot_inductor:
525
+ # AOTInductor output path
526
+ # If an absolute path is specified, the generated lib files will be stored under the directory;
527
+ # If a relative path is specified, it will be used as a subdirectory under the default caching path;
528
+ # If not specified, a temp directory will be created under the default caching path.
529
+ # If the specified path contains something like "model.so", the sub-string will be used
530
+ # to name the generated library.
531
+ output_path = ""
532
+
533
+ debug_compile = os.environ.get("AOT_INDUCTOR_DEBUG_COMPILE", "0") == "1"
534
+
535
+ # Whether to codegen an ABI-compatible model.so
536
+ abi_compatible = is_fbcode()
537
+
538
+ # Serialized tree spec for flattening inputs
539
+ serialized_in_spec = ""
540
+
541
+ # Serialized tree spec for flattening outputs
542
+ serialized_out_spec = ""
543
+
544
+
545
+ class cuda:
546
+ # CUDA arch to use for CUDA template kernel compilation.
547
+ # e.g. "70", "75", "80", "90", etc.
548
+ # When arch is None, Inductor uses torch.cuda.get_device_capability(0).
549
+ arch = None
550
+
551
+ # CUDA version to use for CUDA template kernel compilation.
552
+ # e.g. "11.4", "12.1", etc.
553
+ # When version is None, Inductor uses torch.version.cuda.
554
+ version = None
555
+
556
+ # Optimization level for the host compiler.
557
+ compile_opt_level = "-O1"
558
+
559
+ # Whether to enable device LTO (link-time-optimization).
560
+ enable_cuda_lto = False
561
+
562
+ # Whether to keep intermediate files during compilation.
563
+ enable_ptxas_info = False
564
+
565
+ # Whether to enable debug info, e.g. line number, cutlass debug info.
566
+ enable_debug_info = False
567
+
568
+ # Whether to use fast math.
569
+ use_fast_math = False
570
+
571
+ # Path to the CUTLASS repo root directory.
572
+ # The default path only works under PyTorch local development environment.
573
+ cutlass_dir = os.environ.get(
574
+ "TORCHINDUCTOR_CUTLASS_DIR",
575
+ os.path.abspath(
576
+ os.path.join(os.path.dirname(torch.__file__), "../third_party/cutlass/")
577
+ ),
578
+ )
579
+
580
+ # Configures the maximum number of CUTLASS configs to profile in max_autotune.
581
+ # By default it's None, so that all CUTLASS configs are tuned.
582
+ # This is mainly used to reduce test time in CI.
583
+ cutlass_max_profiling_configs = None
584
+
585
+ # Path to CUDA NVCC.
586
+ # NVCC search order:
587
+ # 1) cuda_cxx set in this config
588
+ # 2) CUDACXX environment variable
589
+ # 3) CUDA_HOME environment variable
590
+ # 4) default system search PATH.
591
+ cuda_cxx = None
592
+
593
+ # If set to True, it will ensure that only GEMM ops capable of
594
+ # epilogue fusion via CUTLASS Epilogue Visitor Trees ( EVT )
595
+ # are enabled for the CUTLASS backend.
596
+ cutlass_only_evt_capable_ops: bool = False
597
+
598
+
599
+ # create a directory containing lots of debug information
600
+ class trace:
601
+ # master switch for all debugging flags below
602
+ enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
603
+
604
+ # Save debug information to a temporary directory
605
+ # If not specified, a temp directory will be created by system
606
+ debug_dir = None
607
+
608
+ # Save python logger call >=logging.DEBUG
609
+ debug_log = False
610
+
611
+ # Save python logger call >=logging.INFO
612
+ info_log = False
613
+
614
+ # Save input FX graph (post decomps, pre optimization)
615
+ fx_graph = True
616
+
617
+ # Save FX graph after transformations
618
+ fx_graph_transformed = True
619
+
620
+ # Save TorchInductor IR before fusion pass
621
+ ir_pre_fusion = True
622
+
623
+ # Save TorchInductor IR after fusion pass
624
+ ir_post_fusion = True
625
+
626
+ # Copy generated code to trace dir
627
+ output_code = True
628
+
629
+ # SVG figure showing post-fusion graph
630
+ graph_diagram = os.environ.get("INDUCTOR_POST_FUSION_SVG", "0") == "1"
631
+
632
+ # SVG figure showing fx with fusion
633
+ draw_orig_fx_graph = os.environ.get("INDUCTOR_ORIG_FX_SVG", "0") == "1"
634
+
635
+ # We draw our fx graphs with the "record" shape attribute by default.
636
+ # Sometimes, when the graph is very complex, we may hit dot errors like below:
637
+ # "flat edge between adjacent nodes one of which has a record shape -
638
+ # replace records with HTML-like labels"
639
+ # and thus fail to generate a graph. So, let's give the user an option
640
+ # to specify the shape attribute for the dot graph. For example, passing
641
+ # INDUCTOR_DOT_GRAPH_SHAPE_SVG = "none" would let us generate HTML-like labels
643
+ # to work around the above failure.
643
+ dot_graph_shape = os.environ.get("INDUCTOR_DOT_GRAPH_SHAPE_SVG", None)
644
+
645
+ # Store cProfile (see snakeviz to view)
646
+ compile_profile = False
647
+
648
+ # Upload the .tar.gz file
649
+ # Needs to be overridden based on specific environment needs
650
+ upload_tar = None
651
+
652
+
653
+ _save_config_ignore = {
654
+ # workaround: "Can't pickle <function ...>"
655
+ "trace.upload_tar",
656
+ }
657
+
658
+ if TYPE_CHECKING:
659
+ from torch.utils._config_typing import * # noqa: F401, F403
660
+
661
+ from torch.utils._config_module import install_config_module
662
+
663
+ # adds patch, save_config, etc
664
+ install_config_module(sys.modules[__name__])
env-llmeval/lib/python3.10/site-packages/torch/_inductor/constant_folding.py ADDED
@@ -0,0 +1,190 @@
1
+ import collections
2
+ from typing import Any, Callable, Dict, Optional
3
+
4
+ import torch
5
+ import torch.utils._pytree as pytree
6
+
7
+ aten = torch.ops.aten
8
+
9
+
10
+ def replace_node_with_constant(gm, node, constant):
11
+ g = gm.graph
12
+
13
+ if not hasattr(gm, "_frozen_param_count"):
14
+ gm._frozen_param_count = 0
15
+
16
+ i = gm._frozen_param_count
17
+
18
+ while True:
19
+ qualname = f"_frozen_param{i}"
20
+ if not hasattr(gm, qualname):
21
+ break
22
+ i += 1
23
+
24
+ gm._frozen_param_count = i + 1
25
+
26
+ with g.inserting_before(node):
27
+ new_input_node = g.create_node("get_attr", qualname, (), {})
28
+ node.replace_all_uses_with(new_input_node)
29
+ new_input_node.meta.update(node.meta)
30
+ g.erase_node(node)
31
+
32
+ # needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
33
+ gm.register_buffer(qualname, constant)
34
+ setattr(gm, qualname, constant)
35
+
36
+
37
+ class ConstantFolder(torch.fx.Interpreter):
38
+ def __init__(
39
+ self,
40
+ gm,
41
+ skip_constructors=False,
42
+ ):
43
+ super().__init__(gm)
44
+ self.node_replacements: Dict[torch.fx.Node, Any] = {}
45
+ self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
46
+ self.unknown_value = object()
47
+ self.skip_constructors: bool = skip_constructors
48
+
49
+ # overwrite this to deallocate env values if their only remaining use
50
+ # is the output
51
+ self.user_to_last_uses = self.node_to_last_non_output_use()
52
+
53
+ def is_impure(self, node: torch.fx.node.Node):
54
+ if node.target in [
55
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
56
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
57
+ torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
58
+ ]:
59
+ # For the pattern fp32_weight -> q -> dq
60
+ # we only fold fp32_weight -> q into
61
+ # int8_weight, and leave dq in the graph to be fused
62
+ return True
63
+ return False
64
+
65
+ def node_to_last_non_output_use(self):
66
+ last_non_output_use = collections.defaultdict(list)
67
+ seen_uses = set()
68
+ output_node = next(iter(reversed(self.module.graph.nodes)))
69
+
70
+ for node in reversed(self.module.graph.nodes):
71
+ if node.target == "output":
72
+ continue
73
+
74
+ def add_use(inp):
75
+ if inp in seen_uses:
76
+ return
77
+
78
+ seen_uses.add(inp)
79
+ last_non_output_use[node].append(inp)
80
+
81
+ pytree.tree_map_only(torch.fx.Node, add_use, (node.args, node.kwargs))
82
+
83
+ # if this node is only used in output, we want to gc it right away
84
+ if len(node.users) == 1 and output_node in node.users:
85
+ last_non_output_use[node].append(node)
86
+
87
+ return last_non_output_use
88
+
89
+ def run_node(self, node):
90
+ if node.target == "output":
91
+ # because we remove nodes from env on last non output use,
92
+ # re-define them now or we'll get an error in the interpreter
93
+ def set_env(arg):
94
+ self.env[arg] = self.unknown_value
95
+
96
+ pytree.tree_map_only(torch.fx.Node, set_env, node.args)
97
+ return super().run_node(node)
98
+
99
+ args, kwargs = self.fetch_args_kwargs_from_env(node)
100
+ flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)
101
+
102
+ if self.unknown_value in flattened_inputs:
103
+ return self.unknown_value
104
+
105
+ # TODO - fix errors with this
106
+ if (
107
+ node.op == "call_function"
108
+ and node.target == aten._efficientzerotensor.default
109
+ ):
110
+ return self.unknown_value
111
+
112
+ # skip constructors, since inductor generates optimal code for them already
113
+ # and turning them into tensors would result in an additional global memory read
114
+ # TODO - more complicated strategy
115
+ if (
116
+ self.skip_constructors
117
+ and node.op != "get_attr"
118
+ and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
119
+ ):
120
+ return self.unknown_value
121
+
122
+ # All mutations should either be removed or on inputs which we did not make constant
123
+ if (
124
+ isinstance(node.target, torch._ops.OpOverload)
125
+ and torch.Tag.nondeterministic_seeded in node.target.tags
126
+ ):
127
+ return self.unknown_value
128
+
129
+ out = super().run_node(node)
130
+
131
+ if node.op != "get_attr" and isinstance(out, torch.Tensor):
132
+ if not self.insertable_tensor_check(out):
133
+ return out
134
+
135
+ if self.is_impure(node):
136
+ return self.unknown_value
137
+
138
+ self.add_node_replacement(node, out)
139
+
140
+ flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
141
+
142
+ for n in flattened_node_inps:
143
+ if not isinstance(n, torch.fx.Node):
144
+ continue
145
+
146
+ self.replaced_uses[n] += 1
147
+
148
+ for to_delete in self.user_to_last_uses.get(node, []):
149
+ if self.replaced_uses[to_delete] == len(to_delete.users):
150
+ self.node_replacements.pop(to_delete, None)
151
+
152
+ return out
153
+
154
+ def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
155
+ return True
156
+
157
+ def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
158
+ self.node_replacements[node] = tensor
159
+
160
+ def run(self):
161
+ env = {}
162
+ for n in self.module.graph.nodes:
163
+ if n.op == "placeholder":
164
+ env[n] = self.unknown_value
165
+ return super().run(initial_env=env)
166
+
167
+
168
+ @torch.utils._python_dispatch._disable_current_modes()
169
+ def constant_fold(gm, constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None):
170
+ cf = ConstantFolder(gm, skip_constructors=True)
171
+ cf.run()
172
+
173
+ for node, constant in cf.node_replacements.items():
174
+ if constraint_fn is not None and not constraint_fn(node):
175
+ continue
176
+ replace_node_with_constant(gm, node, constant)
177
+
178
+ erased_params = []
179
+ for node in gm.graph.nodes:
180
+ if node.op == "get_attr" and len(node.users) == 0:
181
+ if hasattr(gm, node.target):
182
+ delattr(gm, node.target)
183
+ erased_params.append(node)
184
+
185
+ for node in erased_params:
186
+ gm.graph.erase_node(node)
187
+
188
+ gm.graph.eliminate_dead_code()
189
+ gm.graph.lint()
190
+ gm.recompile()
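A hedged usage sketch of the pass above on a tiny traced module; the module `M`, the buffer name `w`, and the call pattern are illustrative rather than taken from the diff.

import torch
from torch._inductor.constant_folding import constant_fold

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("w", torch.randn(4, 4))

    def forward(self, x):
        # `self.w * 2` depends only on the buffer, so it can be folded;
        # the matmul depends on the placeholder `x` and is left alone.
        return x @ (self.w * 2)

gm = torch.fx.symbolic_trace(M())
constant_fold(gm)
print(gm.graph)  # the folded value shows up as a `_frozen_param0` get_attr node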
env-llmeval/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py ADDED
@@ -0,0 +1,315 @@
1
+ import copy
2
+ import itertools
3
+ import logging
4
+ from typing import Callable, Optional
5
+
6
+ from torch.utils._triton import has_triton
7
+ from .utils import red_text, triton_config_to_hashable
8
+
9
+ if has_triton():
10
+ import triton
11
+ else:
12
+ triton = None
13
+
14
+ from . import config as inductor_config
15
+
16
+ log = logging.getLogger(__name__)
17
+
18
+
19
+ def get_field(config, name):
20
+ if name == "num_warps":
21
+ return config.num_warps
22
+ elif name == "num_stages":
23
+ return config.num_stages
24
+ else:
25
+ return config.kwargs.get(name, None)
26
+
27
+
28
+ def set_field(config, name, value):
29
+ if name == "num_warps":
30
+ config.num_warps = value
31
+ elif name == "num_stages":
32
+ config.num_stages = value
33
+ else:
34
+ config.kwargs[name] = value
35
+
36
+
37
+ class CoordescTuner:
38
+ """
39
+ The coordinate descent tuner. Tune one field/coordinate at a time.
40
+
41
+ TODO: will it be necessary to tune multiple fields simultaneously?
42
+
43
+
44
+ TODO: what if both increasing and decreasing a field can improve perf,
45
+ i.e., there are multiple local optima?
46
+ """
47
+
48
+ def __init__(self, is_mm=False, name="unknown", size_hints=None):
49
+ self.is_mm = is_mm # we will tune num_stages for mm
50
+ self.cached_benchmark_results = {}
51
+ self.name = name
52
+ self.size_hints = size_hints
53
+
54
+ def get_xmax(self):
55
+ xmax = inductor_config.triton.max_block["X"]
56
+ if self.size_hints and len(self.size_hints) > 0:
57
+ xmax = min(xmax, self.size_hints[0])
58
+ return xmax
59
+
60
+ def get_ymax(self):
61
+ ymax = inductor_config.triton.max_block["Y"]
62
+ if self.size_hints and len(self.size_hints) > 1:
63
+ ymax = min(ymax, self.size_hints[1])
64
+ return ymax
65
+
66
+ def get_zmax(self):
67
+ zmax = inductor_config.triton.max_block["Z"]
68
+ if self.size_hints and len(self.size_hints) > 2:
69
+ zmax = min(zmax, self.size_hints[2])
70
+ return zmax
71
+
72
+ def get_rmax(self):
73
+ if self.size_hints and len(self.size_hints) > 0:
74
+ return self.size_hints[-1] # the last one is for reduction
75
+ else:
76
+ # large enough. We should not pick this large RBLOCK anyway
77
+ return 2**30
78
+
79
+ def get_warpsmax(self):
80
+ # Currently, CUDA has a maximum of 1024 threads, so 32 is the max
81
+ # number of warps.
82
+ return 1024 // 32
83
+
84
+ def cache_benchmark_result(self, config, timing):
85
+ self.cached_benchmark_results[triton_config_to_hashable(config)] = timing
86
+
87
+ def lookup_in_cache(self, config):
88
+ return self.cached_benchmark_results.get(triton_config_to_hashable(config))
89
+
90
+ def call_func(self, func, config):
91
+ found = self.lookup_in_cache(config)
92
+ if found is not None:
93
+ log.debug(" CACHED")
94
+ return found
95
+ timing = func(config)
96
+ self.cache_benchmark_result(config, timing)
97
+ return timing
98
+
99
+ @property
100
+ def tunable_fields(self):
101
+ out = [
102
+ "XBLOCK",
103
+ "YBLOCK",
104
+ "ZBLOCK",
105
+ # NOTE: we should not tune RBLOCK for persistent reduction.
106
+ # We rely on the fact that persistent reduction's triton.Config
107
+ # does not have the RBLOCK field to guarantee that.
108
+ "RBLOCK",
109
+ # the following 3 are for mm
110
+ "BLOCK_M",
111
+ "BLOCK_N",
112
+ "BLOCK_K",
113
+ "num_warps",
114
+ ]
115
+ if self.is_mm:
116
+ out.append("num_stages")
117
+
118
+ return out
119
+
120
+ def value_too_large(self, name, val):
121
+ if name == "XBLOCK":
122
+ return val > self.get_xmax()
123
+ if name == "YBLOCK":
124
+ return val > self.get_ymax()
125
+ if name == "ZBLOCK":
126
+ return val > self.get_zmax()
127
+ if name == "RBLOCK":
128
+ return val > self.get_rmax()
129
+ if name == "num_warps":
130
+ return val > self.get_warpsmax()
131
+
132
+ return False
133
+
134
+ def get_neighbour_values(self, name, orig_val, radius=1, include_self=False):
135
+ """
136
+ Get neighbour values in 'radius' steps. The original value is not
137
+ returned as its own neighbour.
138
+ """
139
+ assert radius >= 1
140
+
141
+ def update(cur_val, inc=True):
142
+ if name == "num_stages":
143
+ if inc:
144
+ return cur_val + 1
145
+ else:
146
+ return cur_val - 1
147
+ else:
148
+ if inc:
149
+ return cur_val * 2
150
+ else:
151
+ return cur_val // 2
152
+
153
+ out = []
154
+ # increment loop
155
+ cur_val = orig_val
156
+ for _ in range(radius):
157
+ cur_val = update(cur_val, True)
158
+ if self.value_too_large(name, cur_val):
159
+ break
160
+ out.append(cur_val)
161
+
162
+ # decrement loop
163
+ cur_val = orig_val
164
+ for _ in range(radius):
165
+ cur_val = update(cur_val, False)
166
+ if cur_val <= 0:
167
+ break
168
+ out.append(cur_val)
169
+
170
+ if include_self:
171
+ out.append(orig_val)
172
+ return out
173
+
174
+ @staticmethod
175
+ def has_improvement(baseline, test):
176
+ threshold = 0.001 # 0.1%
177
+ return test is not None and test < baseline * (1 - threshold)
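To make the threshold concrete: against a 1.0 ms baseline, a candidate timing of 0.9985 ms counts as an improvement (it is below 1.0 * 0.999 = 0.999 ms), while 0.9995 ms does not.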
178
+
179
+ def check_all_tuning_directions(
180
+ self,
181
+ func: Callable[["triton.Config"], float],
182
+ best_config,
183
+ best_timing,
184
+ ):
185
+ """
186
+ Check all directions. We only do this once the regular coordinate
187
+ descent tuning find no better choices any more.
188
+ We only have a few tunable fields, so this should be fine.
189
+ """
190
+ candidate_values_list = []
191
+ effective_fields = []
192
+ for field in self.tunable_fields:
193
+ old_value = get_field(best_config, field)
194
+ if old_value is None:
195
+ continue
196
+ candidate_values = self.get_neighbour_values(
197
+ field,
198
+ old_value,
199
+ radius=inductor_config.coordinate_descent_search_radius,
200
+ include_self=True,
201
+ )
202
+ candidate_values_list.append(candidate_values)
203
+ effective_fields.append(field)
204
+
205
+ choices = itertools.product(*candidate_values_list)
206
+ improved = False
207
+ for choice in choices:
208
+ assert len(choice) == len(effective_fields)
209
+ candidate_config = copy.deepcopy(best_config)
210
+ for new_val, field in zip(choice, effective_fields):
211
+ set_field(candidate_config, field, new_val)
212
+ cmp_res, candidate_timing = self.compare_config(
213
+ func, candidate_config, best_config, best_timing
214
+ )
215
+ if cmp_res:
216
+ improved = True
217
+ best_config = candidate_config
218
+ best_timing = candidate_timing
219
+
220
+ return improved, best_config, best_timing
221
+
222
+ def compare_config(self, func, candidate_config, best_config, best_timing):
223
+ """
224
+ Check if candidate_config is better than best_config.
225
+
226
+ Return a tuple of (compare_result, candidate_timing).
227
+ compare_result is true iff candidate_config is better.
228
+ """
229
+ log.debug("Try config %s", candidate_config)
230
+ try:
231
+ candidate_timing = self.call_func(func, candidate_config)
232
+ except Exception as e:
233
+ log.debug("Got exception %s", e)
234
+ return False, float("inf")
235
+
236
+ if self.has_improvement(best_timing, candidate_timing):
237
+ log.debug(
238
+ "Tune from %s %f -> %s %f",
239
+ best_config,
240
+ best_timing,
241
+ candidate_config,
242
+ candidate_timing,
243
+ )
244
+
245
+ return True, candidate_timing
246
+ return False, candidate_timing
247
+
248
+ def autotune(
249
+ self,
250
+ func: Callable[["triton.Config"], float],
251
+ baseline_config: "triton.Config",
252
+ baseline_timing: Optional[float] = None,
253
+ ) -> "triton.Config":
254
+ if baseline_timing is None:
255
+ baseline_timing = self.call_func(func, baseline_config)
256
+
257
+ log.debug("= Do coordinate descent tuning for %s =", self.name)
258
+ log.debug(
259
+ "Baseline Config %s, baseline timing %f", baseline_config, baseline_timing
260
+ )
261
+ improved = True
262
+ best_config = baseline_config
263
+ best_timing = baseline_timing
264
+ tunable_fields = self.tunable_fields
265
+
266
+ while improved:
267
+ improved = False
268
+
269
+ for name in tunable_fields:
270
+ cur_val = get_field(best_config, name)
271
+ # some kernels don't have RBLOCK/YBLOCK/ZBLOCK, so cur_val may be None
272
+ if cur_val is None:
273
+ continue
274
+
275
+ # It's possible that candidate_values is empty.
276
+ # E.g., if XBLOCK is 1 initially and size_hint for x is also 1.
277
+ # We would not try either larger or smaller XBLOCK in this case.
278
+ candidate_values = self.get_neighbour_values(name, cur_val)
279
+
280
+ for next_val in candidate_values:
281
+ candidate_config = copy.deepcopy(best_config)
282
+ set_field(candidate_config, name, next_val)
283
+
284
+ cmp_res, candidate_timing = self.compare_config(
285
+ func, candidate_config, best_config, best_timing
286
+ )
287
+ if cmp_res:
288
+ improved = True
289
+ best_config, best_timing = candidate_config, candidate_timing
290
+
291
+ if not improved and inductor_config.coordinate_descent_check_all_directions:
292
+ old_best_timing = best_timing
293
+ improved, best_config, best_timing = self.check_all_tuning_directions(
294
+ func, best_config, best_timing
295
+ )
296
+
297
+ if improved:
298
+ msg = red_text(
299
+ "Coordinate descend tuning found improvement of %.3fx by looking in all directions."
300
+ )
301
+ log.debug(
302
+ msg,
303
+ old_best_timing / best_timing,
304
+ )
305
+
306
+ log.debug(
307
+ "Improve from %s %f -> %s %f, %.3fx",
308
+ baseline_config,
309
+ baseline_timing,
310
+ best_config,
311
+ best_timing,
312
+ baseline_timing / best_timing,
313
+ )
314
+
315
+ return best_config
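A hedged sketch of driving the tuner directly with a synthetic benchmark function. The "timing" model, the baseline config, and the kernel name are made up purely to exercise the search loop, and the snippet assumes Triton is installed.

import triton
from torch._inductor.coordinate_descent_tuner import CoordescTuner

def fake_benchmark(cfg: "triton.Config") -> float:
    # Synthetic cost: pretend the sweet spot is XBLOCK=64, num_warps=4.
    xblock = cfg.kwargs.get("XBLOCK", 1)
    return abs(xblock - 64) / 64 + abs(cfg.num_warps - 4)

baseline = triton.Config({"XBLOCK": 8}, num_warps=8, num_stages=1)
tuner = CoordescTuner(name="toy_kernel", size_hints=[1024])
best = tuner.autotune(fake_benchmark, baseline)
print(best)  # expected to walk XBLOCK toward 64 and num_warps toward 4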
env-llmeval/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py ADDED
@@ -0,0 +1,2157 @@
1
+ """
2
+ CUDA graph trees are a safety abstraction over CUDAGraphs, similar to make_graph_callables,
3
+ which share the same memory pool. Sharing a memory pool is an extremely
4
+ important optimization when chaining multiple CUDA graphs together, as it
5
+ prevents you from needing to copy intermediate tensors from one graph to the
6
+ next, and reduces overall memory usage by allowing dead memory from the first
7
+ pool to be reused in the second.
8
+
9
+ The standard graph/make_graph_callables support sharing memory pool, but
10
+ with a lot of caveats. CUDA graph trees remove these restrictions:
11
+
12
+ * Previously, if you recorded graphs A, B, you had to replay A, B in that
13
+ order. With CUDA graph trees, after replaying A, you can change your
14
+ mind and record/replay a different graph B'; we will support efficient
15
+ execution of both A, B and A, B', using only max(mem(A, B), mem(A, B')). In
16
+ other words: we support arbitrary trees of CUDA graph operations, not just
17
+ sequences (this is why this feature is called CUDA graph trees.)
18
+
19
+ * Previously, if you executed graph A, some non-CUDA graph code, and then
20
+ graph B, after executing graph B, it was not safe to retain any references
21
+ to intermediates produced by A. With CUDA graph trees, we track if any
22
+ outputs of graph A are still live by the time graph B is run, and make
23
+ sure graph B doesn't clobber their memory when reusing the CUDA graphs
24
+ pool. You'll get a separate recording of B depending on what tensors
25
+ stay live or dead.
26
+
27
+ CUDA graph trees are flexible enough to be used in Dynamo across graph breaks,
28
+ which is their primary use case.
29
+
30
+ The ability to switch from replay to record is fairly nontrivial: remember that
31
+ when you replay a CUDA graph, you only replay CUDA operations; no CPU side state
32
+ is updated. In particular, the CPU-side book-keeping for the allocator is not
33
+ reconstructed. However, to record a new child CUDA graph, we must restore this
34
+ book-keeping. This is what checkpoint pool state is used for.
35
+ """
36
+
37
+ from __future__ import annotations
38
+
39
+ import contextlib
40
+ import dataclasses
41
+ import functools
42
+ import gc
43
+ import itertools
44
+ import logging
45
+ import operator
46
+ import sys
47
+ import threading
48
+ import traceback
49
+ import warnings
50
+ import weakref
51
+ from collections import defaultdict
52
+
53
+ from enum import auto, Enum
54
+ from typing import (
55
+ Any,
56
+ Callable,
57
+ cast,
58
+ Dict,
59
+ Iterator,
60
+ List,
61
+ Optional,
62
+ Sequence,
63
+ Set,
64
+ Tuple,
65
+ Union,
66
+ )
67
+
68
+ import torch.fx
69
+ from torch import Tensor
70
+ from torch._dynamo.mutation_guard import GenerationTracker
71
+ from torch._dynamo.utils import preserve_rng_state
72
+ from torch._inductor.compile_fx import (
73
+ align_inputs_from_check_idxs,
74
+ copy_misaligned_inputs,
75
+ get_expanded_dims,
76
+ get_input_idxs_to_check,
77
+ index_expanded_dims,
78
+ remove_unaligned_input_idxs,
79
+ static_input,
80
+ )
81
+ from torch.multiprocessing.reductions import StorageWeakRef
82
+ from torch.storage import UntypedStorage
83
+ from torch.types import _bool
84
+ from torch.utils import _pytree as pytree
85
+ from torch.utils.weak import TensorWeakRef
86
+
87
+ StorageWeakRefPointer = int
88
+ StorageDataPtr = int
89
+ NBytes = int
90
+
91
+ if torch.backends.cuda.is_built():
92
+ from torch._C import (
93
+ _cuda_CUDAAllocator_AllocatorState as AllocatorState,
94
+ _set_cached_tensors_enabled as _set_cached_tensors_enabled,
95
+ )
96
+ else:
97
+
98
+ class AllocatorState: # type: ignore[no-redef]
99
+ pass
100
+
101
+ def _set_cached_tensors_enabled(enabled: _bool) -> None:
102
+ pass
103
+
104
+
105
+ log = logging.getLogger(__name__)
106
+
107
+ from . import config
108
+
109
+
110
+ @dataclasses.dataclass(frozen=True)
111
+ class GraphID:
112
+ "Unique counter of a cuda graph recording"
113
+ id: int
114
+
115
+
116
+ @dataclasses.dataclass(frozen=True)
117
+ class FunctionID:
118
+ "Unique counter of a function wrapped in cudagraphify_impl"
119
+ id: int
120
+
121
+
122
+ @dataclasses.dataclass(frozen=True)
123
+ class WrappedFunction:
124
+ """
125
+ Represents a function that you want to record for CUDA graph replay,
126
+ with a little more metadata so we can identify if we have an applicable
127
+ CUDA graph in our CUDA graph tree for it.
128
+ """
129
+
130
+ model: Callable[..., Any]
131
+ static_input_idxs: Sequence[int]
132
+ id: FunctionID
133
+ constants: Tuple[torch.Tensor, ...]
134
+
135
+
136
+ def clear_cublass_cache():
137
+ """
138
+ Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem for
139
+ doing warmup within a CUDAGraph private pool because we do not want persistent allocations from
140
+ one run to the next. When we begin a new run of a cudagraphs path (generation), all tensors
141
+ from the previous generation are freed. This frees them in the memory pool, but not elsewhere.
142
+ A tensor in the cublas workspace would continue to be in use in the workspace but would also get allocated
143
+ in the next run. The memory would be in use in two places.
144
+
145
+ To solve this, we clear cublas caches before and after warming up or recording. If a workspace is required
146
+ it will be allocated to the cudagraph private pool and accounted for in the allocator for the duration of the
147
+ program. There is no overhead to this on replay since cudagraphs removes allocation overhead.
148
+ """
149
+ torch._C._cuda_clearCublasWorkspaces()
150
+
151
+
152
+ @contextlib.contextmanager
153
+ def clear_cublas_manager():
154
+ "Context manager around clearing cublas caches that will clear on enter and exit"
155
+ clear_cublass_cache()
156
+ try:
157
+ yield
158
+ finally:
159
+ clear_cublass_cache()
160
+
161
+
162
+ @contextlib.contextmanager
163
+ def disable_conv_cache_emptying():
164
+ prev = torch._C._cuda_get_conv_benchmark_empty_cache()
165
+ torch._C._cudnn_set_conv_benchmark_empty_cache(False)
166
+ try:
167
+ yield
168
+ finally:
169
+ torch._C._cudnn_set_conv_benchmark_empty_cache(prev)
170
+
171
+
172
+ @contextlib.contextmanager
173
+ def enable_history_recording():
174
+ "Turns on history recording in the CUDA Caching Allocator"
175
+ enabled = torch._C._cuda_isHistoryEnabled()
176
+ try:
177
+ if not enabled:
178
+ torch.cuda.memory._record_memory_history()
179
+ yield
180
+ finally:
181
+ if not enabled:
182
+ torch.cuda.memory._record_memory_history(None)
183
+
184
+
185
+ def get_history_recording():
186
+ # TODO - remove, prevents cleanup
187
+ if not config.triton.cudagraph_trees_history_recording:
188
+ return contextlib.nullcontext()
189
+ return enable_history_recording()
190
+
191
+
192
+ class TreeManagerContainer:
193
+ """
194
+ Manages the lifetime of the tree manager. Like `PrivatePool` in cuda caching allocator,
195
+ the tree and its corresponding memory pool should be kept alive as long as any outstanding
196
+ graph or tensor which is an output of a graph remains alive.
197
+
198
+ There is a single tree manager container per device.
199
+
200
+ The lifecycle of a tree_manager is:
201
+ - Is constructed, no graph, no fns, no tensors
202
+ - Tree manager is fetched, resulting in tree manager being allocated
203
+ - We generate a bunch of functions, calling add_strong_reference
204
+ - These functions die, calling finalize_reference
205
+ - When all the functions die, we finalize_tree_manager.
206
+
207
+ TODO: in the future, we would like to do the following once storage weak refs land
208
+ - We look for all the live storages and add references to THOSE
209
+ - We count as storages die
210
+ - All the storages are dead, we deallocate the tree manager
211
+ """
212
+
213
+ def __init__(self, device_index):
214
+ # This class keeps a strong reference to tree_manager,
215
+ # but once all other strong references to the tree_manager are gone, it will reset it to None.
216
+ # We need a strong reference so that we can still access its attributes upon cleanup.
217
+ self.tree_manager: Optional[CUDAGraphTreeManager] = None
218
+
219
+ # Number of outstanding references to the current tree manager
220
+ self.live_cudagraphify_fns = 0
221
+
222
+ self.device_index = device_index
223
+
224
+ # Following two objects are only set in the case that Tensor outputs outlive
225
+ # the cudagraphify_fns. Reference to the Graph is needed to keep the private pool from
226
+ # deallocation.
227
+ self.live_storages_count = 0
228
+ self.graph: Optional[torch.cuda.CUDAGraph] = None
229
+
230
+ self.lock = threading.Lock()
231
+
232
+ def _finalize_tensor(self):
233
+ with self.lock:
234
+ self.live_storages_count -= 1
235
+ if self.live_storages_count == 0:
236
+ self.graph = None
237
+
238
+ # manager was used again after existing cleanup,
239
+ # we shouldn't set it to None
240
+ if self.live_cudagraphify_fns == 0:
241
+ self.tree_manager = None
242
+
243
+ def finalize_cudagraphify_fn(self):
244
+ with self.lock:
245
+ self.live_cudagraphify_fns -= 1
246
+ if self.live_cudagraphify_fns == 0:
247
+ self._finalize_tree_manager()
248
+
249
+ def _finalize_tree_manager(self):
250
+ assert self.lock.locked()
251
+ self.tree_manager = None
252
+
253
+ # TODO - when issue #91395 is landed, we can set a weakref on
254
+ # storages and trigger a deallocation when all outputs of the
255
+ # cudagraph are dead.
256
+
257
+ # live_storages = list(
258
+ # tree_manager.live_cudagraph_pool_storages_in_curr_execution()
259
+ # )
260
+
261
+ # # Maintain reference to graph to keep tensors alive
262
+ # assert len(tree_manager.roots) > 0, "expected at least one use"
263
+ # root = next(tree_manager.get_roots())
264
+ # self.graph = root.graph
265
+ # seen_storages = set()
266
+ # for stor in live_storages:
267
+ # if stor in seen_storages:
268
+ # continue
269
+ # seen_storages.add(stor)
270
+ # self.live_storages_count += 1
271
+ # . weakref.finalize(stor, self._finalize_tensor)
272
+
273
+ def add_strong_reference(self, fn: Callable[..., Any]):
274
+ with self.lock:
275
+ self.live_cudagraphify_fns += 1
276
+
277
+ weakref.finalize(fn, self.finalize_cudagraphify_fn)
278
+
279
+ def get_tree_manager(self) -> CUDAGraphTreeManager:
280
+ with self.lock:
281
+ if self.tree_manager is None:
282
+ self.tree_manager = CUDAGraphTreeManager(self.device_index)
283
+ return self.tree_manager
284
+
285
+
286
+ local = threading.local()
287
+
288
+ # one tree manager per device
289
+ local.tree_manager_containers = {}
290
+ local.tree_manager_locks = defaultdict(threading.Lock)
291
+
292
+
293
+ # only incremented by user call of mark_step_begin
294
+ class MarkStepBox:
295
+ mark_step_counter = 0
296
+
297
+
298
+ # We need to register this as an object that will be copied over as TLS when new
299
+ # threads are created in autograd
300
+ torch._C._stash_obj_in_tls("tree_manager_containers", local.tree_manager_containers)
301
+ torch._C._stash_obj_in_tls("tree_manager_locks", local.tree_manager_locks)
302
+
303
+
304
+ def mark_step_begin():
305
+ "Indicates that a new iteration of inference or training is about to begin."
306
+
307
+ # iterate down to distinguish from GenerationTracking counter
308
+ MarkStepBox.mark_step_counter -= 1
309
+
310
+
311
+ def reset_cudagraph_trees():
312
+ "Clear all cudagraph trees"
313
+ # see shutdown below for why this is necessary
314
+ container_dict = get_obj(local, "tree_manager_containers")
315
+ locks_dict = get_obj(local, "tree_manager_locks")
316
+ for device, lock in locks_dict.items():
317
+ with lock:
318
+ container = container_dict.get(device)
319
+ if not container or not container.tree_manager:
320
+ continue
321
+
322
+ container.tree_manager.shutdown()
323
+
324
+ _set_cached_tensors_enabled(False)
325
+ container_dict.clear()
326
+
327
+ MarkStepBox.mark_step_counter = 0
328
+
329
+
330
+ def get_obj(local, attr_name):
331
+ if hasattr(local, attr_name):
332
+ return getattr(local, attr_name)
333
+ else:
334
+ assert torch._C._is_key_in_tls(attr_name)
335
+ return torch._C._get_obj_in_tls(attr_name)
336
+
337
+
338
+ def get_container(device_index: int):
339
+ container_dict = get_obj(local, "tree_manager_containers")
340
+ lock = get_obj(local, "tree_manager_locks")[device_index]
341
+
342
+ with lock:
343
+ if device_index not in container_dict:
344
+ container_dict[device_index] = TreeManagerContainer(device_index)
345
+
346
+ return container_dict[device_index]
347
+
348
+
349
+ def get_manager(
350
+ device_index: int, create_if_none_exists=True
351
+ ) -> Optional[CUDAGraphTreeManager]:
352
+ if create_if_none_exists:
353
+ return get_container(device_index).get_tree_manager()
354
+ return get_container(device_index).tree_manager
355
+
356
+
357
+ def cudagraphify_impl(model, inputs, static_input_idxs, *args, **kwargs):
358
+ fn_cache: Dict[Tuple[int, ...], Callable[..., Any]] = {}
359
+
360
+ # Detect int inputs: we need to index on these
361
+ int_key = [i for i, v in enumerate(inputs) if isinstance(v, int)]
362
+ get_ints: Any = operator.itemgetter(*int_key) if int_key else lambda _: None
363
+
364
+ del inputs
365
+
366
+ def deferred_cudagraphify(inputs):
367
+ int_key = get_ints(inputs)
368
+ fn = fn_cache.get(int_key)
369
+ if fn is not None:
370
+ return fn(inputs)
371
+
372
+ log.info("recording cudagraph tree for %s", int_key)
373
+
374
+ # first get indices we need to check to align, then update our static inputs,
375
+ # and finally copy
376
+ check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
377
+ new_static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
378
+ copy_misaligned_inputs(inputs, check_input_idxs)
379
+
380
+ fn, out = cudagraphify(model, inputs, new_static_input_idxs, *args, **kwargs)
381
+ fn = align_inputs_from_check_idxs(fn, inputs_to_check=check_input_idxs)
382
+ fn_cache[int_key] = fn
383
+
384
+ return out
385
+
386
+ return deferred_cudagraphify
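A small illustrative snippet (not from the source) of how the int keying above behaves: integer inputs, e.g. dynamic sizes, select which recorded function to reuse, and a new int value triggers a fresh recording.

import operator

inputs = ["t0", 5, "t1"]  # stand-ins for [Tensor, int, Tensor]
int_idxs = [i for i, v in enumerate(inputs) if isinstance(v, int)]
get_ints = operator.itemgetter(*int_idxs) if int_idxs else (lambda _: None)
print(get_ints(inputs))   # -> 5; note a single index yields a scalar, not a tuple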
387
+
388
+
389
+ def cudagraphify(
390
+ model,
391
+ inputs,
392
+ static_input_idxs=(),
393
+ *,
394
+ device_index: int,
395
+ is_backward: bool,
396
+ is_inference: bool,
397
+ stack_traces: Optional[StackTraces] = None,
398
+ constants: Tuple[torch.Tensor, ...] = (),
399
+ ):
400
+ manager = get_container(device_index).get_tree_manager()
401
+ assert not (is_backward and is_inference)
402
+ mode = (
403
+ CompilationMode.BACKWARD
404
+ if is_backward
405
+ else (CompilationMode.INFERENCE if is_inference else CompilationMode.FORWARD)
406
+ )
407
+
408
+ return manager.add_function(
409
+ model,
410
+ inputs,
411
+ static_input_idxs,
412
+ stack_traces,
413
+ mode,
414
+ constants,
415
+ )
416
+
417
+
418
+ class StorageWeakRefWrapper:
419
+ """
420
+ Wrapper around a storage weak ref. Will deallocate it upon expiration if invoked.
421
+ """
422
+
423
+ __slots__ = ["ref", "_data_ptr", "extra_ref_check"]
424
+
425
+ storage_ref: Optional[StorageWeakRef]
426
+
427
+ def __init__(
428
+ self,
429
+ inp: Union[Tensor, UntypedStorage],
430
+ extra_ref_check: Optional[Callable[[], None]] = None,
431
+ ):
432
+ """
433
+ extra_ref_check is an additional check we need to run to check if the
434
+ weak ref has expired. In checking storage use count we assume extra_ref_check
435
+ will hold an additional reference to the storage.
436
+ """
437
+ if isinstance(inp, Tensor):
438
+ stor = inp.untyped_storage()
439
+ else:
440
+ assert isinstance(inp, UntypedStorage)
441
+ stor = inp
442
+ self.ref = StorageWeakRef(stor)
443
+ self._data_ptr = stor.data_ptr()
444
+ self.extra_ref_check = extra_ref_check
445
+
446
+ @classmethod
447
+ def from_weakref_and_data_ptr(cls, cdata, data_ptr, extra_ref_check=None):
448
+ instance = cls.__new__(cls)
449
+ instance._data_ptr = data_ptr
450
+ instance.ref = StorageWeakRef.from_weakref(cdata)
451
+ instance.extra_ref_check = extra_ref_check
452
+ return instance
453
+
454
+ def __call__(self) -> Optional[StorageWeakRefPointer]:
455
+ if self.expired():
456
+ return None
457
+
458
+ return self.ref.cdata
459
+
460
+ def swap_weakref(self, cdata):
461
+ self.ref.__del__()
462
+ self.ref.cdata = cdata
463
+
464
+ def data_ptr(self) -> int:
465
+ "NB: returns the data ptr even if the storage has expired"
466
+ return self._data_ptr
467
+
468
+ def remove_extra_reference(self):
469
+ self.extra_ref_check = None
470
+
471
+ def expired(self):
472
+ if self.extra_ref_check is not None and not self.extra_ref_check():
473
+ return False
474
+
475
+ # if extra_ref_check is not None we expect an additional reference
476
+ stor_count = torch._C._storage_Use_Count(self.ref.cdata)
477
+ return (stor_count - (self.extra_ref_check is not None)) == 0
478
+
479
+ def __repr__(self):
480
+ if self.ref is None or self.ref.expired():
481
+ return f"StorageWeakRefWrapper to {self.data_ptr()}; dead"
482
+ else:
483
+ return f"StorageWeakRefWrapper to {self.data_ptr()}; alive"
484
+
485
+
486
+ def is_live(weak_ref: Optional[StorageWeakRefWrapper]) -> bool:
487
+ return maybe_deref(weak_ref) is not None
488
+
489
+
490
+ def maybe_deref(
491
+ weak_ref: Optional[StorageWeakRefWrapper],
492
+ ) -> Optional[Tuple[StorageWeakRefPointer, int]]:
493
+ if weak_ref is None:
494
+ return None
495
+ r = weak_ref()
496
+ if r is None:
497
+ return None
498
+ # NB: r.data_ptr() does not necessarily equal weak_ref.data_ptr()
499
+ return r, weak_ref.data_ptr()
500
+
501
+
502
+ @contextlib.contextmanager
503
+ def _use_cuda_memory_pool_manager(device, mem_pool, stream):
504
+ """
505
+ Context manager to use the cuda graph pool for new allocations. If you use this manager,
506
+ all cudagraph tensors in use should be reflected in the allocator or they will be overwritten.
507
+ existing_graph should already have been used in a capture, and the mem_pool must already exist,
508
+ because this manager will not preserve a reference to the pool which keeps it alive.
509
+ """
510
+ torch.cuda.synchronize()
511
+ stream.wait_stream(torch.cuda.current_stream())
512
+
513
+ with torch.cuda.stream(stream), torch.device(device):
514
+ torch._C._cuda_beginAllocateCurrentStreamToPool(device, mem_pool)
515
+ try:
516
+ yield
517
+ finally:
518
+ torch._C._cuda_endAllocateCurrentStreamToPool(device)
519
+ torch._C._cuda_releasePool(device, mem_pool)
520
+
521
+
522
+ def map_to_ref(t: Optional[Tensor]) -> Optional[StorageWeakRefWrapper]:
523
+ if not isinstance(t, torch.Tensor):
524
+ assert t is None
525
+ return None
526
+ return StorageWeakRefWrapper(t)
527
+
528
+
529
+ # A path index of (depth, offset) indices into a graph that is `depth` number of nodes from the root
530
+ # at graph output offset
531
+ PathOutputIndex = Tuple[int, int]
532
+
533
+ # For each node in the path, for each output, is the output alive
534
+ PathLiveness = List[List[bool]]
535
+
536
+ StackTraces = List[Optional[str]]
537
+
538
+
539
+ class CUDAWarmupNode:
540
+ """
541
+ Simplified wrapper around a CUDA model that wraps outputs in storage refs and exposes
542
+ APIs to get the live storages in the current chain of warmup.
543
+
544
+ A CUDAWarmupNode may have either CUDAGraphNode or CUDAWarmupNode as a parent, but may only have
545
+ CUDAWarmupNode as children, because we cannot record or execute with tensors which do not have stable
546
+ memory addresses.
547
+
548
+ CUDAWarmupNode and CUDAGraphNode have a number of differences that make it easier to use separate classes.
549
+ - Much of the CUDAGraphNode logic & initialization is based on the tensor properties of first recording. In the
550
+ first instance of warmup, these are not finalized yet.
551
+ - All inputs to the RecordedFunction must be copied over to the cuda graph memory pool; this is unnecessary in warmup.
552
+ - CUDAWarmup is only used once and so does not need to optimize as much bookkeeping. It is much simpler.
553
+
554
+ NB: this class and CUDAGraphNode need to expose `path_live_weakrefs`, `all_outputs_are_dead`, and
555
+ `self.outputs_weakrefs`, `stack_traces`, and `tensor_weakrefs` for compatibility.
556
+ """
557
+
558
+ def __init__(
559
+ self,
560
+ wrapped_function: WrappedFunction,
561
+ parent,
562
+ cuda_graphs_pool: Tuple[int, int],
563
+ existing_cuda_graph: Optional[torch.cuda.CUDAGraph],
564
+ device_index: int,
565
+ stack_traces: Optional[StackTraces],
566
+ stream: torch.cuda.Stream,
567
+ already_warm: bool,
568
+ ):
569
+ self.wrapped_function = wrapped_function
570
+ self.parent = parent
571
+ self.cuda_graphs_pool = cuda_graphs_pool
572
+ self.outputs_weakrefs: List[Optional[StorageWeakRefWrapper]] = []
573
+ self.tensor_weakrefs: List[Optional[TensorWeakRef]] = []
574
+ self.existing_cuda_graph = existing_cuda_graph
575
+ self.has_run = False
576
+ self.device_index = device_index
577
+ self.stack_traces = stack_traces
578
+ self.stream = stream
579
+ self.already_warm = already_warm
580
+
581
+ def run(self, new_inputs):
582
+ assert not self.has_run, "Wrapped function should never be run twice"
583
+
584
+ # See: output_is_alias_of_persistent_static_inputs below. We should only be returning freshly created
585
+ # storages in path_live_weakrefs.
586
+ existing_path_data_ptrs = {
587
+ t.data_ptr() for t in self.path_live_weakrefs() if t()
588
+ }
589
+
590
+ def get_non_cudagraph_inps():
591
+ non_cudagraph_inps = set()
592
+ for t in itertools.chain(new_inputs, self.wrapped_function.constants):
593
+ if (
594
+ isinstance(t, torch.Tensor)
595
+ and t.untyped_storage().data_ptr() not in existing_path_data_ptrs
596
+ ):
597
+ non_cudagraph_inps.add(t.untyped_storage().data_ptr())
598
+ return non_cudagraph_inps
599
+
600
+ non_cudagraph_inps = get_non_cudagraph_inps()
601
+
602
+ if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
603
+ refs = list(self.path_live_weakrefs())
604
+ check_memory_pool(self.device_index, self.cuda_graphs_pool, refs)
605
+
606
+ with torch.cuda.device(
607
+ self.device_index
608
+ ), disable_conv_cache_emptying(), clear_cublas_manager(), _use_cuda_memory_pool_manager(
609
+ self.device_index, self.cuda_graphs_pool, self.stream
610
+ ), get_history_recording():
611
+ out = self.wrapped_function.model(new_inputs)
612
+
613
+ # sync up stream used in `_use_cuda_memory_pool_manager` - TODO - wait stream instead ?
614
+ torch.cuda.synchronize()
615
+
616
+ assert len(new_inputs) == 0
617
+
618
+ # sdpa returns cpu tensors when not recording cuda graph
619
+ def add_ref(o):
620
+ return (
621
+ o is not None
622
+ and isinstance(o, torch.Tensor)
623
+ and o.is_cuda
624
+ and o.untyped_storage().data_ptr() not in non_cudagraph_inps
625
+ and o.untyped_storage().data_ptr() != 0
626
+ )
627
+
628
+ self.outputs_weakrefs.extend(
629
+ [map_to_ref(o) if add_ref(o) else None for o in out]
630
+ )
631
+ self.tensor_weakrefs.extend(
632
+ [TensorWeakRef(o) if add_ref(o) else None for o in out]
633
+ )
634
+
635
+ if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
636
+ out_refs = self.path_live_weakrefs()
637
+ new_storages = [
638
+ t for t in out_refs if t.data_ptr() not in non_cudagraph_inps
639
+ ]
640
+ check_memory_pool(self.device_index, self.cuda_graphs_pool, new_storages)
641
+
642
+ return out
643
+
644
+ @property
645
+ def _path_from_root(self):
646
+ nodes = []
647
+ node = self
648
+ while node:
649
+ nodes.append(node)
650
+ node = node.parent
651
+
652
+ yield from reversed(nodes)
653
+
654
+ def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
655
+ "Returns all live storages weakrefs that created by nodes in this path"
656
+ for node in self._path_from_root:
657
+ for output in node.outputs_weakrefs:
658
+ if is_live(output):
659
+ yield output
660
+
661
+ def all_outputs_are_dead(self):
662
+ return not list(self.path_live_weakrefs())
663
+
664
+
665
+ # Aliases for List that say what the indices denote
666
+ InputList = List # input indexes
667
+ OutputList = List # output indexes
668
+ LevelList = List # levels (distance from root of tree)
669
+
670
+
671
+ class OutputAliasInfo:
672
+ pass
673
+
674
+
675
+ class _UnaliasedStorage(OutputAliasInfo):
676
+ "Singleton to mark that the graph output constructs a new alias or is None"
677
+ pass
678
+
679
+
680
+ UnaliasedStorage = _UnaliasedStorage()
681
+
682
+
683
+ class AliasesPriorGraphOutput(OutputAliasInfo):
684
+ "Marks that the graph output aliases an output of a prior graph"
685
+ __slots__ = ["index"]
686
+
687
+ index: PathOutputIndex
688
+
689
+ def __init__(self, index: PathOutputIndex):
690
+ assert isinstance(index, tuple)
691
+ self.index = index
692
+
693
+
694
+ class AliasesNewOutput(OutputAliasInfo):
695
+ "Marks that the graph output aliases an index in the new, returned outputs"
696
+
697
+ __slots__ = ["index"]
698
+
699
+ index: int
700
+
701
+ def __init__(self, index):
702
+ assert isinstance(index, int)
703
+ self.index = index
704
+
705
+
706
+ class CUDAGraphNode:
707
+ """
708
+ A single recording of a function into a CUDA Graph. Recordings of CUDA Graphs share a single memory pool
709
+ and are structured into a tree, where there is a single recording that can precede it (parent) and multiple
710
+ subsequent recordings that may follow (children). A node will have no parent if it is the first recording
711
+ in a tree; i.e., when it is first recorded, there are no live tensors from a previous recording which
712
+ would force a dependency.
713
+
714
+ On first recording, all of the live tensors in the current CUDA Graph Node path will be
715
+ reflected in the corresponding private pool. On subsequent executions, the caching allocator
716
+ is unaffected when the graph is replayed.
717
+
718
+ In order to support recording a subsequent cuda graph recording after execution of this graph,
719
+ we checkpoint the state of the memory pool so that it may later be resumed.
720
+
721
+ WrappedFunction should have already been warmed up prior to invocation.
722
+
723
+ See [setCheckpointPoolState] for further explanation, as well as
724
+ https://user-images.githubusercontent.com/13564/222815509-374f3400-f83d-4f7d-8fa6-4a092b3250bb.png
725
+ """
726
+
727
+ def __init__(
728
+ self,
729
+ wrapped_function: WrappedFunction,
730
+ id: GraphID,
731
+ parent: Optional[CUDAGraphNode],
732
+ inputs: List[Tensor],
733
+ cuda_graphs_pool: Tuple[int, int],
734
+ device_index: int,
735
+ stack_traces: Optional[StackTraces],
736
+ stream: torch.cuda.Stream,
737
+ ):
738
+ assert isinstance(inputs, (list, tuple))
739
+
740
+ self.wrapped_function = wrapped_function
741
+ self.id = id
742
+ self.device = device_index
743
+ self.stack_traces = stack_traces
744
+ self.stream = stream
745
+
746
+ # if this is a root parent will be None. use weakref to prevent reference cycle
747
+ self._parent = weakref.ref(parent) if parent is not None else None
748
+ # reference to the shared memory pool for the entire cuda graphs tree
749
+ self.cuda_graphs_pool = cuda_graphs_pool
750
+
751
+ # A single wrapped function may be recorded multiple times if memory patterns or
752
+ # invariants change from one execution to the next
753
+ self.children: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
754
+
755
+ # StorageWeakRef maintains whether the Storage C++ object remains allocated,
756
+ # not whether the corresponding memory has been deallocated. In order
757
+ # to use them to track memory deallocations we must maintain a single StorageWeakRef
758
+ # for all Storages that reference that memory (even if we are constructing Storages
759
+ # that do not have a deallocator function). We maintain one single storage_cache
760
+ # as we execute any tree path. When we retrieve a storage from the cache we
761
+ # check that it is still alive, and we hash based on observed recording data ptr
762
+ # and storage cdata.
763
+
764
+ # we preserve a single reference to executed outputs that is then referenced
765
+ # in children to avoid children having to chase parent pointers in the hot path
766
+ # DO NOT reassign output_weakrefs, only call `clear()`
767
+ # Path is a series of nodes from root to the current node
768
+ self.outputs_weakrefs: OutputList[Optional[StorageWeakRefWrapper]] = []
769
+ self.path_weakrefs: LevelList[OutputList[Optional[StorageWeakRefWrapper]]] = [
770
+ node.outputs_weakrefs for node in self._path_from_root
771
+ ]
772
+ self.path_stacktraces: LevelList[StackTraces] = [
773
+ node.stack_traces for node in self._path_from_root
774
+ ]
775
+ self.tensor_weakrefs: OutputList[Optional[TensorWeakRef]] = []
776
+
777
+ # tensors which are outputs of previous graphs in the tree
778
+ self.cudagraph_managed_idxs: List[int] = [
779
+ idx
780
+ for idx, t in enumerate(inputs)
781
+ if isinstance(t, torch.Tensor) and self._is_cuda_graph_recorded_tensor(t)
782
+ ]
783
+
784
+ self.static_input_idxs: List[int] = list(
785
+ set(wrapped_function.static_input_idxs) | set(self.cudagraph_managed_idxs)
786
+ )
787
+
788
+ self.static_input_data_ptrs: InputList[Optional[int]] = [
789
+ (
790
+ inputs[i].data_ptr()
791
+ if isinstance(inputs[i], torch.Tensor) and i in self.static_input_idxs
792
+ else None
793
+ )
794
+ for i in range(len(inputs))
795
+ ]
796
+
797
+ # When we checkpoint, and free generations, we will be manually freeing the outputs
798
+ # of CUDAGraphNodes. We should not be freeing parameters, nor do we need to account for
799
+ # their liveness (they are static), so we need to compute which outputs are aliases of
800
+ # parameters. Some static inputs are saved tensors from the forward that die in the backward.
801
+ # Their locations are static but lifetimes are not. We only include the persistent static
802
+ # data ptrs below because the non persistent data ptrs may be outputs of this record and
803
+ # fresh allocations.
804
+
805
+ # precompute expanded dims to avoid computing in the hot path
806
+ self.expanded_dims: List[List[int]] = [
807
+ get_expanded_dims(x)
808
+ if isinstance(x, torch.Tensor) and idx not in self.static_input_idxs
809
+ else []
810
+ for idx, x in enumerate(inputs)
811
+ ]
812
+
813
+ # For each node in path, which outputs were observed to be live
814
+ # before invoking graph recording, and after graph recording
815
+ self.recorded_liveness_before_graph: LevelList[OutputList[bool]] = []
816
+ self.recorded_liveness_after_graph: LevelList[OutputList[bool]] = []
817
+
818
+ # List of Tuples of (depth, output_index) that index into node at depth
819
+ # number of nodes from root and output_index of outputs. Will index into
820
+ # path_weakrefs.
821
+ self.expected_dead_indices_before_graph: List[PathOutputIndex] = []
822
+ self.expected_dead_indices_after_graph: List[PathOutputIndex] = []
823
+
824
+ # all live indices after graph recording
825
+ self.live_indices_after_graph: List[PathOutputIndex] = []
826
+
827
+ if self.parent is not None:
828
+ previous_liveness = self.parent.recorded_liveness_after_graph
829
+ curr_liveness = self._get_liveness(self.path_weakrefs)
830
+
831
+ different_indices = self._get_different_indices(
832
+ previous_liveness, curr_liveness
833
+ )
834
+
835
+ self.recorded_liveness_before_graph = curr_liveness
836
+ self.expected_dead_indices_before_graph = different_indices
837
+
838
+ recording_inputs = self._allocate_and_copy_recording_inputs(inputs)
839
+ # recording inputs will copy over memory, so we can free non recording inputs
840
+ inputs.clear()
841
+ del inputs
842
+
843
+ # graph used for recording model invocation
844
+ self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
845
+
846
+ # we allocate non-static inputs within the same memory pool as the CUDAGraph
847
+ # which we will record the model with. For memory efficiency, it is important
848
+ # to reclaim the input memory when the inputs are no longer live. To accomplish this,
849
+ # we reconstruct tensors at the correct data pointers of our inputs which are
850
+ # non owning and do not prevent deallocation. On subsequent executions, input values
851
+ # will be copied over to these tensors.
852
+ self.reconstructed_inputs: InputList[Union[Tensor, int]] = [
853
+ self._reconstruct_from_tensor_metadata(self._tensor_metadata(x))
854
+ if isinstance(x, torch.Tensor)
855
+ else x
856
+ for x in recording_inputs
857
+ ]
858
+
859
+ # DO THE RECORDING!!!
860
+ # We record the CUDA graph in the constructor of CUDAGraphNode, which
861
+ # gives you what the CPU side compute of the function would do. We
862
+ # don't throw the recording outputs away: their memory is
863
+ # correctly accounted for in the CUDAGraphs caching allocator. This
864
+ # means on the very FIRST run of the CUDA graph node, we can directly
865
+ # do more recording, because we have a valid caching allocator state.
866
+ # NB: This relies on run() being called immediately after the
867
+ # constructor, otherwise this optimization would not be valid.
868
+
869
+ # initialized below in _record
870
+
871
+ self.checkpointed_caching_state: Optional[AllocatorState] = None
872
+
873
+ # Output Storage Alias information, can be:
874
+ # - A new, unaliased storage, or the output is None
875
+ # - An alias of an output of a prior graph
876
+ # - An alias of an output already created in the reconstructed outputs
877
+ # This is None if the output in question is an int
878
+ self.output_storage_alias: OutputList[Optional[OutputAliasInfo]] = []
879
+
880
+ # is the output Storage unaliased in subsequent outputs, of all subsequent paths
881
+ # if it is, we cached the output tensor and adjust storage liveness tracking to also
882
+ # check if the output tensor does not have an additional python reference.
883
+ # If a descendent node discovers it has an alias of a prior output, then the output
884
+ # will no longer be cached in the ancestor.
885
+ # The large majority of tensors are unaliased, and preserving aliased output tensors would add
886
+ # significant additional complexity with marginal gains
887
+ # The cached tensor outputs are added on the first execution, and cleared whenever we need
888
+ # to do subsequent recording
889
+ self.unaliased_in_all_paths: OutputList[bool] = []
890
+ self.cached_tensor_outputs: OutputList[Optional[Tensor]] = []
891
+
892
+ # if an output aliases a static, persistent input then the corresponding Tensor will
893
+ # be set here. These are different than cached tensors, because they are tensors that
894
+ # are aliases of parameters that are always live.
895
+ self.static_output_tensors: OutputList[Optional[Tensor]] = []
896
+
897
+ # Cleared after recording
898
+ self.recording_outputs: Optional[
899
+ OutputList[Union[torch.Tensor, int]]
900
+ ] = self._record(wrapped_function.model, recording_inputs)
901
+ self.outputs_metadata: OutputList[Union[Dict[str, Any], int, None]] = []
902
+
903
+ # As with inputs, we do not want to keep the outputs permanently alive because that would prevent
904
+ # their memory being reclaimed in subsequent cuda graph recordings. We record the tensor metadata
905
+ # needed to reconstruct instead.
906
+ assert self.recording_outputs is not None
907
+ for out in self.recording_outputs:
908
+ if isinstance(out, torch.Tensor):
909
+ self.outputs_metadata.append(
910
+ self._tensor_metadata(out, ignore_storage_offset=False)
911
+ )
912
+ else:
913
+ assert isinstance(out, (int, type(None))), type(out)
914
+ self.outputs_metadata.append(out)
915
+
916
+ self.graph.replay()
917
+
918
+ def _copy_input(self, idx, dst, src):
919
+ expanded_dims = self.expanded_dims[idx]
920
+ dst = index_expanded_dims(dst, expanded_dims)
921
+ src = index_expanded_dims(src, expanded_dims)
922
+ # TODO - one jit kernel across multiple inputs
923
+ dst.copy_(src)
924
+
925
+ def run_first_inputs(self, new_inputs):
926
+ if config.triton.fast_path_cudagraph_asserts:
927
+ self.debug_check_invariants_before_invocation()
928
+
929
+ # graph is already invoked in the __init__
930
+ # inputs are copied over in _allocate_recording_inputs and subsequently cleared
931
+ assert len(new_inputs) == 0
932
+ outputs = self.recording_outputs
933
+ self.recording_outputs = None
934
+ return outputs
935
+
936
+ def run(self, new_inputs):
937
+ if config.triton.fast_path_cudagraph_asserts:
938
+ self.debug_check_invariants_before_invocation()
939
+
940
+ assert len(self.static_input_data_ptrs) == len(new_inputs)
941
+ # NB: this ranges over non-static inputs too
942
+ for idx, data_ptr in enumerate(self.static_input_data_ptrs):
943
+ if idx in self.cudagraph_managed_idxs:
944
+ continue
945
+ if not isinstance(new_inputs[idx], torch.Tensor):
946
+ pass
947
+ elif data_ptr is not None:
948
+ # static input, e.g., parameter
949
+ assert data_ptr == new_inputs[idx].data_ptr()
950
+ else:
951
+ # non-static input, need to copy it into CUDA graph
952
+ dst = self.reconstructed_inputs[idx]
953
+ src = new_inputs[idx]
954
+ self._copy_input(idx, dst, src)
955
+
956
+ new_inputs.clear()
957
+ self.run_graph()
958
+
959
+ outputs = self.reconstruct_outputs()
960
+ self.debug_check_invariants_after_invocation()
961
+
962
+ return outputs
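+
+ # Replay path in a nutshell: copy non-static inputs into the recorded placeholders, replay the
+ # captured graph, then rebuild output tensors from the saved metadata. What a caller observes,
+ # sketched with hypothetical tensors:
+ #   args = [inp1, inp2]            # hypothetical inputs
+ #   outs = node.run(args)          # copies non-static inputs, replays, reconstructs outputs
+ #   assert len(args) == 0          # run() clears the input list in place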
963
+
964
+ def reconstruct_outputs(self):
965
+ "Reconstruct output tensors according to their saved metadata and alias information"
966
+
967
+ # Cached tensors will not yet be set on the first execution
968
+ # They are also cleared in checkpointing, so if we checkpoint this node
969
+ # and then execute it again we will need to repopulate cached tensors
970
+ if not self.cached_tensor_outputs:
971
+ self._initialize_cached_tensors()
972
+
973
+ outputs: List[Optional[Union[int, torch.Tensor]]] = []
974
+
975
+ for i, (storage_info, metadata) in enumerate(
976
+ zip(self.output_storage_alias, self.outputs_metadata)
977
+ ):
978
+ if not isinstance(metadata, dict): # tensor metadata
979
+ assert isinstance(metadata, (int, type(None)))
980
+ outputs.append(metadata)
981
+ continue
982
+
983
+ cached_t = self.cached_tensor_outputs[i]
984
+ if cached_t is not None:
985
+ # No need to update weakrefs, already correctly initialized
986
+ outputs.append(cached_t)
987
+ continue
988
+
989
+ static_t = self.static_output_tensors[i]
990
+ if static_t is not None:
991
+ assert self.outputs_weakrefs[i] is None
992
+ outputs.append(static_t)
993
+ continue
994
+
995
+ storage = self.prepare_alias_info_for_tensor_construction(
996
+ storage_info, metadata
997
+ )
998
+
999
+ if isinstance(storage, UntypedStorage) or storage is None:
1000
+ out = self._reconstruct_from_tensor_metadata(metadata, storage)
1001
+ else:
1002
+ assert isinstance(storage, int)
1003
+ out = self._reconstruct_from_tensor_metadata(
1004
+ metadata, cast(torch.Tensor, outputs[storage]).untyped_storage()
1005
+ )
1006
+
1007
+ outputs.append(out)
1008
+ w = self.outputs_weakrefs[i]
1009
+ assert w is not None
1010
+ w.swap_weakref(out.untyped_storage()._weak_ref())
1011
+
1012
+ return outputs
1013
+
1014
+ def prepare_alias_info_for_tensor_construction(
1015
+ self,
1016
+ out_alias_info: Optional[OutputAliasInfo],
1017
+ metadata: Union[Dict[str, Any], int, None],
1018
+ ) -> Union[UntypedStorage, None, int]:
1019
+ if (
1020
+ isinstance(metadata, (int, type(None)))
1021
+ or out_alias_info is UnaliasedStorage
1022
+ ):
1023
+ return None
1024
+
1025
+ if isinstance(out_alias_info, AliasesPriorGraphOutput):
1026
+ depth, existing_output_index = out_alias_info.index
1027
+ ref = self.path_weakrefs[depth][existing_output_index]
1028
+ assert ref is not None
1029
+ return torch.UntypedStorage._new_with_weak_ptr(ref())
1030
+
1031
+ assert isinstance(out_alias_info, AliasesNewOutput)
1032
+ return out_alias_info.index
1033
+
1034
+ def prepare_storages_for_construction(
1035
+ self,
1036
+ ) -> List[Union[UntypedStorage, None, int]]:
1037
+ output_storages = []
1038
+ for output_storage_alias, metadata in zip(
1039
+ self.output_storage_alias, self.outputs_metadata
1040
+ ):
1041
+ output_storages.append(
1042
+ self.prepare_alias_info_for_tensor_construction(
1043
+ output_storage_alias, metadata
1044
+ )
1045
+ )
1046
+
1047
+ return output_storages
1048
+
1049
+ def run_graph(self):
1050
+ assert self.graph is not None
1051
+ self.graph.replay()
1052
+
1053
+ def all_outputs_are_dead(self):
1054
+ "All outputs of the path from this node to its root are dead"
1055
+ for depth, output_index in self.live_indices_after_graph:
1056
+ if is_live(self.path_weakrefs[depth][output_index]):
1057
+ return False
1058
+ return True
1059
+
1060
+ def _record(self, model, inputs):
1061
+ "Record the model"
1062
+
1063
+ def static_input_iter():
1064
+ for i in self.wrapped_function.static_input_idxs:
1065
+ if isinstance(
1066
+ inputs[i], torch.Tensor
1067
+ ) and not self._is_cuda_graph_recorded_tensor(inputs[i]):
1068
+ yield inputs[i]
1069
+
1070
+ # see: output_is_alias_of_persistent_static_inputs above
1071
+ static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper] = {
1072
+ inp.untyped_storage().data_ptr(): StorageWeakRefWrapper(inp)
1073
+ for inp in itertools.chain(
1074
+ static_input_iter(), self.wrapped_function.constants
1075
+ )
1076
+ }
1077
+
1078
+ if config.triton.slow_path_cudagraph_asserts:
1079
+ # need to use parent live weakrefs because live_indices isn't set yet
1080
+ memory = (
1081
+ [] if self.parent is None else list(self.parent.path_live_weakrefs())
1082
+ )
1083
+ memory += [
1084
+ StorageWeakRefWrapper(elem)
1085
+ for i, elem in enumerate(inputs)
1086
+ if isinstance(elem, torch.Tensor)
1087
+ and i not in self.wrapped_function.static_input_idxs
1088
+ and elem.untyped_storage().data_ptr() != 0
1089
+ ]
1090
+ check_memory_pool(self.device, self.cuda_graphs_pool, memory)
1091
+
1092
+ with preserve_rng_state(), torch.cuda.device(
1093
+ self.device
1094
+ ), clear_cublas_manager(), torch.cuda.graph(
1095
+ self.graph,
1096
+ stream=self.stream,
1097
+ pool=self.cuda_graphs_pool,
1098
+ capture_error_mode="thread_local",
1099
+ ), get_history_recording():
1100
+ static_outputs = model(inputs)
1101
+
1102
+ # running model should reclaim memory
1103
+ assert len(inputs) == 0
1104
+
1105
+ if not isinstance(static_outputs, (list, tuple)):
1106
+ static_outputs = (static_outputs,)
1107
+
1108
+ self._add_first_outputs(static_outputs, static_input_persistent_storage_ptrs)
1109
+
1110
+ return static_outputs
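+
+ # The capture above is the standard torch.cuda.graph pattern, specialized to share the tree-wide
+ # memory pool and recording stream. A minimal standalone sketch of the same idea, with a
+ # hypothetical model and input that are not part of this class:
+ #   g = torch.cuda.CUDAGraph()
+ #   pool = torch.cuda.graph_pool_handle()
+ #   with torch.cuda.graph(g, pool=pool, capture_error_mode="thread_local"):
+ #       static_out = model(static_inp)
+ #   g.replay()   # reruns the captured kernels against the captured buffers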
1111
+
1112
+ def _add_first_outputs(
1113
+ self,
1114
+ outputs,
1115
+ static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper],
1116
+ ):
1117
+ "Add the outputs from the first invocation of the node and set up metadata"
1118
+
1119
+ # getting liveness before we have added the outputs to path, so the length
1120
+ # of the two lists is equal
1121
+ prev_liveness = self.recorded_liveness_before_graph
1122
+ curr_liveness = self._get_liveness(self.path_weakrefs)
1123
+
1124
+ delta = self._get_different_indices(prev_liveness, curr_liveness)
1125
+ self.expected_dead_indices_after_graph = delta
1126
+
1127
+ assert len(self.outputs_weakrefs) == 0
1128
+ # index from data pointer to index in outputs
1129
+ output_new_storages_index: Dict[StorageDataPtr, int] = {}
1130
+
1131
+ self.unaliased_in_all_paths = [False for _ in range(len(outputs))]
1132
+ self.static_output_tensors = [None for _ in range(len(outputs))]
1133
+
1134
+ for i, o in enumerate(outputs):
1135
+ if o is None or not isinstance(o, torch.Tensor):
1136
+ self.output_storage_alias.append(UnaliasedStorage)
1137
+ continue
1138
+
1139
+ torch._check(
1140
+ o.is_cuda or o.untyped_storage().data_ptr() == 0,
1141
+ lambda: (
1142
+ "Expected all cuda outputs in cuda graph recording. Non cuda output "
1143
+ f"from {self.stack_traces[i] if self.stack_traces else '(unknown)'}"
1144
+ ),
1145
+ )
1146
+
1147
+ ref = static_input_persistent_storage_ptrs.get(
1148
+ o.untyped_storage().data_ptr(), None
1149
+ )
1150
+ # also treat empty storages as static outputs because we do not need to manage their lifetime
1151
+ # and they should not participate in checkpointing
1152
+ is_empty_storage = o.untyped_storage().data_ptr() == 0
1153
+ if (ref and ref() is not None) or is_empty_storage:
1154
+ self.output_storage_alias.append(None)
1155
+ self.static_output_tensors[i] = o
1156
+ continue
1157
+
1158
+ path_ref = self._is_alias_of_live_recorded_tensor(o)
1159
+ if path_ref is not None:
1160
+ self._mark_prior_graph_output_as_aliased(path_ref)
1161
+ self.output_storage_alias.append(AliasesPriorGraphOutput(path_ref))
1162
+ continue
1163
+
1164
+ if o.untyped_storage().data_ptr() in output_new_storages_index:
1165
+ index = output_new_storages_index[o.untyped_storage().data_ptr()]
1166
+ self.unaliased_in_all_paths[index] = False
1167
+ self.output_storage_alias.append(AliasesNewOutput(index))
1168
+ continue
1169
+
1170
+ output_new_storages_index[o.untyped_storage().data_ptr()] = i
1171
+ self.output_storage_alias.append(UnaliasedStorage)
1172
+ self.unaliased_in_all_paths[i] = True
1173
+
1174
+ if self.stack_traces is None:
1175
+ self.stack_traces = [None for _ in range(len(outputs))]
1176
+ else:
1177
+ assert len(self.stack_traces) == len(
1178
+ outputs
1179
+ ), "Wrong number of stack traces passed in"
1180
+
1181
+ assert not self.outputs_weakrefs
1182
+ for out, static_output_tensor in zip(outputs, self.static_output_tensors):
1183
+ if not isinstance(out, torch.Tensor) or static_output_tensor is not None:
1184
+ self.outputs_weakrefs.append(None)
1185
+ self.tensor_weakrefs.append(None)
1186
+ else:
1187
+ self.outputs_weakrefs.append(StorageWeakRefWrapper(out))
1188
+ self.tensor_weakrefs.append(TensorWeakRef(out))
1189
+
1190
+ self.recorded_liveness_after_graph = self._get_liveness(self.path_weakrefs)
1191
+ self.checkpointed_caching_state = torch._C._cuda_getCheckpointState(
1192
+ self.device, self.cuda_graphs_pool
1193
+ )
1194
+
1195
+ # now, get liveness with outputs added
1196
+ for depth in range(len(self.path_weakrefs)):
1197
+ for output_index in range(len(self.path_weakrefs[depth])):
1198
+ if is_live(self.path_weakrefs[depth][output_index]):
1199
+ self.live_indices_after_graph.append((depth, output_index))
1200
+
1201
+ self.debug_check_invariants_after_invocation()
1202
+ if config.triton.slow_path_cudagraph_asserts:
1203
+ check_memory_pool(
1204
+ self.device, self.cuda_graphs_pool, list(self.path_live_weakrefs())
1205
+ )
1206
+
1207
+ def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex):
1208
+ "Remove a graph output from the unaliased, cached tensors in an ancestor node"
1209
+ depth, output_index = index
1210
+ node = list(self._path_from_root)[depth]
1211
+ node.unaliased_in_all_paths[output_index] = False
1212
+ x = self.path_weakrefs[depth][output_index]
1213
+ assert x is not None
1214
+ x.remove_extra_reference()
1215
+
1216
+ def _initialize_cached_tensors(self):
1217
+ # we should not be clearing output_weakrefs, and they should be set in the first
1218
+ # record run
1219
+ assert len(self.outputs_weakrefs) == len(self.outputs_metadata)
1220
+
1221
+ for i, (storage_info, metadata, make_cached) in enumerate(
1222
+ zip(
1223
+ self.output_storage_alias,
1224
+ self.outputs_metadata,
1225
+ self.unaliased_in_all_paths,
1226
+ )
1227
+ ):
1228
+ if not make_cached:
1229
+ self.cached_tensor_outputs.append(None)
1230
+ continue
1231
+
1232
+ assert storage_info is UnaliasedStorage
1233
+ assert isinstance(metadata, dict)
1234
+ s = self.create_storage(metadata)
1235
+ out = self._reconstruct_from_tensor_metadata(metadata, storage=s)
1236
+
1237
+ # XXX: let autograd know that there will be an additional reference to the tensor
1238
+ # that can be ignored when deciding whether to do gradient buffer inplacing.
1239
+ # Otherwise, inplacing could differ between tracing and subsequent execution.
1240
+ # For some models we tested this led to inputs no longer being in cudagraph pools,
1241
+ # leading to spurious re-recordings.
1242
+ # It also tells the AMP cache that these tensor impls cannot be cached
1243
+ # in dtype conversions.
1244
+
1245
+ torch._C._add_cached_tensor(out)
1246
+
1247
+ self_ref = weakref.ref(self)
1248
+
1249
+ # one reference in our array, and calling sys.getrefcount bumps the refcount by one
1250
+ def check_refcount(i):
1251
+ self_loc = self_ref()
1252
+ if self_loc is None:
1253
+ return False
1254
+ return self_loc.get_output_refcount(i) == 2
1255
+
1256
+ check = functools.partial(check_refcount, i=i)
1257
+
1258
+ self.outputs_weakrefs[i] = StorageWeakRefWrapper(out, extra_ref_check=check)
1259
+ self.cached_tensor_outputs.append(out)
1260
+
1261
+ def get_output_refcount(self, index):
1262
+ return sys.getrefcount(self.cached_tensor_outputs[index])
1263
+
1264
+ @property
1265
+ def parent(self):
1266
+ "unwraps the weakref to _parent"
1267
+ return self._parent() if self._parent is not None else None
1268
+
1269
+ @property
1270
+ def _path_to_root(self):
1271
+ "Returns all nodes in the path starting at self and ending at root"
1272
+ node = self
1273
+ while node:
1274
+ yield node
1275
+ node = node.parent
1276
+
1277
+ @property
1278
+ def _path_from_root(self):
1279
+ "Returns all nodes in the path starting at the root and ending at self"
1280
+ nodes = reversed(list(self._path_to_root))
1281
+ yield from nodes
1282
+
1283
+ def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor):
1284
+ "Is this tensor an output of a node in this path"
1285
+ for output_refs in self.path_weakrefs:
1286
+ for storage_weak_ref in output_refs:
1287
+ if storage_weak_ref is None:
1288
+ continue
1289
+ # don't need to check liveness of storage since the cuda graph managed
1290
+ # memory is never released.
1291
+ data_ptr = storage_weak_ref.data_ptr()
1292
+ if t.untyped_storage().data_ptr() == data_ptr:
1293
+ return True
1294
+
1295
+ return False
1296
+
1297
+ def _is_alias_of_live_recorded_tensor(
1298
+ self, t: torch.Tensor
1299
+ ) -> Optional[PathOutputIndex]:
1300
+ for depth, output_refs in enumerate(self.path_weakrefs):
1301
+ for output_index, storage_ref in enumerate(output_refs):
1302
+ if (storage_and_ptr := maybe_deref(storage_ref)) is not None:
1303
+ storage, ptr = storage_and_ptr
1304
+ if ptr == t.untyped_storage().data_ptr():
1305
+ return (depth, output_index)
1306
+
1307
+ return None
1308
+
1309
+ @staticmethod
1310
+ def _check_liveness(
1311
+ indices: List[PathOutputIndex],
1312
+ output_refs: List[List[Optional[StorageWeakRefWrapper]]],
1313
+ ):
1314
+ "Check that all of the indices specified are dead references"
1315
+ for depth, output_index in indices:
1316
+ w = output_refs[depth][output_index]
1317
+ assert w is not None
1318
+ if w() is not None:
1319
+ return False
1320
+ return True
1321
+
1322
+ def add_child(self, function_id: FunctionID, node: CUDAGraphNode):
1323
+ "Adds node as a a child of self"
1324
+ self.children[function_id].append(node)
1325
+
1326
+ @staticmethod
1327
+ def _get_different_indices(
1328
+ prev: List[List[bool]], curr: List[List[bool]]
1329
+ ) -> List[PathOutputIndex]:
1330
+ "Find indices where the two lists differ."
1331
+ dead_indices = []
1332
+ assert len(prev) <= len(curr)
1333
+ for i, (outputs1, outputs2) in enumerate(zip(prev, curr)):
1334
+ assert len(outputs1) == len(outputs2)
1335
+ for j, (output1, output2) in enumerate(zip(outputs1, outputs2)):
1336
+ if output1 != output2:
1337
+ dead_indices.append((i, j))
1338
+
1339
+ return dead_indices
1340
+
1341
+ @staticmethod
1342
+ def _get_liveness(
1343
+ weakrefs: List[List[Optional[StorageWeakRefWrapper]]],
1344
+ ) -> List[List[bool]]:
1345
+ "Maps weakrefs to true if the reference is alive and false otherwise"
1346
+ if len(weakrefs) == 0:
1347
+ return []
1348
+
1349
+ return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]
1350
+
1351
+ def debug_assert_invariants(
1352
+ self, expected_liveness: List[List[bool]], newly_dead: List[PathOutputIndex]
1353
+ ):
1354
+ if not config.triton.fast_path_cudagraph_asserts:
1355
+ return
1356
+
1357
+ for i, node in enumerate(self._path_from_root):
1358
+ assert self.path_weakrefs[i] is node.outputs_weakrefs
1359
+
1360
+ nodes = list(self._path_from_root)
1361
+
1362
+ live_blocks = get_block_addrs(self.cuda_graphs_pool)
1363
+
1364
+ live_storage_data_ptrs = set()
1365
+ live_storage_weak_ptrs = set()
1366
+
1367
+ for depth, outputs_liveness in enumerate(expected_liveness):
1368
+ for output_idx, output_liveness in enumerate(outputs_liveness):
1369
+ # tensor can die early, but it can't be alive when it should be dead
1370
+ w = self.path_weakrefs[depth][output_idx]
1371
+ if (stor_weak_ptr_and_data_ptr := maybe_deref(w)) is not None:
1372
+ assert output_liveness
1373
+ stor_weak_ptr, stor_data_ptr = stor_weak_ptr_and_data_ptr
1374
+ assert (stor_data_ptr in live_storage_data_ptrs) == (
1375
+ stor_weak_ptr in live_storage_weak_ptrs
1376
+ )
1377
+ live_storage_data_ptrs.add(stor_data_ptr)
1378
+ live_storage_weak_ptrs.add(stor_weak_ptr)
1379
+
1380
+ is_persistent_alias = (
1381
+ nodes[depth].static_output_tensors[output_idx] is not None
1382
+ )
1383
+
1384
+ if is_persistent_alias:
1385
+ assert stor_data_ptr not in live_blocks
1386
+
1387
+ for depth, output_index in newly_dead:
1388
+ assert not is_live(self.path_weakrefs[depth][output_index])
1389
+
1390
+ def debug_check_invariants_before_invocation(self):
1391
+ self.debug_assert_invariants(
1392
+ self.recorded_liveness_before_graph, self.expected_dead_indices_before_graph
1393
+ )
1394
+
1395
+ def debug_check_invariants_after_invocation(self):
1396
+ self.debug_assert_invariants(
1397
+ self.recorded_liveness_before_graph, self.expected_dead_indices_after_graph
1398
+ )
1399
+
1400
+ def data_ptrs_dead_since_invocation(self) -> List[int]:
1401
+ """
1402
+ Since this node was invoked, return data ptrs of all tensor outputs that have died
1403
+ in the current executing tree path.
1404
+ """
1405
+ curr_liveness = self._get_liveness(self.path_weakrefs)
1406
+ _get_different_indices = self._get_different_indices(
1407
+ self.recorded_liveness_after_graph, curr_liveness
1408
+ )
1409
+
1410
+ path = list(self._path_from_root)
1411
+ ptrs_to_deallocate = []
1412
+ for depth, output_index in _get_different_indices:
1413
+ ptrs_to_deallocate.append(
1414
+ path[depth].outputs_metadata[output_index]["data_ptr"]
1415
+ )
1416
+
1417
+ return ptrs_to_deallocate
1418
+
1419
+ def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
1420
+ for i, j in self.live_indices_after_graph:
1421
+ out = self.path_weakrefs[i][j]
1422
+ if out is not None and is_live(out):
1423
+ yield out
1424
+
1425
+ def remove_node_cached_tensors(self):
1426
+ for t in self.cached_tensor_outputs:
1427
+ if t is not None:
1428
+ torch._C._remove_cached_tensor(t)
1429
+ self.cached_tensor_outputs.clear()
1430
+
1431
+ for i, unaliased in enumerate(self.unaliased_in_all_paths):
1432
+ if unaliased:
1433
+ n = self.outputs_weakrefs[i]
1434
+ assert n is not None
1435
+ n.remove_extra_reference()
1436
+
1437
+ def remove_path_cached_tensors(self):
1438
+ for node in self._path_from_root:
1439
+ node.remove_node_cached_tensors()
1440
+
1441
+ def clear_path_state(self):
1442
+ "Clear the path state in this current executing node"
1443
+ # this doesnt actually do anything right now, leaving it as placeholder
1444
+ pass
1445
+
1446
+ @staticmethod
1447
+ def _tensor_metadata(x, ignore_storage_offset=True):
1448
+ assert isinstance(x, torch.Tensor)
1449
+ # We ignore the storage offset for inputs, but not for outputs
1450
+ # TODO: - should we make the storage resizable ?
1451
+ return {
1452
+ "nbytes": x.untyped_storage().nbytes(),
1453
+ "data_ptr": x.untyped_storage().data_ptr(),
1454
+ "size": x.shape,
1455
+ "stride": x.stride(),
1456
+ "dtype": x.dtype,
1457
+ "device": x.device,
1458
+ "storage_offset": x.storage_offset() if not ignore_storage_offset else 0,
1459
+ }
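+
+ # Illustrative sketch (hypothetical values): for a contiguous (4, 8) float32 CUDA tensor this
+ # returns something like
+ #   {"nbytes": 128, "data_ptr": 0x7f4e20000000, "size": torch.Size([4, 8]), "stride": (8, 1),
+ #    "dtype": torch.float32, "device": device("cuda:0"), "storage_offset": 0}
+ # which is all _reconstruct_from_tensor_metadata needs to rebuild an equivalent tensor over the
+ # same cudagraph-pool storage.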
1460
+
1461
+ def _reconstruct_from_tensor_metadata(
1462
+ self, metadata: Dict[str, Any], storage=None
1463
+ ) -> Tensor:
1464
+ s = self.create_storage(metadata) if storage is None else storage
1465
+ return torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata(metadata, s)
1466
+
1467
+ def create_storage(self, metadata):
1468
+ return torch._C._construct_storage_from_data_pointer(
1469
+ metadata["data_ptr"], metadata["device"], metadata["nbytes"]
1470
+ )
1471
+
1472
+ def _allocate_and_copy_recording_inputs(
1473
+ self, inputs
1474
+ ) -> List[Union[torch.Tensor, int]]:
1475
+ """
1476
+ Allocate inputs for non-static, non-cudagraph-managed tensors in the memory pool
1477
+ and copy over the tensor values.
1478
+ """
1479
+
1480
+ torch.cuda.synchronize()
1481
+ self.stream.wait_stream(torch.cuda.current_stream())
1482
+ recording_inputs: List[Union[Tensor, int]] = []
1483
+
1484
+ with warnings.catch_warnings(record=True), torch.cuda.device(
1485
+ self.device
1486
+ ), _use_cuda_memory_pool_manager(
1487
+ self.device,
1488
+ mem_pool=self.cuda_graphs_pool,
1489
+ stream=self.stream,
1490
+ ):
1491
+ for i, inp in enumerate(inputs):
1492
+ if not isinstance(inp, torch.Tensor):
1493
+ assert isinstance(inp, int)
1494
+ recording_inputs.append(inp)
1495
+ elif i not in self.static_input_idxs:
1496
+ # static_input does an allocation!
1497
+ recording_inputs.append(static_input(inp))
1498
+ # copy over and clear non recording input
1499
+ self._copy_input(i, recording_inputs[-1], inp)
1500
+ inputs[i] = None
1501
+ del inp
1502
+ else:
1503
+ recording_inputs.append(inp)
1504
+
1505
+ return recording_inputs
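+
+ # Note that the static_input() allocations above happen inside _use_cuda_memory_pool_manager, so
+ # the recording placeholders for non-static inputs live in the shared cudagraph pool; the caller's
+ # tensors are copied in and then dropped (inputs[i] = None) so their memory can be reclaimed.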
1506
+
1507
+ def check_invariants(self, inputs: List[Tensor]) -> bool:
1508
+ """
1509
+ Checks if this node can be run. The same pattern of tensor liveness and tensors
1510
+ managed in the cudagraph private pool must remain stable.
1511
+ """
1512
+
1513
+ # previously managed data pointers remain stable
1514
+ for idx in self.cudagraph_managed_idxs:
1515
+ if inputs[idx].data_ptr() != self.static_input_data_ptrs[idx]:
1516
+ return False
1517
+
1518
+ if not self._check_liveness(
1519
+ self.expected_dead_indices_before_graph, self.path_weakrefs
1520
+ ):
1521
+ return False
1522
+
1523
+ # the cudagraph managed tensors which died upon recording must also die upon
1524
+ # this invocation. it is too late to check after we've replayed the graph,
1525
+ # because we would have already written over their memory.
1526
+ for idx in self.cudagraph_managed_idxs:
1527
+ inputs[idx] = None # type: ignore[call-overload]
1528
+
1529
+ torch._check(
1530
+ self._check_liveness(
1531
+ self.expected_dead_indices_after_graph, self.path_weakrefs
1532
+ ),
1533
+ lambda: "TODO: graph recording observed an input tensor deallocate during graph "
1534
+ " recording that did not occur during replay. Please file an issue.",
1535
+ )
1536
+ return True
1537
+
1538
+ def num_descendants(self) -> int:
1539
+ "Total number of descendents of this node"
1540
+ num_desc = 0
1541
+ for children in self.children.values():
1542
+ for child in children:
1543
+ num_desc += 1
1544
+ num_desc += child.num_descendants()
1545
+ return num_desc
1546
+
1547
+
1548
+ def get_cudagraph_segments(pool_id):
1549
+ segments = torch.cuda.memory_snapshot()
1550
+ return [segment for segment in segments if segment["segment_pool_id"] == pool_id]
1551
+
1552
+
1553
+ def get_block_addrs(pool_id, live_only=True):
1554
+ blocks = []
1555
+
1556
+ for segment in get_cudagraph_segments(pool_id):
1557
+ addr = segment["address"]
1558
+ for block in segment["blocks"]:
1559
+ if block["state"] == "active_allocated" or not live_only:
1560
+ blocks.append(addr)
1561
+
1562
+ addr += block["size"]
1563
+
1564
+ return blocks
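+
+ # For orientation, the snapshot entries walked above look roughly like this (fields abridged,
+ # values hypothetical):
+ #   {"address": 0x7f4e20000000, "segment_pool_id": pool_id,
+ #    "blocks": [{"size": 512, "state": "active_allocated", "frames": [...]},
+ #               {"size": 1024, "state": "inactive"}]}
+ # Block addresses are recovered by accumulating each block's "size" onto the segment base address.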
1565
+
1566
+
1567
+ def format_tb(frames):
1568
+ formatted_traceback = []
1569
+
1570
+ for entry in frames:
1571
+ formatted_traceback.append(
1572
+ traceback.FrameSummary(entry["filename"], entry["line"], entry["name"])
1573
+ )
1574
+
1575
+ return "".join(traceback.format_list(formatted_traceback))
1576
+
1577
+
1578
+ def check_memory_pool(device, pool_id, live_storages_ptrs: List[StorageWeakRefWrapper]):
1579
+ assert all(
1580
+ isinstance(elem, StorageWeakRefWrapper) for elem in live_storages_ptrs
1581
+ ) # noqa: C419
1582
+ unique_storages = {stor.data_ptr() for stor in live_storages_ptrs if stor()}
1583
+
1584
+ # check if there is a divergence first, then do the expensive snapshot call after
1585
+ # we know it will error
1586
+ if torch._C._cuda_checkPoolLiveAllocations(device, pool_id, unique_storages):
1587
+ return
1588
+
1589
+ # at this point we are past the fast path. we have seen rare cases where a tensor is dead
1590
+ # but hasn't been gc'd yet and gives a false positive for allocated_not_in_live_storages
1591
+ gc.collect()
1592
+
1593
+ segments = get_cudagraph_segments(pool_id)
1594
+
1595
+ allocated_not_in_live_storages = {}
1596
+
1597
+ for segment in segments:
1598
+ addr = segment["address"]
1599
+ for block in segment["blocks"]:
1600
+ if block["state"] == "active_allocated":
1601
+ if addr not in unique_storages:
1602
+ allocated_not_in_live_storages[addr] = block
1603
+ else:
1604
+ unique_storages.remove(addr)
1605
+
1606
+ addr += block["size"]
1607
+
1608
+ torch._check(
1609
+ len(unique_storages) == 0,
1610
+ lambda: f"These storage data ptrs are not allocated in pool {pool_id} but should be {unique_storages}",
1611
+ )
1612
+
1613
+ if len(allocated_not_in_live_storages) != 0:
1614
+ formatted = []
1615
+ for dp, block in allocated_not_in_live_storages.items():
1616
+ trace = format_tb(block.get("frames", []))
1617
+ formatted.append(f"Data Pointer: {dp}, history: \n{trace}")
1618
+ formatted_s = "\n".join(formatted)
1619
+ msg = (
1620
+ f"These live storage data ptrs are in the cudagraph pool but not "
1621
+ f"accounted for as an output of cudagraph trees: \n\n{formatted_s}"
1622
+ )
1623
+ raise RuntimeError(msg)
1624
+
1625
+
1626
+ class ExecutionState(Enum):
1627
+ """
1628
+ Represents the state of the CUDAGraph Tree. Will be NONE if there is no live memory currently allocated
1629
+ in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.
1630
+ """
1631
+
1632
+ NONE = auto()
1633
+ WARMUP = auto()
1634
+ RECORDING = auto()
1635
+ EXECUTION = auto()
1636
+
1637
+
1638
+ class CompilationMode(Enum):
1639
+ FORWARD = auto()
1640
+ BACKWARD = auto()
1641
+ INFERENCE = auto()
1642
+
1643
+
1644
+ class CUDAGraphTreeManager:
1645
+ """
1646
+ Groups individual recordings or executions of cuda graphs into a tree of recordings,
1647
+ checks required invariants, and manages warmups of graphs.
1648
+
1649
+ When graphs are recorded in the same tree, it enforces subsequent execution
1650
+ to follow the same order and have the same output tensor livespans. To remove
1651
+ unnecessary coupling of cuda graphs (and additional imposed invariants),
1652
+ the tree manager will end a currently recording tree whenever it is valid - when
1653
+ the memory pool no longer has any live allocations.
1654
+
1655
+ We ignore outputs from a previous generation that correspond to prior model outputs.
1656
+ Currently this is hardcoded to `GenerationTracker.generation`, tracked in torch dynamo.
1657
+ # TODO: make generation increment configurable, warn on overwrite.
1658
+
1659
+ We run graph warmups in the cudagraph memory pool and return the result on the first invocation
1660
+ of a function. For many models it is important to reclaim activations as you run the backward.
1661
+ If we were to warm up the model and keep an extra copy of the inputs around to subsequently
1662
+ use for recording, we would incur a memory penalty. Additionally, if we are part way through training
1663
+ your model and need to recompile, memory will be allocated to the cuda graph pool, so we run this
1664
+ warmup run in the cuda graph memory pool. As for recording, warm up needs the state of live tensors
1665
+ to be accurately reflected so we checkpoint the allocator state if we need to warm up following graph
1666
+ replay.
1667
+ """
1668
+
1669
+ def __init__(self, device_index: int):
1670
+ # roots are functions which have no dependencies on another node. I.e.,
1671
+ # when they are first invoked, none of their inputs are outputs
1672
+ # of another node, nor are there any live outputs of another node whose
1673
+ # liveness would create a dependency.
1674
+ self.roots: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
1675
+
1676
+ # mapping from function id to wrapped function
1677
+ self.ids_to_funcs: Dict[FunctionID, WrappedFunction] = {}
1678
+
1679
+ self.ids_to_stack_traces: Dict[FunctionID, StackTraces] = {}
1680
+
1681
+ self.warmed_up_functions: Set[FunctionID] = set()
1682
+ # if we fail to increment generation, and are stuck warming up,
1683
+ # only warn on each function once
1684
+ self.warned_functions: Set[FunctionID] = set()
1685
+ torch._C._set_cached_tensors_enabled(True)
1686
+
1687
+ # NB: cuda caching allocator will remember the stream a segment is allocated to
1688
+ # and only allocate that segment to the same stream. we need to use a single stream
1689
+ # for all allocations to the memory pool, otherwise the allocations to separate streams
1690
+ # will not be reused; separate recordings would have used the same memory pool, but not
1691
+ # the same memory.
1692
+
1693
+ with torch.cuda.device(device_index):
1694
+ torch.cuda.synchronize()
1695
+ self.stream = torch.cuda.Stream()
1696
+ self.stream.wait_stream(torch.cuda.current_stream())
1697
+
1698
+ # Keeps Memory Pool Alive
1699
+ self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
1700
+ self.cuda_graphs_thread_pool = torch.cuda.graph_pool_handle()
1701
+
1702
+ with warnings.catch_warnings(record=True), torch.cuda.graph(
1703
+ self.graph,
1704
+ pool=self.cuda_graphs_thread_pool,
1705
+ stream=self.stream,
1706
+ capture_error_mode="thread_local",
1707
+ ):
1708
+ pass
1709
+
1710
+ self.graph_counter = itertools.count(0)
1711
+ self.func_counter = itertools.count(0)
1712
+
1713
+ # whether the current node is in a state of warmup, recording, or execution. If
1714
+ # there is no current node, the state will be ExecutionState.NONE.
1715
+ self.path_state = ExecutionState.NONE
1716
+ self.device_index = device_index
1717
+
1718
+ # the most recently invoked cudagraph wrapping of a function. Will be None
1719
+ # when there is no output from a previous recording or execution whose memory
1720
+ # we need to respect in the cuda caching allocator. If you incremented the generation,
1721
+ # this will also be None, as we ignore those allocations.
1722
+ self.current_node: Optional[CUDAGraphNode] = None
1723
+
1724
+ # current generation of cudagraph invocations. when torch.compile is run
1725
+ # we increment the current generation. We are willing to ignore live outputs
1726
+ # of a previous generation in checking liveness.
1727
+ self.current_gen: int = -1
1728
+
1729
+ # number of instances we are in execution and failed to match to an
1730
+ # existing child
1731
+ self.debug_fail_counter = 0
1732
+ # number of instances we had to checkpoint the function
1733
+ self.debug_checkpointing_counter = 0
1734
+
1735
+ self.id_to_mode: Dict[FunctionID, CompilationMode] = {}
1736
+
1737
+ # Note: [Backward Generation Handling]
1738
+ # We generally perform a sequence of forward executions followed by backward executions.
1739
+ # If multiple torch.compile wrapped forwards are executed with their backwards pending,
1740
+ # we should not disregard the outputs from a prior torch.compile since the entire training
1741
+ # loop hasn't completed. Occasionally, a backward pass corresponding to a forward pass may
1742
+ # not be executed, so we cannot wait for all backwards to have been invoked.
1743
+ # Instead we wait for a single backward
1744
+ # invocation. Triggering a backward pass typically doesn't lead to another torch.compile
1745
+ # invocation, making it less likely for the generation to increase between multiple
1746
+ # backward calls. The following use case is covered by this approach:
1747
+ # mod1 = torch.compile(...)
1748
+ # mod2 = torch.compile(...)
1749
+ # mod2(mod1(x)).sum().backward()
1750
+
1751
+ self.running_forwards_with_pending_backwards = False
1752
+
1753
+ def run(self, new_inputs: List[Tensor], function_id: FunctionID):
1754
+ assert self.graph is not None, "Running CUDAGraph after shutdown"
1755
+ out = self._run(new_inputs, function_id)
1756
+
1757
+ # The forwards are only pending following invocation, not before
1758
+ mode = self.id_to_mode[function_id]
1759
+ if mode == CompilationMode.FORWARD:
1760
+ self.running_forwards_with_pending_backwards = True
1761
+ elif mode == CompilationMode.BACKWARD:
1762
+ self.running_forwards_with_pending_backwards = False
1763
+
1764
+ return out
1765
+
1766
+ def set_to_running_backward(self):
1767
+ self.running_forwards_with_pending_backwards = False
1768
+
1769
+ def _run(self, new_inputs: List[Tensor], function_id: FunctionID):
1770
+ # we will try to end the current execution lazily, since
1771
+ # we don't want to do unnecessary checking of the existing outputs
1772
+ # on the hot path, but both recording and warmup only happen once
1773
+ # so we check up front
1774
+ if self.in_recording:
1775
+ self.try_end_curr_recording(function_id)
1776
+
1777
+ if self.in_warmup:
1778
+ self.try_end_curr_warmup(function_id)
1779
+
1780
+ # warming up a function and subsequently recording may use different memory addresses
1781
+ # because both depend on the state of the caching allocator. if we warm up graph A,
1782
+ # then warm up graph B and make more allocations, the subsequent recording of A will not
1783
+ # necessarily use the same addresses as in the warm up. Thus any warm up of a node can only
1784
+ # be followed by warm up runs.
1785
+ if (
1786
+ not (
1787
+ function_id in self.warmed_up_functions
1788
+ or config.triton.skip_cudagraph_warmup
1789
+ )
1790
+ ) or self.in_warmup:
1791
+ # If we are in the middle of executing cuda graphs, then we need to checkpoint memory state.
1792
+ # Both Recording and Warmup will be reflected in the allocator and don't need changes
1793
+ if self.path_state == ExecutionState.EXECUTION:
1794
+ self.apply_checkpoint_execution_state_in_allocator()
1795
+
1796
+ return self.run_eager(new_inputs, function_id)
1797
+
1798
+ child_nodes = (
1799
+ self.roots if self.current_node is None else self.current_node.children
1800
+ )
1801
+
1802
+ if not self.in_recording:
1803
+ for child in child_nodes[function_id]:
1804
+ # here we are checking memory consistency between recording and execution,
1805
+ # as well as things like stability of tensor locations,
1806
+ # and other invariants
1807
+ if child.check_invariants(new_inputs):
1808
+ return self.execute_node(child, new_inputs)
1809
+
1810
+ # now that we know the new function can't be run as a child of the
1811
+ # current node, if it is a root, try to end the current execution.
1812
+ # as noted above, we want to do this lazily to avoid having to
1813
+ # check all existing outputs
1814
+ if self.current_node is not None and function_id in self.roots:
1815
+ self.try_end_curr_execution()
1816
+
1817
+ # run again to hit the root matching case which must succeed
1818
+ if self.current_node is None:
1819
+ return self.run(new_inputs, function_id)
1820
+
1821
+ # at this point, we necessarily will do a new recording
1822
+ self.debug_fail_counter += 1
1823
+
1824
+ self.try_end_curr_execution()
1825
+ if self.current_node is not None:
1826
+ self.apply_checkpoint_execution_state_in_allocator()
1827
+
1828
+ # now, we are in a recording state !
1829
+ return self.record_function(new_inputs, function_id)
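+
+ # Dispatch summary for _run(): (1) functions that have not warmed up yet, or any call made while
+ # an ancestor is still warming up, fall back to run_eager(); (2) otherwise we try to replay an
+ # existing child whose invariants hold via execute_node(); (3) failing that, we checkpoint the
+ # allocator state if needed and record a brand-new node via record_function().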
1830
+
1831
+ def shutdown(self):
1832
+ """
1833
+ Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn
1834
+ might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown
1835
+ to avoid a reference cycle.
1836
+ """
1837
+ nodes = []
1838
+ for roots in self.roots.values():
1839
+ nodes.extend(roots)
1840
+
1841
+ while nodes:
1842
+ node = nodes.pop()
1843
+ for children in node.children.values():
1844
+ nodes.extend(children)
1845
+ node.remove_node_cached_tensors()
1846
+ node.graph = None
1847
+
1848
+ self.graph = None
1849
+ self.roots = None # type: ignore[assignment]
1850
+ self.current_node = None
1851
+
1852
+ def record_function(self, new_inputs, function_id) -> List[Optional[Tensor]]:
1853
+ graph_id = self.new_graph_id()
1854
+ log.debug(
1855
+ "Recording function %d of graph recording id %d",
1856
+ function_id.id,
1857
+ graph_id.id,
1858
+ )
1859
+ torch.cuda.synchronize()
1860
+ node = CUDAGraphNode(
1861
+ self.ids_to_funcs[function_id],
1862
+ graph_id,
1863
+ self.current_node,
1864
+ new_inputs,
1865
+ self.cuda_graphs_thread_pool,
1866
+ self.device_index,
1867
+ self.ids_to_stack_traces[function_id],
1868
+ self.stream,
1869
+ )
1870
+ if self.current_node is None:
1871
+ self.roots[function_id].append(node)
1872
+ else:
1873
+ self.current_node.add_child(function_id, node)
1874
+ self.current_node = node
1875
+ self.path_state = ExecutionState.RECORDING
1876
+ self.update_generation()
1877
+ torch.cuda.synchronize()
1878
+ return node.run_first_inputs(new_inputs)
1879
+
1880
+ def execute_node(self, node: CUDAGraphNode, new_inputs) -> List[Optional[Tensor]]:
1881
+ self.current_node = node
1882
+ self.path_state = ExecutionState.EXECUTION
1883
+ self.update_generation()
1884
+ return node.run(new_inputs)
1885
+
1886
+ def run_eager(self, new_inputs, function_id: FunctionID):
1887
+ # this is only stored on current node, because when we start a new path,
1888
+ # we will deallocate it
1889
+ already_warm = function_id in self.warmed_up_functions
1890
+ if not already_warm:
1891
+ log.debug("Running warmup of function %d", function_id.id)
1892
+ else:
1893
+ log.debug(
1894
+ "Running eager of function %d because ancestor needed to warm up",
1895
+ function_id.id,
1896
+ )
1897
+ self.warmed_up_functions.add(function_id)
1898
+ node = CUDAWarmupNode(
1899
+ self.ids_to_funcs[function_id],
1900
+ self.current_node,
1901
+ self.cuda_graphs_thread_pool,
1902
+ self.graph,
1903
+ self.device_index,
1904
+ self.ids_to_stack_traces[function_id],
1905
+ self.stream,
1906
+ already_warm,
1907
+ )
1908
+ self.current_node = node
1909
+ self.path_state = ExecutionState.WARMUP
1910
+ self.update_generation()
1911
+ return node.run(new_inputs)
1912
+
1913
+ def new_graph_id(self) -> GraphID:
1914
+ return GraphID(next(self.graph_counter))
1915
+
1916
+ def new_func_id(self) -> FunctionID:
1917
+ return FunctionID(next(self.func_counter))
1918
+
1919
+ def add_function(
1920
+ self,
1921
+ model,
1922
+ inputs,
1923
+ static_input_idxs,
1924
+ stack_traces,
1925
+ mode,
1926
+ constants,
1927
+ ) -> Tuple[Callable[..., Any], List[Optional[Tensor]]]:
1928
+ id = self.new_func_id()
1929
+ self.ids_to_stack_traces[id] = stack_traces
1930
+ self.ids_to_funcs[id] = WrappedFunction(
1931
+ model,
1932
+ static_input_idxs,
1933
+ id,
1934
+ tuple(t for t in constants if isinstance(t, torch.Tensor) and t.is_cuda),
1935
+ )
1936
+ self.id_to_mode[id] = mode
1937
+ fn = functools.partial(self.run, function_id=id)
1938
+
1939
+ # container needs to set up cleanup for when fn dies
1940
+ get_container(self.device_index).add_strong_reference(fn)
1941
+ return fn, fn(inputs)
1942
+
1943
+ @property
1944
+ def in_recording(self):
1945
+ return self.path_state == ExecutionState.RECORDING
1946
+
1947
+ @property
1948
+ def in_warmup(self):
1949
+ return self.path_state == ExecutionState.WARMUP
1950
+
1951
+ def get_roots(self) -> Iterator[CUDAGraphNode]:
1952
+ for nodes in self.roots.values():
1953
+ yield from nodes
1954
+
1955
+ @property
1956
+ def current_node(self):
1957
+ return self._current_node
1958
+
1959
+ @current_node.setter
1960
+ def current_node(self, value):
1961
+ self._current_node = value
1962
+ if value is None:
1963
+ self.path_state = ExecutionState.NONE
1964
+
1965
+ def update_generation(self):
1966
+ self.current_gen = self.get_curr_generation()
1967
+
1968
+ @staticmethod
1969
+ def get_curr_generation() -> int:
1970
+ if MarkStepBox.mark_step_counter != 0:
1971
+ return MarkStepBox.mark_step_counter
1972
+
1973
+ return GenerationTracker.generation
1974
+
1975
+ @staticmethod
1976
+ def user_invoked_mark_step():
1977
+ return MarkStepBox.mark_step_counter != 0
1978
+
1979
+ def can_start_new_generation(self) -> bool:
1980
+ if not self.in_new_torch_compile_invocation():
1981
+ return False
1982
+
1983
+ if self.user_invoked_mark_step():
1984
+ return True
1985
+
1986
+ return not self.running_forwards_with_pending_backwards
1987
+
1988
+ def in_new_torch_compile_invocation(self):
1989
+ return self.current_gen != self.get_curr_generation()
1990
+
1991
+ def try_end_curr_recording(self, function_id: FunctionID) -> None:
1992
+ """
1993
+ Check if the current recording can be terminated, either because all outputs of the
1994
+ previously recorded node are dead or because it was executed in a different
1995
+ generation. Will set current_node to None and in_recording to False if successful.
1996
+ """
1997
+ assert self.in_recording
1998
+ assert self.current_node is not None
1999
+
2000
+ # multiple invocations, allow overwriting the previous generation
2001
+ if self.can_start_new_generation():
2002
+ self.dealloc_current_path_weakrefs()
2003
+ self.clear_current_path_state_and_set_to_none()
2004
+ return
2005
+
2006
+ if self.current_node.all_outputs_are_dead():
2007
+ self.clear_current_path_state_and_set_to_none()
2008
+ return
2009
+
2010
+ self.check_warn_on_unable_to_start_executing(function_id)
2011
+
2012
+ def try_end_curr_execution(self) -> None:
2013
+ """
2014
+ Check if the current executing node can be terminated, either because all outputs of the
2015
+ previously executed node are dead or because it was executed in a different generation.
2016
+ Will set current_node to None if successful.
2017
+ """
2018
+
2019
+ assert not self.in_recording
2020
+ if self.current_node is None:
2021
+ return
2022
+
2023
+ if self.can_start_new_generation():
2024
+ self.clear_current_path_state_and_set_to_none()
2025
+ return
2026
+
2027
+ if self.current_node.all_outputs_are_dead():
2028
+ self.clear_current_path_state_and_set_to_none()
2029
+
2030
+ def try_end_curr_warmup(self, function_id: FunctionID):
2031
+ if self.can_start_new_generation():
2032
+ self.dealloc_current_path_weakrefs()
2033
+ self.current_node = None
2034
+ return
2035
+
2036
+ if self.current_node.all_outputs_are_dead():
2037
+ self.current_node = None
2038
+ return
2039
+
2040
+ self.check_warn_on_unable_to_start_executing(function_id)
2041
+
2042
+ def check_warn_on_unable_to_start_executing(self, function_id: FunctionID):
2043
+ "Warn if we in a potential loop where we are unable to hit fast path"
2044
+ if (
2045
+ function_id in self.warned_functions
2046
+ or not self.in_new_torch_compile_invocation()
2047
+ ):
2048
+ return
2049
+
2050
+ existing_nodes = [
2051
+ node
2052
+ for node in self.current_node._path_from_root
2053
+ if node.wrapped_function.id == function_id
2054
+ ]
2055
+
2056
+ if len(existing_nodes) <= 1:
2057
+ return
2058
+
2059
+ # repeated same pattern
2060
+ parents = {
2061
+ n.parent.wrapped_function.id
2062
+ for n in itertools.chain(existing_nodes, (self.current_node,))
2063
+ if n.parent is not None
2064
+ }
2065
+ if len(parents) == len(existing_nodes):
2066
+ return
2067
+
2068
+ self.warned_functions.add(function_id)
2069
+ warnings.warn(
2070
+ "Unable to hit fast path of CUDAGraphs because of pending, uninvoked backwards. "
2071
+ "Consider running with torch.no_grad() or using torch.compiler.cudagraph_mark_step_begin() "
2072
+ "before each model invocation"
2073
+ )
2074
+
2075
+ def dealloc_current_path_weakrefs(self):
2076
+ # TODO: we could also allow these weak refs to continue to be allocated,
2077
+ # but that adds some complications.
2078
+ for node in self.current_node._path_from_root:
2079
+ assert len(node.tensor_weakrefs) == len(node.stack_traces)
2080
+ for t, stack_trace in zip(node.tensor_weakrefs, node.stack_traces):
2081
+ ten = None if t is None else t()
2082
+ if ten is None:
2083
+ continue
2084
+
2085
+ stack_trace = (
2086
+ stack_trace.strip()
2087
+ if stack_trace
2088
+ else "[Could not find stack trace]"
2089
+ )
2090
+ msg = (
2091
+ "Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run. "
2092
+ f"Stack trace: {stack_trace}. "
2093
+ "To prevent overwriting, clone the tensor outside of torch.compile() "
2094
+ "or call torch.compiler.cudagraph_mark_step_begin() before each model invocation."
2095
+ )
2096
+ torch._C._set_storage_access_error_msg(ten, msg)
2097
+
2098
+ deleted = set()
2099
+ for storage_ref in self.current_node.path_live_weakrefs():
2100
+ if storage_ref() and storage_ref.data_ptr() not in deleted:
2101
+ deleted.add(storage_ref.data_ptr())
2102
+ torch._C._free_And_Remove_DeleterFn(storage_ref())
2103
+
2104
+ def clear_current_path_state_and_set_to_none(self):
2105
+ self.current_node.clear_path_state()
2106
+ self.current_node = None
2107
+
2108
+ def apply_checkpoint_execution_state_in_allocator(self):
2109
+ """
2110
+ Checkpoint the current execution state in the caching allocator so that
2111
+ additional cudagraph recordings can be made respecting existent live storages.
2112
+ """
2113
+ self.debug_checkpointing_counter += 1
2114
+ log.debug(
2115
+ "Checkpointing cuda caching allocator state. Number of checkpoints %d",
2116
+ self.debug_checkpointing_counter,
2117
+ )
2118
+
2119
+ state = self.current_node.checkpointed_caching_state
2120
+ device = self.current_node.device
2121
+ assert state is not None and device is not None
2122
+
2123
+ # currently we deallocate instead of allowing stale recordings
2124
+ stale_storages: List[int] = []
2125
+
2126
+ # remove cached tensors, otherwise they would prevent memory from being
2127
+ # reclaimed in subsequent recordings
2128
+ self.current_node.remove_path_cached_tensors()
2129
+ live_storages_wrappers = list(self.current_node.path_live_weakrefs())
2130
+
2131
+ live_storages_weak_refs = [t() for t in live_storages_wrappers]
2132
+ ptrs_to_deallocate = self.current_node.data_ptrs_dead_since_invocation()
2133
+ torch._C._cuda_setCheckpointPoolState(
2134
+ device, state, stale_storages, live_storages_weak_refs
2135
+ )
2136
+
2137
+ # NB: deduplicate aliased outputs
2138
+ for ptr in set(ptrs_to_deallocate):
2139
+ torch._C._cuda_cudaCachingAllocator_raw_delete(ptr)
2140
+
2141
+ # Now the live blocks should be exactly equal to the live storages in private pool
2142
+ if config.triton.slow_path_cudagraph_asserts:
2143
+ check_memory_pool(
2144
+ self.device_index, self.cuda_graphs_thread_pool, live_storages_wrappers
2145
+ )
2146
+ for wrapper in live_storages_wrappers:
2147
+ assert wrapper()
2148
+ assert torch._C._has_Standard_Deleter(wrapper())
2149
+ assert wrapper.data_ptr() not in ptrs_to_deallocate
2150
+
2151
+ def live_cudagraph_pool_storages_in_curr_execution(
2152
+ self,
2153
+ ) -> List[StorageWeakRefPointer]:
2154
+ if self.current_node is None:
2155
+ return []
2156
+ # explicitly ignoring previously recorded outputs from a past path
2157
+ return [t() for t in self.current_node.path_live_weakrefs()]
env-llmeval/lib/python3.10/site-packages/torch/_inductor/debug.py ADDED
@@ -0,0 +1,561 @@
1
+ import collections
2
+ import contextlib
3
+ import cProfile
4
+ import dataclasses
5
+ import functools
6
+ import itertools
7
+ import logging
8
+ import os
9
+ import os.path
10
+ import pickle
11
+ import pstats
12
+ import shutil
13
+ import subprocess
14
+ from typing import Any, Dict, List, Optional
15
+ from unittest.mock import patch
16
+
17
+ from functorch.compile import draw_graph, get_aot_graph_name, get_graph_being_compiled
18
+
19
+ import torch
20
+ from torch import fx as fx
21
+
22
+ from torch._dynamo.repro.after_aot import save_graph_repro, wrap_compiler_debug
23
+ from torch._dynamo.utils import get_debug_dir
24
+ from torch.fx.graph_module import GraphModule
25
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
26
+ from torch.fx.passes.tools_common import legalize_graph
27
+ from torch.utils._pytree import tree_map
28
+
29
+ from . import config, ir # noqa: F811, this is needed
30
+ from .scheduler import (
31
+ BaseSchedulerNode,
32
+ FusedSchedulerNode,
33
+ NopKernelSchedulerNode,
34
+ OutputNode,
35
+ SchedulerNode,
36
+ )
37
+ from .virtualized import V
38
+
39
+ log = logging.getLogger(__name__)
40
+
41
+ SchedulerNodeList = List[Any]
42
+ BufMeta = collections.namedtuple("BufMeta", ["name", "n_origin"])
43
+ GRAPHVIZ_COMMAND_SCALABLE = ["dot", "-Gnslimit=2", "-Gnslimit1=2", "-Gmaxiter=5000"]
44
+
45
+
46
+ @functools.lru_cache(None)
47
+ def has_dot() -> bool:
48
+ try:
49
+ subprocess.check_output(["which", "dot"], stderr=subprocess.PIPE)
50
+ return True
51
+ except subprocess.SubprocessError:
52
+ return False
53
+
54
+
55
+ def draw_buffers(nodes: List[BaseSchedulerNode], print_graph=False, fname=None):
56
+ """
57
+ Draw a graph in fname.svg.
58
+ """
59
+ if not has_dot():
60
+ log.warning("draw_buffers() requires `graphviz` package")
61
+ return
62
+
63
+ if fname is None:
64
+ fname = get_graph_being_compiled()
65
+
66
+ graph = create_fx_from_snodes(nodes)
67
+
68
+ for node in graph.nodes:
69
+ if "fusion_meta" not in node.meta:
70
+ continue
71
+ group = node.meta["fusion_meta"].group
72
+ if isinstance(group, tuple):
73
+ if isinstance(group[1], int):
74
+ group = (group[1],)
75
+ else:
76
+ group = group[1]
77
+
78
+ # gather meta data
79
+ dtype = None
80
+ if isinstance(node, ir.ComputedBuffer):
81
+ dtype = node.data.dtype
82
+
83
+ metadata = TensorMetadata(group, dtype, None, None, None, None, None)
84
+ node.meta["tensor_meta"] = metadata
85
+
86
+ if print_graph:
87
+ print(graph)
88
+
89
+ gm = GraphModule({}, graph)
90
+ legalize_graph(gm)
91
+ gm.graph.lint()
92
+ draw_graph(
93
+ gm, fname, clear_meta=False, dot_graph_shape=config.trace.dot_graph_shape
94
+ )
95
+
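+ # Hypothetical usage sketch (assumes graphviz's `dot` is installed and `snodes` is the list of
+ # scheduler nodes available at scheduling time):
+ #   draw_buffers(snodes, print_graph=False, fname="inductor_buffers")
+ # which renders inductor_buffers.svg via functorch.compile.draw_graph.
+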
96
+
97
+ def create_fx_from_snodes(snodes: List[BaseSchedulerNode]) -> fx.Graph:
98
+ """
99
+ Creates a FX Graph from a list of SchedulerNode objects.
100
+ """
101
+
102
+ def get_fake_func(name):
103
+ def func1(*args):
104
+ return 0
105
+
106
+ func1.__name__ = name
107
+ return func1
108
+
109
+ FusionMeta = collections.namedtuple("FusionMeta", ["group", "snode", "type"])
110
+
111
+ buf_to_fx_node = {}
112
+ graph = torch.fx.Graph()
113
+ first_node = None
114
+
115
+ outputs = []
116
+ group: Any = None
117
+ # create call_function node for each Buffer and Kernel
118
+ for snode in snodes:
119
+ if snode.is_extern():
120
+ node_type = "extern"
121
+ group = node_type
122
+ elif snode.is_template():
123
+ node_type = "template"
124
+ group = node_type
125
+ elif isinstance(snode, NopKernelSchedulerNode):
126
+ node_type = "nop"
127
+ group = node_type
128
+ elif isinstance(snode, SchedulerNode):
129
+ node_type = "compute"
130
+ group = snode.group
131
+ elif isinstance(snode, FusedSchedulerNode):
132
+ node_type = "fused"
133
+ group = snode.group
134
+ else:
135
+ raise RuntimeError("Unknown node type")
136
+
137
+ fused_name = torch._inductor.utils.get_fused_kernel_name(
138
+ snode.get_nodes(), "original_aten"
139
+ )
140
+ func_name = f"{node_type}: {fused_name}"
141
+ node_func = get_fake_func(func_name)
142
+ kwargs = {}
143
+ if hasattr(snode, "get_device"):
144
+ kwargs = {"device": snode.get_device()}
145
+ fx_node = graph.call_function(node_func, args=(), kwargs=kwargs)
146
+
147
+ def in_output(snode):
148
+ if isinstance(snode, FusedSchedulerNode):
149
+ return any(in_output(x) for x in snode.snodes)
150
+ return any(isinstance(user.node, OutputNode) for user in snode.users)
151
+
152
+ if in_output(snode):
153
+ outputs.append(fx_node)
154
+ name = snode.get_name()
155
+ fx_node.name = name
156
+
157
+ fx_node.meta["fusion_meta"] = FusionMeta(group, snode, node_type)
158
+
159
+ if isinstance(snode, FusedSchedulerNode):
160
+ for x in snode.snodes:
161
+ buf_to_fx_node[x.get_name()] = fx_node
162
+ buf_to_fx_node[name] = fx_node
163
+
164
+ if first_node is None:
165
+ first_node = fx_node
166
+
167
+ # create edges between nodes
168
+ for snode in snodes:
169
+ name = snode.get_name()
170
+ deps = snode.read_writes.reads
171
+
172
+ fx_node = buf_to_fx_node[name]
173
+ new_args = []
174
+ for dep in deps:
175
+ if dep.name in buf_to_fx_node:
176
+ dep_node = buf_to_fx_node[dep.name]
177
+ else:
178
+ with graph.inserting_before(first_node):
179
+ dep_node = graph.placeholder(dep.name)
180
+ buf_to_fx_node[dep.name] = dep_node
181
+ new_args.append(dep_node)
182
+
183
+ fx_node.args = tuple(new_args)
184
+
185
+ graph.output(outputs[0] if len(outputs) == 1 else tuple(outputs))
186
+ return graph
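+
+ # The call_function targets created above are fake zero-returning functions; the resulting FX
+ # graph is meant only for visualization and debugging (e.g. as input to draw_buffers), not for
+ # execution.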
187
+
188
+
189
+ def update_orig_fx_node_name_to_buf_name(
190
+ nodes: SchedulerNodeList,
191
+ node_name_to_buf_name: Dict[str, str],
192
+ parent_buf_name: Optional[str] = None,
193
+ n_origins: int = 0,
194
+ ):
195
+ if nodes is None:
196
+ return
197
+ for node in nodes:
198
+ # for FusedSchedulerNode, traverse recursively into get_nodes()
199
+ buf_name = node.get_name()
200
+ children_nodes = node.get_nodes()
201
+ if children_nodes is not None and len(children_nodes) > 1:
202
+ update_orig_fx_node_name_to_buf_name(
203
+ children_nodes,
204
+ node_name_to_buf_name,
205
+ buf_name if parent_buf_name is None else parent_buf_name,
206
+ )
207
+ continue
208
+ else:
209
+ assert len(children_nodes) == 1 and children_nodes[0] == node
210
+
211
+ ir_node = node.node
212
+ if ir_node is None or ir_node.origins is None:
213
+ continue
214
+ for origin in ir_node.origins:
215
+ node_name = origin.name
216
+ # when buf1 and buf2 both have origin=node1
217
+ # we draw node1 according to buf1
218
+ if node_name not in node_name_to_buf_name:
219
+ node_name_to_buf_name[node_name] = (
220
+ buf_name if parent_buf_name is None else parent_buf_name
221
+ )
222
+
223
+
224
+ def get_node_name_to_buf_meta(node_name_to_buf_name: Dict[str, str]):
225
+ buf_name_to_n_node = {}
226
+ for node_name, buf_name in node_name_to_buf_name.items():
227
+ if buf_name not in buf_name_to_n_node:
228
+ buf_name_to_n_node[buf_name] = {node_name}
229
+ else:
230
+ buf_name_to_n_node[buf_name].add(node_name)
231
+
232
+ node_name_to_buf_meta = {}
233
+ for node_name, buf_name in node_name_to_buf_name.items():
234
+ n_node = len(buf_name_to_n_node[buf_name])
235
+ node_name_to_buf_meta[node_name] = BufMeta(buf_name, n_node)
236
+ return node_name_to_buf_meta
237
+
238
+
239
+ def annotate_orig_fx_with_snodes(
240
+ gm: torch.fx.GraphModule, snodes: SchedulerNodeList
241
+ ) -> None:
242
+ """
243
+ Annotates the original FX graph's nodes with buffer metadata derived from the scheduler nodes.
244
+ """
245
+ node_name_to_buf_name: Dict[str, str] = {}
246
+ update_orig_fx_node_name_to_buf_name(snodes, node_name_to_buf_name)
247
+ if node_name_to_buf_name is None:
248
+ return
249
+ node_name_to_buf_meta = get_node_name_to_buf_meta(node_name_to_buf_name)
250
+ for node in gm.graph.nodes:
251
+ if node.name in node_name_to_buf_meta:
252
+ node.meta["buf_meta"] = node_name_to_buf_meta.get(node.name)
253
+
254
+
255
+ @contextlib.contextmanager
256
+ def enable_aot_logging():
257
+ compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
258
+
259
+ import torch._functorch.aot_autograd
260
+
261
+ log = logging.getLogger(torch._functorch.aot_autograd.__name__)
262
+
263
+ stack = contextlib.ExitStack()
264
+ if not compile_debug:
265
+ try:
266
+ yield
267
+ finally:
268
+ stack.close()
269
+ return
270
+
271
+ # Enable all graphs to be logged to a file by setting the flags to True
272
+ # and the log level of the file logger to DEBUG
273
+ stack.enter_context(patch("functorch.compile.config.debug_partitioner", True))
274
+
275
+ path = os.path.join(get_debug_dir(), "torchinductor")
276
+ if not os.path.exists(path):
277
+ os.makedirs(path)
278
+
279
+ fh = logging.FileHandler(
280
+ os.path.join(
281
+ path,
282
+ f"aot_{get_aot_graph_name()}_debug.log",
283
+ )
284
+ )
285
+ fh.setLevel(logging.DEBUG)
286
+ fh.setFormatter(
287
+ logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
288
+ )
289
+ log.addHandler(fh)
290
+ try:
291
+ yield
292
+ finally:
293
+ log.removeHandler(fh)
294
+ stack.close()
295
+
296
+
297
+ class DebugContext:
298
+ _counter = itertools.count()
299
+
300
+ @staticmethod
301
+ def wrap(fn):
302
+ @functools.wraps(fn)
303
+ def inner(*args, **kwargs):
304
+ with DebugContext():
305
+ return fn(*args, **kwargs)
306
+
307
+ return wrap_compiler_debug(inner, compiler_name="inductor")
308
+
309
+ @staticmethod
310
+ def create_debug_dir(folder_name: str) -> Optional[str]:
311
+ debug_dir = config.trace.debug_dir or get_debug_dir()
312
+ for n in DebugContext._counter:
313
+ dirname = os.path.join(
314
+ debug_dir,
315
+ "torchinductor",
316
+ f"{folder_name}.{n}",
317
+ )
318
+ if not os.path.exists(dirname):
319
+ os.makedirs(dirname)
320
+ return dirname
321
+ return None
322
+
323
+ def __init__(self):
324
+ self._prof = None
325
+ self._path = None
326
+ self._stack = contextlib.ExitStack()
327
+
328
+ def copy(self, new_path: str):
329
+ if not self._path:
330
+ return
331
+ assert new_path.endswith(".debug"), new_path
332
+ if os.path.exists(new_path):
333
+ shutil.rmtree(new_path)
334
+ try:
335
+ shutil.copytree(self._path, new_path)
336
+ self._path = new_path
337
+ except OSError:
338
+ log.warning(
339
+ "Failed to copy debug files from %s to %s", self._path, new_path
340
+ )
341
+ pass
342
+
343
+ def fopen(self, filename: str):
344
+ assert self._path
345
+ return open(os.path.join(self._path, filename), "w")
346
+
347
+ def filename(self, suffix: str):
348
+ assert self._path
349
+ return os.path.join(self._path, suffix)
350
+
351
+ def upload_tar(self):
352
+ if config.trace.upload_tar is not None:
353
+ import tarfile
354
+
355
+ assert self._path
356
+ tar_file = os.path.join(
357
+ self._path, f"{os.path.basename(self._path)}.tar.gz"
358
+ )
359
+ with tarfile.open(tar_file, "w:gz") as tar:
360
+ tar.add(self._path, arcname=os.path.basename(self._path))
361
+ config.trace.upload_tar(tar_file)
362
+
363
+ def __enter__(self):
364
+ if config.debug:
365
+ log = logging.getLogger("torch._dynamo")
366
+ prev_level = log.level
367
+ log.setLevel(logging.DEBUG)
368
+
369
+ def reset_log_level(level):
370
+ log.setLevel(level)
371
+
372
+ self._stack.callback(reset_log_level, prev_level)
373
+
374
+ self._stack.enter_context(V.set_debug_handler(self))
375
+
376
+ if not config.trace.enabled:
377
+ return
378
+
379
+ self._path = self.create_debug_dir(get_aot_graph_name())
380
+
381
+ if config.trace.debug_log:
382
+ self._setup_log_capture("debug.log", logging.DEBUG)
383
+ if config.trace.info_log:
384
+ self._setup_log_capture("info.log", logging.INFO)
385
+ if config.trace.compile_profile:
386
+ self._prof = cProfile.Profile()
387
+ self._prof.enable()
388
+
389
+ def _setup_log_capture(self, filename: str, level: int):
390
+ log = logging.getLogger("torch._inductor")
391
+ fd = self._stack.enter_context(self.fopen(filename))
392
+ ch = logging.StreamHandler(fd)
393
+ ch.setLevel(level)
394
+ ch.setFormatter(
395
+ logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
396
+ )
397
+ log.addHandler(ch)
398
+ log.setLevel(min(log.level, level))
399
+ self._stack.callback(log.removeHandler, ch)
400
+
401
+ def __exit__(self, exc_type, exc_val, exc_tb):
402
+ if self._prof:
403
+ self._prof.disable()
404
+ self._save_profile_data()
405
+
406
+ if self._path:
407
+ self.upload_tar()
408
+ log.warning("%s debug trace: %s", get_graph_being_compiled(), self._path)
409
+ self._stack.close()
410
+
411
+ def _save_profile_data(self):
412
+ assert self._prof
413
+ self._prof.dump_stats(self.filename("compile.prof"))
414
+ with self.fopen("compile.stats") as fd:
415
+ stats = pstats.Stats(self._prof, stream=fd)
416
+ stats.strip_dirs()
417
+ stats.sort_stats("cumtime")
418
+ stats.print_stats(100)
419
+ stats.sort_stats("tottime")
420
+ stats.print_stats(100)
421
+
422
+ def __getattr__(self, name):
423
+ if config.trace.enabled and getattr(config.trace, name):
424
+ try:
425
+ return getattr(DebugFormatter(self), name)
426
+ except Exception:
427
+ log.warning("Ignoring exception in debug code", exc_info=True)
428
+ else:
429
+
430
+ def ignored(*args, **kwargs):
431
+ pass
432
+
433
+ return ignored
434
+
435
+
436
+ class DebugFormatter:
437
+ def __init__(self, handler):
438
+ self.fopen = handler.fopen
439
+ self.filename = handler.filename
440
+ self.handler = handler
441
+
442
+ def fx_graph(self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]):
443
+ with self.fopen("fx_graph_runnable.py") as fd:
444
+ save_graph_repro(fd, gm, inputs, "inductor")
445
+
446
+ with self.fopen("fx_graph_readable.py") as fd:
447
+ fd.write(gm.print_readable(print_output=False))
448
+
449
+ def fx_graph_transformed(
450
+ self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]
451
+ ):
452
+ with self.fopen("fx_graph_transformed.py") as fd:
453
+ fd.write(gm.print_readable(print_output=False))
454
+
455
+ def ir_pre_fusion(self, nodes: SchedulerNodeList):
456
+ self._write_ir("ir_pre_fusion.txt", nodes)
457
+
458
+ def ir_post_fusion(self, nodes: SchedulerNodeList):
459
+ self._write_ir("ir_post_fusion.txt", nodes)
460
+
461
+ def _write_ir(self, filename: str, nodes: SchedulerNodeList):
462
+ with self.fopen(filename) as fd:
463
+ log.info("Writing debug ir to %s", fd.name)
464
+ for node in nodes:
465
+ fd.write(node.debug_str())
466
+ fd.write("\n\n\n")
467
+
468
+ def graph_diagram(self, nodes: SchedulerNodeList):
469
+ draw_buffers(nodes, fname=self.filename("graph_diagram.svg"))
470
+
471
+ def draw_orig_fx_graph(self, gm: torch.fx.GraphModule, nodes: SchedulerNodeList):
472
+ annotate_orig_fx_with_snodes(gm, nodes)
473
+ draw_graph(
474
+ gm,
475
+ fname=self.filename("orig_fx_graph_diagram.svg"),
476
+ clear_meta=False,
477
+ prog=GRAPHVIZ_COMMAND_SCALABLE,
478
+ parse_stack_trace=True,
479
+ dot_graph_shape=config.trace.dot_graph_shape,
480
+ )
481
+
482
+ def output_code(self, filename):
483
+ shutil.copy(filename, self.filename("output_code.py"))
484
+
485
+
486
+ @dataclasses.dataclass
487
+ class TensorMetadataHolder:
488
+ tensor_metadata: TensorMetadata
489
+ device: torch.device
490
+
491
+
492
+ save_args_cnt = itertools.count()
493
+
494
+
495
+ def save_args_for_compile_fx_inner(*args, **kwargs):
496
+ """
497
+ This function is used to save arguments for a compile_fx_inner function call
498
+ to the file system. Later on one can replay the compile_fx_inner call
499
+ with the saved arguments using load_args_and_run_compile_fx_inner.
500
+ """
501
+
502
+ folder = "/tmp/inductor_saved_args"
503
+ if not os.path.exists(folder):
504
+ os.mkdir(folder)
505
+
506
+ def handle_tensor(x):
507
+ """
508
+ Pickling a FakeTensor results in an error:
509
+ AttributeError: Can't pickle local object 'WeakValueDictionary.__init__.<locals>.remove'
510
+
511
+ Convert all Tensors to metadata. This may also make pickling faster.
512
+ """
513
+ if isinstance(x, torch.Tensor):
514
+ return TensorMetadataHolder(_extract_tensor_metadata(x), x.device)
515
+ else:
516
+ return x
517
+
518
+ args_to_save, kwargs_to_save = tree_map(handle_tensor, (args, kwargs))
519
+
520
+ fn_name = "compile_fx_inner"
521
+ path = f"{folder}/{fn_name}_{next(save_args_cnt)}.pkl"
522
+ with open(path, "wb") as f:
523
+ pickle.dump((args_to_save, kwargs_to_save), f)
524
+
525
+ if log.isEnabledFor(logging.DEBUG):
526
+ message = f"""
527
+ Arguments for a compile_fx_inner call are saved to {path}. To replay the call,
528
+ run the following:
529
+
530
+ from torch._inductor.debug import load_args_and_run_compile_fx_inner
531
+ load_args_and_run_compile_fx_inner({path!r})
532
+ """
533
+ # call print rather than log.debug. log.debug will print a message
534
+ # prefix for each line, which makes the code snippet harder to
535
+ # copy.
536
+ # Not a big deal since the code is already guarded by checking
537
+ # the log level.
538
+ print(message)
539
+
540
+
541
+ def load_args_and_run_compile_fx_inner(path: str):
542
+ from torch._inductor.compile_fx import compile_fx_inner
543
+
544
+ with open(path, "rb") as f:
545
+ args, kwargs = pickle.load(f)
546
+
547
+ def handle_tensor(x):
548
+ if isinstance(x, TensorMetadataHolder):
549
+ return torch._dynamo.testing.rand_strided(
550
+ x.tensor_metadata.shape,
551
+ x.tensor_metadata.stride,
552
+ x.tensor_metadata.dtype,
553
+ x.device,
554
+ )
555
+ else:
556
+ return x
557
+
558
+ fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
559
+ with fake_mode, config.patch("save_args", False):
560
+ args, kwargs = tree_map(handle_tensor, (args, kwargs))
561
+ return compile_fx_inner(*args, **kwargs)
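A minimal replay sketch for the save/replay helpers above, assuming inductor's save_args option was enabled on an earlier run; the pickle filename and index below are hypothetical examples of what save_args_for_compile_fx_inner would print:

# Replay a previously saved compile_fx_inner call (the path is an assumed example).
from torch._inductor.debug import load_args_and_run_compile_fx_inner

load_args_and_run_compile_fx_inner("/tmp/inductor_saved_args/compile_fx_inner_0.pkl")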
env-llmeval/lib/python3.10/site-packages/torch/_inductor/decomposition.py ADDED
@@ -0,0 +1,613 @@
1
+ import functools
2
+ import logging
3
+ import math
4
+ import sys
5
+ import typing
6
+ from typing import Optional
7
+
8
+ import torch
9
+ import torch._decomp as decomp
10
+ import torch._prims_common as utils
11
+ import torch.ao.quantization.fx._decomposed
12
+ from torch._decomp import (
13
+ core_aten_decompositions,
14
+ get_decompositions,
15
+ remove_decompositions,
16
+ )
17
+ from torch._decomp.decompositions import (
18
+ _grid_sampler_2d as decomp_grid_sampler_2d,
19
+ pw_cast_for_opmath,
20
+ )
21
+ from torch._decomp.decompositions_for_rng import extra_random_decomps
22
+ from torch._higher_order_ops.out_dtype import out_dtype
23
+ from torch._prims_common import type_to_dtype
24
+
25
+ from . import config, inductor_prims
26
+
27
+ log = logging.getLogger(__name__)
28
+ aten = torch.ops.aten
29
+ prims = torch.ops.prims
30
+ quantized_decomposed = torch.ops.quantized_decomposed
31
+
32
+ inductor_decompositions = get_decompositions(
33
+ [
34
+ aten._adaptive_avg_pool2d_backward,
35
+ aten.arange,
36
+ aten.bitwise_and_,
37
+ aten.bitwise_or_,
38
+ aten.clamp_min_,
39
+ aten.dist,
40
+ aten.empty_like,
41
+ aten.flip,
42
+ aten.gelu,
43
+ aten.hardtanh,
44
+ aten.index_select,
45
+ aten.lcm,
46
+ aten.leaky_relu,
47
+ aten.linalg_vector_norm,
48
+ aten._log_softmax,
49
+ aten.max_pool2d_with_indices_backward,
50
+ aten._native_batch_norm_legit,
51
+ aten._native_batch_norm_legit_functional,
52
+ aten._native_batch_norm_legit_no_training,
53
+ aten.native_batch_norm,
54
+ aten.native_group_norm,
55
+ aten.native_layer_norm,
56
+ aten._softmax,
57
+ aten.sin_,
58
+ aten.sqrt_,
59
+ out_dtype,
60
+ aten._to_copy,
61
+ aten.tril_indices,
62
+ aten.triu_indices,
63
+ aten.upsample_bilinear2d.vec,
64
+ ]
65
+ )
66
+ decompositions = {**core_aten_decompositions(), **inductor_decompositions}
67
+
68
+ # Remove unwanted decompositions included via the core ATen decompositions from
69
+ # the Inductor decomp table.
70
+ decomps_to_exclude = [
71
+ aten._unsafe_index,
72
+ aten._scaled_dot_product_flash_attention.default, # See comments in torch/_decomp/decompositions.py
73
+ aten.clamp_max,
74
+ aten.clamp_min,
75
+ aten.glu, # inductor lowers this directly
76
+ aten.split.Tensor, # inductor lowers this directly
77
+ aten.squeeze, # inductor lowers this directly
78
+ aten.sum, # inductor lowers this directly
79
+ aten.unbind, # inductor lowers this directly
80
+ ]
81
+
82
+ remove_decompositions(decompositions, decomps_to_exclude)
83
+
84
+
85
+ def register_decomposition(ops):
86
+ for op in [ops] if callable(ops) else ops:
87
+ if op in decompositions:
88
+ log.warning("duplicate decomp: %s", ops)
89
+ return decomp.register_decomposition(ops, decompositions)
90
+
91
+
92
+ # TODO: for now, inductor doesn't handle asserts
93
+ # because the condition is symbool -> tensor in the graph.
94
+ @register_decomposition([aten._assert_async.msg])
95
+ def assert_async_msg_decomp(tensor, msg):
96
+ return
97
+
98
+
99
+ # Following `assert_async_msg_decomp` and implement as non-op.
100
+ @register_decomposition([aten._functional_assert_async.msg])
101
+ def functional_assert_async_msg_decomp(tensor, msg):
102
+ return
103
+
104
+
105
+ @register_decomposition([aten.sym_constrain_range_for_size.default])
106
+ def sym_constrain_range_for_size(symbol, *, min=None, max=None):
107
+ return
108
+
109
+
110
+ @register_decomposition([aten.clamp])
111
+ @pw_cast_for_opmath
112
+ def clamp(x, min=None, max=None):
113
+ if min is not None:
114
+ x = x.clamp_min(min)
115
+ if max is not None:
116
+ x = x.clamp_max(max)
117
+ return x
118
+
119
+
120
+ @register_decomposition([aten.full])
121
+ def full(size, fill_value, **kwargs):
122
+ dtype = kwargs.get("dtype")
123
+ if dtype is None:
124
+ kwargs["dtype"] = type_to_dtype(type(fill_value))
125
+ return aten.full(size, fill_value, **kwargs)
126
+ return NotImplemented
127
+
128
+
129
+ # Not really sure how to put this into the main library. PrimTorch wants
130
+ # empty_permuted to go to the prim, and typically users don't really want
131
+ # to decompose to empty_strided (but inductor is OK with it, because we are
132
+ # cool with strides and everything goes to empty_strided)
133
+ @register_decomposition([aten.empty_permuted.default])
134
+ def empty_permuted(size, physical_layout, **kwargs):
135
+ perm = [0] * len(size)
136
+ for p, l in enumerate(physical_layout):
137
+ perm[l] = p
138
+ return torch.empty([size[l] for l in physical_layout], **kwargs).permute(perm)
139
+
140
+
141
+ @register_decomposition([aten.convolution_backward])
142
+ def convolution_backward(
143
+ grad_output,
144
+ input,
145
+ weight,
146
+ bias_sizes,
147
+ stride,
148
+ padding,
149
+ dilation,
150
+ transposed,
151
+ output_padding,
152
+ groups,
153
+ output_mask,
154
+ ):
155
+ if not output_mask[2] or grad_output.device.type != "cuda":
156
+ return NotImplemented
157
+ grad_bias = aten.sum(grad_output, [0] + list(range(2, grad_output.dim())))
158
+ grad_inp, grad_weight, _ = aten.convolution_backward(
159
+ grad_output,
160
+ input,
161
+ weight,
162
+ bias_sizes,
163
+ stride,
164
+ padding,
165
+ dilation,
166
+ transposed,
167
+ output_padding,
168
+ groups,
169
+ [output_mask[0], output_mask[1], False],
170
+ )
171
+ return (grad_inp, grad_weight, grad_bias)
172
+
173
+
174
+ @register_decomposition([aten.log2])
175
+ def log2(x):
176
+ return torch.log(x) * (1.0 / math.log(2.0))
177
+
178
+
179
+ @register_decomposition([aten.round.decimals])
180
+ def round_dec(x, decimals=0):
181
+ ten_pow_decimals = 10.0**decimals
182
+ return aten.round(x * ten_pow_decimals) * (1.0 / ten_pow_decimals)
183
+
184
+
185
+ @register_decomposition([aten.bmm])
186
+ @pw_cast_for_opmath
187
+ def bmm(self, batch2):
188
+ if config.coordinate_descent_tuning:
189
+ if self.shape[1] == 1:
190
+ out = (self.unsqueeze(-1) * batch2.unsqueeze(1)).sum(dim=2)
191
+ return out
192
+ if self.device.type == "cpu":
193
+ if self.size(1) == 1 and batch2.size(-1) == 1:
194
+ return torch.sum(
195
+ self.squeeze(1) * batch2.squeeze(-1), dim=1, keepdim=True
196
+ ).unsqueeze(1)
197
+ return NotImplemented
198
+
199
+
200
+ @register_decomposition([aten.addmm])
201
+ @pw_cast_for_opmath
202
+ def addmm(self, mat1, mat2, beta=1, alpha=1):
203
+ if self.device.type == "cpu":
204
+ if mat1.size(0) == 1 and mat2.size(-1) == 1:
205
+ out = torch.sum(
206
+ mat1.squeeze(0) * mat2.squeeze(-1), dim=0, keepdim=True
207
+ ).unsqueeze(0)
208
+ return alpha * out + beta * self
209
+ if mat1.size(0) == 1 and mat2.size(0) <= 16 and mat2.size(1) <= 16:
210
+ out = (mat1.T * mat2).sum(dim=0, keepdim=True)
211
+ return alpha * out + beta * self
212
+ return NotImplemented
213
+
214
+
215
+ @register_decomposition([aten.mm])
216
+ @pw_cast_for_opmath
217
+ def mm(self, input2):
218
+ # Our matrix vector multiplies only achieve peak bandwidth with coordinate descent tuning.
219
+ # todo: Look into why and fix it (hopefully)
220
+ if config.coordinate_descent_tuning:
221
+ if self.shape[0] == 1 or input2.shape[1] == 1:
222
+ return (self.unsqueeze(2) * input2.unsqueeze(0)).sum(dim=1)
223
+ if self.device.type == "cpu":
224
+ if (
225
+ self.size(-1) == 1
226
+ and self.size(0) > 0
227
+ and input2.size(0) == 1
228
+ and (self.dtype == input2.dtype)
229
+ and ((torch.numel(self) + torch.numel(input2)) <= 32)
230
+ ):
231
+ return torch.cat([self[i, :] * input2 for i in range(self.size(0))])
232
+ if self.size(0) == 1 and input2.size(-1) == 1:
233
+ return torch.sum(
234
+ self.squeeze(0) * input2.squeeze(-1), dim=0, keepdim=True
235
+ ).unsqueeze(0)
236
+ return NotImplemented
237
+
238
+
239
+ @register_decomposition([aten.cat.default])
240
+ def cat(tensors, dim=0):
241
+ def non_empty_tensor(x):
242
+ # special case for cat'ing with an empty tensor -
243
+ # just drop the 'empty' inputs so they don't confuse the logic below.
244
+ return len(x.shape) > 1 or x.shape[0] > 0
245
+
246
+ filtered_tensors = list(filter(non_empty_tensor, tensors))
247
+
248
+ if len(filtered_tensors) == 1:
249
+ return filtered_tensors[0].clone()
250
+ elif 1 < len(filtered_tensors) < len(tensors):
251
+ # on the first call, when we remove empty tensors, we redispatch recursively
252
+ return aten.cat.default(filtered_tensors, dim)
253
+ # when no 'filtering' has occurred, return NotImplemented to prevent infinite recursion (no more decomposition needed)
254
+ return NotImplemented
255
+
256
+
257
+ @register_decomposition([aten.angle])
258
+ def angle(x):
259
+ if x.is_complex():
260
+ return torch.where(
261
+ torch.isnan(x.real), float("nan"), torch.atan2(x.imag, x.real)
262
+ )
263
+ else:
264
+ # when x is real number
265
+ # if x >= 0, return 0
266
+ # if x < 0, return pi
267
+ # if x is nan, return nan
268
+ ret = torch.where(x < 0, math.pi, 0.0)
269
+ nan = torch.where(torch.isnan(x), float("nan"), 0.0)
270
+ return ret + nan
271
+
272
+
273
+ @register_decomposition([aten.add])
274
+ def add(x, y, *, alpha=None):
275
+ x_is_complex_tensor = torch.is_tensor(x) and x.is_complex()
276
+ y_is_complex_tensor = torch.is_tensor(y) and y.is_complex()
277
+ if not x_is_complex_tensor or not y_is_complex_tensor:
278
+ return NotImplemented
279
+ z = y
280
+ if alpha is not None:
281
+ z = alpha * y
282
+ complex_type = torch.promote_types(x.dtype, y.dtype)
283
+ return (x.view(x.real.dtype) + z.view(y.real.dtype)).view(complex_type)
284
+
285
+
286
+ @register_decomposition([aten.conj_physical])
287
+ def conj_physical(self):
288
+ assert not self.is_complex(), "TODO: implement this"
289
+ return self
290
+
291
+
292
+ @register_decomposition([aten.lift, aten.detach_])
293
+ def lift(self):
294
+ return self
295
+
296
+
297
+ @register_decomposition([aten.bernoulli.default])
298
+ def bernoulli(self, *, generator=None):
299
+ assert generator is None
300
+ return torch.rand_like(self, dtype=torch.float32) < self
301
+
302
+
303
+ @register_decomposition([aten.fmin, prims.fmin])
304
+ def fmin(self, other):
305
+ return torch.where(torch.isnan(other) | (other > self), self, other)
306
+
307
+
308
+ @register_decomposition([aten.fmax, prims.fmax])
309
+ def fmax(self, other):
310
+ return torch.where(torch.isnan(other) | (other < self), self, other)
311
+
312
+
313
+ @register_decomposition(aten.amax)
314
+ def amax(self, dim=None, keepdim=False):
315
+ if self.dtype == torch.bool:
316
+ return torch.any(self, dim=dim, keepdim=keepdim)
317
+ return NotImplemented
318
+
319
+
320
+ @register_decomposition(aten.amin)
321
+ def amin(self, dim=None, keepdim=False):
322
+ if self.dtype == torch.bool:
323
+ return torch.all(self, dim=dim, keepdim=keepdim)
324
+ return NotImplemented
325
+
326
+
327
+ @register_decomposition([aten.narrow_copy])
328
+ def narrow_copy(self, dim, start, length):
329
+ return torch.narrow(self, dim, start, length).clone()
330
+
331
+
332
+ @register_decomposition([aten.expand_copy])
333
+ def expand_copy(self, size, *, implicit=False):
334
+ return aten.expand(self, size, implicit=implicit).clone()
335
+
336
+
337
+ @register_decomposition([aten.view_copy.default])
338
+ def view_copy_default(self, size):
339
+ return aten.view(self, size).clone()
340
+
341
+
342
+ @register_decomposition([aten.view_copy.dtype])
343
+ def view_copy_dtype(self, dtype):
344
+ return self.to(dtype).clone()
345
+
346
+
347
+ def get_like_layout(
348
+ tensor: torch.Tensor, memory_format: Optional[torch.memory_format]
349
+ ) -> torch.memory_format:
350
+ # TODO: _to_copy tensor to stride permutation
351
+ if memory_format is torch.preserve_format or memory_format is None:
352
+ return utils.suggest_memory_format(tensor)
353
+ else:
354
+ return memory_format
355
+
356
+
357
+ @register_decomposition(aten.rand_like)
358
+ def rand_like(self, *, dtype=None, device=None, memory_format=None, **kwargs):
359
+ return torch.rand(
360
+ [*self.size()],
361
+ dtype=dtype or self.dtype,
362
+ device=device or self.device,
363
+ **kwargs,
364
+ ).to(memory_format=get_like_layout(self, memory_format))
365
+
366
+
367
+ @register_decomposition(aten.randn_like)
368
+ def randn_like(self, *, dtype=None, device=None, memory_format=None, **kwargs):
369
+ return torch.randn(
370
+ [*self.size()],
371
+ dtype=dtype or self.dtype,
372
+ device=device or self.device,
373
+ **kwargs,
374
+ ).to(memory_format=get_like_layout(self, memory_format))
375
+
376
+
377
+ @register_decomposition(aten.full_like)
378
+ def full_like(
379
+ self,
380
+ fill_value,
381
+ *,
382
+ dtype=None,
383
+ layout=None,
384
+ device=None,
385
+ pin_memory=False,
386
+ requires_grad=False,
387
+ memory_format=torch.preserve_format,
388
+ ):
389
+ return torch.full(
390
+ [*self.size()],
391
+ fill_value,
392
+ dtype=dtype or self.dtype,
393
+ layout=layout or self.layout,
394
+ device=device or self.device,
395
+ requires_grad=requires_grad,
396
+ ).to(memory_format=get_like_layout(self, memory_format))
397
+
398
+
399
+ @register_decomposition(aten.randint_like.default)
400
+ def randint_like(self, high, *, dtype=None, device=None, memory_format=None, **kwargs):
401
+ return aten.randint.low(
402
+ 0,
403
+ high,
404
+ [*self.size()],
405
+ dtype=dtype or self.dtype,
406
+ device=device or self.device,
407
+ **kwargs,
408
+ ).to(memory_format=get_like_layout(self, memory_format))
409
+
410
+
411
+ @register_decomposition(aten.randint_like.low_dtype)
412
+ def randint_like_low(
413
+ self, low, high, *, dtype=None, device=None, memory_format=None, **kwargs
414
+ ):
415
+ return aten.randint.low(
416
+ low,
417
+ high,
418
+ [*self.size()],
419
+ dtype=dtype or self.dtype,
420
+ device=device or self.device,
421
+ **kwargs,
422
+ ).to(memory_format=get_like_layout(self, memory_format))
423
+
424
+
425
+ @register_decomposition(aten.randint.default)
426
+ def randint(high, size, **kwargs):
427
+ return aten.randint.low(0, high, size, **kwargs)
428
+
429
+
430
+ # The difference between quantize_per_tensor.default and quantize_per_tensor.tensor is
431
+ # whether scale and zero_point are scalars or scalar tensors
432
+ @register_decomposition(quantized_decomposed.quantize_per_tensor.default)
433
+ def quantize_per_tensor_default_decomp_impl(
434
+ input: torch.Tensor,
435
+ scale: float,
436
+ zero_point: int,
437
+ quant_min: int,
438
+ quant_max: int,
439
+ dtype: torch.dtype,
440
+ ) -> torch.Tensor:
441
+ if input.dtype == torch.bfloat16:
442
+ input = input.to(torch.float32)
443
+ inv_scale = 1.0 / scale
444
+ return torch.clamp(
445
+ torch.round(input * inv_scale) + zero_point, quant_min, quant_max
446
+ ).to(dtype)
447
+
448
+
449
+ # The difference between dequantize_per_tensor.default and dequantize_per_tensor.tensor is
450
+ # whether scale and zero_point are scalars or scalar tensors
451
+ @register_decomposition(quantized_decomposed.dequantize_per_tensor.default)
452
+ def dequantize_per_tensor_default_decomp_impl(
453
+ input: torch.Tensor,
454
+ scale: float,
455
+ zero_point: int,
456
+ quant_min: int,
457
+ quant_max: int,
458
+ dtype: torch.dtype,
459
+ ) -> torch.Tensor:
460
+ return (input.to(torch.float32) - zero_point) * scale
461
+
462
+
463
+ @register_decomposition(quantized_decomposed.quantize_per_tensor.tensor)
464
+ def quantize_per_tensor_tensor_decomp_impl(
465
+ input: torch.Tensor,
466
+ scale: torch.Tensor,
467
+ zero_point: torch.Tensor,
468
+ quant_min: int,
469
+ quant_max: int,
470
+ dtype: torch.dtype,
471
+ ) -> torch.Tensor:
472
+ if input.dtype == torch.bfloat16:
473
+ input = input.to(torch.float32)
474
+ inv_scale = 1.0 / scale
475
+ return torch.clamp(
476
+ torch.round(input * inv_scale) + zero_point, quant_min, quant_max
477
+ ).to(dtype)
478
+
479
+
480
+ @register_decomposition(quantized_decomposed.dequantize_per_tensor.tensor)
481
+ def dequantize_per_tensor_tensor_decomp_impl(
482
+ input: torch.Tensor,
483
+ scale: torch.Tensor,
484
+ zero_point: torch.Tensor,
485
+ quant_min: int,
486
+ quant_max: int,
487
+ dtype: torch.dtype,
488
+ ) -> torch.Tensor:
489
+ return (input.to(torch.float32) - zero_point) * scale
490
+
491
+
492
+ @register_decomposition(torch.ops.quantized.embedding_bag_byte_unpack)
493
+ def q_embedding_bag_byte_unpack_decomp(packed):
494
+ def bitcast_u8_to_f32(u8):
495
+ x, y, z, w = (u8[..., n].to(torch.int32) for n in (0, 1, 2, 3))
496
+ if sys.byteorder == "little":
497
+ return (x + (y << 8) + (z << 16) + (w << 24)).view(torch.float32)[..., None]
498
+ else:
499
+ return ((x << 24) + (y << 16) + (z << 8) + w).view(torch.float32)[..., None]
500
+
501
+ scales = bitcast_u8_to_f32(packed[..., -8:-4])
502
+ offsets = bitcast_u8_to_f32(packed[..., -4:])
503
+ return packed[..., :-8].to(torch.float32) * scales + offsets
504
+
505
+
506
+ @register_decomposition([aten.grid_sampler_2d])
507
+ @pw_cast_for_opmath
508
+ def grid_sampler_2d(
509
+ a: torch.Tensor,
510
+ grid: torch.Tensor,
511
+ interpolation_mode: int = 0,
512
+ padding_mode: int = 0,
513
+ align_corners: bool = False,
514
+ ) -> torch.Tensor:
515
+ # We do not expand the grid (_expand_grid=False) on cpu for performance reasons
516
+ # Experimenting locally it was found that compiled CUDA code is accelerated by ~5x
517
+ # and CPU code by ~2x on bicubic mode, if we expand the grid from (N, H, W, 2) into (N, C, H, W, 2)
518
+ # However, this leads to a slowdown around ~0.8x on CPU bilinear mode, channels first.
519
+ # Thus we apply this hack to not expand the grid for this case.
520
+ _expand_grid = not (
521
+ a.device == torch.device("cpu")
522
+ and interpolation_mode == 0
523
+ and a.is_contiguous(memory_format=torch.contiguous_format)
524
+ )
525
+
526
+ output = decomp_grid_sampler_2d(
527
+ a,
528
+ grid=grid,
529
+ interpolation_mode=interpolation_mode,
530
+ padding_mode=padding_mode,
531
+ align_corners=align_corners,
532
+ _expand_grid=_expand_grid,
533
+ )
534
+ return output
535
+
536
+
537
+ @register_decomposition(aten._foreach_addcmul.Scalar)
538
+ def _foreach_addcmul_scalar(self, left_tensors, right_tensors, scalar=1):
539
+ return aten._foreach_add.List(
540
+ self, aten._foreach_mul.List(left_tensors, right_tensors), alpha=scalar
541
+ )
542
+
543
+
544
+ @register_decomposition(aten._foreach_addcdiv.Scalar)
545
+ def _foreach_addcdiv_scalar(self, left_tensors, right_tensors, scalar=1):
546
+ return aten._foreach_add.List(
547
+ self, aten._foreach_div.List(left_tensors, right_tensors), alpha=scalar
548
+ )
549
+
550
+
551
+ @register_decomposition(aten._foreach_lerp.Scalar)
552
+ def _foreach_lerp_scalar(start_tensors, end_tensors, weight):
553
+ return aten._foreach_add.List(
554
+ start_tensors,
555
+ aten._foreach_mul.Scalar(
556
+ aten._foreach_sub.List(end_tensors, start_tensors), weight
557
+ ),
558
+ )
559
+
560
+
561
+ @aten.miopen_batch_norm.default.py_impl(torch._C.DispatchKey.Autograd)
562
+ @register_decomposition(aten.miopen_batch_norm)
563
+ def miopen_batch_norm(
564
+ input: torch.Tensor,
565
+ weight: torch.Tensor,
566
+ bias: typing.Optional[torch.Tensor],
567
+ running_mean: typing.Optional[torch.Tensor],
568
+ running_var: typing.Optional[torch.Tensor],
569
+ training: bool,
570
+ exponential_average_factor: float,
571
+ epsilon: float,
572
+ ):
573
+ a, b, c = aten.native_batch_norm(
574
+ input,
575
+ weight,
576
+ bias,
577
+ running_mean,
578
+ running_var,
579
+ training,
580
+ exponential_average_factor,
581
+ epsilon,
582
+ )
583
+
584
+ if training:
585
+ return (a, b, c)
586
+ return (
587
+ a,
588
+ weight.new_zeros((0,)),
589
+ weight.new_zeros((0,)),
590
+ )
591
+
592
+
593
+ @functools.lru_cache(None)
594
+ def fast_random_decomps():
595
+ return {**decompositions, **extra_random_decomps}
596
+
597
+
598
+ def select_decomp_table():
599
+ """decomps can change based on config"""
600
+ if config.fallback_random:
601
+ return decompositions
602
+ return fast_random_decomps()
603
+
604
+
605
+ @register_decomposition(aten.masked_scatter)
606
+ def masked_scatter(self, mask, source):
607
+ if self.device.type == "cuda":
608
+ # This two-step algorithm is the same as eager CUDA; for eager CPU we
609
+ # use a 1-shot serial iteration.
610
+ self, mask = aten.broadcast_tensors([self, mask])
611
+ source_idx = mask.reshape(-1).cumsum(0) - 1
612
+ return inductor_prims.masked_scatter_with_index(self, mask, source_idx, source)
613
+ return NotImplemented
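The decompositions above all follow the same registration pattern. As a minimal sketch of how one more could be added (the choice of aten.log10 and its body are illustrative assumptions, not part of this diff; register_decomposition only logs a warning if the op already has an entry):

import math

import torch
from torch._inductor.decomposition import register_decomposition

aten = torch.ops.aten


@register_decomposition([aten.log10])
def log10_decomp(x):
    # Mirrors the log2 decomposition above: rewrite in terms of torch.log.
    return torch.log(x) * (1.0 / math.log(10.0))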
env-llmeval/lib/python3.10/site-packages/torch/_inductor/dependencies.py ADDED
@@ -0,0 +1,444 @@
1
+ import collections
2
+ import dataclasses
3
+ import itertools
4
+ import logging
5
+ import re
6
+ import typing
7
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
8
+
9
+ import sympy
10
+
11
+ import torch
12
+ from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
13
+
14
+ from .codegen.common import index_prevent_reordering
15
+ from .utils import get_dtype_size, sympy_str, sympy_subs, sympy_symbol, VarRanges
16
+ from .virtualized import V
17
+
18
+ log = logging.getLogger(__name__)
19
+ is_indirect = re.compile(r"indirect|tmp").search
20
+ Dep = Union["MemoryDep", "StarDep", "WeakDep"]
21
+
22
+
23
+ class MemoryDep(typing.NamedTuple):
24
+ name: str
25
+ index: sympy.Expr
26
+ var_names: Tuple[sympy.Symbol, ...]
27
+ size: Tuple[sympy.Expr, ...]
28
+
29
+ def __repr__(self):
30
+ return f"MemoryDep({self.name!r}, {self.index}, {self.ranges})"
31
+
32
+ @property
33
+ def ranges(self) -> Dict[sympy.Symbol, sympy.Expr]:
34
+ """{c0: 128, c1: 512, ...}"""
35
+ return dict(zip(self.var_names, self.size))
36
+
37
+ def get_numel(self) -> sympy.Expr:
38
+ if self.is_indirect():
39
+ numel = V.graph.get_numel(self.name)
40
+ else:
41
+ vars = set(self.index.free_symbols)
42
+ numel = sympy.Integer(1)
43
+ for var, size in zip(self.var_names, self.size):
44
+ if var in vars:
45
+ numel = numel * size
46
+ return numel
47
+
48
+ def rename(self, renames: Dict[str, str]) -> "MemoryDep":
49
+ if self.name in renames:
50
+ return MemoryDep(
51
+ renames[self.name], self.index, var_names=self.var_names, size=self.size
52
+ )
53
+ return self
54
+
55
+ def numbytes_hint(self):
56
+ return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
57
+ V.graph.get_dtype(self.name)
58
+ )
59
+
60
+ def has_unbacked_symbols(self):
61
+ return len(free_unbacked_symbols(self.get_numel())) > 0
62
+
63
+ def is_contiguous(self) -> bool:
64
+ return isinstance(self.index, sympy.Symbol) and self.index in self.var_names
65
+
66
+ def is_scalar(self) -> bool:
67
+ if isinstance(self.index, sympy.Symbol):
68
+ return self.index not in self.var_names and not self.is_indirect()
69
+ return isinstance(self.index, (int, sympy.Integer))
70
+
71
+ def is_indirect(self) -> bool:
72
+ return any(is_indirect(v.name) for v in self.index.free_symbols)
73
+
74
+
75
+ class StarDep(typing.NamedTuple):
76
+ # depends on the entire buffer
77
+ name: str
78
+
79
+ @property
80
+ def index(self):
81
+ raise NotImplementedError("StarDep does not have an index")
82
+
83
+ def get_numel(self) -> sympy.Expr:
84
+ return V.graph.get_numel(self.name)
85
+
86
+ def rename(self, renames: Dict[str, str]) -> "StarDep":
87
+ if self.name in renames:
88
+ return StarDep(renames[self.name])
89
+ return self
90
+
91
+ def numbytes_hint(self):
92
+ return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
93
+ V.graph.get_dtype(self.name)
94
+ )
95
+
96
+ def has_unbacked_symbols(self):
97
+ return len(free_unbacked_symbols(self.get_numel())) > 0
98
+
99
+ def is_contiguous(self) -> bool:
100
+ return False
101
+
102
+ def is_scalar(self) -> bool:
103
+ return False
104
+
105
+ def is_indirect(self) -> bool:
106
+ return False
107
+
108
+
109
+ # Used for tracking mutation ordering
110
+ # if A reads a buffer and B mutates it
111
+ # B must be ordered after A
112
+ #
113
+ # It is weak because if it turns out A's read is never used, we can still
114
+ # eliminate it
115
+ class WeakDep(typing.NamedTuple):
116
+ name: str
117
+
118
+ @property
119
+ def index(self):
120
+ raise NotImplementedError("WeakDep does not have an index")
121
+
122
+ def get_numel(self) -> sympy.Expr:
123
+ return sympy.Integer(1)
124
+
125
+ def rename(self, renames: Dict[str, str]) -> "WeakDep":
126
+ if self.name in renames:
127
+ return WeakDep(renames[self.name])
128
+ return self
129
+
130
+ def numbytes_hint(self):
131
+ return 1 # Purely inserted for ordering, not an actual dep
132
+
133
+ def has_unbacked_symbols(self):
134
+ return False
135
+
136
+ def is_contiguous(self) -> bool:
137
+ return False
138
+
139
+
140
+ class IndexExprDep(typing.NamedTuple):
141
+ index: sympy.Expr
142
+ var_names: Tuple[sympy.Symbol, ...]
143
+ size: Tuple[sympy.Expr, ...]
144
+
145
+
146
+ @dataclasses.dataclass
147
+ class ReadWrites:
148
+ reads: Set[Dep]
149
+ writes: Set[Dep]
150
+ index_exprs: Set[IndexExprDep]
151
+ range_vars: Optional[List[sympy.Expr]] = None
152
+ var_ranges: Optional[VarRanges] = None
153
+ op_counts: typing.Counter[str] = dataclasses.field(
154
+ default_factory=collections.Counter
155
+ )
156
+
157
+ def rename(self, renames: typing.Dict[str, str]) -> "ReadWrites":
158
+ return ReadWrites(
159
+ {dep.rename(renames) for dep in self.reads},
160
+ {dep.rename(renames) for dep in self.writes},
161
+ self.index_exprs,
162
+ self.range_vars,
163
+ self.var_ranges,
164
+ op_counts=self.op_counts,
165
+ )
166
+
167
+ def with_read(self, dep: Dep) -> "ReadWrites":
168
+ assert isinstance(dep, (WeakDep, StarDep))
169
+ return ReadWrites(
170
+ set.union(self.reads, {dep}),
171
+ self.writes,
172
+ self.index_exprs,
173
+ self.range_vars,
174
+ self.var_ranges,
175
+ op_counts=self.op_counts,
176
+ )
177
+
178
+ def merge(self, other: "ReadWrites"):
179
+ reads = set.union(self.reads, other.reads)
180
+ writes = set.union(self.writes, other.writes)
181
+ index_exprs = set.union(self.index_exprs, other.index_exprs)
182
+ op_counts = collections.Counter(self.op_counts)
183
+ op_counts.update(other.op_counts)
184
+ return ReadWrites(reads - writes, writes, index_exprs, op_counts=op_counts)
185
+
186
+ @staticmethod
187
+ def merge_list(read_writes: List["ReadWrites"]):
188
+ all_writes = set.union(*[rw.writes for rw in read_writes])
189
+ all_reads = set.union(*[rw.reads for rw in read_writes]) - all_writes
190
+ all_index_exprs = set.union(*[rw.index_exprs for rw in read_writes])
191
+
192
+ op_counts: typing.Counter[Any] = collections.Counter()
193
+ for rw in read_writes:
194
+ op_counts.update(rw.op_counts)
195
+
196
+ return ReadWrites(all_reads, all_writes, all_index_exprs, op_counts=op_counts)
197
+
198
+ def remove_reads(self, rem_reads):
199
+ return ReadWrites(
200
+ self.reads - rem_reads,
201
+ self.writes,
202
+ self.index_exprs,
203
+ self.range_vars,
204
+ self.var_ranges,
205
+ op_counts=self.op_counts,
206
+ )
207
+
208
+ def reads_and_writes(self):
209
+ return itertools.chain(self.reads, self.writes)
210
+
211
+
212
+ class _RecordLoadStoreInner(V.MockHandler): # type: ignore[name-defined]
213
+ def __init__(self, var_ranges: VarRanges, normalize: bool):
214
+ super().__init__()
215
+ self._reads: Set[Dep] = set()
216
+ self._writes: Set[MemoryDep] = set()
217
+ self._index_exprs: Set[IndexExprDep] = set()
218
+ self._var_ranges: VarRanges = var_ranges
219
+ self._normalize: bool = normalize
220
+
221
+ def canonicalize(
222
+ self, index: sympy.Expr
223
+ ) -> Tuple[sympy.Expr, Tuple[sympy.Symbol, ...], Tuple[sympy.Expr, ...]]:
224
+ if not self._normalize:
225
+ sizes = [V.graph.sizevars.simplify(x) for x in self._var_ranges.values()]
226
+ var_names = tuple(
227
+ k for k, v in zip(self._var_ranges.keys(), sizes) if v != 1
228
+ )
229
+ sizes = tuple(v for v in sizes if v != 1)
230
+ return index, var_names, sizes
231
+
232
+ # Try to further simplify the indexes even if simplify_loops didn't
233
+ # convert it to the simplest form because of the interference from
234
+ # different indexing formulas.
235
+ free_symbols = index.free_symbols
236
+ var_ranges = {
237
+ k: V.graph.sizevars.simplify(v)
238
+ for k, v in self._var_ranges.items()
239
+ # TODO(jansel): explore this further normalization
240
+ # if k in free_symbols
241
+ }
242
+ index_vars = [*var_ranges.keys()]
243
+ sizes = tuple(var_ranges.values())
244
+ new_sizes, reindex, prune = V.graph.sizevars._simplify_loops(
245
+ index_vars,
246
+ sizes,
247
+ index_prevent_reordering([index], index_vars, sizes),
248
+ )
249
+
250
+ # assign new variables each dimension to deal with numbering mismatches
251
+ # d0, d1, d2 could become d0, d2 -- which won't match d0, d1
252
+ new_vars, add_var = var_builder(canonicalization_prefix())
253
+ replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))
254
+ index = sympy_subs(sympy.expand(index), replacement)
255
+
256
+ new_vars = [*new_vars.keys()]
257
+ new_sizes = [*new_sizes]
258
+ free_symbols = index.free_symbols
259
+ while new_vars and new_vars[-1] not in free_symbols:
260
+ # Reduction has last (reduced) dim in its sizes, but
261
+ # downstream users won't. Normalize this away.
262
+ new_vars.pop()
263
+ new_sizes.pop()
264
+ return index, tuple(new_vars), tuple(new_sizes)
265
+
266
+ def load(self, name: str, index: sympy.Expr) -> str:
267
+ self._reads.add(MemoryDep(name, *self.canonicalize(index)))
268
+ return f"load({name}, {sympy_str(index)})"
269
+
270
+ def load_seed(self, name: str, index: int):
271
+ assert isinstance(index, int)
272
+ return self.load(name, sympy.Integer(index))
273
+
274
+ def store(self, name: str, index: sympy.Expr, value: str, mode=None) -> str:
275
+ self._writes.add(MemoryDep(name, *self.canonicalize(index)))
276
+ return f"store({name}, {sympy_str(index)}, {value}, {mode})"
277
+
278
+ def store_reduction(self, name: str, index, value) -> str:
279
+ return self.store(name, index, f"store_reduction({value})")
280
+
281
+ def index_expr(self, index: sympy.Expr, dtype) -> str:
282
+ self._index_exprs.add(IndexExprDep(*self.canonicalize(index)))
283
+ return f"index_expr({sympy_str(index)}, {dtype})"
284
+
285
+ def bucketize(
286
+ self,
287
+ values,
288
+ offsets_name: str,
289
+ offsets_size: sympy.Expr,
290
+ indexing_dtype: torch.dtype,
291
+ right: bool,
292
+ ):
293
+ self._reads.add(StarDep(offsets_name))
294
+ return f"bucketize({values}, {offsets_name}, {sympy_str(offsets_size)}, {indexing_dtype}, {right})"
295
+
296
+
297
+ class _OpCounter:
298
+ """Shim to count how many times each op is used"""
299
+
300
+ def __init__(self, inner):
301
+ super().__init__()
302
+ self.parent_handler = inner
303
+ self._op_counts: typing.Counter[Any] = collections.Counter()
304
+
305
+ def __getattr__(self, name):
306
+ self._op_counts[name] += 1
307
+ return getattr(self.parent_handler, name)
308
+
309
+
310
+ class RecordLoadStore(V.KernelFormatterHandler): # type: ignore[name-defined]
311
+ def __init__(self, var_ranges: VarRanges, normalize: bool):
312
+ parent_handler = _RecordLoadStoreInner(
313
+ var_ranges=var_ranges, normalize=normalize
314
+ )
315
+ parent_handler = _OpCounter(parent_handler)
316
+ super().__init__(parent_handler=parent_handler)
317
+
318
+
319
+ def var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
320
+ cnt = itertools.count()
321
+ var_ranges: VarRanges = dict()
322
+
323
+ def add_var(length: sympy.Expr) -> sympy.Symbol:
324
+ v = sympy_symbol(f"{prefix}{next(cnt)}")
325
+ var_ranges[v] = length
326
+ return v
327
+
328
+ return var_ranges, add_var
329
+
330
+
331
+ def index_vars_no_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str):
332
+ var_ranges, add_var = var_builder(prefix)
333
+ args: List[List[sympy.Symbol]] = []
334
+ for size in argsizes:
335
+ args.append(list(map(add_var, size)))
336
+ return args, var_ranges
337
+
338
+
339
+ def index_vars_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str = "d"):
340
+ from .ir import SqueezeView
341
+
342
+ var_ranges, add_var = var_builder(prefix)
343
+ args: List[List[sympy.Expr]] = []
344
+ new_sizes: List[List[sympy.Expr]] = []
345
+ for size in argsizes:
346
+ new_size, reindex = SqueezeView.squeezer(size)
347
+ new_sizes.append(new_size)
348
+ args.append(reindex(list(map(add_var, new_size))))
349
+ return args, var_ranges
350
+
351
+
352
+ def extract_read_writes(
353
+ fn: Callable[..., Any],
354
+ *argsizes: Tuple[sympy.Expr, ...],
355
+ normalize: bool = False,
356
+ prefix: str = "d",
357
+ ):
358
+ args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)
359
+ rw = RecordLoadStore(var_ranges, normalize=normalize)
360
+ with V.set_ops_handler(rw):
361
+ fn(*args)
362
+
363
+ if normalize:
364
+ range_vars = [] # Number of vars could differ due to normalization
365
+ else:
366
+ range_vars = [*itertools.chain(*args)]
367
+
368
+ inner = rw.parent_handler.parent_handler
369
+ return ReadWrites(
370
+ set(inner._reads),
371
+ set(inner._writes),
372
+ inner._index_exprs,
373
+ range_vars,
374
+ var_ranges,
375
+ rw.parent_handler._op_counts,
376
+ )
377
+
378
+
379
+ def extract_input_node_reduction_ranges(
380
+ input_node: "torch._inductor.ir.TensorBox",
381
+ ) -> Tuple[Optional[List[sympy.Expr]], Optional[List[sympy.Expr]]]:
382
+ """
383
+ Returns the size and reduction size of all inputs, if the sizes and reduction_sizes (if exist) are all the same.
384
+ It's possible that a node has multiple inputs, some are Reduction nodes and others are Pointwise nodes.
385
+ In this case, reduction_sizes of the Reduction nodes need to be the same.
386
+ Otherwise returns (None, None).
387
+ """
388
+
389
+ from .ir import ComputedBuffer, Loops
390
+
391
+ if isinstance(input_node.data, ComputedBuffer):
392
+ # Input node has already been realized. Return its size and reduction_size.
393
+ size = input_node.get_size()
394
+ reduction_size = input_node.get_reduction_size()
395
+ if len(reduction_size) > 0:
396
+ return (size, reduction_size)
397
+ else:
398
+ return (None, None)
399
+
400
+ if not isinstance(input_node.data.data, Loops): # type: ignore[attr-defined]
401
+ # Other IRNodes do not have reduction_ranges.
402
+ return (None, None)
403
+
404
+ # There is one issue: what if there are views / permutations between the input node and its dependent realized nodes?
405
+ # The current method still uses reduction ranges from the dependent realized node, which is not ideal.
406
+ # Is there a way to check whether there are permutations inbetween?
407
+ reads = input_node.get_reads()
408
+ reduction_size = None
409
+ size = None
410
+ while reduction_size is None and len(reads) > 0:
411
+ seen = set()
412
+ new_reads = []
413
+ for read in reads:
414
+ if not isinstance(read, MemoryDep):
415
+ continue
416
+ if read.name in seen:
417
+ continue
418
+ seen.add(read.name)
419
+ buffer = V.graph.get_buffer(read.name)
420
+ if buffer is None:
421
+ continue
422
+ if (
423
+ isinstance(buffer, ComputedBuffer)
424
+ and len(buffer.get_reduction_size()) > 0
425
+ ):
426
+ if reduction_size is None:
427
+ reduction_size = buffer.get_reduction_size()
428
+ size = buffer.get_size()
429
+ elif (
430
+ reduction_size != buffer.get_reduction_size()
431
+ or size != buffer.get_size()
432
+ ):
433
+ return (None, None)
434
+ else:
435
+ new_reads.extend(buffer.get_reads())
436
+ if reads == new_reads:
437
+ return (size, reduction_size)
438
+ else:
439
+ reads = new_reads
440
+ return (size, reduction_size)
441
+
442
+
443
+ def canonicalization_prefix():
444
+ return "c"
env-llmeval/lib/python3.10/site-packages/torch/_inductor/exc.py ADDED
@@ -0,0 +1,98 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import tempfile
5
+ import textwrap
6
+ from functools import lru_cache
7
+
8
+ if os.environ.get("TORCHINDUCTOR_WRITE_MISSING_OPS") == "1":
9
+
10
+ @lru_cache(None)
11
+ def _record_missing_op(target):
12
+ with open(f"{tempfile.gettempdir()}/missing_ops.txt", "a") as fd:
13
+ fd.write(str(target) + "\n")
14
+
15
+ else:
16
+
17
+ def _record_missing_op(target): # type: ignore[misc]
18
+ pass
19
+
20
+
21
+ class OperatorIssue(RuntimeError):
22
+ @staticmethod
23
+ def operator_str(target, args, kwargs):
24
+ lines = [f"target: {target}"] + [
25
+ f"args[{i}]: {arg}" for i, arg in enumerate(args)
26
+ ]
27
+ if kwargs:
28
+ lines.append(f"kwargs: {kwargs}")
29
+ return textwrap.indent("\n".join(lines), " ")
30
+
31
+
32
+ class MissingOperatorWithoutDecomp(OperatorIssue):
33
+ def __init__(self, target, args, kwargs):
34
+ _record_missing_op(target)
35
+ super().__init__(f"missing lowering\n{self.operator_str(target, args, kwargs)}")
36
+
37
+
38
+ class MissingOperatorWithDecomp(OperatorIssue):
39
+ def __init__(self, target, args, kwargs):
40
+ _record_missing_op(target)
41
+ super().__init__(
42
+ f"missing decomposition\n{self.operator_str(target, args, kwargs)}"
43
+ + textwrap.dedent(
44
+ f"""
45
+
46
+ There is a decomposition available for {target} in
47
+ torch._decomp.get_decompositions(). Please add this operator to the
48
+ `decompositions` list in torch._inductor.decompositions
49
+ """
50
+ )
51
+ )
52
+
53
+
54
+ class LoweringException(OperatorIssue):
55
+ def __init__(self, exc: Exception, target, args, kwargs):
56
+ super().__init__(
57
+ f"{type(exc).__name__}: {exc}\n{self.operator_str(target, args, kwargs)}"
58
+ )
59
+
60
+
61
+ class InvalidCxxCompiler(RuntimeError):
62
+ def __init__(self):
63
+ from . import config
64
+
65
+ super().__init__(
66
+ f"No working C++ compiler found in {config.__name__}.cpp.cxx: {config.cpp.cxx}"
67
+ )
68
+
69
+
70
+ class CppWrapperCodeGenError(RuntimeError):
71
+ def __init__(self, msg: str):
72
+ super().__init__(f"C++ wrapper codegen error: {msg}")
73
+
74
+
75
+ class CppCompileError(RuntimeError):
76
+ def __init__(self, cmd: list[str], output: str):
77
+ if isinstance(output, bytes):
78
+ output = output.decode("utf-8")
79
+
80
+ super().__init__(
81
+ textwrap.dedent(
82
+ """
83
+ C++ compile error
84
+
85
+ Command:
86
+ {cmd}
87
+
88
+ Output:
89
+ {output}
90
+ """
91
+ )
92
+ .strip()
93
+ .format(cmd=" ".join(cmd), output=output)
94
+ )
95
+
96
+
97
+ class CUDACompileError(CppCompileError):
98
+ pass
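A quick illustration of the error-formatting helpers above; the target string and arguments are hypothetical:

from torch._inductor.exc import MissingOperatorWithoutDecomp, OperatorIssue

# operator_str produces an indented target/args/kwargs block.
print(OperatorIssue.operator_str("aten.custom_op.default", (1, 2), {"dim": 0}))

try:
    raise MissingOperatorWithoutDecomp("aten.custom_op.default", (1, 2), {"dim": 0})
except MissingOperatorWithoutDecomp as e:
    print(e)  # "missing lowering" followed by the formatted operator details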
env-llmeval/lib/python3.10/site-packages/torch/_inductor/freezing.py ADDED
@@ -0,0 +1,266 @@
1
+ from __future__ import annotations
2
+
3
+ import itertools
4
+ import logging
5
+
6
+ import weakref
7
+ from typing import Any, List, Optional, Tuple
8
+
9
+ import torch
10
+ import torch.utils._pytree as pytree
11
+ from torch._dynamo.utils import dynamo_timed, lazy_format_graph_code
12
+ from torch._functorch.aot_autograd import MutationType
13
+ from torch._functorch.compile_utils import fx_graph_cse
14
+ from torch._inductor.constant_folding import constant_fold, replace_node_with_constant
15
+
16
+ from torch._inductor.fx_passes.freezing_patterns import freezing_passes
17
+ from torch._inductor.fx_passes.post_grad import view_to_reshape
18
+
19
+ from . import config
20
+
21
+ aten = torch.ops.aten
22
+ prims = torch.ops.prims
23
+
24
+ log = logging.getLogger(__name__)
25
+
26
+
27
+ def replace_params_with_constants(
28
+ gm: torch.fx.GraphModule,
29
+ flat_params: list[Any],
30
+ fw_metadata: torch._functorch.aot_autograd.ViewAndMutationMeta,
31
+ ) -> List[int]:
32
+ """
33
+ Replaces the parameters of a PyTorch GraphModule with constants wherever possible.
34
+ Returns a list of indices representing the input parameters that were not converted to constants.
35
+ """
36
+ params = [node for node in gm.graph.nodes if node.op == "placeholder"]
37
+ fake_inp_nodes = params[: len(params)]
38
+ preserved_arg_indices = []
39
+ aliased_input_args = [
40
+ out_info.base_idx
41
+ for out_info in fw_metadata.output_info
42
+ if out_info.base_idx is not None
43
+ ]
44
+
45
+ # TODO (tmanlaibaatar) figure out why this is different
46
+ # from mutated_inp_runtime_indices
47
+ mutated_inps = [
48
+ i
49
+ for i, m in enumerate(fw_metadata.input_info)
50
+ if m.mutation_type
51
+ in (MutationType.MUTATED_IN_GRAPH, MutationType.MUTATED_OUT_GRAPH)
52
+ ]
53
+
54
+ for i, (real_input, node) in enumerate(zip(flat_params, fake_inp_nodes)):
55
+ if i in mutated_inps or i in aliased_input_args:
56
+ preserved_arg_indices.append(i)
57
+ continue
58
+ replace_node_with_constant(gm, node, real_input)
59
+ # add on non param inputs
60
+ preserved_arg_indices.extend(range(len(flat_params), len(params)))
61
+ # is this necessary ?
62
+ gm.recompile()
63
+ return preserved_arg_indices
64
+
65
+
66
+ def freeze(
67
+ dynamo_gm: torch.fx.GraphModule,
68
+ aot_autograd_gm: torch.fx.GraphModule,
69
+ example_inputs: List[torch._subclasses.FakeTensor],
70
+ ) -> Tuple[torch.fx.GraphModule, List[int]]:
71
+ """
72
+ Inlines parameters that are not mutated into constants and optimizes the graph through constant propagation
73
+ and other techniques. If enabled, the function also discards the original parameters of the module for memory efficiency.
74
+
75
+ Assumes that this function is run in dynamo tracing post aot_autograd.
76
+
77
+ Args:
78
+ dynamo_gm (torch.fx.GraphModule): The Dynamo constructed GraphModule.
79
+ aot_autograd_gm (torch.fx.GraphModule): The aot_autograd constructed GraphModule to be frozen.
80
+ example_inputs (List[torch.Tensor]): A list of example input tensors to be used in the freezing process.
81
+
82
+ Returns:
83
+ Tuple[torch.fx.GraphModule, List[int]]: A tuple containing the frozen GraphModule and a list of indices
84
+ of the inputs that were preserved (not turned into constants).
85
+ """
86
+ # We have convert conv's weight to channels last which may meet error for .view
87
+ # when doing fake_tensor_prop. So we need to convert view to reshape first.
88
+ # See the details in fx_codegen_and_compile of compile_fx.py.
89
+ view_to_reshape(aot_autograd_gm)
90
+
91
+ if tracing_context := torch._guards.TracingContext.try_get():
92
+ fw_metadata = tracing_context.fw_metadata
93
+ params_flat = tracing_context.params_flat
94
+ assert fw_metadata is not None and params_flat is not None
95
+
96
+ preserved_arg_indices = replace_params_with_constants(
97
+ aot_autograd_gm, params_flat, fw_metadata
98
+ )
99
+ else:
100
+ inputs = [
101
+ node for node in aot_autograd_gm.graph.nodes if node.op == "placeholder"
102
+ ]
103
+ preserved_arg_indices = list(range(len(inputs)))
104
+
105
+ # TODO - further restrict cse ? right now needed to dedup aliasing ops
106
+ cse_graph = fx_graph_cse(aot_autograd_gm.graph)
107
+ aot_autograd_gm.graph = cse_graph
108
+ aot_autograd_gm.recompile()
109
+
110
+ aot_example_inputs = [example_inputs[ind] for ind in preserved_arg_indices]
111
+ freezing_passes(aot_autograd_gm, aot_example_inputs)
112
+
113
+ constant_fold(aot_autograd_gm)
114
+ # invalidate nn Modules
115
+ if config.freezing_discard_parameters:
116
+ invalidate_eager_modules()
117
+ discard_traced_gm_params(dynamo_gm)
118
+
119
+ log.debug("%s", lazy_format_graph_code("FROZEN GRAPH", aot_autograd_gm))
120
+
121
+ return aot_autograd_gm, preserved_arg_indices
122
+
123
+
124
+ class ErasedTensor(torch.Tensor):
125
+ @staticmethod
126
+ def __new__(cls, elem, name, owning_mod):
127
+ return super().__new__(cls, elem.to(device="meta"))
128
+
129
+ def __init__(self, elem, name: Optional[str], mod):
130
+ self.erased_name = name
131
+ self.owning_mod_ref = weakref.ref(mod)
132
+
133
+ @classmethod
134
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
135
+ erased_tensors = [
136
+ e
137
+ for e in pytree.arg_tree_leaves(*args, **kwargs)
138
+ if isinstance(e, ErasedTensor)
139
+ ]
140
+ assert len(erased_tensors) > 0
141
+ e = erased_tensors[0]
142
+
143
+ raise RuntimeError(
144
+ f"Trying to run Pytorch Eager Module after Dynamo Freezing. "
145
+ "The original parameters have been discarded for memory efficiency. "
146
+ f"Found in op {func} for erased parameter {e.erased_name} of {e.owning_mod_ref()}"
147
+ )
148
+
149
+
150
+ @torch.utils._python_dispatch._disable_current_modes()
151
+ def invalidate_eager_modules():
152
+ for mod in torch._guards.TracingContext.get().module_context.nn_modules.values():
153
+ if not isinstance(mod, torch.nn.Module):
154
+ continue
155
+
156
+ for attr_name, tensor in list(
157
+ itertools.chain(
158
+ mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
159
+ )
160
+ ):
161
+ with torch._dispatch.python.no_python_dispatcher():
162
+ e_t = ErasedTensor(tensor, attr_name, mod)
163
+ if isinstance(tensor, torch.nn.Parameter):
164
+ e_t.requires_grad_(True)
165
+ e_t._is_param = True # type: ignore[attr-defined]
166
+ setattr(mod, attr_name, e_t)
167
+
168
+
169
+ @torch.utils._python_dispatch._disable_current_modes()
170
+ def discard_traced_gm_params(mod: torch.fx.GraphModule):
171
+ for attr_name, tensor in list(
172
+ itertools.chain(
173
+ mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
174
+ )
175
+ ):
176
+ with torch._dispatch.python.no_python_dispatcher():
177
+ e_t = ErasedTensor(tensor, attr_name, mod)
178
+ if isinstance(tensor, torch.nn.Parameter):
179
+ e_t.requires_grad_(True)
180
+ e_t._is_param = True # type: ignore[attr-defined]
181
+ setattr(mod, attr_name, e_t)
182
+
183
+
184
+ def enforce_output_layout(gm: torch.fx.GraphModule):
185
+ """
186
+ Make sure the output node's layout does not change due to compiler optimizations
187
+ by adding aten.as_strided nodes with the expected strides.
188
+
189
+ Only used for inference so we can assume all graph outputs are model outputs.
190
+ """
191
+ *_, output_node = gm.graph.nodes
192
+ out_list = output_node.args[0]
193
+ with gm.graph.inserting_before(output_node):
194
+ for n in out_list:
195
+ if not isinstance(
196
+ n.meta["val"], torch.Tensor
197
+ ) or not torch._prims_common.is_non_overlapping_and_dense(n.meta["val"]):
198
+ continue
199
+
200
+ # add a node to enforce eager layout
201
+ ft = n.meta["val"]
202
+ new_node = gm.graph.call_function(
203
+ prims.inductor_force_stride_order.default, (n, ft.stride())
204
+ )
205
+
206
+ # cannot call
207
+ # n.replace_all_uses_with(new_node)
208
+ # since it will replace the usage of n in new_node itself.
209
+ output_node.replace_input_with(n, new_node)
210
+
211
+ gm.graph.lint()
212
+ gm.recompile()
213
+
214
+
215
+ def enforce_as_strided_input_layout(gm: torch.fx.GraphModule):
216
+ """
217
+ Make sure the as_strided node's input's layout does not change due to compiler
218
+ optimizations, because the as_strided strides info depends on input tensor stride info.
219
+ """
220
+
221
+ as_strided_ops = [
222
+ torch.ops.aten.as_strided.default,
223
+ torch.ops.aten.as_strided_.default,
224
+ torch.ops.aten.as_strided_scatter.default,
225
+ ]
226
+ strided_nodes = [n for n in gm.graph.nodes if n.target in as_strided_ops]
227
+ for n in strided_nodes:
228
+ with gm.graph.inserting_before(n):
229
+ # add a node to enforce eager layout
230
+ ft = n.args[0].meta["val"]
231
+ new_node = gm.graph.call_function(
232
+ prims.inductor_force_stride_order.default, (n.args[0], ft.stride())
233
+ )
234
+ n.replace_input_with(n.args[0], new_node)
235
+
236
+ gm.graph.lint()
237
+ gm.recompile()
238
+
239
+
240
+ @dynamo_timed
241
+ def convert_conv_weights_to_channels_last(gm: torch.fx.GraphModule):
242
+ """
243
+ Convert 4d convolution weight tensor to channels last format.
244
+
245
+ This pass is performed before freezing so the added nodes can be constant
246
+ folded by freezing.
247
+ """
248
+ convs = [n for n in gm.graph.nodes if n.target == aten.convolution.default]
249
+ for conv in convs:
250
+ weight_node = conv.args[1]
251
+ if len(weight_node.meta["val"].size()) != 4 or weight_node.meta[
252
+ "val"
253
+ ].is_contiguous(memory_format=torch.channels_last):
254
+ # not a 4d tensor or already channels last, skip
255
+ continue
256
+
257
+ with gm.graph.inserting_before(conv):
258
+ new_node = gm.graph.call_function(
259
+ aten.clone.default,
260
+ (weight_node,),
261
+ {"memory_format": torch.channels_last},
262
+ )
263
+ conv.replace_input_with(weight_node, new_node)
264
+
265
+ enforce_as_strided_input_layout(gm)
266
+ enforce_output_layout(gm)
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_utils.py ADDED
@@ -0,0 +1,178 @@
1
+ from collections import defaultdict
2
+ from typing import Any, Callable, DefaultDict, Dict, Optional, Tuple, Type
3
+
4
+ import torch
5
+ import torch.fx
6
+ from torch.utils import _pytree as pytree
7
+ from torch.utils._pytree import tree_map
8
+ from .virtualized import V
9
+
10
+
11
+ # Check whether the pattern (nn.module, F.function/torch.Tensor.method) matches at this node.
12
+ # Works for length 2 patterns with 1 module and 1 function/method.
13
+ def matches_module_function_pattern(
14
+ pattern: Tuple[Type[torch.nn.modules.Module], Callable[..., Any]],
15
+ node: torch.fx.node.Node,
16
+ modules: Dict[str, torch.nn.modules.Module],
17
+ ) -> bool:
18
+ if len(node.args) == 0:
19
+ return False
20
+ if not isinstance(node.args[0], torch.fx.Node) or not isinstance(
21
+ node, torch.fx.Node
22
+ ):
23
+ return False
24
+ # the first node is call_module
25
+ if node.args[0].op != "call_module":
26
+ return False
27
+ if not isinstance(node.args[0].target, str):
28
+ return False
29
+ if node.args[0].target not in modules:
30
+ return False
31
+ if type(modules[node.args[0].target]) is not pattern[0]:
32
+ return False
33
+ # the second node is call_function or call_method
34
+ if node.op != "call_function" and node.op != "call_method":
35
+ return False
36
+ if node.target != pattern[1]:
37
+ return False
38
+ # make sure node.args[0] output is only used by current node.
39
+ if len(node.args[0].users) > 1:
40
+ return False
41
+ return True
42
+
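+ # A minimal usage sketch (the BatchNorm2d + relu pair below is illustrative, not part
+ # of this file): scan a traced graph for "call_module followed by call_function" pairs.
+ def _matches_pattern_demo():
+     class _Net(torch.nn.Module):
+         def __init__(self):
+             super().__init__()
+             self.bn = torch.nn.BatchNorm2d(3)
+
+         def forward(self, x):
+             return torch.relu(self.bn(x))
+
+     gm = torch.fx.symbolic_trace(_Net())
+     modules = dict(gm.named_modules())
+     return [
+         node
+         for node in gm.graph.nodes
+         if matches_module_function_pattern((torch.nn.BatchNorm2d, torch.relu), node, modules)
+     ]  # -> the relu node, whose only input is the BatchNorm2d call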
43
+
44
+ class FakeTensorUpdater:
45
+ """
46
+ The main idea here is that it's difficult to maintain accurate fake
47
+ tensors (our primary form of metadata) for each node in our graph as we
48
+ transform it.
49
+
50
+ The most reliable way to obtain this information is by rerunning
51
+ faketensor propagation. However, in general, faketensor propagation is
52
+ fairly expensive. So, instead we'd like to only rerun faketensor
53
+ propagation on nodes that have changed.
54
+
55
+ In order to detect which nodes have changed, we first hash each node's identity,
56
+ target, and argument lists (which are immutable in FX).
57
+
58
+ Then, whenever we call incremental_update, we check which FX nodes have a
59
+ new hash, and recompute the faketensor metadata for those nodes. Then, we
60
+ continue to recursively compute the faketensors for all users until the
61
+ fake tensors stop changing.
62
+ """
63
+
64
+ def __init__(self, graph: torch.fx.Graph):
65
+ self.processed_hashes = set()
66
+ self.graph = graph
67
+
68
+ for node in self.graph.nodes:
69
+ self.processed_hashes.add(self.hash_node(node))
70
+
71
+ def hash_node(self, node: torch.fx.Node):
72
+ # todo(chilli): Not a great hash function
73
+ return (node, node.target, id(node.args), id(node.kwargs))
74
+
75
+ def incremental_update(self):
76
+ processed = set()
77
+ existing_storages: DefaultDict[Optional[int], int] = defaultdict(int)
78
+ for node in self.graph.nodes:
79
+ existing_storages[get_node_storage(node)] += 1
80
+
81
+ def is_fake_tensor_same(new, old):
82
+ if type(new) != type(old):
83
+ return False
84
+ if isinstance(new, (list, tuple)):
85
+ if len(new) != len(old):
86
+ return False
87
+ return all(
88
+ is_fake_tensor_same(new_i, old_i) for new_i, old_i in zip(new, old)
89
+ )
90
+ assert isinstance(new, torch.Tensor)
91
+ if new.shape != old.shape or new.layout != old.layout:
92
+ return False
93
+ if new.layout == torch.strided and new.stride() != old.stride():
94
+ return False
95
+ if get_storage(new) == get_storage(old):
96
+ return True
97
+
98
+ # This is the case where it returns a completely fresh storage that's used nowhere else.
99
+ if (
100
+ existing_storages[get_storage(old)] == 1
101
+ and get_storage(new) not in existing_storages
102
+ ):
103
+ return True
104
+ return False
105
+
106
+ for node in self.graph.nodes:
107
+ if self.hash_node(node) in self.processed_hashes:
108
+ continue
109
+
110
+ def is_aten_node(node):
111
+ return node.op == "call_function" and isinstance(
112
+ node.target, torch._ops.OpOverload
113
+ )
114
+
115
+ if not is_aten_node(node):
116
+ continue
117
+
118
+ processing = [node]
119
+ while len(processing) > 0:
120
+ updating_node = processing.pop()
121
+ if updating_node in processed:
122
+ continue
123
+ if is_aten_node(updating_node):
124
+ continue
125
+
126
+ is_valid, args, kwargs = get_fake_args_kwargs(updating_node)
127
+ if not is_valid:
128
+ continue
129
+ with V.fake_mode:
130
+ new_fake_tensor = updating_node.target(*args, **kwargs)
131
+ if "val" in updating_node.meta and is_fake_tensor_same(
132
+ new_fake_tensor, updating_node.meta["val"]
133
+ ):
134
+ continue
135
+ updating_node.meta["val"] = new_fake_tensor
136
+
137
+ # todo(chilli): This code path is not exercised by our existing
138
+ # tests - add a test
139
+ existing_storages[get_node_storage(new_fake_tensor)] += 1
140
+ processed.add(updating_node)
141
+ for user in updating_node.users:
142
+ processing.append(user)
143
+
144
+ self.processed_hashes.add(self.hash_node(updating_node))
145
+
146
+
147
+ def get_storage(t: torch.Tensor) -> int:
148
+ return t.untyped_storage()._cdata
149
+
150
+
151
+ def get_node_storage(node: torch.fx.Node) -> Optional[int]:
152
+ if "val" not in node.meta:
153
+ return None
154
+ if not isinstance(node.meta["val"], torch.Tensor):
155
+ return None
156
+ if not torch._C._has_storage(node.meta["val"]):
157
+ return None
158
+ return get_storage(node.meta["val"])
159
+
160
+
161
+ def get_fake(x):
162
+ if isinstance(x, torch.fx.Node):
163
+ if "val" not in x.meta:
164
+ return x
165
+ return x.meta["val"]
166
+ return x
167
+
168
+
169
+ def get_fake_args_kwargs(x: torch.fx.Node) -> Tuple[bool, Tuple[Any], Dict[str, Any]]:
170
+ """
171
+ The first return value is False if any of the input nodes is missing a fake tensor, True otherwise.
172
+ """
173
+ args, kwargs = tree_map(get_fake, (x.args, x.kwargs))
174
+ if any(
175
+ isinstance(a, torch.fx.Node) for a in pytree.arg_tree_leaves(*args, **kwargs)
176
+ ):
177
+ return False, args, kwargs
178
+ return True, args, kwargs
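+ # A minimal sketch of the contract above (the tiny add graph is illustrative, not part
+ # of this file): the boolean flips to True once every input node carries meta["val"].
+ def _fake_args_demo():
+     from torch._subclasses.fake_tensor import FakeTensorMode
+
+     gm = torch.fx.symbolic_trace(lambda x: x + 1)
+     add_node = next(n for n in gm.graph.nodes if n.op == "call_function")
+     ok, _, _ = get_fake_args_kwargs(add_node)  # False: the placeholder has no meta["val"]
+     placeholder = add_node.args[0]
+     placeholder.meta["val"] = FakeTensorMode().from_tensor(torch.randn(2))
+     ok, args, _ = get_fake_args_kwargs(add_node)  # True: args[0] is now a FakeTensor
+     return ok, args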
env-llmeval/lib/python3.10/site-packages/torch/_inductor/graph.py ADDED
@@ -0,0 +1,1133 @@
1
+ import hashlib
2
+ import logging
3
+ import operator
4
+ import os
5
+ import re
6
+ import sys
7
+ import time
8
+ from collections import defaultdict
9
+ from contextlib import contextmanager
10
+ from typing import Any, Callable, DefaultDict, Dict, List, Optional, Set, Tuple
11
+
12
+ import sympy
13
+
14
+ import torch
15
+ import torch._logging
16
+ import torch.fx
17
+ from torch._decomp import get_decompositions
18
+ from torch._dynamo.utils import defake, dynamo_timed
19
+ from torch._logging import LazyString
20
+ from torch._subclasses.fake_tensor import FakeTensor
21
+ from torch.fx.experimental.sym_node import magic_methods, method_to_operator
22
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols, ShapeEnv, SymTypes
23
+ from torch.utils._mode_utils import no_dispatch
24
+
25
+ from . import config, ir
26
+ from .codegen.common import (
27
+ get_scheduling_for_device,
28
+ get_wrapper_codegen_for_device,
29
+ register_backend_for_device,
30
+ )
31
+ from .codegen.wrapper import CppWrapperCodeGen, CudaWrapperCodeGen, WrapperCodeGen
32
+ from .exc import (
33
+ CppWrapperCodeGenError,
34
+ LoweringException,
35
+ MissingOperatorWithDecomp,
36
+ MissingOperatorWithoutDecomp,
37
+ )
38
+ from .ir import (
39
+ Constant,
40
+ FixedLayout,
41
+ InputBuffer,
42
+ Pointwise,
43
+ Reduction,
44
+ StorageBox,
45
+ TensorBox,
46
+ )
47
+ from .lowering import (
48
+ FALLBACK_ALLOW_LIST,
49
+ fallback_handler,
50
+ fallback_node_due_to_unsupported_type,
51
+ layout_constraints,
52
+ lowerings,
53
+ make_fallback,
54
+ needs_realized_inputs,
55
+ unsupported_output_tensor,
56
+ )
57
+ from .sizevars import SizeVarAllocator
58
+ from .utils import convert_shape_to_inductor, gather_origins, get_sympy_Expr_dtype
59
+ from .virtualized import V
60
+
61
+ log = logging.getLogger(__name__)
62
+ perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
63
+ output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")
64
+
65
+
66
+ def supported_dtype_of_cpp_wrapper(dtype, cuda):
67
+ supported_dtype = {
68
+ torch.float32,
69
+ torch.float64,
70
+ torch.int64,
71
+ torch.int32,
72
+ torch.int16,
73
+ torch.int8,
74
+ torch.uint8,
75
+ torch.bool,
76
+ torch.bfloat16,
77
+ torch.complex64,
78
+ # torch.float16, # TODO: implement this
79
+ }
80
+ if cuda:
81
+ supported_dtype.add(torch.float16)
82
+ supported_dtype.add(torch.float8_e4m3fn)
83
+ supported_dtype.add(torch.float8_e5m2)
84
+
85
+ return dtype in supported_dtype
86
+
87
+
88
+ def may_get_constant_buffer_dtype(constant_buffer):
89
+ assert isinstance(
90
+ constant_buffer, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
91
+ ), "get_constant_buffer_dtype only supports input of sympy.Symbol, sympy.Expr or sympy.core.numbers.Integer"
92
+ if isinstance(constant_buffer, sympy.core.numbers.Integer):
93
+ return torch.int64
94
+
95
+ if isinstance(constant_buffer, sympy.Expr):
96
+ return get_sympy_Expr_dtype(constant_buffer)
97
+
98
+ if constant_buffer.is_integer:
99
+ return torch.int64
100
+ elif constant_buffer.is_float:
101
+ return torch.float32
102
+ else:
103
+ return None
104
+
105
+
106
+ def is_magic_method(op):
107
+ magic_ops = {method_to_operator(m) for m in magic_methods}
108
+ return op in magic_ops
109
+
110
+
111
+ class GraphLowering(torch.fx.Interpreter):
112
+ graph_outputs: List[ir.IRNode]
113
+
114
+ def symbolic_sizes_strides(self, ex: torch.Tensor):
115
+ """
116
+ Support dynamic shapes and dynamic strides by assigning variables
117
+ to each dimension. We duck-shape tensors, so if two tensors
118
+ have the same size they get assigned the same symbolic variable.
119
+ """
120
+ if self.reuse_shape_env:
121
+ return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
122
+ ex.stride()
123
+ )
124
+ else:
125
+ from torch._dynamo.source import ConstantSource
126
+
127
+ # TODO: this should not be needed once #93059 lands
128
+ # https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
129
+ # TODO: make a dedicated UnknownSource for this?
130
+ # NB: This is using the legacy default behavior from
131
+ # create_symbolic_sizes_strides_storage_offset but we hope we can
132
+ # just delete this entirely
133
+ source = ConstantSource(
134
+ f"__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}"
135
+ )
136
+ (
137
+ size,
138
+ stride,
139
+ _,
140
+ ) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
141
+ ex,
142
+ source,
143
+ )
144
+
145
+ size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
146
+ stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
147
+ return size, stride
148
+
149
+ def static_sizes_strides(self, ex: torch.Tensor):
150
+ """
151
+ Primarily used for weights
152
+ """
153
+ size = [sympy.Integer(i) for i in ex.size()]
154
+ stride = [sympy.Integer(i) for i in ex.stride()]
155
+ return size, stride
156
+
157
+ def init_backend_registration(self):
158
+ if get_scheduling_for_device("cpu") is None:
159
+ from .codegen.cpp import CppScheduling
160
+
161
+ register_backend_for_device("cpu", CppScheduling, WrapperCodeGen)
162
+
163
+ if get_scheduling_for_device("cuda") is None:
164
+ from .codegen.cuda_combined_scheduling import CUDACombinedScheduling
165
+
166
+ # CUDACombinedScheduling combines Triton and CUDA C++ scheduling for CUDA devices via delegation
167
+ register_backend_for_device("cuda", CUDACombinedScheduling, WrapperCodeGen)
168
+
169
+ def __init__(
170
+ self,
171
+ gm: torch.fx.GraphModule,
172
+ example_inputs: Optional[List[torch.Tensor]] = None,
173
+ shape_env=None,
174
+ num_static_inputs=None,
175
+ graph_id=None,
176
+ cpp_wrapper=False,
177
+ aot_mode=False,
178
+ user_visible_outputs=frozenset(),
179
+ layout_opt=None,
180
+ extern_node_serializer=None,
181
+ is_inference=False,
182
+ ):
183
+ super().__init__(gm)
184
+
185
+ self.example_inputs = example_inputs
186
+ self.layout_opt = (
187
+ layout_opt
188
+ if layout_opt is not None
189
+ else self.decide_layout_opt(gm, is_inference=is_inference)
190
+ )
191
+ self.num_channels_last_conv = 0
192
+ self.is_inference = is_inference
193
+
194
+ self.extra_traceback = False # we do our own error wrapping
195
+ if shape_env is None:
196
+ shape_env = ShapeEnv()
197
+ self.reuse_shape_env = False
198
+ else:
199
+ self._shape_env = shape_env
200
+ self.reuse_shape_env = True
201
+ self._shape_env = shape_env
202
+ self.sizevars = SizeVarAllocator(shape_env)
203
+ self.graph_inputs: Dict[str, TensorBox] = {}
204
+ self.graph_inputs_original: Dict[str, InputBuffer] = {}
205
+ self.device_types: Set[str] = set()
206
+ self.device_idxs: Set[int] = set()
207
+ self.cuda = False
208
+ self.buffers: List[ir.Buffer] = []
209
+ self.constants: Dict[str, torch.Tensor] = {}
210
+ self.constant_reprs: Dict[str, str] = {}
211
+ self.removed_buffers: Set[str] = set()
212
+ self.removed_inplace_buffers: Set[str] = set()
213
+ self.mutated_buffers: Set[str] = set()
214
+ self.never_reuse_buffers: Set[str] = set()
215
+ self.inplaced_to_remove: Set[str] = set()
216
+ self.wrapper_code: WrapperCodeGen = None # type: ignore[assignment]
217
+ # See `ProxyExecutor Design Note` in ir.py for more details
218
+ self.extern_kernel_nodes: List[ir.ExternKernelNode] = []
219
+ self.extern_node_serializer: Optional[
220
+ Callable[[List[ir.ExternKernelNode]], Any]
221
+ ] = extern_node_serializer
222
+ self.current_node: torch.fx.Node = None # type: ignore[assignment]
223
+ self.num_static_inputs = num_static_inputs
224
+ self.lists: Dict[str, List[str]] = {}
225
+ self.mutated_inputs: Set[str] = set()
226
+ self.mutated_input_idxs: List[int] = []
227
+ self.name_to_buffer: Dict[str, ir.Buffer] = {}
228
+ self.name_to_users: DefaultDict[str, List[ir.IRNode]] = defaultdict(list)
229
+ self.creation_time = time.time()
230
+ self.name = "GraphLowering"
231
+ self.cpp_wrapper = cpp_wrapper
232
+ self.aot_mode = aot_mode
233
+ self.graph_id = graph_id
234
+ self.scheduler: "torch._inductor.scheduler.Scheduler" = None # type: ignore[assignment]
235
+ self.nodes_prefer_channels_last = (
236
+ self.find_nodes_prefer_channels_last() if self.layout_opt else set()
237
+ )
238
+ self._warned_fallback = {"aten.convolution_backward"}
239
+ self.user_visible_outputs = user_visible_outputs
240
+ self.cache_key: str = "" # This is the cache key for the compiled artifact
241
+ self.cache_path: str = "" # This is the path in the filesystem where the compiled artifact is stored
242
+ self.cache_linemap: List[
243
+ Tuple[int, str]
244
+ ] = (
245
+ []
246
+ ) # This is the linemap used by the profiler to mark custom compiled kernels getting run
247
+ # Used if lowering encounters cases where cudagraphs are not supported
248
+ self.disable_cudagraphs = False
249
+ self.disable_cudagraphs_reason = ""
250
+ self.orig_gm: torch.fx.GraphModule = gm.__copy__()
251
+ self.init_backend_registration()
252
+
253
+ @staticmethod
254
+ def decide_layout_opt(gm, *, is_inference) -> bool:
255
+ """
256
+ Decide if we should enable layout optimization for this graph based on
257
+ heuristics.
258
+ """
259
+ if not config.layout_optimization:
260
+ return False
261
+
262
+ if config.force_layout_optimization:
263
+ return True
264
+
265
+ conv_nodes = [
266
+ n for n in gm.graph.nodes if n.target == torch.ops.aten.convolution.default
267
+ ]
268
+ nconv = len(conv_nodes)
269
+
270
+ if nconv == 0:
271
+ return False
272
+
273
+ # NHWC perf issue on ROCm5.7 first noted here https://github.com/pytorch/pytorch/pull/110319
274
+ if torch.version.hip and torch.cuda.is_available():
275
+ return False
276
+
277
+ # For the CPU backend with mkldnn enabled, we always use channels_last for better performance.
278
+ if (
279
+ all(
280
+ n.args[idx].meta["val"].device == torch.device("cpu")
281
+ for n in conv_nodes
282
+ for idx in [0, 1]
283
+ )
284
+ and torch.backends.mkldnn.enabled
285
+ and torch.backends.mkldnn.is_available()
286
+ ):
287
+ return True
288
+
289
+ # The following models are skipped due to this:
290
+ # jx_nest_base
291
+ # volo_d1_224
292
+ if len(list(gm.graph.nodes)) >= 300 * nconv:
293
+ log.debug("Skipped layout opt because there are only a few convs")
294
+ return False
295
+
296
+ if any(
297
+ has_free_symbols(n.args[idx].meta["val"])
298
+ for n in conv_nodes
299
+ for idx in [0, 1]
300
+ ):
301
+ log.debug(
302
+ "See perf regression with dynamic shape. Follow up in https://github.com/pytorch/pytorch/issues/102670"
303
+ )
304
+ return False
305
+
306
+ def is_grouped(n):
307
+ return n.args[-1] > 1 and n.args[1].meta["val"].size(1) > 1
308
+
309
+ def is_in_out_channel(n):
310
+ return (
311
+ n.args[1].meta["val"].size(0) * 2 <= n.args[1].meta["val"].size(1)
312
+ and n.args[1].meta["val"].size(2) > 1
313
+ )
314
+
315
+ def is_small_channel(n):
316
+ return (
317
+ n.args[1].meta["val"].size(0) <= 64
318
+ and n.args[1].meta["val"].size(1) <= 64
319
+ )
320
+
321
+ # In inference, only grouped convolutions were benchmarked as slower in the conv samples
322
+ if is_inference:
323
+ from torch.utils.flop_counter import FlopCounterMode
324
+
325
+ flop_counts: Dict[str, float] = defaultdict(float)
326
+ for node in conv_nodes:
327
+ success, args, kwargs = torch._inductor.fx_utils.get_fake_args_kwargs(
328
+ node
329
+ )
330
+
331
+ if success:
332
+ with FlopCounterMode(display=False) as flop_counter_mode:
333
+ with V.fake_mode:
334
+ node.target(*args, **kwargs)
335
+
336
+ counted_flops = flop_counter_mode.get_total_flops()
337
+ if is_grouped(node):
338
+ node_type = "grouped"
339
+ elif is_small_channel(node):
340
+ node_type = "small"
341
+ elif is_in_out_channel(node):
342
+ node_type = "in_out"
343
+ else:
344
+ node_type = "default"
345
+
346
+ flop_counts[node_type] += counted_flops
347
+ else:
348
+ log.debug("Conv inputs meta not found")
349
+
350
+ # average benchmarked channels last speedup / slowdown, < 1 is speedup.
351
+ # taken from the set of convolution inputs in benchmarks/dynamo/microbenchmarks/operator_inp_logs/torchbench_train/
352
+ # To regenerate these numbers follow https://gist.github.com/eellison/55d7a6ed6f39829d68ac56f95f4df5bb
353
+ GROUPED_MULTIPLIER = 1.358
354
+ DEFAULT_MULTIPLIER = 0.823
355
+ IN_OUT_MULTIPLIER = 0.725
356
+ SMALL_MULTIPLIER = 0.783
357
+
358
+ total_flops = sum(flop_counts.values())
359
+ # TODO - get different values per hardware
360
+ weighted_flops = (
361
+ flop_counts["grouped"] * GROUPED_MULTIPLIER
362
+ + flop_counts["small"] * SMALL_MULTIPLIER
363
+ + flop_counts["in_out"] * IN_OUT_MULTIPLIER
364
+ + flop_counts["default"] * DEFAULT_MULTIPLIER
365
+ )
366
+ do_layout_opt = weighted_flops <= total_flops
367
+ if not do_layout_opt:
368
+ log.debug(
369
+ "Skipped layout opt in inference because weighted flops indicate slowdown, default: %d, channels last: %d",
370
+ total_flops,
371
+ weighted_flops,
372
+ )
373
+ return do_layout_opt
374
+
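+ # Worked example of the weighting above: a model whose conv FLOPs are all "default"
+ # gives weighted_flops = 0.823 * total_flops <= total_flops, so layout opt stays on,
+ # while an all-"grouped" model gives 1.358 * total_flops and is skipped.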
375
+ # Channels last layout can dramatically hurt grouped conv perf. E.g.
376
+ # Conv with arguments like
377
+ # {"input_shape": [32, 224, 112, 112], "weight_shape": [224, 112, 3, 3],
378
+ # "stride": [2, 2], "padding": [1, 1], "groups": 2}
379
+ # slows down 31x using channels last.
380
+
381
+ # But a lot of timm models use depthwise separable convolution which will
382
+ # result in grouped convolution with in-channel size == 1.
383
+ # For those grouped convolution, channels last still helps a lot.
384
+ # E.g.
385
+ # Conv with arguments
386
+ # {"input_shape": [128, 58, 56, 56], "weight_shape": [58, 1, 3, 3],
387
+ # "stride": [2, 2], "padding": [1, 1], "groups": 58}
388
+ # get 1.86x speedup with channels last layout.
389
+ #
390
+ # The following heuristics skip using channels-last if the model contains
391
+ # grouped convolution with in-channels > 1.
392
+ if any(is_grouped(n) for n in conv_nodes):
393
+ log.debug(
394
+ "Skip layout opt because found grouped convolution with >1 in_channels!"
395
+ )
396
+ return False
397
+
398
+ # For some models that contain convolution with larger in-channel than out-channel, applying
399
+ # channels last hurts performance.
400
+ # Following models are skipped due to this:
401
+ # - pytorch_unet
402
+ # - phlippe_densenet (slightly worse)
403
+ # - Background_Matting (1.22x -> 0.821x)
404
+ # - pytorch_CycleGAN_and_pix2pix (1.597x -> 1.294x)
405
+ if any(is_in_out_channel(n) for n in conv_nodes):
406
+ log.debug(
407
+ "Skip layout opt because some convolutions have smaller out_channel"
408
+ )
409
+ return False
410
+
411
+ # Following models are skipped due to this:
412
+ # - functorch_maml_omniglot
413
+ if all(is_small_channel(n) for n in conv_nodes):
414
+ log.debug("Skip layout opt because all convolution channels are too small")
415
+ return False
416
+
417
+ return True
418
+
419
+ def find_nodes_prefer_channels_last(self):
420
+ """
421
+ The rule to decide if a node prefers channels last is simple.
422
+ 1. if it's input/output of a convolution
423
+ 2. if one of its users prefers channels last
424
+
425
+ We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs;
426
+ Rule 2 is also important. It makes sure that indirect inputs to convolution also prefer
427
+ channels last.
428
+
429
+ Consider the scenario: conv -> batch-norm -> relu -> conv
430
+ Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies:
431
+ 1. the output of batch-norm should be channels last initially since its input is a conv's output.
432
+ Forcing the batch-norm's output to be contiguous results in the first copy
433
+ 2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output.
434
+ We need convert it to channels last layout which results in the second copy.
435
+ With rule 2, we make sure all the tensors in the chain use the channels last layout. So both copies
436
+ can be saved.
437
+ """
438
+ output_set = set()
439
+ for n in reversed(self.module.graph.nodes):
440
+ if n.target == torch.ops.aten.convolution.default:
441
+ output_set.add(n)
442
+ continue
443
+
444
+ for user in n.users:
445
+ if user in output_set:
446
+ output_set.add(n)
447
+ break
448
+
449
+ # need a second pass to add downstream nodes of those channels last nodes to the set.
450
+ # This pass is especially needed to avoid mix-layout kernel inputs in backward pass.
451
+ #
452
+ # Let's say a conv-batchnorm 's output is passed to relu whose output is in turn returned
453
+ # from the fwd graph. Without this second pass, we will force relu's output to be contiguous.
454
+ # Then in the kernel in backward pass, the contiguous output of relu may be mix with other channels last
455
+ # tensors and passed to a kernel.
456
+ #
457
+ # This pass improves yolov3 training speedup from 1.116x (worse than disabling layout optimization speedup 1.196x) to 1.457x.
458
+ # It also improves dla102 training speedup from 1.240x (worse than disabling layout optimization speedup 1.523x) to 1.835x.
459
+ # This also helps the following models:
460
+ # - res2net101_26w_4s
461
+ # - res2net50_14w_8s
462
+ # - sebotnet33ts_256
463
+ for n in self.module.graph.nodes:
464
+ if n in output_set:
465
+ for child in n.users:
466
+ output_set.add(child)
467
+
468
+ return output_set
469
+
470
+ def warn_fallback(self, name):
471
+ if name not in self._warned_fallback:
472
+ self._warned_fallback.add(name)
473
+ perf_hint_log.info("Using FallbackKernel: %s", name)
474
+
475
+ def add_device_info(self, device: torch.device):
476
+ self.device_types.add(device.type)
477
+ if device.index is not None:
478
+ self.device_idxs.add(device.index)
479
+
480
+ @property
481
+ def fake_mode(self):
482
+ return V.fake_mode
483
+
484
+ def get_buffer(self, buffer_name: str):
485
+ if buffer_name in self.name_to_buffer:
486
+ return self.name_to_buffer[buffer_name]
487
+ if buffer_name in self.graph_inputs:
488
+ return self.graph_inputs[buffer_name]
489
+ return None
490
+
491
+ def get_dtype(self, buffer_name: str):
492
+ if buffer_name in self.constants:
493
+ return self.constants[buffer_name].dtype
494
+ if buffer_name in self.name_to_buffer:
495
+ return self.name_to_buffer[buffer_name].get_dtype()
496
+ if buffer_name in self.graph_inputs:
497
+ return self.graph_inputs[buffer_name].get_dtype()
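+ # For example (hypothetical buffer name), get_dtype("reinterpret_tensor(buf3, ...)")
+ # matches the pattern below and resolves to the dtype of "buf3".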
498
+ m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
499
+ if m:
500
+ return self.get_dtype(m.group(1))
501
+ raise KeyError(f"could not find {buffer_name}")
502
+
503
+ def get_numel(self, buffer_name: str):
504
+ from .ir import MultiOutputLayout
505
+
506
+ if buffer_name in self.constants:
507
+ return self.constants[buffer_name].numel()
508
+ if buffer_name in self.name_to_buffer:
509
+ buf = self.name_to_buffer[buffer_name]
510
+ if isinstance(getattr(buf, "layout", None), MultiOutputLayout):
511
+ return 1
512
+ return buf.get_numel()
513
+ if buffer_name in self.graph_inputs:
514
+ return self.graph_inputs[buffer_name].get_numel()
515
+ raise KeyError(f"could not find {buffer_name}")
516
+
517
+ @dynamo_timed
518
+ def run(self, *args):
519
+ return super().run(*args)
520
+
521
+ def register_buffer(self, buffer: ir.Buffer):
522
+ name = f"buf{len(self.buffers)}"
523
+ self.buffers.append(buffer)
524
+ self.name_to_buffer[name] = buffer
525
+ # Skip empty CPU tensor so that CUDA graphs can succeed, see https://github.com/pytorch/pytorch/pull/114144
526
+ if not isinstance(buffer, ir.ComputedBuffer) or not buffer.is_zero_elements():
527
+ self.add_device_info(buffer.get_device())
528
+ return name
529
+
530
+ def register_list(self, buffer_names: List[str]):
531
+ name = "list_" + "_".join(buffer_names)
532
+ self.lists[name] = buffer_names
533
+ return name
534
+
535
+ def register_users_of(self, node_output):
536
+ def register(value):
537
+ if isinstance(value, (list, tuple)):
538
+ for x in value:
539
+ register(x)
540
+ if isinstance(value, ir.IRNode):
541
+ if (
542
+ not hasattr(value, "data")
543
+ or not isinstance(value.data, ir.IRNode)
544
+ or not (
545
+ hasattr(value.data, "data")
546
+ and isinstance(value.data.data, ir.IRNode)
547
+ )
548
+ ):
549
+ return
550
+
551
+ for read_name in value.get_read_names():
552
+ self.name_to_users[read_name].append(value)
553
+
554
+ register(node_output)
555
+
556
+ def mark_buffer_mutated(self, name: str):
557
+ """
558
+ When a buffer is mutated we need to make sure all the reads to
559
+ the old version are realized before the mutation happens.
560
+ """
561
+ assert isinstance(name, str)
562
+ self.mutated_buffers.add(name)
563
+
564
+ if name not in self.name_to_users:
565
+ return
566
+
567
+ for user in self.name_to_users[name]:
568
+ user.realize()
569
+
570
+ def add_tensor_constant(self, data, name=None):
571
+ def allocate(name):
572
+ for constant_name, value in self.constants.items():
573
+ if (
574
+ not data.is_mkldnn
575
+ and data.size() == value.size()
576
+ and data.stride() == value.stride()
577
+ and data.dtype == value.dtype
578
+ and data.device == value.device
579
+ and torch.eq(data, value).all()
580
+ ):
581
+ return constant_name
582
+
583
+ if name is None:
584
+ name = f"constant{len(self.constants)}"
585
+ if name[0].isdigit():
586
+ name = f"constant_{name}"
587
+ # We may generate a var name for each constant in the codegen.
588
+ # Let's only keep sane characters.
589
+ prefix = re.sub(r"[^a-zA-Z0-9_]", "_", name)
590
+ name = prefix
591
+ cnt = 0
592
+ while name in self.constants:
593
+ name = f"{prefix}_{cnt}"
594
+ cnt += 1
595
+ self.constants[name] = data
596
+ self.constant_reprs[name] = hashlib.sha256(
597
+ repr(data).encode("utf-8")
598
+ ).hexdigest()
599
+ return name
600
+
601
+ name = allocate(name)
602
+
603
+ return TensorBox.create(
604
+ ir.ConstantBuffer(
605
+ name,
606
+ FixedLayout(data.device, data.dtype, *self.static_sizes_strides(data)),
607
+ )
608
+ )
609
+
610
+ def constant_name(self, name: str, device_override: Optional[torch.device]):
611
+ """
612
+ We AOT copy constants to the devices they are needed on.
613
+ If device_override doesn't match the constant's device, then
614
+ copy it and return a different name.
615
+ """
616
+ if self.constants[name].device == device_override or device_override is None:
617
+ return name
618
+ alt_name = f"{name}_{device_override.type}{device_override.index or 0}"
619
+ if alt_name not in self.constants:
620
+ self.constants[alt_name] = self.constants[name].to(device_override)
621
+ return alt_name
622
+
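+ # For example (hypothetical names), constant_name("constant0", torch.device("cuda", 1))
+ # returns "constant0_cuda1" and caches a copy of the constant on that device.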
623
+ def placeholder(self, target: str, args, kwargs):
624
+ example = super().placeholder(target, args, kwargs)
625
+ if isinstance(example, SymTypes):
626
+ expr = example.node.expr
627
+ self.graph_inputs[target] = expr
628
+ return expr
629
+ elif isinstance(example, (int, bool, float)):
630
+ expr = sympy.sympify(example)
631
+ self.graph_inputs[target] = expr
632
+ return expr
633
+ assert isinstance(example, torch.Tensor), example
634
+ # todo(chilli): We can remove the last check once we turn buffers into
635
+ # static shape tensors. That's a hack to workaround Inductor believing
636
+ # the buffer should be static but us passing in a fake tensor with
637
+ # symbolic shapes.
638
+ if not example._has_symbolic_sizes_strides:
639
+ # the first N inputs are weights
640
+ sizes, strides = self.static_sizes_strides(example)
641
+ else:
642
+ sizes, strides = self.symbolic_sizes_strides(example)
643
+ # TODO(jansel): handle input aliasing
644
+ tensor = TensorBox.create(
645
+ InputBuffer(
646
+ target,
647
+ FixedLayout(example.device, example.dtype, sizes, strides),
648
+ )
649
+ )
650
+ self.graph_inputs[target] = tensor
651
+ self.graph_inputs_original[target] = tensor.data.data
652
+ self.add_device_info(example.device)
653
+ return tensor
654
+
655
+ def call_function(self, target, args, kwargs):
656
+ if target is operator.getitem and isinstance(args[0], (list, tuple, dict)):
657
+ return super().call_function(target, args, kwargs)
658
+
659
+ if hasattr(target, "_inductor_lowering_function"):
660
+ # passthrough lowerings from .pattern_matcher
661
+ return target(*args, **kwargs)
662
+
663
+ if target not in lowerings:
664
+ assert isinstance(
665
+ target, torch._ops.OpOverload
666
+ ), f"{target} is not an OpOverload"
667
+ base_name = target.name().split(".")[0]
668
+ if base_name in FALLBACK_ALLOW_LIST:
669
+ make_fallback(target)
670
+ elif config.implicit_fallbacks:
671
+ error = (
672
+ MissingOperatorWithDecomp
673
+ if get_decompositions([target])
674
+ else MissingOperatorWithoutDecomp
675
+ )
676
+ log.info(
677
+ "Creating implicit fallback for:\n%s",
678
+ error.operator_str(target, args, kwargs),
679
+ )
680
+ make_fallback(target)
681
+ elif get_decompositions([target]):
682
+ # There isn't a good way to dynamically patch this in
683
+ # since AOT Autograd already ran. The error message tells
684
+ # the user how to fix it.
685
+ raise MissingOperatorWithDecomp(target, args, kwargs)
686
+ else:
687
+ raise MissingOperatorWithoutDecomp(target, args, kwargs)
688
+
689
+ try:
690
+ log.debug(" via %s", lowerings[target])
691
+ out = lowerings[target](*args, **kwargs)
692
+ return out
693
+ except Exception as e:
694
+ raise LoweringException(e, target, args, kwargs).with_traceback(
695
+ e.__traceback__
696
+ ) from None
697
+
698
+ @staticmethod
699
+ def can_inline_constant(t: torch.Tensor) -> bool:
700
+ """
701
+ True if this is a small constant attr that will be inlined.
702
+ """
703
+ return len(t.shape) == 1 and t.shape[0] <= 8
704
+
705
+ def get_attr(self, target, args, kwargs):
706
+ # this is a constant
707
+ value = getattr(self.module, target)
708
+
709
+ if config.always_keep_tensor_constants or unsupported_output_tensor(value):
710
+ return self.add_tensor_constant(value, target)
711
+
712
+ with no_dispatch():
713
+ if value.shape == ():
714
+ return Constant(value.item(), value.dtype, value.device)
715
+ if self.can_inline_constant(value):
716
+ # tensor lowering has constant inlining logic
717
+ from .lowering import tensor
718
+
719
+ return tensor(value.tolist(), dtype=value.dtype, device=value.device)
720
+
721
+ return self.add_tensor_constant(value, target)
722
+
723
+ def call_module(self, target, args, kwargs):
724
+ raise AssertionError()
725
+
726
+ def call_method(self, target, args, kwargs):
727
+ raise AssertionError()
728
+
729
+ def output(self, target, args, kwargs):
730
+ result = super().output(target, args, kwargs)
731
+ assert isinstance(result, (tuple, list)), type(result)
732
+ assert all(
733
+ isinstance(
734
+ x,
735
+ (
736
+ TensorBox,
737
+ ir.Constant,
738
+ type(None),
739
+ ir.ConstantBuffer,
740
+ sympy.Expr,
741
+ sympy.logic.boolalg.Boolean,
742
+ int,
743
+ ),
744
+ )
745
+ for x in result
746
+ ), result
747
+ self.graph_outputs = [ir.ExternKernel.realize_input(x) for x in result]
748
+ value: ir.IRNode
749
+ for name, value in self.graph_inputs.items():
750
+ assert isinstance(
751
+ value, (TensorBox, sympy.Expr)
752
+ ), f"Unsupported inductor graph input type: {type(value)}"
753
+ if not isinstance(value, TensorBox):
754
+ continue
755
+ value.realize()
756
+ assert isinstance(value, TensorBox)
757
+ value = value.data
758
+ assert isinstance(value, ir.StorageBox)
759
+ value_storage_box = value
760
+ value = value.data
761
+ if not isinstance(value, InputBuffer) or value.get_name() != name:
762
+ # one of our inputs was mutated, need to turn that into a copy
763
+ ir.MutationLayout.realize_into(value, self.graph_inputs_original[name])
764
+ # replace output with mutated input
765
+ try:
766
+ ind = self.graph_outputs.index(value_storage_box)
767
+ self.graph_outputs[ind] = self.graph_inputs_original[name]
768
+ except ValueError:
769
+ pass
770
+
771
+ self.finalize()
772
+ log.debug(
773
+ "Force channels last inputs for %d conv for the current graph with id %d",
774
+ self.num_channels_last_conv,
775
+ self.graph_id if self.graph_id is not None else -1,
776
+ )
777
+
778
+ def finalize(self):
779
+ for buf in self.buffers:
780
+ buf.decide_layout()
781
+
782
+ @contextmanager
783
+ def set_current_node(self, node: torch.fx.Node):
784
+ old = self.current_node
785
+ try:
786
+ self.current_node = node
787
+ yield
788
+ finally:
789
+ self.current_node = old
790
+
791
+ def run_node(self, n: torch.fx.Node):
792
+ def debug(msg):
793
+ log.debug("lowering %s %s", LazyString(n.format_node), msg)
794
+
795
+ origins = {n}
796
+ if n.op == "call_function":
797
+ args, kwargs = self.fetch_args_kwargs_from_env(n)
798
+ origins |= gather_origins(args, kwargs)
799
+ with ir.IRNode.current_origins(origins), self.set_current_node(
800
+ n
801
+ ), V.set_current_node(n):
802
+ if (
803
+ n.op == "call_function"
804
+ and n.target is not operator.getitem
805
+ and fallback_node_due_to_unsupported_type(n)
806
+ ):
807
+ debug("fallback_handler")
808
+ result = fallback_handler(n.target, add_to_fallback_set=False)(
809
+ *args, **kwargs
810
+ )
811
+ elif n.op == "call_function" and n.target in layout_constraints:
812
+ debug("layout_constraints")
813
+ args, kwargs = layout_constraints[n.target](n, *args, **kwargs)
814
+ result = self.call_function(n.target, args, kwargs)
815
+ elif is_magic_method(n.target):
816
+ # TODO: this is sus, it probably should be handled in the
817
+ # lowerings themselves similarly to sym_size/sym-stride
818
+ debug("is_magic_method")
819
+ if isinstance(n.meta["val"], torch.SymInt):
820
+ result = n.meta["val"].node.expr
821
+ else:
822
+ result = super().run_node(n)
823
+ else:
824
+ debug("")
825
+ result = super().run_node(n)
826
+
827
+ # require the same stride order for dense outputs,
828
+ # 1. user-land view() will not throw because inductor
829
+ # output different strides than eager
830
+ # long term the solution is to make view() always succeed
831
+ # with infallible strides.
832
+ # 2: as_strided ops, we need make sure its input has same size/stride with
833
+ # eager model to align with eager behavior.
834
+ as_strided_ops = [
835
+ torch.ops.aten.as_strided.default,
836
+ torch.ops.aten.as_strided_.default,
837
+ torch.ops.aten.as_strided_scatter.default,
838
+ ]
839
+ is_output = any(user.op == "output" for user in n.users)
840
+ is_input_for_as_strided = any(
841
+ user.target in as_strided_ops for user in n.users
842
+ )
843
+ if (is_output or is_input_for_as_strided) and isinstance(
844
+ n.meta["val"], torch.Tensor
845
+ ):
846
+ strides = n.meta["val"].stride()
847
+ dense = torch._prims_common.is_non_overlapping_and_dense(n.meta["val"])
848
+ # requiring a stride order for a non-dense output wouldn't
849
+ # recreate the same strides, and would fail with view, defer for now.
850
+ if dense and len(strides):
851
+ stride_order = ir.get_stride_order(strides)
852
+ if (
853
+ len(result.get_size()) == 4
854
+ and n in self.nodes_prefer_channels_last
855
+ and n.name not in self.user_visible_outputs
856
+ and not is_input_for_as_strided
857
+ ):
858
+ stride_order = ir.NHWC_STRIDE_ORDER
859
+ result = ir.ExternKernel.require_stride_order(result, stride_order)
860
+
861
+ # Realize if (1) any user need inputs realized, or (2) there is
862
+ # already too many reads and rematerializing can be bad.
863
+ num_users = len(set(n.users))
864
+ if num_users > 1 and isinstance(result, TensorBox):
865
+ for user in n.users:
866
+ if user.target in needs_realized_inputs:
867
+ result.realize_hint()
868
+ # This inclusion is somewhat controversial (from
869
+ # discussion between Horace, Natalia, and Elias).
870
+ # Currently, it's not very clear why this is helpful.
871
+ # The general idea here is that even though a node may
872
+ # have FlexibleLayout, we still often *treat* it as if
873
+ # it was contiguous. This appears to sometimes result in
874
+ # suboptimal behavior.
875
+ #
876
+ # When we do a better job selecting layout, we should
877
+ # revisit this.
878
+ need_fixed_layout = [
879
+ torch.ops.aten.convolution_backward.default,
880
+ torch.ops.aten.mm.default,
881
+ torch.ops.aten._int_mm.default,
882
+ ]
883
+ if not self.layout_opt:
884
+ need_fixed_layout.append(torch.ops.aten.convolution.default)
885
+ if torch._C._has_mkldnn:
886
+ need_fixed_layout += [
887
+ torch.ops.mkldnn._convolution_pointwise.default,
888
+ torch.ops.mkldnn._convolution_pointwise.binary,
889
+ torch.ops.mkldnn._convolution_pointwise_.binary,
890
+ torch.ops.mkldnn._convolution_transpose_pointwise.default,
891
+ torch.ops.mkldnn._linear_pointwise.default,
892
+ torch.ops.mkldnn._linear_pointwise.binary,
893
+ torch.ops.aten.mkldnn_rnn_layer.default,
894
+ torch.ops.onednn.qconv2d_pointwise.default,
895
+ torch.ops.onednn.qconv2d_pointwise.binary,
896
+ torch.ops.onednn.qlinear_pointwise.default,
897
+ ]
898
+ if torch._C.has_mkl:
899
+ need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
900
+ if user.target in need_fixed_layout:
901
+ result = ir.ExternKernel.require_stride_order(
902
+ result, ir.get_stride_order(n.meta["val"].stride())
903
+ )
904
+ if user.op == "output":
905
+ if isinstance(result.data.data, (Pointwise, Reduction)):
906
+ result.realize()
907
+
908
+ # TODO(jansel): introduce a store vs inline choice
909
+ result.mark_reuse(len(n.users))
910
+
911
+ # Realize if the IRNode already has accumulated lots of reads
912
+ if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
913
+ # Prevent excessive accumulation in a computed buffer, when
914
+ # there are multiple branches each with small number of memory
915
+ # reads, but they converge to a user.
916
+ result.realize_hint()
917
+
918
+ # Realize if a Pointwise has too much stuff to be inlined.
919
+ # As this may cause RecursionError during Inductor's evaluation.
920
+ if isinstance(result, TensorBox) and isinstance(result.data, StorageBox):
921
+ curr = result.data.data
922
+ if isinstance(curr, Pointwise):
923
+ # Use inner fn as a rough proxy. Good enough.
924
+ if curr.inner_fn_str_len() > config.realize_bytes_threshold:
925
+ result.realize()
926
+
927
+ # This is not complete, but it doesn't have to be: origin_node
928
+ # tracking is best effort. The logic here critically relies on direct
929
+ # TensorBox -> StorageBox denoting a non-view; we don't bother trying
930
+ # to get views to work. Feel free to add any extra cases as needed.
931
+ #
932
+ # Note: we can't YOLO tree_map over this result, because if there are
933
+ # buffers or a view involved, we might not be able to validly assign
934
+ # the origin_node here.
935
+ if isinstance(result, TensorBox) and isinstance(result.data, ir.StorageBox):
936
+ if isinstance(result.data.data, ir.Loops):
937
+ result.data.data.origin_node = n
938
+ elif isinstance(result.data.data, ir.Buffer):
939
+ result.data.data.origin_node = n
940
+ if isinstance(result.data.data, ir.ComputedBuffer) and isinstance(
941
+ result.data.data.data, ir.Loops
942
+ ):
943
+ result.data.data.data.origin_node = n
944
+ # Not really multi-output, can straightforwardly recurse in
945
+ elif (
946
+ isinstance(result.data.data, ir.MultiOutput)
947
+ and not result.data.data.indices
948
+ ):
949
+ if isinstance(result.data.data.inputs[0], ir.Buffer):
950
+ result.data.data.inputs[0].origin_node = n
951
+
952
+ self.register_users_of(result)
953
+
954
+ return result
955
+
956
+ def validate_can_generate_cpp_wrapper(self):
957
+ if config.disable_cpp_codegen:
958
+ raise CppWrapperCodeGenError("C++ codegen is disabled")
959
+
960
+ if sys.platform != "linux":
961
+ raise CppWrapperCodeGenError(f"Unsupported platform {sys.platform}")
962
+
963
+ for value in self.graph_inputs.values():
964
+ dtype = None
965
+ if isinstance(value, TensorBox):
966
+ dtype = value.get_dtype()
967
+ elif isinstance(
968
+ value, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
969
+ ):
970
+ dtype = may_get_constant_buffer_dtype(value)
971
+
972
+ if not supported_dtype_of_cpp_wrapper(dtype, self.cuda):
973
+ raise CppWrapperCodeGenError(f"Unsupported input dtype {dtype}")
974
+
975
+ def init_wrapper_code(self):
976
+ self.cuda = "cuda" in self.device_types
977
+ if self.cpp_wrapper:
978
+ self.validate_can_generate_cpp_wrapper()
979
+ self.wrapper_code = (
980
+ CudaWrapperCodeGen() if self.cuda else CppWrapperCodeGen()
981
+ )
982
+ return
983
+
984
+ device_types = self.device_types.copy()
985
+ device_types.discard("cpu")
986
+ # TODO(Eikan): Only support mixing cpu and other device now.
987
+ assert len(device_types) <= 1, "Does not support mixing {}".format(
988
+ "+".join(device_types)
989
+ )
990
+ only_cpu = len(device_types) == 0
991
+ device_type = "cpu" if only_cpu else device_types.pop()
992
+ wrapper_code_gen_cls = get_wrapper_codegen_for_device(device_type)
993
+ assert wrapper_code_gen_cls is not None, f"Device {device_type} not supported"
994
+ self.wrapper_code = wrapper_code_gen_cls()
995
+
996
+ def codegen_with_cpp_wrapper(self):
997
+ """
998
+ For CPU, the cpp wrapper codegen is done in one pass.
999
+ For GPU, the cpp wrapper codegen is done in two steps: JIT-compile the model with python
1000
+ wrapper code and run it to generate autotuned kernel binaries in the first pass; and then
1001
+ generate cpp wrapper code and compile it to a dynamic library in the second pass.
1002
+ """
1003
+ if "cuda" in self.device_types:
1004
+ # first pass
1005
+ self.cpp_wrapper = False
1006
+ compiled = self.compile_to_module().call
1007
+
1008
+ def materialize(x):
1009
+ if isinstance(x, (torch.SymInt, torch.SymFloat)):
1010
+ # Need concrete value to run dynamic shapes and tune the result
1011
+ return x.node.hint
1012
+ elif isinstance(x, FakeTensor):
1013
+ return defake(x)
1014
+ else:
1015
+ assert isinstance(
1016
+ x, torch.Tensor
1017
+ ), "Unknown type when creating real inputs" + str(type(x))
1018
+ return x
1019
+
1020
+ with torch.utils._python_dispatch._disable_current_modes():
1021
+ assert self.example_inputs is not None
1022
+ real_inputs = [materialize(x) for x in self.example_inputs]
1023
+ compiled(real_inputs)
1024
+ del real_inputs
1025
+
1026
+ # second pass
1027
+ # TODO: reuse self.scheduler from the first pass to speed up the second pass
1028
+ self.cpp_wrapper = True
1029
+ self.removed_buffers.clear()
1030
+ self.inplaced_to_remove.clear()
1031
+ return self.codegen()
1032
+ else:
1033
+ # cpu
1034
+ return self.codegen()
1035
+
1036
+ def codegen(self):
1037
+ from .scheduler import Scheduler
1038
+
1039
+ self.init_wrapper_code()
1040
+
1041
+ self.scheduler = Scheduler(self.buffers)
1042
+ V.debug.draw_orig_fx_graph(self.orig_gm, self.scheduler.nodes)
1043
+ self.scheduler.codegen()
1044
+ return self.wrapper_code.generate(self.is_inference)
1045
+
1046
+ def count_bytes(self):
1047
+ from .scheduler import Scheduler
1048
+
1049
+ scheduler = Scheduler(self.buffers)
1050
+
1051
+ total_bytes = 0
1052
+ node_counts = []
1053
+ node_runtimes = []
1054
+ for node in scheduler.nodes:
1055
+ num_bytes = node.get_read_write_buffers_sizes()
1056
+ total_bytes += num_bytes
1057
+ node_counts.append((node, num_bytes // 4))
1058
+ node_runtimes.append((node, node.get_estimated_runtime()))
1059
+ return total_bytes, node_counts, node_runtimes
1060
+
1061
+ @dynamo_timed
1062
+ def compile_to_module(self):
1063
+ from .codecache import PyCodeCache
1064
+
1065
+ code, linemap = (
1066
+ self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
1067
+ )
1068
+ linemap = [(line_no, node.stack_trace) for line_no, node in linemap]
1069
+ key, path = PyCodeCache.write(code)
1070
+ mod = PyCodeCache.load_by_key_path(
1071
+ key, path, linemap=linemap, attrs=self.constants
1072
+ )
1073
+ self.cache_key = key
1074
+ self.cache_path = path
1075
+ self.cache_linemap = linemap
1076
+
1077
+ # Logged twice as per https://github.com/pytorch/pytorch/pull/99038#discussion_r1167826029
1078
+ # TODO. Revisit this once the logging API is more mature
1079
+ assert mod.__file__ is not None
1080
+ log.debug("Output code written to: %s", mod.__file__)
1081
+ output_code_log.debug("Output code: \n%s", code)
1082
+ output_code_log.info("Output code written to: %s", mod.__file__)
1083
+ if config.benchmark_kernel:
1084
+ print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
1085
+ V.debug.output_code(mod.__file__)
1086
+ V.debug.copy(os.path.splitext(mod.__file__)[0] + ".debug")
1087
+ return mod
1088
+
1089
+ def compile_to_fn(self):
1090
+ if self.aot_mode:
1091
+ from .codecache import AotCodeCache
1092
+
1093
+ assert self.cpp_wrapper, "AOT mode only supports C++ wrapper"
1094
+ code, linemap = self.codegen_with_cpp_wrapper()
1095
+ output_code_log.debug("Output code: \n%s", code)
1096
+
1097
+ serialized_extern_kernel_nodes = None
1098
+ if (
1099
+ config.is_fbcode()
1100
+ and self.extern_kernel_nodes
1101
+ and self.extern_node_serializer
1102
+ ):
1103
+ serialized_extern_kernel_nodes = self.extern_node_serializer(
1104
+ self.extern_kernel_nodes
1105
+ )
1106
+ output_code_log.debug(
1107
+ "Serialized Extern Kernel Nodes: \n%s",
1108
+ serialized_extern_kernel_nodes,
1109
+ )
1110
+
1111
+ # Directly return the file path with the compiled code
1112
+ return AotCodeCache.compile(
1113
+ self, code, serialized_extern_kernel_nodes, cuda=self.cuda
1114
+ )
1115
+ else:
1116
+ return self.compile_to_module().call
1117
+
1118
+ def get_output_names(self):
1119
+ return [
1120
+ node.get_name()
1121
+ for node in self.graph_outputs
1122
+ if not isinstance(node, ir.NoneAsConstantBuffer)
1123
+ and not isinstance(node, ir.ShapeAsConstantBuffer)
1124
+ ]
1125
+
1126
+ def is_unspec_arg(self, name: str):
1127
+ # dynamo wraps unspec variable as 0d CPU tensor,
1128
+ # need to convert to scalar during codegen (triton only)
1129
+ return (
1130
+ name in self.graph_inputs.keys()
1131
+ and self.graph_inputs[name].get_numel() == 1
1132
+ and self.graph_inputs[name].get_device().type == "cpu"
1133
+ )
env-llmeval/lib/python3.10/site-packages/torch/_inductor/hooks.py ADDED
@@ -0,0 +1,24 @@
1
+ import contextlib
2
+
3
+ # Executed in the order they're registered
4
+ INTERMEDIATE_HOOKS = []
5
+
6
+
7
+ @contextlib.contextmanager
8
+ def intermediate_hook(fn):
9
+ INTERMEDIATE_HOOKS.append(fn)
10
+ try:
11
+ yield
12
+ finally:
13
+ INTERMEDIATE_HOOKS.pop()
14
+
15
+
16
+ def run_intermediate_hooks(name, val):
17
+ global INTERMEDIATE_HOOKS
18
+ hooks = INTERMEDIATE_HOOKS
19
+ INTERMEDIATE_HOOKS = []
20
+ try:
21
+ for hook in hooks:
22
+ hook(name, val)
23
+ finally:
24
+ INTERMEDIATE_HOOKS = hooks
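+ # A minimal usage sketch (the buffer name "buf0" and the recording hook are
+ # illustrative, not part of this file): hooks registered via the context manager
+ # above are invoked whenever compiled code calls run_intermediate_hooks.
+ def _intermediate_hook_demo():
+     import torch
+
+     seen = []
+     with intermediate_hook(lambda name, val: seen.append((name, val.shape))):
+         run_intermediate_hooks("buf0", torch.randn(2, 2))
+     return seen  # [("buf0", torch.Size([2, 2]))]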
env-llmeval/lib/python3.10/site-packages/torch/_inductor/index_propagation.py ADDED
@@ -0,0 +1,262 @@
+ """This file implements the IndexPropagation ops handler, which wraps an
+ underlying handler to add a limited form of constant propagation, as well as
+ propagation of sympy expressions downstream of ops.index_expr calls.
+
+ For example, say we have the IR:
+
+     tmp0 = ops.index_expr(x, torch.int32)
+     tmp1 = ops.constant(2, torch.int32)
+     tmp2 = ops.mul(tmp0, tmp1)
+     tmp3 = ops.indirect_indexing(tmp2, x_size)
+     tmp4 = ops.load("buf0", tmp3)
+
+ The underlying handler would just see:
+
+     ops.load("buf0", x * 2)
+
+ This is limited by the set of operators handled in the sympy expression
+ printers. So simple operations like minimum and maximum cannot be translated to
+ SymPy expressions yet, despite sympy.Min and sympy.Max existing.
+
+ """
+ import itertools
+ from dataclasses import dataclass
+ from typing import Any, Callable, Dict, Literal, Optional, overload, Tuple, Union
+
+ import sympy
+
+ from typing_extensions import TypeAlias
+
+ import torch
+ from torch._prims_common import is_boolean_dtype, is_integer_dtype
+ from torch.utils._sympy.functions import FloorDiv, ModularIndexing, Where
+
+
+ @dataclass
+ class TypedExpr:
+     """A SymPy expression with associated type"""
+
+     expr: sympy.Expr
+     dtype: torch.dtype
+
+
+ class SymPyOps:
+     """An ops handler where all IR values are SymPy expressions
+
+     When a value cannot be represented as a SymPy expression, the method is
+     either not defined, or returns NotImplemented
+
+     """
+
+     @staticmethod
+     def identity(value: Any) -> Any:
+         return value
+
+     @staticmethod
+     def constant(value: Union[int, float, bool], dtype: torch.dtype) -> TypedExpr:
+         if is_boolean_dtype(dtype):
+             expr = sympy.Integer(bool(value))
+         elif is_integer_dtype(dtype):
+             expr = sympy.Integer(int(value))
+         else:
+             expr = sympy.Float(float(value))
+         return TypedExpr(expr, dtype)
+
+     @staticmethod
+     def index_expr(value: sympy.Expr, dtype: torch.dtype) -> Union[int, TypedExpr]:
+         if isinstance(value, int):
+             value = sympy.Integer(value)
+         return TypedExpr(value, dtype)
+
+     @staticmethod
+     def to_dtype(
+         value: Any, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None
+     ) -> Union[int, TypedExpr]:
+         if isinstance(value.expr, (sympy.Integer, sympy.Float)):
+             return SymPyOps.constant(value.expr, dtype)
+         elif is_integer_dtype(dtype) and is_integer_dtype(value.dtype):
+             return SymPyOps.index_expr(value.expr, dtype)
+         else:
+             # TODO: Inductor doesn't handle floating point in sympy expressions well at the moment
+             return NotImplemented
+
+     @staticmethod
+     def square(x: TypedExpr) -> TypedExpr:
+         return TypedExpr(x.expr * x.expr, x.dtype)
+
+     @staticmethod
+     def add(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         return TypedExpr(x.expr + y.expr, result_type)
+
+     @staticmethod
+     def sub(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         return TypedExpr(x.expr - y.expr, result_type)
+
+     @staticmethod
+     def mul(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         return TypedExpr(x.expr * y.expr, result_type)
+
+     @staticmethod
+     def neg(x: TypedExpr) -> TypedExpr:
+         return TypedExpr(-x.expr, x.dtype)
+
+     @staticmethod
+     def floordiv(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         if not is_integer_dtype(result_type):
+             return NotImplemented
+
+         return TypedExpr(FloorDiv(x.expr, y.expr), result_type)
+
+     @staticmethod
+     def remainder(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         if not is_integer_dtype(result_type):
+             return NotImplemented
+
+         result_expr = ModularIndexing(x.expr, sympy.Integer(1), y.expr)
+         return TypedExpr(result_expr, result_type)
+
+     @staticmethod
+     def minimum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         return TypedExpr(sympy.Min(x.expr, y.expr), result_type)
+
+     @staticmethod
+     def maximum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
+         result_type = torch.promote_types(x.dtype, y.dtype)
+         return TypedExpr(sympy.Max(x.expr, y.expr), result_type)
+
+
+ @dataclass
+ class IndexPropVar:
+     value: Any  # Either an IR value, or TypedExpr if is_symbolic is true
+     is_symbolic: bool = False
+
+     @staticmethod
+     def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
+         return IndexPropVar(expr, is_symbolic=True)
+
+     def __post_init__(self):
+         assert not self.is_symbolic or isinstance(
+             self.value, TypedExpr
+         ), "Symbolic IndexPropVar must contain a TypedExpr"
+
+
+ IndexPropResult: TypeAlias = Union[IndexPropVar, Tuple["IndexPropResult", ...]]
+
+
+ class IndexPropagation:
+     """Ops wrapper that tries to propagate constant and index_expr values through the computation.
+
+     This aims to maximize the compile time simplification possible, and convert
+     indirect indexing from arange into normal static indexing.
+
+     """
+
+     def __init__(self, inner: Any):
+         self._inner = inner
+
+     def materialize_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> Any:
+         # Construct a new constant/index_expr from the SymPy expression
+         if isinstance(expr, sympy.Integer):
+             return self._inner.constant(int(expr), dtype)
+         elif expr.is_number:
+             return self._inner.constant(float(expr), dtype)
+         return self._inner.index_expr(expr, dtype)
+
+     def unwrap(self, a: Union[Any, IndexPropVar]) -> Any:
+         if isinstance(a, (list, tuple)):
+             return tuple(self.unwrap(v) for v in a)
+
+         if not isinstance(a, IndexPropVar):
+             return a
+
+         # Prefer the sympy representation if possible
+         if a.is_symbolic:
+             return self.materialize_expr(a.value.expr, a.value.dtype)
+
+         return a.value
+
+     def wrap(self, a) -> IndexPropResult:
+         if isinstance(a, (list, tuple)):
+             return tuple(self.wrap(v) for v in a)
+         return IndexPropVar(a)
+
+     @overload
+     def fallback(
+         self,
+         name: Literal["indirect_indexing"],
+         args: Tuple[Any, ...],
+         kwargs: Dict[str, Any],
+     ) -> IndexPropVar:
+         ...
+
+     @overload
+     def fallback(
+         self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> IndexPropResult:
+         ...
+
+     def fallback(
+         self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> IndexPropResult:
+         # Fallback to the wrapped handler
+         new_args = [self.unwrap(a) for a in args]
+         new_kwargs = {k: self.unwrap(v) for k, v in kwargs.items()}
+         return self.wrap(getattr(self._inner, name)(*new_args, **new_kwargs))
+
+     def propagate_sympy(
+         self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> IndexPropResult:
+         # Build a new SymPy expression from this ops call
+         def unwrap(a: Union[Any, IndexPropVar]) -> Any:
+             if not isinstance(a, IndexPropVar):
+                 return a
+             return a.value
+
+         new_args = [unwrap(a) for a in args]
+         new_kwargs = {k: unwrap(v) for k, v in kwargs.items()}
+         new_expr = getattr(SymPyOps, name)(*new_args, **new_kwargs)
+         is_valid_expr = new_expr is not NotImplemented and (
+             # Inductor doesn't expect floating point in sympy expressions, but
+             # allow floating point constants to be propagated
+             isinstance(new_expr.expr, sympy.Number)
+             or new_expr.expr.is_integer
+         )
+         if not is_valid_expr:
+             return self.fallback(name, args, kwargs)
+         return IndexPropVar.new_symbolic(new_expr)
+
+     def __getattr__(self, name: str) -> Callable[..., IndexPropResult]:
+         def inner(*args: Any, **kwargs: Any) -> IndexPropResult:
+             if not hasattr(SymPyOps, name):
+                 return self.fallback(name, args, kwargs)
+
+             var_arguments = [
+                 a
+                 for a in itertools.chain(args, kwargs.values())
+                 if isinstance(a, IndexPropVar)
+             ]
+             if not all(v.is_symbolic for v in var_arguments):
+                 return self.fallback(name, args, kwargs)
+
+             return self.propagate_sympy(name, args, kwargs)
+
+         return inner
+
+     def indirect_indexing(
+         self, index: Union[Any, IndexPropVar], size: Any, check: bool = True
+     ) -> Any:
+         # nb. We do index + Where(...) rather than Where(idx >= 0, idx, idx + sz) because we don't have CSE
+         # for SymPy expressions, so we don't want to repeat idx too much
+
+         # indirect_indexing returns a sympy value, so no need to wrap in IndexPropVar here
+         if isinstance(index, IndexPropVar) and index.is_symbolic:
+             # If we are turning an indirect indexing into direct, we need to wrap it.
+             index = index.value.expr
+             return index + Where(index >= 0, 0, size)
+         return self.fallback("indirect_indexing", (index, size, check), {}).value
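A minimal sketch of the wrapper in action, mirroring the module docstring above. The RecordingOps inner handler is invented for illustration (inside Inductor the inner object is the real codegen ops handler), and it defines only the methods this example's fallback path touches:

import sympy
import torch
from torch._inductor.index_propagation import IndexPropagation

class RecordingOps:
    # Hypothetical stand-in for a codegen handler; it just echoes what it is asked to do.
    def constant(self, value, dtype):
        return value

    def index_expr(self, expr, dtype):
        return expr

    def load(self, name, index):
        return f"load({name}, {index})"

ops = IndexPropagation(RecordingOps())
x = sympy.Symbol("x", integer=True)

tmp0 = ops.index_expr(x, torch.int32)   # kept symbolic as a TypedExpr
tmp1 = ops.constant(2, torch.int32)     # kept symbolic
tmp2 = ops.mul(tmp0, tmp1)              # folded to the sympy expression 2*x
print(ops.load("buf0", tmp2).value)     # the inner handler sees: load(buf0, 2*x)

Because load has no SymPyOps counterpart, it falls back to the wrapped handler, and the symbolic index is materialized through the inner handler's index_expr before the call.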
env-llmeval/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py ADDED
@@ -0,0 +1,90 @@
+ from __future__ import annotations
+
+ import logging
+ from typing import Optional, Sequence
+
+ import torch
+ from torch import _prims, Tensor
+
+ log = logging.getLogger(__name__)
+
+
+ def make_prim(
+     schema: str,
+     impl_aten,
+     return_type=_prims.RETURN_TYPE.NEW,
+     doc: str = "",
+     tags: Optional[Sequence[torch.Tag]] = None,
+ ):
+     def meta(*args, **kwargs):
+         return _prims.TensorMeta(impl_aten(*args, **kwargs))
+
+     return _prims._make_prim(
+         schema=schema,
+         return_type=return_type,
+         meta=meta,
+         impl_aten=impl_aten,
+         doc=doc,
+         tags=tags,
+     )
+
+
+ def eager_force_stride(input_tensor: Tensor, stride) -> Tensor:
+     if input_tensor.stride() == stride:
+         return input_tensor
+     new_tensor = input_tensor.clone().as_strided(
+         input_tensor.shape,
+         stride,
+     )
+     new_tensor.copy_(input_tensor)
+     return new_tensor
+
+
+ # Custom prims used for handling randomness
+ seed = make_prim(
+     "inductor_seed(Device device) -> Tensor",
+     lambda device: torch.randint(2**63 - 1, [], device=device),
+     doc="create a fresh seed (one per call) for use with inductor_rand",
+     tags=(torch.Tag.nondeterministic_seeded,),
+ )
+ seeds = make_prim(
+     "inductor_seeds(int count, Device device) -> Tensor",
+     lambda count, device: torch.randint(2**63 - 1, [count], device=device),
+     doc="Horizontal fusion of many inductor_seed() calls",
+     tags=(torch.Tag.nondeterministic_seeded,),
+ )
+ lookup_seed = make_prim(
+     # if inductor_lookup_seed changes, update partitioners.py
+     "inductor_lookup_seed(Tensor seeds, int index) -> Tensor",
+     lambda seeds, index: seeds[index],
+     doc="Extract a single seed from the result of inductor_seeds()",
+ )
+ random = make_prim(
+     "inductor_random(SymInt[] size, Tensor seed, str mode) -> Tensor",
+     lambda size, seed, mode: getattr(torch, mode)(size, device=seed.device),
+     doc="torch.rand()/torch.randn() using backend-specific RNG that can be fused",
+ )
+ randint = make_prim(
+     "inductor_randint(SymInt low, SymInt high, SymInt[] size, Tensor seed) -> Tensor",
+     lambda low, high, size, seed: torch.randint(low, high, size, device=seed.device),
+     doc="torch.randint() using backend-specific RNG that can be fused",
+ )
+ force_stride_order = make_prim(
+     "inductor_force_stride_order(Tensor input, SymInt[] stride) -> Tensor",
+     eager_force_stride,
+     doc="Force the stride order for input tensor. No-op if the input tensor already has the stride. Do a copy otherwise",
+ )
+ masked_scatter_with_index = make_prim(
+     "inductor_masked_scatter_with_index(Tensor input, Tensor mask, Tensor source_idx, Tensor source) -> Tensor",
+     lambda input_tensor, mask, index, source: torch.masked_scatter(
+         input_tensor, mask, source
+     ),
+     doc="masked_scatter with precomputed indices",
+ )
+ _unsafe_index_put_ = make_prim(
+     "_unsafe_index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)",
+     lambda self, indices, values, accumulate=False: torch.ops.aten.index_put_(
+         self, indices, values, accumulate
+     ),
+     doc="Unsafe index_put_ (doesn't issue device asserts)",
+ )
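A small usage sketch for the helpers above, assuming a CPU build and that these prim objects can be invoked eagerly (dispatch details may vary across PyTorch versions); the shapes and counts are illustrative only:

import torch
from torch._inductor import inductor_prims

# eager_force_stride is a plain Python helper: force a column-major copy.
x = torch.arange(24.0).reshape(4, 6)              # contiguous strides (6, 1)
y = inductor_prims.eager_force_stride(x, (1, 4))  # copy laid out with strides (1, 4)
print(y.stride())                                 # (1, 4); values equal x

# The randomness prims: create a bank of seeds, then read one entry back.
seed_bank = inductor_prims.seeds(8, torch.device("cpu"))  # int64 tensor of shape (8,)
first_seed = inductor_prims.lookup_seed(seed_bank, 0)     # 0-d tensor
print(seed_bank.shape, first_seed.item())

Per the docstrings above, Inductor inserts inductor_seeds/inductor_lookup_seed/inductor_random during lowering so that many RNG calls can share one fused seed buffer; calling them by hand as done here is only for illustration.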
env-llmeval/lib/python3.10/site-packages/torch/_inductor/ir.py ADDED
The diff for this file is too large to render. See raw diff