applied-ai-018 committed on
Commit 1d76b00 · verified · 1 parent: c496e1d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_cpp_scheduling.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_env.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py +212 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py +45 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_kernel.py +336 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py +241 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/efficient_conv_bn_eval.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/misc_patterns.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py +277 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/efficient_conv_bn_eval.py +157 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py +212 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py +564 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py +791 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py +323 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/misc_patterns.py +130 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py +1085 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py +469 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py +1188 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py +477 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py +1500 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py +137 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__init__.py +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_1.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_11.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_12.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_13.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_2.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_3.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_4.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc ADDED
Binary file (39.1 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (98.8 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc ADDED
Binary file (86.6 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_foreach.cpython-310.pyc ADDED
Binary file (8 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_cpp_scheduling.cpython-310.pyc ADDED
Binary file (7.49 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/__pycache__/cuda_env.cpython-310.pyc ADDED
Binary file (1.35 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py ADDED
@@ -0,0 +1,212 @@
import logging
from typing import cast, List

from ...._dynamo.utils import counters

from ... import config, ir
from ...codecache import code_hash, get_path
from ...ir import ComputedBuffer, CUDATemplateBuffer, Pointwise
from ...scheduler import (
    BaseSchedulerNode,
    BaseScheduling,
    FusedSchedulerNode,
    Scheduler,
    SchedulerNode,
)
from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product
from ...virtualized import V
from ..common import IndentedBuffer

from .cutlass_epilogue_gen import CUTLASSEVTOpNotImplementedError

log = logging.getLogger(__name__)


class CUDACPPScheduling(BaseScheduling):
    """
    Partial Scheduling implementation for CUDA C++ Kernels.
    This class is intended to be used in combination with TritonScheduling,
    and delegated to by CUDACombinedScheduling.

    It handles fusion decisions and CUDA C++ specific template code generation.
    """

    def __init__(self, scheduler: Scheduler):
        super().__init__()
        self.scheduler = scheduler

    def group_fn(self, sizes):
        return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)

    def is_cuda_cpp_template(self, node: BaseSchedulerNode) -> bool:
        return isinstance(node, SchedulerNode) and isinstance(
            node.node, CUDATemplateBuffer
        )

    def is_cuda_cpp_fused_template(self, node: BaseSchedulerNode) -> bool:
        return isinstance(node, FusedSchedulerNode) and self.is_cuda_cpp_template(
            node.get_template_node()
        )

    def _can_fuse_epilogue_impl(
        self,
        cuda_template_buffer: CUDATemplateBuffer,
        epilogue_nodes: List[ir.IRNode],
        additional_node: ir.IRNode,
    ) -> bool:
        """
        Check if the given node can be fused with the epilogue. At the moment, Kernels
        support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes.

        Args:
            cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and its result buffer
            epilogue_nodes : List[ir.Buffer]: The list of already fused epilogue nodes.
            additional_node: The ir.Buffer node to be checked if it can be fused with the epilogue.
        Returns:
            - bool: True if the given node can be fused with the epilogue, False otherwise.

        """
        if not isinstance(cuda_template_buffer, CUDATemplateBuffer):
            return False
        if not cuda_template_buffer.template.can_fuse_epilogue:
            # The used GEMM op does not support fusing epilogues
            return False
        if not isinstance(additional_node, ComputedBuffer):
            return False
        if not isinstance(additional_node.data, Pointwise):
            return False
        # We can fuse a Pointwise op that depends on the last fused epilogue node
        # if any. If there is no epilogue node yet, it needs to depend on the template
        # node
        node_name = additional_node.get_computed_buffer_name()
        if node_name is None:
            return False

        if len(epilogue_nodes) == 0:
            if cuda_template_buffer.name not in additional_node.get_read_names():
                return False
        else:
            last_epilogue_node = epilogue_nodes[-1]
            assert isinstance(last_epilogue_node, ir.ComputedBuffer)  # for mypy
            last_epilogue_name = (
                last_epilogue_node.name
                if last_epilogue_node.name is not None
                else last_epilogue_node.data.name  # type: ignore[attr-defined]
            )
            if last_epilogue_name not in additional_node.get_read_names():
                return False
        if additional_node.layout != cuda_template_buffer.layout:
            return False
        try:
            from torch._inductor.codegen.cuda.cutlass_epilogue_gen import (
                CutlassEVTEpilogueArgumentFormatter,
                CutlassEVTEpilogueTypeFormatter,
            )

            CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
                cast(str, cuda_template_buffer.name), "anything", [additional_node]
            )
            CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
                cast(str, cuda_template_buffer.name), [additional_node]
            )
        except CUTLASSEVTOpNotImplementedError as e:
            not_implemented_op = str(e)
            if not_implemented_op.startswith("_op_"):
                not_implemented_op = not_implemented_op[4:]
                log.warning(
                    f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}, likely due to unsupported operation: {not_implemented_op}"  # noqa: G004, B950
                )
                return False
            else:
                # Likely due to unsupported dtype.
                log.warning(
                    f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}. Reason: {not_implemented_op}"  # noqa: G004, B950
                )
                return False
        return True

    @staticmethod
    def _unwrap_epilogue_nodes(fused_node: FusedSchedulerNode) -> List[ir.IRNode]:
        nodes = fused_node.get_nodes()
        template_node = fused_node.get_template_node()
        nodes.remove(template_node)
        return [n.node for n in nodes]

    def can_fuse_vertical(
        self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
    ) -> bool:
        if self.is_cuda_cpp_template(node1) and isinstance(node2, SchedulerNode):
            return self._can_fuse_epilogue_impl(
                cast(CUDATemplateBuffer, node1.node), [], node2.node
            )
        elif self.is_cuda_cpp_fused_template(node1) and isinstance(
            node2, SchedulerNode
        ):
            fnode1 = cast(FusedSchedulerNode, node1)
            return self._can_fuse_epilogue_impl(
                fnode1.get_template_node().node,
                self._unwrap_epilogue_nodes(fnode1),
                node2.node,
            )
        return False

    def define_kernel(self, src_code: str, node_schedule) -> str:
        wrapper = V.graph.wrapper_code
        if src_code in wrapper.src_to_kernel:
            kernel_name = wrapper.src_to_kernel[src_code]
        else:
            fused_name = (
                get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
                if config.triton.descriptive_names
                else ""
            )
            kernel_name = "_".join(["cuda", fused_name, wrapper.next_kernel_suffix()])
            # use the original src_code as the key
            wrapper.src_to_kernel[src_code] = kernel_name
            src_code = src_code.replace("KERNEL_NAME", kernel_name)

            _, _, kernel_path = get_path(code_hash(src_code), "py")

            compile_wrapper = IndentedBuffer()
            compile_wrapper.writeline("async_compile.cuda(r'''")
            compile_wrapper.splice(src_code, strip=True)
            compile_wrapper.writeline("''', 'so')")

            metadata_comment = f"# kernel path: {kernel_path}"
            origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
            metadata_comment += "\n" + origins + "\n" + detailed_origins
            wrapper.define_kernel(
                kernel_name, compile_wrapper.getvalue(), metadata_comment
            )
        return kernel_name

    def codegen_template(
        self, template_node: BaseSchedulerNode, epilogue_nodes: List[SchedulerNode]
    ):
        """
        Codegen a CUDA template, possibly with fused epilogues
        """
        counters["inductor"]["cuda_epilogue_fusion_counter"] += len(epilogue_nodes)
        assert self.is_cuda_cpp_template(
            template_node
        ), "Template node passed to CUDAScheduler.codegen_template must be a SchedulerNode that wraps a CUDATemplateBuffer"
        template_node = cast(SchedulerNode, template_node)
        _, (numel, rnumel) = template_node.group
        assert rnumel == 1
        ctb: CUDATemplateBuffer = cast(CUDATemplateBuffer, template_node.node)
        epilogue_ir_nodes: List[ir.Buffer] = [n.node for n in epilogue_nodes]
        assert all(
            isinstance(n, ir.ComputedBuffer) for n in epilogue_ir_nodes
        ), "Epilogue nodes must all be instances of ir.ComputedBuffer"
        kernel, render = ctb.make_kernel_render(ctb, epilogue_nodes=epilogue_ir_nodes)
        with kernel:
            for node in [template_node, *epilogue_nodes]:
                node.mark_run()
            src_code = render()

        with V.set_kernel_handler(kernel):
            node_schedule = [template_node, *epilogue_nodes]
            kernel_name = self.define_kernel(src_code, node_schedule)
        kernel.call_kernel(kernel_name, ctb, epilogue_ir_nodes)
        V.graph.removed_buffers |= kernel.removed_buffers
        self.scheduler.free_buffers()
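Note (not part of the commit): this scheduling path is only exercised when Inductor's CUTLASS GEMM backend participates in max-autotune. A hedged sketch of how one might trigger it is below; the `max_autotune` and `max_autotune_gemm_backends` knobs exist in `torch._inductor.config` for this PyTorch version, but the exact values and shapes are illustrative.

import torch
import torch._inductor.config as inductor_config

# Let CUTLASS-generated C++ kernels compete during GEMM autotuning.
inductor_config.max_autotune = True
inductor_config.max_autotune_gemm_backends = "CUTLASS,ATEN,TRITON"

@torch.compile(mode="max-autotune")
def mm_relu(a, b):
    # The relu is a Pointwise op wrapped in a ComputedBuffer, i.e. an
    # epilogue-fusion candidate for _can_fuse_epilogue_impl above.
    return torch.relu(a @ b)

if torch.cuda.is_available():
    a = torch.randn(256, 256, device="cuda", dtype=torch.float16)
    b = torch.randn(256, 256, device="cuda", dtype=torch.float16)
    mm_relu(a, b)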
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_env.py ADDED
@@ -0,0 +1,45 @@
import functools
import logging
from typing import Optional

import torch

from ... import config

log = logging.getLogger(__name__)


def get_cuda_arch() -> Optional[str]:
    try:
        cuda_arch = config.cuda.arch
        if cuda_arch is None:
            # Get Compute Capability of the first Visible device
            major, minor = torch.cuda.get_device_capability(0)
            cuda_arch = major * 10 + minor
        return str(cuda_arch)
    except Exception as e:
        log.error("Error getting cuda arch: %s", e)
        return None


def get_cuda_version() -> Optional[str]:
    try:
        cuda_version = config.cuda.version
        if cuda_version is None:
            cuda_version = torch.version.cuda
        return cuda_version
    except Exception as e:
        log.error("Error getting cuda version: %s", e)
        return None


@functools.lru_cache(None)
def nvcc_exist(nvcc_path: str = "nvcc") -> bool:
    if nvcc_path is None:
        return False
    import subprocess

    res = subprocess.call(
        ["which", nvcc_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    )
    return res == 0
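Note (not part of the commit): a minimal standalone sketch of the same environment probes, using `shutil.which` instead of shelling out to `which`; the function names here are hypothetical.

import shutil

import torch


def nvcc_on_path(nvcc: str = "nvcc") -> bool:
    # Equivalent capability check to nvcc_exist() above, without a subprocess.
    return shutil.which(nvcc) is not None


def cuda_arch_string():
    # Mirrors get_cuda_arch(): compute capability of the first visible device,
    # e.g. "80" for sm_80.
    if not torch.cuda.is_available():
        return None
    major, minor = torch.cuda.get_device_capability(0)
    return str(major * 10 + minor)


if __name__ == "__main__":
    print("nvcc found:", nvcc_on_path())
    print("arch:", cuda_arch_string(), "cuda runtime:", torch.version.cuda)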
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_kernel.py ADDED
@@ -0,0 +1,336 @@
import logging
from typing import Callable, Dict, List, Optional, TYPE_CHECKING

from ... import ir
from ...autotune_process import CUDABenchmarkRequest
from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout, TensorBox
from ...select_algorithm import ChoiceCaller
from ...utils import sympy_product
from ...virtualized import V

from ..common import IndentedBuffer, Kernel, OpOverrides
from ..cpp import CppPrinter, DTYPE_TO_CPP

if TYPE_CHECKING:
    from torch._inductor.codegen.cuda.cuda_template import CUDATemplate

log = logging.getLogger(__name__)

cexpr = CppPrinter().doprint


def _normalize_idx(index: int, total_length: int) -> int:
    return index if index >= 0 else index + total_length


class CUDAKernel(Kernel):
    """
    Baseclass for CUDA / Cutlass based Kernels
    """

    overrides = OpOverrides  # type: ignore[assignment]


class CUDATemplateKernel(CUDAKernel):
    """
    Template kernels defined by CUDA / Cutlass in C++.
    """

    _EXTRA_CPP_ARGS = "size_t* workspace_size, uint8_t* workspace, cudaStream_t stream"

    def __init__(self, kernel_name):
        """
        Initializes a new instance of the CUDATemplateKernel class.

        Args:
            kernel_name (str): The name of the kernel.
        """
        super().__init__()
        self.kernel_name = kernel_name
        # Mapping from arg name to IRNode.
        self.named_nodes: Dict[str, IRNode] = {}

    def arg_name(self, node: IRNode) -> Optional[str]:
        """
        Returns arg name of a given input or output node.
        """
        if node is None:
            return None
        return {**self.args.input_buffers, **self.args.output_buffers}.get(
            node.get_name(), None
        )

    def check_not_null(self, node: IRNode) -> str:
        """
        Generates code to check that a node is not null.
        """

        if node is None:
            return ""

        size_str = self.size(node, 0, -1)
        name_str = self.arg_name(node)
        if name_str is None:
            return ""

        res = IndentedBuffer(initial_indent=2)
        res.tabwidth = 1
        res.splice(
            f"""
            {{
              if (!{name_str}) {{
                int64_t {name_str}_size = {size_str};
                if ({name_str}_size > 0) {{
                  throw std::runtime_error("input {name_str} is null but size is not 0!");
                }}
              }}
            }}
            """
        )
        return res.getvalue()

    def def_kernel(
        self,
        inputs: List[IRNode],
        outputs: List[IRNode],
        names_str: str = "",
        input_reorder: Optional[List[int]] = None,
    ) -> str:
        """
        Hook called from template code to generate function definition and
        needed args.

        Args:
            inputs: List of input IRNodes
            outputs: List of output IRNodes
            names_str: Comma separated list of input + output argument names.
            input_reorder: The actual order of input nodes.
                e.g. The template might have input argument defined as [X, W, Bias],
                and the actual input passed into this template could be [Bias, X, W].
                In this case, the `input_reorder` would be [2, 0, 1].
        """

        names = [x.strip() for x in names_str.strip().split(",")]
        if len(inputs) + len(outputs) != len(names):
            raise RuntimeError(
                f"{len(inputs) + len(outputs)=} != {len(names)=}, {inputs=}, {outputs=}, {names=}"
            )

        if input_reorder is not None:
            assert len(inputs) == len(input_reorder)
        else:
            input_reorder = list(range(len(inputs)))

        for idx in input_reorder:
            name = names[idx]
            node = inputs[idx]
            if node is not None:
                self.named_nodes[name] = node
                self.args.input_buffers[node.get_name()] = name

        for name, node in zip(names[len(inputs) : len(inputs) + len(outputs)], outputs):
            if node is not None:
                self.named_nodes[name] = node
                self.args.output_buffers[node.get_name()] = name

        arg_defs, *_ = self.args.cpp_argdefs()
        return f"PT_EXPORT int {self.kernel_name}({', '.join(arg_defs)}, {self._EXTRA_CPP_ARGS})"

    def call_kernel(
        self, name: str, node: "CUDATemplateBuffer", epilogue_nodes: List[ir.Buffer]
    ) -> None:
        """
        Generates code to call the kernel through V.graph.wrapper_code.
        used from within torch._inductor.wrapper.WrapperCodeGen

        name: Name of kernel function.
        node: The CUDATemplateBuffer node which contains information about the kernel, its fused epilogue nodes
              as well as all required inputs and outputs.
        """
        wrapper = V.graph.wrapper_code
        _, call_args, _ = self.args.python_argdefs()
        # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
        for i in range(len(call_args)):
            if V.graph.is_unspec_arg(call_args[i]):
                call_args[i] = call_args[i] + ".item()"
            else:
                call_args[i] = f"c_void_p({call_args[i]}.data_ptr())"

        # workspace_size ptr is NULL to mark this call is not intended for retrieving workspace_size.
        # workspace_size should have already been retrieved prior to this call.
        call_args.append("None")

        if node.get_workspace_size() > 0:
            call_args.append(f"c_void_p({node.get_name()}_workspace.data_ptr())")
        else:
            call_args.append("None")

        wrapper.generate_kernel_call(
            name,
            call_args,
            device_index=V.graph.scheduler.current_device.index,
            cuda=True,
            triton=False,
        )

    def dtype(self, node: IRNode) -> Optional[str]:
        """
        Generates code which represents dtype of a given node.
        """

        if node is None:
            return "void"
        return DTYPE_TO_CPP.get(node.get_layout().dtype)

    def offset(self, node: IRNode) -> str:
        """
        Generates code which represents offset of a given node.
        """

        if node is None:
            return "0"
        return str(node.get_layout().offset)

    def ptr(self, node: IRNode) -> str:
        """
        Generates code which represents pointer of a given node.
        """

        if node is None:
            return "nullptr"
        arg_name = self.arg_name(node)
        if arg_name is None:
            return "nullptr"
        offset = self.offset(node)
        return arg_name if offset == "0" else f"{arg_name} + {offset}"

    def size(
        self,
        node: IRNode,
        start_index: int,
        end_index: Optional[int] = None,
        default_value: int = 0,
    ) -> str:
        """
        Hook called from template code to get the size of an arg.
        Generates code which represents size of a given node in [start_index, end_index).
        If node is None, returns default_value.

        TODO: Will add needed args to pass it in if it is dynamic.
        """

        if node is None:
            return str(default_value)

        start_index = _normalize_idx(start_index, len(node.get_size()))
        if end_index is None:
            end_index = start_index
        end_index = _normalize_idx(end_index, len(node.get_size()))

        sizes = node.get_size()[start_index : end_index + 1]
        if len(sizes) == 0:
            return str(default_value)

        val = sympy_product(sizes)
        return cexpr(self.rename_indexing(val))

    def stride(self, node: IRNode, index: int, default_value: int = 0) -> str:
        """
        Hook called from template code to get the stride of an arg.
        Generates code which represents stride of a given node at index.
        If node is None, returns default_value.

        TODO: Will add needed args to pass it in if it is dynamic.
        """

        if node is None:
            return str(default_value)

        index = _normalize_idx(index, len(node.get_size()))
        if index < 0:
            return str(default_value)

        stride = node.get_stride()[index]
        return cexpr(self.rename_indexing(stride))

    def row_or_column_stride(self, node: IRNode, default_value: int = 0) -> str:
        """
        Hook called from template code to get the row or column stride of an arg.
        This is required by some CUTLASS 2.X APIs.
        If the node is in row_major, it returns stride[-2].
        If the node is in column_major, it returns stride[-1].

        TODO: Will add needed args to pass it in if it is dynamic.
        """

        if node is None or len(node.get_stride()) < 2:
            return str(default_value)

        stride0 = node.get_stride()[-1]
        stride1 = node.get_stride()[-2]
        if stride0 == 1:
            return cexpr(self.rename_indexing(stride1))
        elif stride1 == 1:
            return cexpr(self.rename_indexing(stride0))
        else:
            raise RuntimeError(
                f"At least 1 stride should be 1. Strides: {node.get_stride()=}"
            )


class CUDATemplateCaller(ChoiceCaller):
    """
    CUDATemplateCaller

    This class represents a caller for CUDA template kernels. It is a subclass of ChoiceCaller.
    Attributes:
        name (str): The name of the caller.
        category (str): The category of the caller.
        bmreq (CUDABenchmarkRequest): The benchmark request for the caller.
        template_buffer (CUDATemplateBuffer): The template buffer for the caller.
    """

    def __init__(
        self,
        name: str,
        category: str,
        input_nodes: List[Buffer],
        layout: Layout,
        make_kernel_render: Callable[[CUDATemplateBuffer, Optional[List[IRNode]]], str],
        bmreq: CUDABenchmarkRequest,
        template: "CUDATemplate",
    ):
        super().__init__(name, input_nodes, layout)
        self.category = category
        self.make_kernel_render = make_kernel_render
        self.bmreq = bmreq
        self.template = template

    def benchmark(self, *args, out) -> float:
        assert self.bmreq is not None
        return self.bmreq.benchmark(*args, output_tensor=out)

    def __str__(self):
        return f"CUDATemplateCaller(source_file={self.bmreq.source_file})"

    def call_name(self) -> str:
        return f"cuda_template_kernels.{self.name}"

    def hash_key(self) -> str:
        return "-".join(
            [
                self.category,
                self.bmreq.hash_key,
            ]
        )

    def output_node(self) -> TensorBox:
        return TensorBox.create(
            CUDATemplateBuffer(
                layout=self.layout,
                inputs=self.input_nodes,
                make_kernel_render=self.make_kernel_render,
                workspace_size=self.bmreq.workspace_size,
                template=self.template,
            )
        )
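Note (not part of the commit): the stride bookkeeping above reduces to a small amount of pure-Python logic; the sketch below mirrors `_normalize_idx` and `row_or_column_stride` on plain lists to show what the template hooks select.

from typing import List


def normalize_idx(index: int, total_length: int) -> int:
    # Negative indices count from the end, as in _normalize_idx.
    return index if index >= 0 else index + total_length


def row_or_column_stride(strides: List[int]) -> int:
    # Pick the non-unit stride of the last two dims (CUTLASS 2.x convention).
    stride0, stride1 = strides[-1], strides[-2]
    if stride0 == 1:  # row-major: innermost dim contiguous
        return stride1
    if stride1 == 1:  # column-major: second-to-last dim contiguous
        return stride0
    raise RuntimeError(f"At least 1 stride should be 1. Strides: {strides}")


print(normalize_idx(-1, 4))                  # 3
print(row_or_column_stride([4096, 64, 1]))   # 64 (row-major)
print(row_or_column_stride([1, 128]))        # 128 (column-major)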
env-llmeval/lib/python3.10/site-packages/torch/_inductor/codegen/cuda/cuda_template.py ADDED
@@ -0,0 +1,241 @@
import functools
import itertools
import logging
from typing import List, Optional
from unittest.mock import patch

import sympy

import torch
from ...autotune_process import CUDABenchmarkRequest, TensorMeta
from ...ir import Buffer, CUDATemplateBuffer, IRNode, Layout

from ...utils import IndentedBuffer, unique
from ...virtualized import V
from ..common import KernelTemplate
from .cuda_kernel import CUDATemplateCaller, CUDATemplateKernel

log = logging.getLogger(__name__)


class CUDATemplate(KernelTemplate):
    index_counter = itertools.count()

    def __init__(
        self,
        name: str,
        input_nodes: List[Buffer],
        layout: Layout,
        input_reorder: Optional[List[int]] = None,
    ):
        """

        Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly.

        Args:
            name (str): The name of the CUDATemplate object.
            input_nodes (List[IRNode]): A list of input IRNodes.
            layout (Layout): The layout of the output buffer / tensor.
            input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.

        """
        super().__init__(name)
        self.input_nodes = input_nodes
        self.output_node: Buffer = Buffer("buf_out", layout)
        self.input_reorder = input_reorder
        self.layout = layout

    def generate(  # type: ignore[override]
        self,
        **kwargs,
    ) -> CUDATemplateCaller:
        """
        Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller
        may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning.

        Args:
            kwargs: Additional keyword arguments.

        Returns:
            A CUDATemplateCaller object representing the generated CUDA template caller.
        """
        kernel_name = f"cuda_{self.name}"
        with patch.object(
            V.graph, "get_dtype", self._fake_get_dtype(self.output_node)
        ), CUDATemplateKernel(
            kernel_name=kernel_name,
        ) as kernel:
            code = self.render(kernel=kernel, **kwargs)
            _, call_args, _ = kernel.args.python_argdefs()
            log.debug("Generated Code:\n%s", code)
            log.debug(
                "Args: cpp_argdefs: %s, python_argdefs: %s",
                kernel.args.cpp_argdefs(),
                kernel.args.python_argdefs(),
            )

        input_reorder = (
            self.input_reorder
            if self.input_reorder is not None
            else list(range(len(self.input_nodes)))
        )
        expected_args = list(
            unique(self.input_nodes[idx].get_name() for idx in input_reorder)
        )
        expected_args.extend([self.output_node.get_name()])
        assert list(call_args)[: len(expected_args)] == expected_args, (
            call_args,
            expected_args,
        )
        extra_args = V.graph.sizevars.size_hints(
            map(sympy.expand, call_args[len(expected_args) :])
        )

        kernel_hash_name = f"cuda_{self.name}_{next(self.index_counter)}"

        # create the BenchmarkRequest
        bmreq = CUDABenchmarkRequest(
            kernel_name=kernel_name,
            input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes),
            output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
            extra_args=extra_args,
            source_code=code,
        )

        def make_kernel_render(
            template_node: CUDATemplateBuffer,
            epilogue_nodes: Optional[List[IRNode]] = None,
        ):
            kernel = CUDATemplateKernel(
                kernel_name="KERNEL_NAME",
            )
            render = functools.partial(
                self.render,
                kernel=kernel,
                template_buffer_node=template_node,
                epilogue_nodes=epilogue_nodes,
                **kwargs,  # includes "op" argument in case of CUTLASSGemmTemplate
            )
            return kernel, render

        return CUDATemplateCaller(
            kernel_hash_name,
            self.name,
            self.input_nodes,
            self.output_node.get_layout(),
            make_kernel_render,
            bmreq,
            self,
        )

    def header(self) -> IndentedBuffer:
        res = IndentedBuffer()
        res.splice(
            """
                #include <exception>
                #include <iostream>
                #include <memory>
                #include <random>
                #include <vector>
            """
        )
        return res

    def globals(self) -> IndentedBuffer:
        res = IndentedBuffer()
        res.splice(
            """
                // We compile all models with -fvisibility=hidden. Any symbols that need to be
                // exposed in the final shared library must be declared with PT_EXPORT to make
                // them visible.
                #ifdef __GNUC__ // Applies to any compiler with GNU extensions (clang and g++)
                #define PT_EXPORT __attribute__((__visibility__("default")))
                #else
                #ifdef _WIN32
                #define PT_EXPORT __declspec(dllexport)
                #else
                #define PT_EXPORT
                #endif
                #endif
                using bfloat16 = nv_bfloat16;
            """
        )
        return res

    def render(self, **kwargs) -> str:
        raise NotImplementedError


class CUTLASSTemplate(CUDATemplate):
    """
    CUTLASSTemplate is a class that provides a template for generating CUTLASS Templates. Used as a baseclass for the
    CUTLASSGemmTemplate, providing functionality that might also be relevant for non-GEMM CUTLASS Kernels.
    """

    def header(self) -> IndentedBuffer:
        res = super().header()
        res.splice(
            """
                #include "cute/tensor.hpp"
                #include "cutlass/cutlass.h"
                #include "cutlass/numeric_types.h"
                #include "cutlass/tensor_ref.h"
                #include "cutlass/util/host_tensor.h"
                #include "cutlass/util/reference/host/tensor_fill.h"
                #include "cutlass/util/reference/device/tensor_fill.h"
                #include "cutlass/util/device_memory.h"
            """
        )
        return res

    def globals(self) -> IndentedBuffer:
        res = super().globals()
        res.splice(
            """
                using namespace cute;
                #define CUTLASS_CHECK(status)                                                      \\
                {                                                                                  \\
                  cutlass::Status error = status;                                                  \\
                  if (error != cutlass::Status::kSuccess) {                                        \\
                    auto msg = std::string("[") + __FILE__ + "] Got cutlass error: " +             \\
                        cutlassGetStatusString(error) + " at: " + std::to_string(__LINE__);        \\
                    throw std::runtime_error(msg);                                                 \\
                  }                                                                                \\
                }

                // Used as pass-through functor in EVT just for type casting / rounding
                template <typename T>
                struct identity_op {
                  CUTLASS_HOST_DEVICE
                  T operator()(T val) const { return val; }
                };

            """
        )
        return res

    def cute_int(self, int_str: str, var_name: str) -> str:
        res = ""
        if int_str in {"1", "1L"}:
            res = "cute::Int<1>{}"
        else:
            res = int_str

        return f"{res} /* {var_name} */"

    _DTYPE_TO_CUTLASS = {
        torch.float32: "float",
        torch.float64: "double",
        torch.float16: "cutlass::half_t",
        torch.int32: "int",
        torch.int8: "int8_t",
        torch.uint8: "uint8_t",
        torch.bool: "bool",
        torch.bfloat16: "cutlass::bfloat16_t",
    }

    def cutlass_type_cast(self, node: IRNode, ptr: str) -> str:
        if node is None:
            return ptr
        else:
            return f"({self._DTYPE_TO_CUTLASS.get(node.get_dtype())}*)({ptr})"
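Note (not part of the commit): a standalone mirror of the small codegen helpers above, showing the C++ fragments `CUTLASSTemplate` emits for dtypes and unit extents; the dtype map subset is copied from the source purely for illustration.

import torch

# Subset of CUTLASSTemplate._DTYPE_TO_CUTLASS shown above.
DTYPE_TO_CUTLASS = {
    torch.float32: "float",
    torch.float16: "cutlass::half_t",
    torch.bfloat16: "cutlass::bfloat16_t",
}


def cute_int(int_str: str, var_name: str) -> str:
    # A compile-time cute::Int<1> lets CUTLASS specialize unit strides/extents.
    rendered = "cute::Int<1>{}" if int_str in {"1", "1L"} else int_str
    return f"{rendered} /* {var_name} */"


print(DTYPE_TO_CUTLASS[torch.float16])  # cutlass::half_t
print(cute_int("1", "stride_bk"))       # cute::Int<1>{} /* stride_bk */
print(cute_int("M", "size_m"))          # M /* size_m */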
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc ADDED
Binary file (6.55 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/efficient_conv_bn_eval.cpython-310.pyc ADDED
Binary file (3.83 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc ADDED
Binary file (5.72 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc ADDED
Binary file (11.4 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc ADDED
Binary file (22.7 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc ADDED
Binary file (8.09 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/misc_patterns.cpython-310.pyc ADDED
Binary file (3.83 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc ADDED
Binary file (26.7 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc ADDED
Binary file (11 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc ADDED
Binary file (27.9 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc ADDED
Binary file (13 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc ADDED
Binary file (26.6 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc ADDED
Binary file (3.69 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc ADDED
Binary file (29.8 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py ADDED
@@ -0,0 +1,277 @@
import functools
import itertools

import torch
from ..._dynamo.utils import counters

from ..pattern_matcher import Arg, CallFunction, KeywordArg
from .freezing_patterns import register_binary_folding_pattern

aten = torch.ops.aten
prims = torch.ops.prims


def mark_mixed_dtype_conv(conv):
    conv_dtype = conv.meta["val"].dtype
    if conv_dtype not in (torch.float16, torch.bfloat16):
        return

    if not len(conv.users) == 1:
        return

    conv_user = next(iter(conv.users.keys()))
    if not isinstance(conv_user.meta["val"], torch.Tensor):
        return

    if not conv_user.meta["val"].dtype == torch.float32:
        return

    while conv_user.target in _binary_ops:
        if not len(conv_user.users) == 1:
            return

        conv_user = next(iter(conv_user.users.keys()))

    if not (
        conv_user.target == prims.convert_element_type.default
        and conv_user.args[1] == conv_dtype
    ):
        return

    conv.meta["_allow_conv_mixed_dtype_folding"] = conv_dtype


def mark_mixed_dtype_allowed_convs(gm):
    """
    Mark convolutions which we will binary fold even with mixed precision constants. We constant fold in the higher precision
    for better accuracy and then recover the original precision after.
    """
    for node in gm.graph.nodes:
        if node.target is aten.convolution.default:
            mark_mixed_dtype_conv(node)


def recover_original_precision_folded_convs(gm):
    """
    After binary folding conv weights and biases to a higher dtype, recover the original precision they were in.
    """
    graph = gm.graph
    convs = [node for node in graph.nodes if node.target is aten.convolution.default]
    for node in convs:
        orig_dtype = node.meta.get("_allow_conv_mixed_dtype_folding", None)
        if orig_dtype is None:
            continue

        with graph.inserting_before(node):
            for idx in [1, 2]:
                old_input = node.args[idx]
                if old_input is None:
                    continue

                new_input = graph.create_node(
                    "call_function",
                    prims.convert_element_type.default,
                    (old_input, orig_dtype),
                )
                node.replace_input_with(old_input, new_input)


_binary_ops = [aten.add.Tensor, aten.sub.Tensor, aten.mul.Tensor, aten.div.Tensor]


@functools.lru_cache(None)
def binary_folding_init():
    _conv_args = [Arg() for _ in range(9)]
    _computation_ops = [aten.convolution.default]
    _computation_calls = [CallFunction(aten.convolution.default, *_conv_args, _users=1)]

    """
    In order to fuse add/sub/mul/div with conv, the dimensions of its
    constant tensor must satisfy the following:
    - with resizing, broadcast to w/ weight/bias tensor shape
    - broadcast to the conv output shape
    It needs to have a shape that can resize to weight/bias
    tensor shape because we need to run the op with the conv
    weights/bias without changing their sizes.
    It needs to broadcast to the conv output shape so that we don't
    accidentally change the shape of op output by pre-fusing it
    compared to eager.
    The only dimension value shared by weight/bias/conv output
    is they all contain a dim with value = channels-out. In the
    conv output tensor, this is in the second dimension,
    so the pointwise op tensor may have a second dimension of
    value == channels-out, but all the other dimensions have to be 1
    """

    def _op_not_broadcasting_with_conv(weight_tensor, other_tensor):
        # According to opDoesNotBroadCastWithConv of frozen_conv_folding.cpp
        weight_shape = weight_tensor.shape
        other_shape = other_tensor.shape
        if len(weight_shape) < len(other_shape):
            return False
        if len(weight_shape) == len(other_shape) + 1:
            # weight shape is [o, i, *], other_shape is [o, 1...].
            for i in reversed(range(len(other_shape))):
                if i == 0 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        else:
            # weight shape is [o, i, *], other_shape is [1, i, *]
            for i in reversed(range(len(other_shape))):
                if i == 1 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        return True

    def _check_conv_and_broadcast_op(conv_node, other):
        # According to checkConvAndBroadcastingOpPreConditions of frozen_conv_folding.cpp.
        # conv.weight
        if conv_node.args[1].op != "get_attr":
            return False
        # conv.bias
        if conv_node.args[1] is not None and conv_node.args[1].op != "get_attr":
            return False
        if (
            not isinstance(other, int)
            and not isinstance(other, float)
            and other.op != "get_attr"
        ):
            return False

        if not len(conv_node.args[1].users) == 1:
            return False

        weight_meta_value = conv_node.args[1].meta.get("val")
        if weight_meta_value is None:
            return False
        # Avoid fusing op that causes type promotion
        # restricting to float avoids int/float difficulties with scalar overload
        if not weight_meta_value.is_floating_point():
            return False
        if isinstance(other, torch.fx.Node) and other.op == "get_attr":
            other_meta_value = other.meta.get("val")
            if not other_meta_value.is_floating_point():
                return False
            if (
                torch.promote_types(other_meta_value.dtype, weight_meta_value.dtype)
                != weight_meta_value.dtype
            ):
                if not conv_node.meta.get("_allow_conv_mixed_dtype_folding", False):
                    return False

                if (
                    other_meta_value.dtype != torch.float
                    and weight_meta_value.dtype not in (torch.float16, torch.bfloat16)
                ):
                    return False

            if not _op_not_broadcasting_with_conv(weight_meta_value, other_meta_value):
                return False
        else:
            # TODO: support scalar case
            return False

        return True

    def _is_foldable_pattern(match):
        binary_node = match.output_node()
        computation_node = binary_node.args[0]
        other = binary_node.args[1]
        if binary_node.args[0].target not in _computation_ops:
            computation_node = binary_node.args[1]
            other = binary_node.args[0]
        if binary_node.args[0].target == aten.convolution.default:
            return _check_conv_and_broadcast_op(computation_node, other)

        return False

    def resize_scalar_or_tensor_to_shape(graph, other, shape):
        # TODO: support scalar case
        if other.meta.get("val").numel() == 1:
            # expand errors if the shape input has less # dims than the tensor input
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, (1,)),
            )
            res = graph.create_node(
                "call_function",
                aten.expand.default,
                (res, shape),
            )
        else:
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, shape),
            )
        return res

    def _create_new_conv_node(graph, conv_node, binary_node, other):
        assert conv_node.target == aten.convolution.default
        conv_args = list(conv_node.args)
        weight_meta_value = conv_node.args[1].meta.get("val")
        bias = conv_args[2]
        if binary_node.target in [aten.add.Tensor, aten.sub.Tensor]:
            other_reshape = resize_scalar_or_tensor_to_shape(
                graph, other, (weight_meta_value.size(0),)
            )
            new_bias = graph.create_node(
                "call_function",
                binary_node.target,
                (0 if bias is None else bias, other_reshape),
            )
            conv_args[2] = new_bias
        else:
            assert binary_node.target in [aten.mul.Tensor, aten.div.Tensor]
            weight_broadcast_shape = [1 for _ in range(len(weight_meta_value.shape))]
            weight_broadcast_shape[0] = weight_meta_value.size(0)
            other_reshape1 = resize_scalar_or_tensor_to_shape(
                graph, other, tuple(weight_broadcast_shape)
            )
            new_weight = graph.create_node(
                "call_function", binary_node.target, (conv_args[1], other_reshape1)
            )
            new_weight.meta.update(conv_args[1].meta)
            conv_args[1] = new_weight
            if bias is not None:
                other_reshape = resize_scalar_or_tensor_to_shape(
                    graph, other, (weight_meta_value.size(0),)
                )
                new_bias = graph.create_node(
                    "call_function", binary_node.target, (bias, other_reshape)
                )
                new_bias.meta.update(bias.meta)
                conv_args[2] = new_bias
        return graph.create_node("call_function", conv_node.target, tuple(conv_args))

    for _computation_call, binary_op in itertools.product(
        _computation_calls, _binary_ops
    ):

        @register_binary_folding_pattern(
            CallFunction(binary_op, _computation_call, KeywordArg("other")),
            extra_check=_is_foldable_pattern,
        )
        def folded_op(match, *args, **kwargs):
            counters["inductor"]["binary_folding"] += 1
            other = kwargs.get("other")
            binary_node = match.output_node()
            computation_node = (
                binary_node.args[0]
                if binary_node.args[0].target in _computation_ops
                else binary_node.args[1]
            )
            graph = match.graph
            with graph.inserting_before(binary_node):
                # TODO: support linear?
                assert computation_node.target == aten.convolution.default
                new_computation_node = _create_new_conv_node(
                    graph, computation_node, binary_node, other
                )
                binary_node.replace_all_uses_with(new_computation_node)
                new_computation_node.meta.update(computation_node.meta)
                graph.erase_node(binary_node)
                graph.erase_node(computation_node)
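Note (not part of the commit): a small numerical check of the identity binary folding relies on for mul/div: scaling the conv output per output channel equals scaling the conv weight and bias per output channel. Shapes and values are illustrative.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)
b = torch.randn(4)
scale = torch.randn(1, 4, 1, 1)  # broadcasts over the conv output's channel dim

eager = F.conv2d(x, w, b) * scale
folded = F.conv2d(x, w * scale.reshape(4, 1, 1, 1), b * scale.flatten())

print(torch.allclose(eager, folded, atol=1e-5))  # True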
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/efficient_conv_bn_eval.py ADDED
@@ -0,0 +1,157 @@
import torch
import torch.nn as nn

from torch._dynamo.utils import counters
from torch._inductor import config as inductor_config
from torch.func import functional_call

from ..pattern_matcher import CallModuleVarArgs, Match, register_graph_pattern

from .pre_grad import efficient_conv_bn_eval_pass


def efficient_conv_bn_eval(
    bn: nn.modules.batchnorm._BatchNorm, conv: nn.modules.conv._ConvNd, x: torch.Tensor
):
    """
    Implementation based on https://arxiv.org/abs/2305.11624
    "Tune-Mode ConvBN Blocks For Efficient Transfer Learning"
    It leverages the associative law between convolution and affine transform,
    i.e., normalize (weight conv feature) = (normalize weight) conv feature.
    It works for Eval mode of ConvBN blocks during validation, and can be used
    for **training** as well, but only if one sets `bn.training=False`. It
    reduces memory footprint and computation cost, at the cost of slightly
    reduced numerical stability.
    Args:
        bn (nn.modules.batchnorm._BatchNorm): a BatchNorm module.
        conv (nn.modules.conv._ConvNd): a conv module
        x (torch.Tensor): Input feature map.
    """

    assert bn.running_var is not None

    # These lines of code are designed to deal with various cases
    # like bn without affine transform, and conv without bias
    weight_on_the_fly = conv.weight
    if conv.bias is not None:
        bias_on_the_fly = conv.bias
    else:
        bias_on_the_fly = torch.zeros_like(bn.running_var)

    if bn.weight is not None:
        bn_weight = bn.weight
    else:
        bn_weight = torch.ones_like(bn.running_var)

    if bn.bias is not None:
        bn_bias = bn.bias
    else:
        bn_bias = torch.zeros_like(bn.running_var)

    # shape of [C_out, 1, 1, 1] in Conv2d
    target_shape = [-1] + [1] * (conv.weight.ndim - 1)
    if isinstance(conv, nn.modules.conv._ConvTransposeNd):
        # for transposed conv, the C_out dimension should be at index 1.
        target_shape[:2] = [target_shape[1], target_shape[0]]
    weight_coeff = torch.rsqrt(bn.running_var + bn.eps).reshape(target_shape)
    # shape of [C_out, 1, 1, 1] in Conv2d
    coefff_on_the_fly = bn_weight.view_as(weight_coeff) * weight_coeff

    # shape of [C_out, C_in, k, k] in Conv2d
    weight_on_the_fly = weight_on_the_fly * coefff_on_the_fly
    # shape of [C_out] in Conv2d
    bias_on_the_fly = bn_bias + coefff_on_the_fly.flatten() * (
        bias_on_the_fly - bn.running_mean
    )

    input = x
    params = {"weight": weight_on_the_fly, "bias": bias_on_the_fly}
    output = functional_call(conv, params, input)
    return output


@register_graph_pattern(
    CallModuleVarArgs(
        [
            nn.modules.batchnorm._BatchNorm,
            nn.BatchNorm1d,
            nn.BatchNorm2d,
            nn.BatchNorm3d,
            nn.SyncBatchNorm,
        ],
    ),
    pass_dict=efficient_conv_bn_eval_pass,
    extra_check=lambda match: not inductor_config.freezing
    and inductor_config.efficient_conv_bn_eval_fx_passes,
)
def efficient_conv_bn_eval_graph_transform(match: Match, *args, **kwargs):
    # We matched a BN node
    bn_node = match.nodes[0]
    graph = match.graph
    gm = graph.owning_module
    bn_mod = getattr(gm, bn_node.target)

    # We can only use efficient conv-bn for eval mode with track_running_stats
    if not bn_mod.track_running_stats or bn_mod.training:
        return

    # Check if the input is Conv
    if bn_node.args:
        input_node = bn_node.args[0]
    else:
        input_node = bn_node.kwargs["input"]
    if input_node.op != "call_module":
        return
    if not hasattr(gm, input_node.target):
        return
    input_mod = getattr(gm, input_node.target)
    supported_convs = [
        nn.Linear,
        nn.Conv1d,
        nn.Conv2d,
        nn.Conv3d,
        nn.ConvTranspose1d,
        nn.ConvTranspose2d,
        nn.ConvTranspose3d,
    ]
    if not any(isinstance(input_mod, cls) for cls in supported_convs):
        return
    conv_node = input_node
    # Output of conv is used by other nodes, cannot optimize
    if len(conv_node.users) > 1:
        return

    # Find a pair of conv and bn computation nodes to optimize.
    counters["inductor"]["efficient_conv_bn_eval"] += 1

    with graph.inserting_before(conv_node):
        # create `get_attr` node to access modules
        # note that we directly call `create_node` to fill the `name`
        # argument. `graph.get_attr` and
        # `graph.call_function` does not allow the `name` argument.
        conv_get_node = graph.create_node(
            op="get_attr", target=conv_node.target, name="get_conv"
        )
        bn_get_node = graph.create_node(
            op="get_attr", target=bn_node.target, name="get_bn"
        )
        if conv_node.args:
            conv_input = conv_node.args[0]
        else:
            conv_input = conv_node.kwargs["input"]
        # prepare args for the fused function
        args = (bn_get_node, conv_get_node, conv_input)
        # create a new node
        new_node = graph.create_node(
            op="call_function",
            target=efficient_conv_bn_eval,
            args=args,
            name="efficient_conv_bn_eval",
        )
        # this node replaces the original conv + bn, and therefore
        # should replace the uses of bn_node
        bn_node.replace_all_uses_with(new_node)
        # take care of the deletion order:
        # delete bn_node first, and then conv_node
        graph.erase_node(bn_node)
        graph.erase_node(conv_node)
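Note (not part of the commit): a minimal numerical check of the folding identity `efficient_conv_bn_eval` relies on: in eval mode, `bn(conv(x))` equals a convolution whose weight and bias have the BatchNorm statistics folded in. Shapes and tolerances are illustrative.

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, 3, bias=True).eval()
bn = nn.BatchNorm2d(8).eval()
# Give BN non-trivial running statistics and affine parameters.
bn.running_mean.uniform_(-1.0, 1.0)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-1.0, 1.0)

x = torch.randn(2, 3, 16, 16)
with torch.no_grad():
    ref = bn(conv(x))

    coeff = bn.weight * torch.rsqrt(bn.running_var + bn.eps)    # [C_out]
    w_folded = conv.weight * coeff.reshape(-1, 1, 1, 1)         # scale each output channel
    b_folded = bn.bias + coeff * (conv.bias - bn.running_mean)  # shift the bias
    folded = F.conv2d(x, w_folded, b_folded)

print(torch.allclose(ref, folded, atol=1e-5))  # True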
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+
3
+ import torch
4
+ from torch._inductor.compile_fx import fake_tensor_prop
5
+ from ..._dynamo.utils import counters
6
+
7
+ from .. import config
8
+ from ..pattern_matcher import (
9
+ _return_true,
10
+ CallFunction,
11
+ fwd_only,
12
+ Ignored,
13
+ init_once_fakemode,
14
+ KeywordArg,
15
+ Match,
16
+ PatternMatcherPass,
17
+ register_graph_pattern,
18
+ register_replacement,
19
+ stable_topological_sort,
20
+ )
21
+
22
+ aten = torch.ops.aten
23
+
24
+ # First pass_patterns[0] are applied, then [1], then [2]
25
+ pass_patterns = [
26
+ PatternMatcherPass(),
27
+ PatternMatcherPass(),
28
+ PatternMatcherPass(),
29
+ ]
30
+
31
+ binary_folding_pass = PatternMatcherPass()
32
+
33
+
34
+ def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):
35
+ """
36
+ Passes that are applied to the graph to freeze pass.
37
+ """
38
+
39
+ from ..freezing import constant_fold
40
+
41
+ lazy_init()
42
+ # We need a few rounds of binary folding to get rid of all the
43
+ # unnecessary nodes, but may need a good method to chose the rounds number.
44
+ # works like: conv+binary+binary.
45
+ binary_folding = counters["inductor"]["binary_folding"]
46
+ fake_tensor_prop(gm, aot_example_inputs, True)
47
+
48
+ torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_convs(gm)
49
+ for _ in range(4):
50
+ constant_fold(gm)
51
+ # Make sure meta['val'] is properly set for all nodes
52
+ fake_tensor_prop(gm, aot_example_inputs, True)
53
+ binary_folding_pass.apply(gm.graph)
54
+ # If no new binary folding happened in this round, we don't need to run the pass again.
55
+ # TODO: remove the need to run fake_tensor_prop on the whole model.
56
+ if counters["inductor"]["binary_folding"] == binary_folding:
57
+ break
58
+ binary_folding = counters["inductor"]["binary_folding"]
59
+
60
+ torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_convs(gm)
61
+
62
+ constant_fold(gm)
63
+ fake_tensor_prop(gm, aot_example_inputs, True)
64
+
65
+ for pattern in pass_patterns:
66
+ pattern.apply(gm.graph)
67
+
68
+ # The CPU weight packing always assumes the conv's weight is channels last,
69
+ # so make sure layout_optimization is on when doing it.
70
+ if (
71
+ torch._C._has_mkldnn
72
+ and config.cpp.weight_prepack
73
+ and config.layout_optimization
74
+ ):
75
+ from .mkldnn_fusion import _eliminate_duplicate_packed_nodes
76
+
77
+ _eliminate_duplicate_packed_nodes(gm)
78
+
79
+ stable_topological_sort(gm.graph)
80
+ gm.recompile()
81
+ gm.graph.lint()
82
+
83
+
84
+ @init_once_fakemode
85
+ def lazy_init():
86
+ if torch._C._has_mkldnn and config.cpp.weight_prepack:
87
+ from .mkldnn_fusion import _mkldnn_weight_pack_init
88
+
89
+ _mkldnn_weight_pack_init()
90
+
91
+ from .binary_folding import binary_folding_init
92
+
93
+ addmm_patterns_init()
94
+ binary_folding_init()
95
+
96
+
97
+ def register_freezing_graph_pattern(pattern, extra_check=_return_true, pass_number=0):
98
+ return register_graph_pattern(
99
+ pattern,
100
+ extra_check=extra_check,
101
+ pass_dict=pass_patterns[pass_number],
102
+ )
103
+
104
+
105
+ def register_binary_folding_pattern(pattern, extra_check=_return_true):
106
+ return register_graph_pattern(
107
+ pattern,
108
+ extra_check=extra_check,
109
+ pass_dict=binary_folding_pass,
110
+ )
111
+
112
+
113
+ @functools.lru_cache(None)
114
+ def addmm_patterns_init():
115
+ if torch.cuda.is_available():
116
+ # workaround https://github.com/pytorch/pytorch/issues/97894
117
+ device = "cuda"
118
+ else:
119
+ device = "cpu"
120
+ val = functools.partial(torch.empty, (10, 10), device=device, requires_grad=False)
121
+
122
+ def check_concat_weights(match):
123
+ weights = [
124
+ match.kwargs["w1"],
125
+ match.kwargs["w2"],
126
+ ]
127
+ if "w3" in match.kwargs:
128
+ weights.append(match.kwargs["w3"])
129
+
130
+ return all(
131
+ w.op == "get_attr" and w.meta["val"].shape == weights[0].meta["val"].shape
132
+ for w in weights
133
+ )
134
+
135
+ def matmul_fuse_pattern(inp, w1, w2, w3):
136
+ return (inp @ w1, inp @ w2, inp @ w3)
137
+
138
+ def matmul_replacement(inp, w1, w2, w3):
139
+ cat_t = torch.cat((w1, w2, w3), dim=1)
140
+ mm = inp @ cat_t
141
+ return mm.chunk(3, dim=1)
142
+
143
+ register_replacement(
144
+ matmul_fuse_pattern,
145
+ matmul_replacement,
146
+ [val(), val(), val(), val()],
147
+ fwd_only,
148
+ pass_patterns[0],
149
+ extra_check=check_concat_weights,
150
+ exclusive_arg_names=("w1", "w2", "w3"),
151
+ )
152
+
153
+ def matmul_fuse_pattern_two(inp, w1, w2):
154
+ return (inp @ w1, inp @ w2)
155
+
156
+ def matmul_replacement_two(inp, w1, w2):
157
+ cat_t = torch.cat((w1, w2), dim=1)
158
+ mm = inp @ cat_t
159
+ return mm.chunk(2, dim=1)
160
+
161
+ register_replacement(
162
+ matmul_fuse_pattern_two,
163
+ matmul_replacement_two,
164
+ [val(), val(), val()],
165
+ fwd_only,
166
+ pass_patterns[0],
167
+ extra_check=check_concat_weights,
168
+ exclusive_arg_names=("w1", "w2"),
169
+ )
170
+
171
+ def addmm_fuse_pattern_second(inp, w1, w2, w3, b1, b2, b3):
172
+ return (
173
+ aten.addmm(b1, inp, w1),
174
+ aten.addmm(b2, inp, w2),
175
+ aten.addmm(b3, inp, w3),
176
+ )
177
+
178
+ def addmm_fuse_replacement_second(inp, w1, w2, w3, b1, b2, b3):
179
+ cat_w = torch.cat((w1, w2, w3), dim=1)
180
+ cat_b = torch.cat((b1, b2, b3))
181
+ return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)
182
+
183
+ register_replacement(
184
+ addmm_fuse_pattern_second,
185
+ addmm_fuse_replacement_second,
186
+ [val() for _ in range(7)],
187
+ fwd_only,
188
+ pass_patterns[0],
189
+ extra_check=check_concat_weights,
190
+ exclusive_arg_names=("w1", "w2", "w3", "b1", "b2", "b3"),
191
+ )
192
+
193
+
194
+ def same_dtype(match):
195
+ return match.output_node().args[0].meta["val"].dtype == match.kwargs["dtype"]
196
+
197
+
198
+ @register_graph_pattern(
199
+ CallFunction(
200
+ torch.ops.prims.convert_element_type.default,
201
+ Ignored(),
202
+ KeywordArg("dtype"),
203
+ ),
204
+ pass_dict=pass_patterns[0],
205
+ extra_check=same_dtype,
206
+ )
207
+ def unnecessary_dtype_convert(match: Match, **kwargs):
208
+ """Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding"""
209
+ graph = match.graph
210
+ node = match.output_node()
211
+ node.replace_all_uses_with(node.args[0])
212
+ graph.erase_node(node)
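As a quick, hedged illustration (not part of the diff) of what the matmul/addmm replacements registered above encode: several matmuls that share an input and have same-shaped weights are equivalent to a single matmul against the concatenated weights, chunked back apart. The (10, 10) shapes below mirror the tracing inputs used by addmm_patterns_init.

import torch

inp = torch.randn(10, 10)
w1, w2, w3 = (torch.randn(10, 10) for _ in range(3))
b1, b2, b3 = (torch.randn(10) for _ in range(3))

# matmul_fuse_pattern -> matmul_replacement
separate_mm = (inp @ w1, inp @ w2, inp @ w3)
fused_mm = (inp @ torch.cat((w1, w2, w3), dim=1)).chunk(3, dim=1)

# addmm_fuse_pattern_second -> addmm_fuse_replacement_second
separate_addmm = (torch.addmm(b1, inp, w1), torch.addmm(b2, inp, w2), torch.addmm(b3, inp, w3))
fused_addmm = torch.addmm(torch.cat((b1, b2, b3)), inp, torch.cat((w1, w2, w3), dim=1)).chunk(3, dim=1)

for sep, fused in ((separate_mm, fused_mm), (separate_addmm, fused_addmm)):
    print(all(torch.allclose(a, b, atol=1e-6) for a, b in zip(sep, fused)))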
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py ADDED
@@ -0,0 +1,564 @@
1
+ import functools
2
+ import inspect
3
+ import logging
4
+ import math
5
+
6
+ import torch
7
+ from ..._dynamo.utils import counters
8
+ from ..pattern_matcher import (
9
+ filter_nodes,
10
+ fwd_only,
11
+ joint_fwd_bwd,
12
+ register_replacement,
13
+ )
14
+
15
+ log = logging.getLogger(__name__)
16
+ aten = torch.ops.aten
17
+
18
+
19
+ def _sfdp_pattern_1(query, key, value, inv_scale):
20
+ return (
21
+ torch.matmul(query, key.transpose(-2, -1))
22
+ .div(inv_scale)
23
+ .softmax(dim=-1)
24
+ .matmul(value)
25
+ )
26
+
27
+
28
+ def _sfdp_replacement_1(query, key, value, inv_scale):
29
+ counters["inductor"]["fuse_attention"] += 1
30
+ return aten.scaled_dot_product_attention(
31
+ query.contiguous(),
32
+ key.contiguous(),
33
+ value.contiguous(),
34
+ attn_mask=None,
35
+ dropout_p=0.0,
36
+ is_causal=False,
37
+ scale=1.0 / inv_scale,
38
+ )
39
+
40
+
41
+ def _sfdp_pattern_2(query, key, value, scale_factor):
42
+ return (
43
+ torch.matmul(query, key.transpose(-2, -1))
44
+ .mul(scale_factor)
45
+ .softmax(dim=-1)
46
+ .matmul(value)
47
+ )
48
+
49
+
50
+ def _sfdp_replacement_2(query, key, value, scale_factor):
51
+ counters["inductor"]["fuse_attention"] += 1
52
+ return aten.scaled_dot_product_attention(
53
+ query.contiguous(),
54
+ key.contiguous(),
55
+ value.contiguous(),
56
+ attn_mask=None,
57
+ dropout_p=0.0,
58
+ is_causal=False,
59
+ scale=scale_factor,
60
+ )
61
+
62
+
63
+ def _sfdp_pattern_3(query, key, value, inv_scale_factor, dropout_p):
64
+ return torch.nn.functional.dropout(
65
+ torch.matmul(query, key.transpose(-2, -1))
66
+ .div(inv_scale_factor)
67
+ .softmax(dim=-1),
68
+ p=dropout_p,
69
+ ).matmul(value)
70
+
71
+
72
+ def _sfdp_replacement_3(query, key, value, inv_scale_factor, dropout_p):
73
+ counters["inductor"]["fuse_attention"] += 1
74
+ return aten.scaled_dot_product_attention(
75
+ query.contiguous(),
76
+ key.contiguous(),
77
+ value.contiguous(),
78
+ attn_mask=None,
79
+ dropout_p=dropout_p,
80
+ is_causal=False,
81
+ scale=1.0 / inv_scale_factor,
82
+ )
83
+
84
+
85
+ def _sfdp_pattern_4(query, key, value, scale_factor, dropout_p):
86
+ return torch.nn.functional.dropout(
87
+ torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1),
88
+ p=dropout_p,
89
+ ).matmul(value)
90
+
91
+
92
+ def _sfdp_replacement_4(query, key, value, scale_factor, dropout_p):
93
+ counters["inductor"]["fuse_attention"] += 1
94
+ return aten.scaled_dot_product_attention(
95
+ query.contiguous(),
96
+ key.contiguous(),
97
+ value.contiguous(),
98
+ attn_mask=None,
99
+ dropout_p=dropout_p,
100
+ is_causal=False,
101
+ scale=scale_factor,
102
+ )
103
+
104
+
105
+ def _sfdp_pattern_5(query, key, value, attn_mask):
106
+ attn_weight = torch.softmax(
107
+ (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
108
+ )
109
+ # attn_weight = torch.dropout(attn_weight, dropout_p)
110
+ return attn_weight @ value
111
+
112
+
113
+ def _sfdp_replacement_5(query, key, value, attn_mask):
114
+ counters["inductor"]["fuse_attention"] += 1
115
+ return aten.scaled_dot_product_attention(
116
+ query.contiguous(),
117
+ key.contiguous(),
118
+ value.contiguous(),
119
+ attn_mask=attn_mask.to(dtype=query.dtype),
120
+ dropout_p=0.0,
121
+ is_causal=False,
122
+ )
123
+
124
+
125
+ def _sfdp_pattern_6(query, key, value, attn_mask, dropout_p):
126
+ attn_weight = torch.softmax(
127
+ (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
128
+ )
129
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
130
+ return attn_weight @ value
131
+
132
+
133
+ def _sfdp_replacement_6(query, key, value, attn_mask, dropout_p):
134
+ counters["inductor"]["fuse_attention"] += 1
135
+ return aten.scaled_dot_product_attention(
136
+ query.contiguous(),
137
+ key.contiguous(),
138
+ value.contiguous(),
139
+ attn_mask=attn_mask.to(dtype=query.dtype),
140
+ dropout_p=dropout_p,
141
+ is_causal=False,
142
+ )
143
+
144
+
145
+ def _sfdp_pattern_7(query, key, value, dropout_p):
146
+ # in real workloads inputs to matmul are permuted
147
+ # causing matmul to expand to a series of expand and clone calls
148
+ # we want the same to happen during pattern tracing
149
+ q = query.permute(0, 2, 1, 3)
150
+ k = key.permute(0, 2, 1, 3)
151
+ v = value.permute(0, 2, 1, 3)
152
+ div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
153
+ div = div.to(torch.float32)
154
+ attn_weight = torch.softmax(div, dim=-1)
155
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
156
+ attn_weight = attn_weight.to(torch.float16)
157
+ return attn_weight @ v
158
+
159
+
160
+ def _sfdp_replacement_7(query, key, value, dropout_p):
161
+ # sdpa prefers inputs in permuted format
162
+ # it makes a copy to put them in this format
163
+ # if they aren't already
164
+ # to make replacement efficient ensure that inputs to sdpa
165
+ # are in required order
166
+ counters["inductor"]["fuse_attention"] += 1
167
+ q = query.permute(0, 2, 1, 3)
168
+ k = key.permute(0, 2, 1, 3)
169
+ v = value.permute(0, 2, 1, 3)
170
+ return aten.scaled_dot_product_attention(
171
+ q,
172
+ k,
173
+ v,
174
+ attn_mask=None, # attn_mask,
175
+ dropout_p=dropout_p,
176
+ is_causal=False,
177
+ )
178
+
179
+
180
+ def _sfdp_pattern_8(query, key, value):
181
+ # no dropout version of pattern 7
182
+ q = query.permute(0, 2, 1, 3)
183
+ k = key.permute(0, 2, 1, 3)
184
+ v = value.permute(0, 2, 1, 3)
185
+ div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
186
+ div = div.to(torch.float32)
187
+ attn_weight = torch.softmax(div, dim=-1)
188
+ attn_weight = attn_weight.to(torch.float16)
189
+ return attn_weight @ v
190
+
191
+
192
+ def _sfdp_replacement_8(query, key, value):
193
+ counters["inductor"]["fuse_attention"] += 1
194
+ q = query.permute(0, 2, 1, 3)
195
+ k = key.permute(0, 2, 1, 3)
196
+ v = value.permute(0, 2, 1, 3)
197
+ return aten.scaled_dot_product_attention(
198
+ q,
199
+ k,
200
+ v,
201
+ attn_mask=None, # attn_mask,
202
+ dropout_p=0.0,
203
+ is_causal=False,
204
+ )
205
+
206
+
207
+ def _sfdp_pattern_9(query, key, value, dropout_p):
208
+ q = query.permute(0, 2, 1, 3)
209
+ k = key.permute(0, 2, 1, 3)
210
+ v = value.permute(0, 2, 1, 3)
211
+ q = q / math.sqrt(q.size(-1))
212
+ div = q @ k.transpose(-2, -1)
213
+ div = div.to(torch.float32)
214
+ attn_weight = torch.softmax(div, dim=-1)
215
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
216
+ attn_weight = attn_weight.to(torch.float16)
217
+ return attn_weight @ v
218
+
219
+
220
+ def _sfdp_replacement_9(query, key, value, dropout_p):
221
+ counters["inductor"]["fuse_attention"] += 1
222
+ q = query.permute(0, 2, 1, 3)
223
+ k = key.permute(0, 2, 1, 3)
224
+ v = value.permute(0, 2, 1, 3)
225
+ return aten.scaled_dot_product_attention(
226
+ q,
227
+ k,
228
+ v,
229
+ attn_mask=None, # attn_mask,
230
+ dropout_p=dropout_p,
231
+ is_causal=False,
232
+ )
233
+
234
+
235
+ def _sfdp_pattern_10(query, key, value):
236
+ # no dropout version of 9
237
+ q = query.permute(0, 2, 1, 3)
238
+ k = key.permute(0, 2, 1, 3)
239
+ v = value.permute(0, 2, 1, 3)
240
+ q = q / math.sqrt(q.size(-1))
241
+ div = q @ k.transpose(-2, -1)
242
+ div = div.to(torch.float32)
243
+ attn_weight = torch.softmax(div, dim=-1)
244
+ attn_weight = attn_weight.to(torch.float16)
245
+ return attn_weight @ v
246
+
247
+
248
+ def _sfdp_replacement_10(query, key, value):
249
+ counters["inductor"]["fuse_attention"] += 1
250
+ q = query.permute(0, 2, 1, 3)
251
+ k = key.permute(0, 2, 1, 3)
252
+ v = value.permute(0, 2, 1, 3)
253
+ return aten.scaled_dot_product_attention(
254
+ q,
255
+ k,
256
+ v,
257
+ attn_mask=None, # attn_mask,
258
+ dropout_p=0.0,
259
+ is_causal=False,
260
+ )
261
+
262
+
263
+ def _sfdp_pattern_11(query, key, value, inv_scale):
264
+ # Mainly for huggingface models
265
+ q = query.permute(0, 2, 1, 3)
266
+ k = key.permute(0, 2, 1, 3)
267
+ v = value.permute(0, 2, 1, 3)
268
+ return torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v)
269
+
270
+
271
+ def _sfdp_replacement_11(query, key, value, inv_scale):
272
+ counters["inductor"]["fuse_attention"] += 1
273
+ return aten.scaled_dot_product_attention(
274
+ query.transpose(1, 2),
275
+ key.transpose(1, 2),
276
+ value.transpose(1, 2),
277
+ attn_mask=None,
278
+ dropout_p=0.0,
279
+ is_causal=False,
280
+ scale=1.0 / inv_scale,
281
+ )
282
+
283
+
284
+ def _sfdp_pattern_12(query, key, value, inv_scale_factor, dropout_p):
285
+ q = query.permute(0, 2, 1, 3)
286
+ k = key.permute(0, 2, 1, 3)
287
+ v = value.permute(0, 2, 1, 3)
288
+ return torch.nn.functional.dropout(
289
+ torch.matmul(q, k.transpose(-2, -1)).div(inv_scale_factor).softmax(dim=-1),
290
+ p=dropout_p,
291
+ ).matmul(v)
292
+
293
+
294
+ def _sfdp_replacement_12(query, key, value, inv_scale_factor, dropout_p):
295
+ counters["inductor"]["fuse_attention"] += 1
296
+ return aten.scaled_dot_product_attention(
297
+ query.transpose(1, 2),
298
+ key.transpose(1, 2),
299
+ value.transpose(1, 2),
300
+ attn_mask=None,
301
+ dropout_p=dropout_p,
302
+ is_causal=False,
303
+ scale=1.0 / inv_scale_factor,
304
+ )
305
+
306
+
307
+ def _sfdp_pattern_13(query, key, value, dropout_p):
308
+ attn_weight = torch.bmm(query, key.transpose(1, 2)).softmax(dim=-1)
309
+ attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p)
310
+ return torch.bmm(attn_weight, value)
311
+
312
+
313
+ def _sfdp_replacement_13(query, key, value, dropout_p):
314
+ counters["inductor"]["fuse_attention"] += 1
315
+ return aten.scaled_dot_product_attention(
316
+ query.unsqueeze(0),
317
+ key.unsqueeze(0),
318
+ value.unsqueeze(0),
319
+ dropout_p=dropout_p,
320
+ scale=1.0,
321
+ ).squeeze(0)
322
+
323
+
324
+ def _sfdp_params_check(match):
325
+ assert all(k in match.kwargs for k in ("query", "key", "value"))
326
+ query = match.kwargs["query"].meta["val"]
327
+ key = match.kwargs["key"].meta["val"]
328
+ value = match.kwargs["value"].meta["val"]
329
+ if not (query.dtype == key.dtype == value.dtype) or not (
330
+ query.device == key.device == value.device
331
+ ):
332
+ return False
333
+ add_mask_node = filter_nodes(match.nodes, aten.add.Tensor)
334
+ # Has attn_mask add.
335
+ if len(add_mask_node) > 0:
336
+ attn_mask_node = add_mask_node[0].args[1]
337
+ # attn_mask_node may be a float/int number.
338
+ if not hasattr(attn_mask_node, "meta"):
339
+ return False
340
+ attn_mask = attn_mask_node.meta["val"]
341
+ # Make sure attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool
342
+ if (
343
+ not isinstance(attn_mask, torch.Tensor)
344
+ or not (attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool)
345
+ or query.device != attn_mask.device
346
+ ):
347
+ return False
348
+ return True
349
+
350
+
351
+ def _sfdp_scale_factor_check(scale_factor_op):
352
+ def fn(match):
353
+ scale_factor_node = filter_nodes(match.nodes, scale_factor_op)[0]
354
+ # Note: args[1] of the scale_factor_node is always the scale_factor for the current patterns.
355
+ scale_factor = scale_factor_node.args[1]
356
+ # make sure the scale_factor is a float/int. SymInt?
357
+ if not isinstance(scale_factor, (float, int)):
358
+ return False
359
+ return _sfdp_params_check(match)
360
+
361
+ return fn
362
+
363
+
364
+ def partialize_and_update_signature(func, **kwargs):
365
+ """
366
+ Equivalent to functools.partial but also updates the signature on the returned function
367
+ """
368
+ original_sig = inspect.signature(func)
369
+ parameters = original_sig.parameters
370
+
371
+ new_parameters = {
372
+ key: value for key, value in parameters.items() if key not in kwargs
373
+ }
374
+ new_sig = inspect.Signature(parameters=list(new_parameters.values()))
375
+
376
+ partial_func = functools.partial(func, **kwargs)
377
+
378
+ def wrapper(*args, **kwargs):
379
+ return partial_func(*args, **kwargs)
380
+
381
+ wrapper.__signature__ = new_sig # type: ignore[attr-defined]
382
+ wrapper.__name__ = func.__name__
383
+
384
+ return wrapper
385
+
386
+
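# Illustrative usage sketch (not part of the diff): unlike functools.partial, the
# helper above drops the bound parameter from the reported signature, which matters
# because the pattern matcher inspects parameter names downstream. This assumes the
# module is importable at the path shown in the file header.
import functools
import inspect

from torch._inductor.fx_passes.fuse_attention import (
    _sfdp_pattern_3,
    partialize_and_update_signature,
)

print(inspect.signature(_sfdp_pattern_3))
# (query, key, value, inv_scale_factor, dropout_p)
print(inspect.signature(functools.partial(_sfdp_pattern_3, dropout_p=0.0)))
# dropout_p is still listed (bound, now with a default)
print(inspect.signature(partialize_and_update_signature(_sfdp_pattern_3, dropout_p=0.0)))
# (query, key, value, inv_scale_factor): dropout_p removed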
387
+ def _get_sfdp_patterns():
388
+ from .joint_graph import patterns
389
+
390
+ if torch.cuda.is_available():
391
+ # workaround https://github.com/pytorch/pytorch/issues/97894
392
+ device = "cuda"
393
+ else:
394
+ device = "cpu"
395
+
396
+ # sizes/values don't actually matter for initial trace
397
+ # once we get a possible match we re-trace with the actual values and verify the match still holds
398
+ g_inp = functools.partial(
399
+ torch.empty, (2, 4, 8, 16), device=device, requires_grad=True
400
+ )
401
+ b_inp = functools.partial(torch.empty, (1, 1, 8, 8), device=device)
402
+ c_inp = functools.partial(torch.tensor, 2.0, device=device)
403
+ # workaround https://github.com/pytorch/pytorch/issues/97894
404
+ # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
405
+ d = {"dropout_p": 0.113377}
406
+
407
+ # we could also generate all these patterns in 3d.. TODO
408
+ g_3d_inp = functools.partial(
409
+ torch.empty, (1024, 128, 128), device=device, requires_grad=True
410
+ )
411
+
412
+ # softmax will generate a dtype conversion on inputs if they are in half,
413
+ # but will not in float, so we generate a pattern for both
414
+ for dtype in [torch.float, torch.half]:
415
+ g = functools.partial(g_inp, dtype=dtype)
416
+ b = functools.partial(b_inp, dtype=dtype)
417
+ c = functools.partial(c_inp, dtype=dtype)
418
+ g_3d = functools.partial(g_3d_inp, dtype=dtype)
419
+
420
+ for pattern, replacement, args, workaround, extra_check in [
421
+ (
422
+ _sfdp_pattern_1,
423
+ _sfdp_replacement_1,
424
+ [g(), g(), g(), c()],
425
+ {},
426
+ _sfdp_scale_factor_check(aten.div.Tensor),
427
+ ),
428
+ (
429
+ _sfdp_pattern_2,
430
+ _sfdp_replacement_2,
431
+ [g(), g(), g(), c()],
432
+ {},
433
+ _sfdp_scale_factor_check(aten.mul.Tensor),
434
+ ),
435
+ (
436
+ _sfdp_pattern_3,
437
+ _sfdp_replacement_3,
438
+ [g(), g(), g(), c()],
439
+ d,
440
+ _sfdp_scale_factor_check(aten.div.Tensor),
441
+ ),
442
+ (
443
+ _sfdp_pattern_4,
444
+ _sfdp_replacement_4,
445
+ [g(), g(), g(), c()],
446
+ d,
447
+ _sfdp_scale_factor_check(aten.mul.Tensor),
448
+ ),
449
+ (
450
+ _sfdp_pattern_5,
451
+ _sfdp_replacement_5,
452
+ [g(), g(), g(), b()],
453
+ {},
454
+ _sfdp_params_check,
455
+ ),
456
+ (
457
+ _sfdp_pattern_6,
458
+ _sfdp_replacement_6,
459
+ [g(), g(), g(), b()],
460
+ d,
461
+ _sfdp_params_check,
462
+ ),
463
+ (
464
+ _sfdp_pattern_7,
465
+ _sfdp_replacement_7,
466
+ [g(), g(), g()],
467
+ d,
468
+ _sfdp_params_check,
469
+ ),
470
+ (
471
+ _sfdp_pattern_8,
472
+ _sfdp_replacement_8,
473
+ [g(), g(), g()],
474
+ {},
475
+ _sfdp_params_check,
476
+ ),
477
+ (
478
+ _sfdp_pattern_9,
479
+ _sfdp_replacement_9,
480
+ [g(), g(), g()],
481
+ d,
482
+ _sfdp_params_check,
483
+ ),
484
+ (
485
+ _sfdp_pattern_10,
486
+ _sfdp_replacement_10,
487
+ [g(), g(), g()],
488
+ {},
489
+ _sfdp_params_check,
490
+ ),
491
+ (
492
+ _sfdp_pattern_11,
493
+ _sfdp_replacement_11,
494
+ [g(), g(), g(), c()],
495
+ {},
496
+ _sfdp_scale_factor_check(aten.div.Tensor),
497
+ ),
498
+ (
499
+ _sfdp_pattern_12,
500
+ _sfdp_replacement_12,
501
+ [g(), g(), g(), c()],
502
+ d,
503
+ _sfdp_scale_factor_check(aten.div.Tensor),
504
+ ),
505
+ (
506
+ _sfdp_pattern_13,
507
+ _sfdp_replacement_13,
508
+ [g_3d(), g_3d(), g_3d()],
509
+ d,
510
+ _sfdp_params_check,
511
+ ),
512
+ ]:
513
+ # XXX: when adding a new pattern, re-run `gen_attention_patterns` so the pattern
514
+ # gets serialized to a python file and does not require tracing at runtime.
515
+ assert isinstance(workaround, dict)
516
+ name = pattern.__name__
517
+
518
+ training_name = (
519
+ f"{name}_training" if dtype == torch.float else f"{name}_training_half"
520
+ )
521
+ yield training_name, {
522
+ "search_fn": pattern,
523
+ "replace_fn": replacement,
524
+ "example_inputs": args,
525
+ "trace_fn": joint_fwd_bwd,
526
+ "pass_dicts": patterns,
527
+ "extra_check": extra_check,
528
+ "scalar_workaround": workaround,
529
+ }
530
+
531
+ if workaround:
532
+ assert len(workaround) == 1 and "dropout_p" in workaround
533
+ # functools.partial insufficient because we look at signature downstream
534
+ pattern = partialize_and_update_signature(pattern, dropout_p=0.0)
535
+ replacement = partialize_and_update_signature(
536
+ replacement, dropout_p=0.0
537
+ )
538
+ workaround = {}
539
+
540
+ inference_name = (
541
+ f"{name}_inference"
542
+ if dtype == torch.float
543
+ else f"{name}_inference_half"
544
+ )
545
+ yield inference_name, {
546
+ "search_fn": pattern,
547
+ "replace_fn": replacement,
548
+ "example_inputs": args,
549
+ "trace_fn": fwd_only,
550
+ "pass_dicts": patterns,
551
+ "extra_check": extra_check,
552
+ "scalar_workaround": workaround,
553
+ }
554
+
555
+
556
+ @functools.lru_cache(None)
557
+ def _sfdp_init():
558
+ from .serialized_patterns.central_index import get_serialized_pattern
559
+
560
+ for key, register_replacement_kwargs in _get_sfdp_patterns():
561
+ search_fn_pattern = get_serialized_pattern(key)
562
+ register_replacement(
563
+ **register_replacement_kwargs, search_fn_pattern=search_fn_pattern
564
+ )
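A hedged numeric sketch (not part of the diff) of what the simplest pattern/replacement pair above encodes: _sfdp_pattern_1's explicit matmul, div, softmax, matmul chain matches scaled dot-product attention called with scale=1/inv_scale. The public torch.nn.functional.scaled_dot_product_attention is used here as a stand-in for the aten op, and its scale keyword assumes PyTorch 2.1 or newer; the (2, 4, 8, 16) shapes mirror the tracing inputs above.

import math
import torch
import torch.nn.functional as F

q, k, v = (torch.randn(2, 4, 8, 16) for _ in range(3))
inv_scale = math.sqrt(q.size(-1))

eager = torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v)
sdpa = F.scaled_dot_product_attention(
    q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False, scale=1.0 / inv_scale
)
print(torch.allclose(eager, sdpa, atol=1e-5))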
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py ADDED
@@ -0,0 +1,791 @@
1
+ import collections
2
+ import logging
3
+ import operator
4
+ from typing import Any, DefaultDict, Deque, Dict, Iterator, List, Optional, Set, Tuple
5
+
6
+ import torch
7
+ from torch._dynamo.utils import counters
8
+ from torch._utils_internal import print_graph
9
+
10
+ from .. import config
11
+ from ..pattern_matcher import (
12
+ CallFunctionVarArgs,
13
+ get_arg_value,
14
+ stable_topological_sort,
15
+ )
16
+
17
+ try:
18
+ # importing this will register fbgemm lowerings for inductor
19
+ import deeplearning.fbgemm.fbgemm_gpu.fb.inductor_lowerings # noqa: F401
20
+
21
+ has_fbgemm = True
22
+ except Exception:
23
+ has_fbgemm = False
24
+ pass
25
+
26
+ aten = torch.ops.aten
27
+
28
+ log = logging.getLogger(__name__)
29
+
30
+ MIN_FUSE_SET_SIZE = 5
31
+ MAX_FUSE_SET_SIZE = 300
32
+ MAX_FUSE_SEARCH_DEPTH = 5
33
+ # The maximum tensor size that can go into the fusion group
34
+ MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR = 4096
35
+
36
+ # exclude these nodes from BFS
37
+ # excluding get item improves optimizer compilation time by 60s
38
+ SEARCH_EXCLUSIONS = {operator.getitem}
39
+
40
+
41
+ default_graph_search_options = {
42
+ "min_fuse_set_size": MIN_FUSE_SET_SIZE,
43
+ "max_fuse_set_size": MAX_FUSE_SET_SIZE,
44
+ "max_fuse_search_depth": MAX_FUSE_SEARCH_DEPTH,
45
+ "max_fuse_tensor_size_group_linear": MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR,
46
+ }
47
+
48
+ graph_search_options = default_graph_search_options
49
+
50
+
51
+ class GroupBatchFusionBase:
52
+ def __init__(self, **kwargs):
53
+ self.graph_search_options = kwargs.pop(
54
+ "graph_search_options", default_graph_search_options
55
+ )
56
+
57
+ def match(self, node):
58
+ raise NotImplementedError("match called on base")
59
+
60
+ def fuse(self, graph, subset):
61
+ raise NotImplementedError("fuse called on base")
62
+
63
+
64
+ PRE_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
65
+ POST_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
66
+
67
+
68
+ def register_fusion(name: str, pre_grad=True):
69
+ def decorator(fusion_cls: GroupBatchFusionBase):
70
+ if pre_grad:
71
+ PRE_GRAD_FUSIONS[name] = fusion_cls
72
+ else:
73
+ POST_GRAD_FUSIONS[name] = fusion_cls
74
+ return fusion_cls
75
+
76
+ return decorator
77
+
78
+
79
+ def list_group_batch_fusions(pre_grad=True) -> List[str]:
80
+ if pre_grad:
81
+ return list(PRE_GRAD_FUSIONS.keys())
82
+ else:
83
+ return list(POST_GRAD_FUSIONS.keys())
84
+
85
+
86
+ def decompose_stack(graph: torch.fx.GraphModule, input_tensors: List[Any]) -> Any:
87
+ unsqueezed_inputs = []
88
+ for input_tensor in input_tensors:
89
+ unsqueezed_input = graph.call_function(aten.unsqueeze, args=(input_tensor, 0))
90
+ unsqueezed_inputs.append(unsqueezed_input)
91
+ stacked_inputs = graph.call_function(
92
+ aten.cat,
93
+ args=(unsqueezed_inputs, 0),
94
+ )
95
+ return stacked_inputs
96
+
97
+
98
+ class GroupFusion(GroupBatchFusionBase):
99
+ """
100
+ Fuse ops in a group way, e.g., fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.
101
+ """
102
+
103
+ pass
104
+
105
+
106
+ class BatchFusion(GroupBatchFusionBase):
107
+ """
108
+ Fuse ops in a batch way, e.g., fuse mm/addmm of the same input shapes with bmm.
109
+ """
110
+
111
+ pass
112
+
113
+
114
+ class BatchPointwiseOpsFusionFactory(BatchFusion):
115
+ def __init__(self, op, **kwargs):
116
+ super().__init__(**kwargs)
117
+ self.op = op
118
+
119
+
120
+ @register_fusion("batch_linear_post_grad", pre_grad=False)
121
+ class PostGradBatchLinearFusion(BatchFusion):
122
+ """
123
+ Fuse ops in a batch way in post grad (aten level).
124
+ """
125
+
126
+ def _addmm_node_can_be_fused(self, node: torch.fx.Node) -> bool:
127
+ return (
128
+ node.kwargs.get("beta", 1.0) == 1.0 and node.kwargs.get("alpha", 1.0) == 1.0
129
+ )
130
+
131
+ def _is_input_2d(self, input: torch.fx.Node) -> bool:
132
+ return len(input.meta["tensor_meta"].shape) == 2
133
+
134
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, int, int, int, bool]]:
135
+ if CallFunctionVarArgs(aten.mm).match(node):
136
+ input_m, weight_m = node.args
137
+ bias_m = None
138
+
139
+ elif CallFunctionVarArgs(aten.addmm.default).match(
140
+ node
141
+ ) and self._addmm_node_can_be_fused(node):
142
+ bias_m, input_m, weight_m = node.args
143
+ else:
144
+ return None
145
+
146
+ # only handle the cases where inputs are 2D tensors
147
+ if not self._is_input_2d(input_m) or not self._is_input_2d(weight_m):
148
+ return None
149
+ m, k = input_m.meta["tensor_meta"].shape
150
+ n = weight_m.meta["tensor_meta"].shape[1]
151
+ batch_key = ("batch_linear", m, k, n, bias_m is not None)
152
+ return batch_key
153
+
154
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
155
+ batch_inputs = []
156
+ batch_weights = []
157
+ batch_biases = []
158
+ batch_nodes = []
159
+
160
+ for node in subset:
161
+ if CallFunctionVarArgs(aten.addmm.default).match(node):
162
+ bias, input, weight = node.args
163
+ elif CallFunctionVarArgs(aten.mm.default).match(node):
164
+ input, weight = node.args
165
+ bias = None
166
+ batch_nodes.append(node)
167
+ batch_inputs.append(input)
168
+ batch_weights.append(weight)
169
+ batch_biases.append(bias)
170
+
171
+ with graph.inserting_before(subset[-1]):
172
+ fused_inputs = decompose_stack(graph, batch_inputs)
173
+ fused_weights = decompose_stack(graph, batch_weights)
174
+ fused_bmm = graph.call_function(
175
+ aten.bmm,
176
+ args=(fused_inputs, fused_weights),
177
+ )
178
+
179
+ for i, original_mm in enumerate(batch_nodes):
180
+ has_bias = False
181
+ with graph.inserting_after(fused_bmm):
182
+ new_mm = graph.call_function(aten.select, args=((fused_bmm, 0, i)))
183
+ if batch_biases[i]:
184
+ has_bias = True
185
+ new_bias_add = graph.call_function(
186
+ aten.add, args=((batch_biases[i], new_mm))
187
+ )
188
+ new_mm_cont = new_bias_add if has_bias else new_mm
189
+ original_mm.replace_all_uses_with(new_mm_cont)
190
+ new_mm_cont.meta.update(original_mm.meta)
191
+ graph.erase_node(original_mm)
192
+
193
+
194
+ @register_fusion("group_linear", pre_grad=False)
195
+ class GroupLinearFusion(GroupFusion):
196
+ def _addmm_node_can_be_fused(self, node: torch.fx.Node):
197
+ input_shape = node.args[1].meta["tensor_meta"].shape
198
+ weight_shape = node.args[2].meta["tensor_meta"].shape
199
+ return (
200
+ node.kwargs.get("beta", 1.0) == 1.0
201
+ and node.kwargs.get("alpha", 1.0) == 1.0
202
+ and len(input_shape) == 2
203
+ and len(weight_shape) == 2
204
+ and all(x % 2 == 0 for x in input_shape + weight_shape)
205
+ and all(
206
+ shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
207
+ for shape in input_shape + weight_shape
208
+ )
209
+ )
210
+
211
+ def _mm_node_can_be_fused(self, node: torch.fx.Node):
212
+ input_shape = node.args[0].meta["tensor_meta"].shape
213
+ weight_shape = node.args[1].meta["tensor_meta"].shape
214
+ return (
215
+ len(input_shape) == 2
216
+ and len(weight_shape) == 2
217
+ and all(x % 2 == 0 for x in input_shape + weight_shape)
218
+ and all(
219
+ shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
220
+ for shape in input_shape + weight_shape
221
+ )
222
+ )
223
+
224
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, bool]]:
225
+ if CallFunctionVarArgs(aten.mm.default).match(
226
+ node
227
+ ) and self._mm_node_can_be_fused(node):
228
+ group_key = ("group_linear", True)
229
+ elif CallFunctionVarArgs(aten.addmm.default).match(
230
+ node
231
+ ) and self._addmm_node_can_be_fused(node):
232
+ bias = node.args[0]
233
+ group_key = ("group_linear", bias is None)
234
+ else:
235
+ group_key = None
236
+ return group_key
237
+
238
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
239
+ group_inputs = []
240
+ group_weights = []
241
+ group_biases = []
242
+ group_nodes = []
243
+ for node in subset:
244
+ if CallFunctionVarArgs(aten.addmm.default).match(node):
245
+ bias, input, weight = node.args
246
+ else:
247
+ assert CallFunctionVarArgs(aten.mm.default).match(node)
248
+ input, weight = node.args
249
+ bias = None
250
+
251
+ group_nodes.append(node)
252
+ group_inputs.append(input)
253
+ group_weights.append(weight)
254
+ group_biases.append(bias)
255
+
256
+ if all(bias is None for bias in group_biases):
257
+ group_biases = None # type: ignore[assignment]
258
+ group_biases: Optional[List[Any]]
259
+
260
+ with graph.inserting_before(subset[0]):
261
+ fused_mm = graph.call_function(
262
+ torch.ops.fbgemm.gmm.default,
263
+ args=(group_inputs, group_weights, group_biases),
264
+ )
265
+
266
+ for i, original_mm in enumerate(group_nodes):
267
+ with graph.inserting_after(fused_mm):
268
+ new_mm = graph.call_function(operator.getitem, args=(fused_mm, i))
269
+ original_mm.replace_all_uses_with(new_mm)
270
+ new_mm.meta.update(original_mm.meta)
271
+ graph.erase_node(original_mm)
272
+
273
+
274
+ @register_fusion("batch_linear_lhs")
275
+ class BatchLinearLHSFusion(BatchFusion):
276
+ """
277
+ Batch linear left-hand side fusion. This pass tries to fuse the following patterns:
278
+
279
+ torch.nn.functional.linear(x, w1), linear(x, w2),... * linear(x, wn)
280
+ -> torch.mm(x, torch.cat([w1, w2,... * wn]).transpose(0, 1))
281
+
282
+ We have a separate pass to eliminate contiguous transpose in a generic way.
283
+ """
284
+
285
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, bool, Any]]:
286
+ if CallFunctionVarArgs(torch.nn.functional.linear).match(
287
+ node
288
+ ) and is_linear_node_can_be_fused(node):
289
+ input = get_arg_value(node, 0, "input")
290
+ bias = get_arg_value(node, 2, "bias")
291
+ group_key = ("batch_linear_lhs", bias is None, input)
292
+ else:
293
+ group_key = None
294
+ return group_key
295
+
296
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
297
+ batch_nodes = []
298
+ batch_input = None
299
+ batch_weights = []
300
+ batch_biases = []
301
+ split_sections = []
302
+ for node in subset:
303
+ input = get_arg_value(node, 0, "input")
304
+ weight = get_arg_value(node, 1, "weight")
305
+ bias = get_arg_value(node, 2, "bias")
306
+ batch_nodes.append(node)
307
+ if batch_input is None:
308
+ batch_input = input
309
+ else:
310
+ assert batch_input is input
311
+ batch_weights.append(weight)
312
+ if bias:
313
+ batch_biases.append(bias)
314
+ split_sections.append(weight.meta["example_value"].shape[0])
315
+
316
+ with graph.inserting_before(subset[0]):
317
+ cat_weights = graph.call_function(
318
+ torch.cat, args=(batch_weights,), kwargs={"dim": 0}
319
+ )
320
+ transposed_weights = graph.call_function(
321
+ torch.transpose, args=(cat_weights, 0, 1)
322
+ )
323
+ if len(batch_biases) > 0:
324
+ cat_biases = graph.call_function(
325
+ torch.cat, args=(batch_biases,), kwargs={"dim": 0}
326
+ )
327
+ fused_lhs = graph.call_function(
328
+ torch.addmm,
329
+ args=(cat_biases, batch_input, transposed_weights),
330
+ )
331
+ else:
332
+ fused_lhs = graph.call_function(
333
+ torch.mm,
334
+ args=(batch_input, transposed_weights),
335
+ )
336
+ fused_lhs_list = graph.call_function(
337
+ torch.split, args=(fused_lhs, split_sections), kwargs={"dim": 1}
338
+ )
339
+
340
+ for i, node in enumerate(batch_nodes):
341
+ with graph.inserting_after(fused_lhs_list):
342
+ new_node = graph.call_function(
343
+ operator.getitem, args=(fused_lhs_list, i)
344
+ )
345
+ node.replace_all_uses_with(new_node)
346
+ new_node.meta.update(node.meta)
347
+ graph.erase_node(node)
348
+
349
+
350
+ def is_node_meta_valid(node: Optional[torch.fx.Node]):
351
+ if node is None:
352
+ return True
353
+ if "example_value" not in node.meta:
354
+ return False
355
+ return True
356
+
357
+
358
+ def is_linear_node_can_be_fused(node: torch.fx.Node):
359
+ input = get_arg_value(node, 0, "input")
360
+ weight = get_arg_value(node, 1, "weight")
361
+ return (
362
+ is_node_meta_valid(node)
363
+ and is_node_meta_valid(input)
364
+ and is_node_meta_valid(weight)
365
+ and len(input.meta["example_value"].shape) == 2
366
+ and len(weight.meta["example_value"].shape) == 2
367
+ )
368
+
369
+
370
+ @register_fusion("batch_linear")
371
+ class PreGradBatchLinearFusion(BatchFusion):
372
+ """
373
+ Batch linear fusion in pre grad pass.
374
+ Fuse linears of the same size with torch.baddbmm
375
+ """
376
+
377
+ def _getitem_args(self, getitem_node: torch.fx.Node):
378
+ if getitem_node.target != operator.__getitem__ or (
379
+ getitem_node.op != "call_function"
380
+ ):
381
+ return None
382
+ return getitem_node.args[0]
383
+
384
+ def match(self, node: torch.fx.Node):
385
+ if CallFunctionVarArgs(torch.nn.functional.linear).match(
386
+ node
387
+ ) and is_linear_node_can_be_fused(node):
388
+ input = get_arg_value(node, 0, "input")
389
+ weight = get_arg_value(node, 1, "weight")
390
+ bias = get_arg_value(node, 2, "bias")
391
+ group_key = (
392
+ "batch_linear_pre_grad",
393
+ self._getitem_args(input),
394
+ str(input.meta["example_value"].shape),
395
+ str(weight.meta["example_value"].shape),
396
+ bias is None,
397
+ )
398
+ else:
399
+ group_key = None
400
+ return group_key
401
+
402
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
403
+ batch_nodes = []
404
+ batch_inputs = []
405
+ batch_weights = []
406
+ batch_biases = []
407
+ for node in subset:
408
+ batch_nodes.append(node)
409
+ batch_inputs.append(get_arg_value(node, 0, "input"))
410
+ batch_weights.append(get_arg_value(node, 1, "weight"))
411
+ batch_biases.append(get_arg_value(node, 2, "bias"))
412
+
413
+ with graph.inserting_before(subset[0]):
414
+ stack_inputs = graph.call_function(
415
+ torch.stack, args=(batch_inputs,), kwargs={"dim": 0}
416
+ )
417
+ stack_weights = graph.call_function(
418
+ torch.stack, args=(batch_weights,), kwargs={"dim": 0}
419
+ )
420
+ transpose_weight = graph.call_function(
421
+ torch.transpose, args=(stack_weights, 1, 2)
422
+ )
423
+ if all(bias is None for bias in batch_biases):
424
+ bmm = graph.call_function(
425
+ torch.bmm,
426
+ args=(stack_inputs, transpose_weight),
427
+ )
428
+ else:
429
+ stack_biases = graph.call_function(
430
+ torch.stack, args=(batch_biases,), kwargs={"dim": 0}
431
+ )
432
+ unsqueeze_biases = graph.call_function(
433
+ torch.unsqueeze, args=(stack_biases, 1)
434
+ )
435
+ bmm = graph.call_function(
436
+ torch.baddbmm,
437
+ args=(unsqueeze_biases, stack_inputs, transpose_weight),
438
+ )
439
+
440
+ bmm = graph.call_function(torch.unbind, args=(bmm,), kwargs={"dim": 0})
441
+ for i, linear in enumerate(batch_nodes):
442
+ with graph.inserting_after(bmm):
443
+ getitem = graph.call_function(operator.getitem, args=(bmm, i))
444
+ linear.replace_all_uses_with(getitem)
445
+ getitem.meta.update(linear.meta)
446
+ graph.erase_node(linear)
447
+
448
+
449
+ @register_fusion("batch_layernorm")
450
+ class BatchLayernormFusion(BatchFusion):
451
+ """
452
+ Batch layer norm fusion in pre grad pass
453
+ """
454
+
455
+ def match(self, node: torch.fx.Node):
456
+ if CallFunctionVarArgs(torch.nn.functional.layer_norm).match(node):
457
+ input = get_arg_value(node, 0, "input")
458
+ weight = get_arg_value(node, 2, "weight")
459
+ bias = get_arg_value(node, 3, "bias")
460
+ group_key = (
461
+ (
462
+ "batch_layernorm",
463
+ str(input.meta["example_value"].shape),
464
+ str(weight.meta["example_value"].shape)
465
+ if weight is not None
466
+ else "",
467
+ str(bias.meta["example_value"].shape) if bias is not None else "",
468
+ str(get_arg_value(node, 1, "normalized_shape")),
469
+ str(get_arg_value(node, 4, "eps")),
470
+ )
471
+ if "example_value" in input.meta
472
+ and is_node_meta_valid(weight)
473
+ and is_node_meta_valid(bias)
474
+ else None
475
+ )
476
+ else:
477
+ group_key = None
478
+ return group_key
479
+
480
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
481
+ group_inputs = []
482
+ group_shapes = []
483
+ group_weights = []
484
+ group_biases = []
485
+ group_epss = []
486
+ group_nodes = []
487
+ for node in subset:
488
+ group_nodes.append(node)
489
+ group_inputs.append(get_arg_value(node, 0, "input"))
490
+ group_shapes.append(get_arg_value(node, 1, "normalized_shape"))
491
+ group_weights.append(get_arg_value(node, 2, "weight"))
492
+ group_biases.append(get_arg_value(node, 3, "bias"))
493
+ eps = get_arg_value(node, 4, "eps")
494
+ if eps is None:
495
+ eps = 1e-5
496
+ group_epss.append(eps)
497
+ stack_dim = -1 - len(group_shapes[-1])
498
+
499
+ if all(bias is None for bias in group_biases):
500
+ group_biases = None # type: ignore[assignment]
501
+ group_biases: Optional[List[Any]]
502
+ if all(weight is None for weight in group_weights):
503
+ group_weights = None # type: ignore[assignment]
504
+ group_weights: Optional[List[Any]]
505
+ assert all(
506
+ eps == group_epss[0] for eps in group_epss
507
+ ), "all epsilon values must be equal"
508
+
509
+ with graph.inserting_before(subset[0]):
510
+ stack_input = graph.call_function(
511
+ torch.stack, args=(group_inputs,), kwargs={"dim": stack_dim}
512
+ )
513
+ if group_weights is not None:
514
+ stack_weight = graph.call_function(
515
+ torch.stack, args=(group_weights,), kwargs={"dim": 0}
516
+ )
517
+ else:
518
+ stack_weight = None
519
+ if group_biases is not None:
520
+ stack_bias = graph.call_function(
521
+ torch.stack, args=(group_biases,), kwargs={"dim": 0}
522
+ )
523
+ else:
524
+ stack_bias = None
525
+
526
+ batch_layer_norm = graph.call_function(
527
+ torch.nn.functional.layer_norm,
528
+ args=(stack_input, group_shapes[-1]),
529
+ kwargs={"eps": group_epss[-1]},
530
+ )
531
+
532
+ if group_weights is not None and group_biases is not None:
533
+ batch_layer_norm = graph.call_function(
534
+ torch.addcmul, args=(stack_bias, stack_weight, batch_layer_norm)
535
+ )
536
+ elif group_weights is not None and group_biases is None:
537
+ batch_layer_norm = graph.call_function(
538
+ torch.mul, args=(stack_weight, batch_layer_norm)
539
+ )
540
+ elif group_weights is None and group_biases is not None:
541
+ batch_layer_norm = graph.call_function(
542
+ torch.add, args=(stack_bias, batch_layer_norm)
543
+ )
544
+
545
+ batch_layer_norm_unbind = graph.call_function(
546
+ torch.unbind,
547
+ args=(batch_layer_norm,),
548
+ kwargs={"dim": stack_dim},
549
+ )
550
+
551
+ for i, node in enumerate(group_nodes):
552
+ with graph.inserting_after(batch_layer_norm_unbind):
553
+ new_node = graph.call_function(
554
+ operator.getitem, args=(batch_layer_norm_unbind, i)
555
+ )
556
+ node.replace_all_uses_with(new_node)
557
+ new_node.meta.update(node.meta)
558
+ graph.erase_node(node)
559
+
560
+
561
+ class BatchPointwiseOpsPreGradFusion(BatchPointwiseOpsFusionFactory):
562
+ """
563
+ Batch pointwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
564
+ We fuse them at an arbitrary place in the graph, and the introduced stack node may be merged by the later split-cat pass.
565
+ """
566
+
567
+ def __init__(self, op, **kwargs):
568
+ super().__init__(op, **kwargs)
569
+ self.op = op
570
+
571
+ def match(self, node: torch.fx.Node):
572
+ input = get_arg_value(node, 0, "input")
573
+ if CallFunctionVarArgs(self.op).match(node) and is_node_meta_valid(node):
574
+ # for relu op, we also use the inplace to construct the key
575
+ group_key = (
576
+ "batch_" + self.op.__name__.lower() + "_pre_grad",
577
+ str(input.meta["example_value"].shape),
578
+ str(node.kwargs.get("inplace", False)),
579
+ )
580
+ else:
581
+ group_key = None
582
+ return group_key
583
+
584
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
585
+ batch_nodes = []
586
+ batch_inputs = []
587
+
588
+ for node in subset:
589
+ batch_nodes.append(node)
590
+ batch_inputs.append(get_arg_value(node, 0, "input"))
591
+
592
+ with graph.inserting_before(subset[0]):
593
+ stack_inputs = graph.call_function(
594
+ torch.stack, args=(batch_inputs,), kwargs={"dim": 0}
595
+ )
596
+ if self.op == torch.nn.functional.relu:
597
+ batch_op = graph.call_function(
598
+ self.op,
599
+ args=(stack_inputs,),
600
+ kwargs={"inplace": subset[0].kwargs.get("inplace", False)},
601
+ )
602
+ else:
603
+ batch_op = graph.call_function(
604
+ self.op,
605
+ args=(stack_inputs,),
606
+ )
607
+ unbind_op = graph.call_function(
608
+ torch.unbind, args=(batch_op,), kwargs={"dim": 0}
609
+ )
610
+ for i, node in enumerate(batch_nodes):
611
+ with graph.inserting_after(unbind_op):
612
+ getitem = graph.call_function(operator.getitem, args=(unbind_op, i))
613
+ node.replace_all_uses_with(getitem)
614
+ getitem.meta.update(node.meta)
615
+ graph.erase_node(node)
616
+
617
+
618
+ @register_fusion("batch_tanh")
619
+ class BatchTanhPreGradFusion(BatchPointwiseOpsPreGradFusion):
620
+ def __init__(self, **kwargs):
621
+ super().__init__(torch.tanh, **kwargs)
622
+
623
+
624
+ @register_fusion("batch_sigmoid")
625
+ class BatchSigmoidPreGradFusion(BatchPointwiseOpsPreGradFusion):
626
+ def __init__(self, **kwargs):
627
+ super().__init__(torch.sigmoid, **kwargs)
628
+
629
+
630
+ @register_fusion("batch_relu")
631
+ class BatchReLuPreGradFusion(BatchPointwiseOpsPreGradFusion):
632
+ def __init__(self, **kwargs):
633
+ super().__init__(torch.nn.functional.relu, **kwargs)
634
+
635
+
636
+ def find_independent_subset_greedy(
637
+ node_list: List[torch.fx.Node],
638
+ graph_search_options: Dict[str, Any],
639
+ ) -> Iterator[List[torch.fx.Node]]:
640
+ """
641
+ Yield subsets of node_list; all nodes in each subset are independent of each other and can be fused together.
642
+ Each subset is a list, so we preserve node order and benefit from split-cat elimination in a later pass.
643
+ """
644
+ visited_node_set: Set[torch.fx.Node] = set()
645
+ dep_set: Set[torch.fx.Node] = set()
646
+
647
+ def find_dependent_nodes(src_node, cur_node):
648
+ for input_node in cur_node.all_input_nodes:
649
+ if input_node in node_list:
650
+ dep_set.add(input_node)
651
+
652
+ if input_node not in visited_node_set:
653
+ visited_node_set.add(input_node)
654
+ find_dependent_nodes(src_node, input_node)
655
+
656
+ while len(node_list) > 0:
657
+ subset: List[torch.fx.Node] = []
658
+ subset_deps: Set[torch.fx.Node] = set()
659
+
660
+ for node in node_list:
661
+ if len(subset) >= graph_search_options["max_fuse_set_size"]:
662
+ break
663
+
664
+ visited_node_set.clear()
665
+ dep_set.clear()
666
+
667
+ find_dependent_nodes(node, node)
668
+ if not dep_set.intersection(subset) and node not in subset_deps:
669
+ subset.append(node)
670
+ subset_deps.update(dep_set)
671
+
672
+ if len(subset) >= graph_search_options["min_fuse_set_size"]:
673
+ yield subset
674
+
675
+ next_round_node_list = [node for node in node_list if node not in subset]
676
+ node_list = next_round_node_list
677
+
678
+
679
+ def get_fusion_candidates(
680
+ rule: GroupBatchFusionBase, root_node: torch.fx.Node, fused_set: Set[torch.fx.Node]
681
+ ) -> DefaultDict[Any, List[torch.fx.Node]]:
682
+ """
683
+ Search fusion candidates for a specific rule using BFS starting from the root node.
684
+ We only search the subgraph within graph_search_options["max_fuse_search_depth"].
685
+ """
686
+ q: Deque[Tuple[int, torch.fx.Node]] = collections.deque()
687
+
688
+ candidate_dict: DefaultDict[Any, List[torch.fx.Node]] = collections.defaultdict(
689
+ list
690
+ )
691
+
692
+ if root_node.target in SEARCH_EXCLUSIONS:
693
+ return candidate_dict
694
+
695
+ visited_set: Set[torch.fx.Node] = set()
696
+
697
+ for next_node in root_node.all_input_nodes:
698
+ q.append((1, next_node))
699
+ visited_set.add(next_node)
700
+
701
+ while len(q) > 0:
702
+ depth, node = q.popleft()
703
+
704
+ if node in fused_set:
705
+ continue
706
+
707
+ key = rule.match(node)
708
+ # SymInt is not hashable, so we need to skip it
709
+ if key is not None and not isinstance(key, torch.SymInt):
710
+ candidate_nodes = candidate_dict[key]
711
+ if node not in candidate_nodes:
712
+ candidate_nodes.append(node)
713
+ else:
714
+ if depth < rule.graph_search_options["max_fuse_search_depth"]:
715
+ for next_node in node.all_input_nodes:
716
+ if next_node not in visited_set:
717
+ visited_set.add(next_node)
718
+ q.append((depth + 1, next_node))
719
+
720
+ return candidate_dict
721
+
722
+
723
+ def apply_group_batch_fusion(graph: torch.fx.GraphModule, rule: GroupBatchFusionBase):
724
+ stable_topological_sort(graph)
725
+ fused_set: Set[torch.fx.Node] = set()
726
+
727
+ for node in reversed(graph.nodes):
728
+ candidates = get_fusion_candidates(rule, node, fused_set)
729
+
730
+ for key, candidate_nodes in candidates.items():
731
+ if len(candidate_nodes) < MIN_FUSE_SET_SIZE:
732
+ continue
733
+
734
+ for subset in find_independent_subset_greedy(
735
+ candidate_nodes, rule.graph_search_options
736
+ ):
737
+ rule.fuse(graph, subset)
738
+ fused_set.update(subset)
739
+ if isinstance(rule, GroupFusion):
740
+ counters["inductor"]["group_fusion"] += 1
741
+ elif isinstance(rule, BatchFusion):
742
+ counters["inductor"]["batch_fusion"] += 1
743
+ else:
744
+ counters["inductor"]["unknown_group_batch_fusion"] += 1
745
+
746
+ log.info(
747
+ f"{rule.__class__.__name__}: key = {key}; subset size = {len(subset)}" # noqa: G004
748
+ )
749
+
750
+
751
+ def generate_fusion_from_config(config_options: Dict[str, Any], pre_grad=True):
752
+ fusions: List[GroupBatchFusionBase] = []
753
+ for name, options in config_options.items():
754
+ fusion_cls = PRE_GRAD_FUSIONS[name] if pre_grad else POST_GRAD_FUSIONS[name]
755
+ _options = graph_search_options.copy()
756
+ _options.update(options)
757
+ fusions.append(fusion_cls(graph_search_options=_options)) # type: ignore[operator]
758
+ return fusions
759
+
760
+
761
+ def group_batch_fusion_passes(graph: torch.fx.Graph, pre_grad=True):
762
+ print_graph(graph, "Before group_batch fusion in pre grad pass.")
763
+ fusions: List[GroupBatchFusionBase] = []
764
+ # we keep all current pre grad fusions to preserve the
765
+ # current implementation; this will be removed later
766
+ if pre_grad:
767
+ fusions += generate_fusion_from_config(
768
+ config.pre_grad_fusion_options, pre_grad=True
769
+ )
770
+ else:
771
+ fbgemm_fusion_keys = [
772
+ x
773
+ for x in config.post_grad_fusion_options
774
+ if config.post_grad_fusion_options[x].get("require_fbgemm", False)
775
+ ]
776
+ fbgemm_fusions = {
777
+ fusion: config.post_grad_fusion_options[fusion]
778
+ for fusion in fbgemm_fusion_keys
779
+ }
780
+ non_fbgemm_fusions = {
781
+ fusion: config.post_grad_fusion_options[fusion]
782
+ for fusion in config.post_grad_fusion_options.keys()
783
+ if fusion not in fbgemm_fusion_keys
784
+ }
785
+ fusions += generate_fusion_from_config(non_fbgemm_fusions, pre_grad=False)
786
+ if has_fbgemm:
787
+ fusions += generate_fusion_from_config(fbgemm_fusions, pre_grad=False)
788
+
789
+ for rule in fusions:
790
+ apply_group_batch_fusion(graph, rule)
791
+ print_graph(graph, f"Apply fusion {rule.__class__.__name__}.")
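A hedged numeric sketch (not part of the diff) of the graph PreGradBatchLinearFusion emits: N same-shaped torch.nn.functional.linear calls become one stack + baddbmm + unbind, and the unbound slices match the original outputs. The sizes here are arbitrary illustrative values; the variable names mirror the nodes created in its fuse() method.

import torch
import torch.nn.functional as F

xs = [torch.randn(6, 16) for _ in range(5)]
ws = [torch.randn(32, 16) for _ in range(5)]
bs = [torch.randn(32) for _ in range(5)]

separate = [F.linear(x, w, b) for x, w, b in zip(xs, ws, bs)]

stack_inputs = torch.stack(xs, dim=0)                       # (5, 6, 16)
transpose_weight = torch.stack(ws, dim=0).transpose(1, 2)   # (5, 16, 32)
unsqueeze_biases = torch.stack(bs, dim=0).unsqueeze(1)      # (5, 1, 32)
fused = torch.baddbmm(unsqueeze_biases, stack_inputs, transpose_weight).unbind(dim=0)

print(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(separate, fused)))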
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py ADDED
@@ -0,0 +1,323 @@
1
+ import logging
2
+ import typing
3
+ from collections import Counter
4
+ from typing import Dict, Set
5
+
6
+ import torch
7
+ import torch._guards
8
+ from torch._inductor.constant_folding import ConstantFolder
9
+ from torch.multiprocessing.reductions import StorageWeakRef
10
+
11
+ from .. import config
12
+ from ..pattern_matcher import (
13
+ CallFunction,
14
+ init_once_fakemode,
15
+ KeywordArg,
16
+ Match,
17
+ PatternMatcherPass,
18
+ register_graph_pattern,
19
+ stable_topological_sort,
20
+ )
21
+ from .replace_random import replace_random_passes
22
+
23
+ log = logging.getLogger(__name__)
24
+ patterns = PatternMatcherPass()
25
+
26
+
27
+ @init_once_fakemode
28
+ def lazy_init():
29
+ from .fuse_attention import _sfdp_init
30
+ from .misc_patterns import _misc_patterns_init
31
+ from .pad_mm import _pad_mm_init
32
+
33
+ _pad_mm_init()
34
+ _sfdp_init()
35
+ _misc_patterns_init()
36
+
37
+
38
+ @torch.utils._python_dispatch._disable_current_modes()
39
+ def remove_no_ops(
40
+ gm: torch.fx.GraphModule, zeros: Set[torch.fx.Node], ones: Set[torch.fx.Node]
41
+ ):
42
+ "Removes no-ops: (+ 0, - 0, * 1, / 1)"
43
+ aten = torch.ops.aten
44
+ graph = gm.graph
45
+
46
+ def fake_tensors_eq(t1, t2, fields=("shape", "dtype", "device")):
47
+ if any(not isinstance(t, torch.Tensor) for t in (t1, t2)):
48
+ return False
49
+ for field in fields:
50
+ if getattr(t1, field) != getattr(t2, field):
51
+ return False
52
+ return True
53
+
54
+ def replace_no_op(node, replace_input_index):
55
+ replacement = node.args[replace_input_index]
56
+
57
+ # https://github.com/pytorch/pytorch/issues/86128 causes
58
+ # non-Tensor inputs even for ops with only Tensor inputs.
59
+ # TODO - decompose/type promote to avoid this
60
+ if not all(isinstance(arg, torch.fx.Node) for arg in node.args):
61
+ return
62
+
63
+ if not fake_tensors_eq(node.meta["val"], replacement.meta["val"]):
64
+ if fake_tensors_eq(
65
+ node.meta["val"],
66
+ replacement.meta["val"],
67
+ ("shape", "device"),
68
+ ):
69
+ with graph.inserting_after(node):
70
+ replacement = graph.call_function(
71
+ torch.ops.prims.convert_element_type.default,
72
+ args=(replacement, node.meta["val"].dtype),
73
+ )
74
+ else:
75
+ return
76
+
77
+ node.replace_all_uses_with(replacement)
78
+ replacement.meta.update(node.meta)
79
+ graph.erase_node(node)
80
+
81
+ for node in graph.nodes:
82
+ if node.op != "call_function":
83
+ continue
84
+
85
+ # TODO handle Tensor-Scalar adds, it's a different schema
86
+ if node.target == aten.add.Tensor and len(node.args) == 2:
87
+ if (
88
+ not any(e in zeros for e in node.args)
89
+ or node.kwargs.get("alpha", 1) != 1
90
+ ):
91
+ continue
92
+
93
+ replace_index = 1 if node.args[0] in zeros else 0
94
+ replace_no_op(node, replace_index)
95
+
96
+ elif node.target == aten.sub.Tensor and len(node.args) == 2:
97
+ if node.args[1] not in zeros or node.kwargs.get("alpha", 1) != 1:
98
+ continue
99
+
100
+ replace_no_op(node, 0)
101
+
102
+ elif node.target == aten.mul.Tensor and len(node.args) == 2:
103
+ if not any(e in ones for e in node.args):
104
+ continue
105
+
106
+ replace_input_index = 1 if node.args[0] in ones else 0
107
+ replace_no_op(node, replace_input_index)
108
+
109
+ elif (
110
+ node.target == aten.div.Tensor
111
+ and len(node.args) == 2
112
+ and node.args[1] in ones
113
+ ):
114
+ replace_no_op(node, 0)
115
+
116
+
117
+ @torch.utils._python_dispatch._disable_current_modes()
118
+ def remove_redundant_views(gm: torch.fx.GraphModule):
119
+ """
120
+ Removes redundant views by reusing existing ones.
121
+ """
122
+
123
+ # A dictionary mapping a tensor to all aliased views.
124
+ views: Dict[torch.fx.Node, Dict[torch.dtype, torch.fx.Node]] = {}
125
+ graph = gm.graph
126
+
127
+ for node in graph.nodes:
128
+ if node.op != "call_function":
129
+ continue
130
+
131
+ if node.target != torch.ops.aten.view.dtype:
132
+ continue
133
+
134
+ src = node.args[0]
135
+ to_type = node.args[1]
136
+ existing_views = views.get(src)
137
+ is_needed = True
138
+
139
+ if existing_views:
140
+ # Replace the view with an existing view if available.
141
+ alias = existing_views.get(to_type)
142
+ if alias:
143
+ is_needed = False
144
+ node.replace_all_uses_with(alias)
145
+ alias.meta.update(node.meta)
146
+ graph.erase_node(node)
147
+ else:
148
+ from_type = src.meta["val"].dtype
149
+ existing_views = {from_type: src}
150
+ views[src] = existing_views
151
+
152
+ if is_needed:
153
+ # Save the new alias but do not replace existing one.
154
+ existing_views.setdefault(to_type, node)
155
+ views[node] = existing_views
156
+
157
+ # Clean up unused views.
158
+ while True:
159
+ unused_views = []
160
+ for alias in views:
161
+ if not alias.users:
162
+ unused_views.append(alias)
163
+ if len(unused_views) == 0:
164
+ break
165
+ for unused in unused_views:
166
+ views.pop(unused)
167
+ graph.erase_node(unused)
168
+
169
+
170
+ class UniformValueConstantFolder(ConstantFolder):
171
+ """
172
+ Runs constant folding and replaces tensors that have a uniform value
173
+ with a tensor constructor call: aten.full([shape], value, ...)
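+ For example, a folded result equal to torch.zeros(8, 8) is re-emitted as aten.full([8, 8], 0, ...) rather than being kept as a baked-in constant (illustrative).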
174
+ """
175
+
176
+ def __init__(self, gm, skip_constructors=False):
177
+ super().__init__(gm, skip_constructors)
178
+ self.node_storages_ptrs: Dict[torch.fx.Node, int] = {}
179
+ self.constant_data_ptrs: Dict[torch.fx.Node, StorageWeakRef] = {}
180
+
181
+ def insertable_tensor_check(self, t: torch.Tensor) -> bool:
182
+ # TODO - we could also handle Tensors which get replaced with arange here
183
+ return (
184
+ t.numel() != 0
185
+ and bool((t == t.flatten()[0]).all())
186
+ and torch._C._has_storage(t)
187
+ and t.layout == torch.strided
188
+ )
189
+
190
+ def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
191
+ self.node_replacements[node] = tensor.flatten()[0].item()
192
+ self.constant_data_ptrs[node] = StorageWeakRef(tensor.untyped_storage())
193
+
194
+
195
+ @torch.utils._python_dispatch._disable_current_modes()
196
+ def constant_fold_uniform_value(gm: torch.fx.GraphModule):
197
+ "Runs constant folding and replaces constants which can be constructed with a single `full` call. Calls into remove_no_ops."
198
+ aten = torch.ops.aten
199
+
200
+ # Constant folding can leak memory, especially with repeated compilation, so we are only going to
201
+ # remove constants which can be replaced with a constructor.
202
+ cf = UniformValueConstantFolder(gm)
203
+ cf.run()
204
+
205
+ node_replacements = cf.node_replacements
206
+
207
+ graph = gm.graph
208
+
209
+ zeros = set()
210
+ ones = set()
211
+
212
+ # Got failures in `test_is_set_to_cuda` if we change aliasing on constants,
213
+ # so just constant-ify if a Tensor is unaliased
214
+ constant_data_ptr_count: typing.Counter[StorageWeakRef] = Counter()
215
+
216
+ for node in cf.node_replacements:
217
+ constant_data_ptr_count[cf.constant_data_ptrs[node]] += 1
218
+
219
+ for node, value in node_replacements.items():
220
+ # we don't currently have a functional way of instantiating a non-contiguous tensor with full/zeros/ones;
221
+ # hasn't shown up to be important yet
222
+ fake_tensor = node.meta["val"]
223
+ if not fake_tensor.is_contiguous(memory_format=torch.contiguous_format):
224
+ continue
225
+
226
+ if constant_data_ptr_count[cf.constant_data_ptrs[node]] > 1:
227
+ continue
228
+
229
+ with graph.inserting_after(node):
230
+ # the round trip from the original value to a tensor and back can be lossy, so just use the original full ctor value
231
+ if (
232
+ node.op == "call_function"
233
+ and node.target == aten.full.default
234
+ and len(node.args) == 2
235
+ ):
236
+ value = node.args[1]
237
+
238
+ # zeros and ones just get traced into full, so we insert those
239
+ new_node = graph.call_function(
240
+ aten.full.default,
241
+ args=(list(fake_tensor.shape), value),
242
+ kwargs={
243
+ "dtype": fake_tensor.dtype,
244
+ "layout": torch.strided,
245
+ "device": fake_tensor.device,
246
+ "pin_memory": False,
247
+ },
248
+ )
249
+
250
+ new_node.meta.update(node.meta)
251
+ node.replace_all_uses_with(new_node)
252
+ graph.erase_node(node)
253
+
254
+ if value == 0:
255
+ zeros.add(new_node)
256
+ elif value == 1:
257
+ ones.add(new_node)
258
+
259
+ remove_no_ops(gm, zeros, ones)
260
+ remove_redundant_views(gm)
261
+
262
+
263
+ def joint_graph_passes(graph: torch.fx.GraphModule):
264
+ """
265
+ Run FX transformations on the joint forwards+backwards graph.
266
+ """
267
+ lazy_init()
268
+ count = 0
269
+
270
+ if config.joint_graph_constant_folding:
271
+ constant_fold_uniform_value(graph)
272
+
273
+ if config.pattern_matcher:
274
+ count += patterns.apply(graph.graph)
275
+
276
+ if not config.fallback_random:
277
+ count += replace_random_passes(graph)
278
+
279
+ if count:
280
+ stable_topological_sort(graph.graph)
281
+ graph.graph.lint()
282
+ graph.recompile()
283
+ return graph
284
+
285
+
286
+ @register_graph_pattern(
287
+ CallFunction(
288
+ torch.ops.prims.convert_element_type.default,
289
+ CallFunction(
290
+ torch.ops.prims.convert_element_type.default,
291
+ KeywordArg("arg"),
292
+ KeywordArg("dtype1"),
293
+ ),
294
+ KeywordArg("dtype2"),
295
+ ),
296
+ pass_dict=patterns,
297
+ )
298
+ def pointless_convert(match: Match, arg, dtype1: torch.dtype, dtype2: torch.dtype):
299
+ """Remove chain of dtype conversions often created by AMP"""
300
+ graph = match.graph
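+ # Illustrative: convert(convert(x, torch.float32), torch.bfloat16) becomes convert(x, torch.bfloat16) when both dtypes are in the allowed float set.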
301
+ node = match.output_node()
302
+ allowed = {torch.float16, torch.bfloat16, torch.float32, torch.float64}
303
+ if dtype1 in allowed and dtype2 in allowed:
304
+ repl = graph.call_function(
305
+ torch.ops.prims.convert_element_type.default, (arg, dtype2)
306
+ )
307
+ repl.meta.update(node.meta)
308
+ node.replace_all_uses_with(repl)
309
+ match.erase_nodes(graph)
310
+
311
+
312
+ @register_graph_pattern(
313
+ CallFunction(torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")),
314
+ pass_dict=patterns,
315
+ )
316
+ def pointless_view(match: Match, arg, size):
317
+ """Remove no-op view"""
318
+ graph = match.graph
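+ # Illustrative: aten.view.default(x, [4, 8]) where x already has shape [4, 8] is replaced by x itself.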
319
+ node = match.output_node()
320
+ arg_size = list(node.args[0].meta["val"].shape)
321
+ if size == arg_size:
322
+ node.replace_all_uses_with(node.args[0])
323
+ match.erase_nodes(graph)
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/misc_patterns.py ADDED
@@ -0,0 +1,130 @@
1
+ import functools
2
+
3
+ from typing import Dict, Set, Tuple
4
+
5
+ import torch
6
+ from torch._dynamo.utils import counters
7
+
8
+ from torch._ops import OpOverload, OpOverloadPacket
9
+ from ..pattern_matcher import fwd_only, register_replacement
10
+
11
+ aten = torch.ops.aten
12
+
13
+
14
+ @functools.lru_cache(None)
15
+ def _misc_patterns_init():
16
+ from .joint_graph import patterns as joint_graph_patterns
17
+ from .post_grad import pass_patterns as post_grad_patterns_all
18
+
19
+ post_grad_patterns = post_grad_patterns_all[1] # medium priority
20
+
21
+ if torch.cuda.is_available():
22
+ # workaround https://github.com/pytorch/pytorch/issues/97894
23
+ device = "cuda"
24
+ else:
25
+ device = "cpu"
26
+
27
+ # These patterns do 2 things
28
+ # 1. Since we know that index is completely unique, we can codegen it using
29
+ # stores instead of atomic adds, which is quite a bit faster.
30
+ # 2. Also, since we are guaranteed that they are completely within bounds,
31
+ # we can use unsafe indexing and skip debug asserts
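+ # Illustrative: because randperm indices are unique and in bounds, index_add over them can be rewritten as a gather, an add, and a non-atomic (unsafe) scatter.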
32
+ def randperm_index_add_pattern(x, y):
33
+ index = torch.randperm(x.shape[0], device=x.device)[: y.shape[0]]
34
+ return torch.index_add(x, dim=0, source=y, index=index), index
35
+
36
+ def randperm_index_add_replacement(x, y):
37
+ index = torch.randperm(x.shape[0], device=x.device)[: y.shape[0]]
38
+ return (
39
+ torch.ops.aten._unsafe_index_put(
40
+ x, (index,), aten._unsafe_index(x, (index,)) + y, accumulate=False
41
+ ),
42
+ index,
43
+ )
44
+
45
+ register_replacement(
46
+ randperm_index_add_pattern,
47
+ randperm_index_add_replacement,
48
+ [torch.empty(4, 8, device=device), torch.empty(2, 8, device=device)],
49
+ fwd_only,
50
+ [post_grad_patterns, joint_graph_patterns],
51
+ )
52
+
53
+ def randperm_index_pattern(x, slice_shape):
54
+ index = torch.randperm(x.shape[0], device=x.device)[:slice_shape]
55
+ return torch.ops.aten.index(x, (index,)), index
56
+
57
+ def randperm_index_replacement(x, slice_shape):
58
+ index = torch.randperm(x.shape[0], device=x.device)[:slice_shape]
59
+ return torch.ops.aten._unsafe_index(x, (index,)), index
60
+
61
+ pattern = register_replacement(
62
+ randperm_index_pattern,
63
+ randperm_index_replacement,
64
+ [torch.empty(4, 8, device=device)],
65
+ fwd_only,
66
+ [post_grad_patterns, joint_graph_patterns],
67
+ scalar_workaround={"slice_shape": 42},
68
+ )
69
+
70
+
71
+ class NumpyCompatNormalization:
72
+ numpy_compat: Dict[str, Tuple[str, ...]] = {
73
+ "dim": ("axis",),
74
+ "keepdim": ("keepdims",),
75
+ "input": ("x", "a", "x1"),
76
+ "other": ("x2",),
77
+ }
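+ # Illustrative: torch.sum(t, axis=0, keepdims=True) gets normalized to torch.sum(t, dim=0, keepdim=True).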
78
+ inverse_mapping: Dict[str, str]
79
+ cache: Dict["torch.fx.graph.Target", Set[str]]
80
+
81
+ def __init__(self):
82
+ self.cache = {} # callable -> set of replaceable kwarg names, e.g. {"axis"}
83
+ self.inverse_mapping = {}
84
+ for actual_kwarg, numpy_kwargs in self.numpy_compat.items():
85
+ for numpy_kwarg in numpy_kwargs:
86
+ assert numpy_kwarg not in self.inverse_mapping
87
+ self.inverse_mapping[numpy_kwarg] = actual_kwarg
88
+
89
+ def __call__(self, graph: torch.fx.Graph):
90
+ for node in graph.nodes:
91
+ if node.op != "call_function":
92
+ continue
93
+ if isinstance(node.target, (OpOverload, OpOverloadPacket)):
94
+ # only applies to torch ops; e.g. torch.stack(axis=1) works, torch.ops.aten.stack(axis=1) doesn't.
95
+ continue
96
+ kwargs = node.kwargs
97
+
98
+ if node.target in self.cache:
99
+ replaceable_kwargs = self.cache[node.target]
100
+ else:
101
+ signatures = torch.fx.operator_schemas.get_signature_for_torch_op(
102
+ node.target
103
+ )
104
+ signatures = () if signatures is None else signatures
105
+ replaceable_kwargs = set()
106
+ for sig in signatures:
107
+ for param_name in sig.parameters.keys():
108
+ if param_name in self.numpy_compat:
109
+ replaceable_kwargs.update(self.numpy_compat[param_name])
110
+
111
+ self.cache[node.target] = replaceable_kwargs
112
+
113
+ if not replaceable_kwargs:
114
+ continue
115
+
116
+ new_kwargs = {}
117
+ kwargs_changed = False
118
+ for k, v in kwargs.items():
119
+ if k in replaceable_kwargs:
120
+ kwargs_changed = True
121
+ new_kwargs[self.inverse_mapping[k]] = v
122
+ else:
123
+ new_kwargs[k] = v
124
+
125
+ if kwargs_changed:
126
+ node.kwargs = torch.fx.immutable_collections.immutable_dict(new_kwargs)
127
+ counters["inductor"]["numpy_compat_normalization"] += 1
128
+
129
+
130
+ numpy_compat_normalization = NumpyCompatNormalization()
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py ADDED
@@ -0,0 +1,1085 @@
1
+ import functools
2
+ import operator
3
+ from functools import reduce
4
+ from typing import Any, Tuple
5
+
6
+ import torch
7
+
8
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols
9
+
10
+ from .. import ir
11
+
12
+ from ..lowering import lowerings as L
13
+ from ..pattern_matcher import (
14
+ Arg,
15
+ CallFunction,
16
+ filter_nodes,
17
+ get_arg_value,
18
+ KeywordArg,
19
+ MULTIPLE,
20
+ )
21
+ from ..virtualized import ops
22
+ from .freezing_patterns import register_freezing_graph_pattern
23
+ from .post_grad import register_lowering_pattern
24
+ from .quantization import (
25
+ _register_quantization_lowerings,
26
+ _register_quantization_weight_pack_pass,
27
+ )
28
+
29
+ if torch._C._has_mkldnn:
30
+ aten = torch.ops.aten
31
+ mkldnn = torch.ops.mkldnn
32
+ prims = torch.ops.prims
33
+
34
+ _conv_args = [Arg() for _ in range(10)]
35
+ _linear_args = [Arg() for _ in range(6)]
36
+ _conv_transpose_args = [Arg() for _ in range(11)]
37
+
38
+ def _conv_call(users=1):
39
+ return CallFunction(
40
+ mkldnn._convolution_pointwise.default, *_conv_args, _users=users
41
+ )
42
+
43
+ def _linear_call(users=1):
44
+ return CallFunction(
45
+ mkldnn._linear_pointwise.default, *_linear_args, _users=users
46
+ )
47
+
48
+ def _conv_transpose_call(users=1):
49
+ return CallFunction(
50
+ mkldnn._convolution_transpose_pointwise.default,
51
+ *_conv_transpose_args,
52
+ _users=users,
53
+ )
54
+
55
+ def _to_float(input_call, users=1):
56
+ return CallFunction(
57
+ prims.convert_element_type.default,
58
+ input_call,
59
+ KeywordArg("to_float"),
60
+ _users=users,
61
+ )
62
+
63
+ def _to_bf16(input_call):
64
+ return CallFunction(
65
+ prims.convert_element_type.default,
66
+ input_call,
67
+ KeywordArg("to_bf16"),
68
+ _users=1,
69
+ )
70
+
71
+ def _unary_fusion_pattern(unary_fusion, call_fn, users, is_bf16):
72
+ # only insert to_dtype if is_bf16 is True
73
+ computation_call = (
74
+ _to_float(call_fn(), users=users) if is_bf16 else call_fn(users=users)
75
+ )
76
+ out = unary_fusion(computation_call)
77
+ return _to_bf16(out) if is_bf16 else out
78
+
79
+ def _gelu_fusion_1(computation_call):
80
+ return CallFunction(
81
+ aten.mul,
82
+ CallFunction(aten.mul, computation_call, 0.5),
83
+ CallFunction(
84
+ aten.add,
85
+ CallFunction(
86
+ aten.erf,
87
+ CallFunction(aten.mul, computation_call, 0.7071067811865476),
88
+ ),
89
+ 1,
90
+ ),
91
+ )
92
+
93
+ def _gelu_fusion_2(computation_call):
94
+ return CallFunction(
95
+ aten.mul,
96
+ CallFunction(aten.mul, computation_call, 0.5),
97
+ CallFunction(
98
+ aten.add,
99
+ CallFunction(
100
+ aten.tanh,
101
+ CallFunction(
102
+ aten.mul,
103
+ CallFunction(
104
+ aten.add,
105
+ computation_call,
106
+ CallFunction(
107
+ aten.mul,
108
+ CallFunction(
109
+ aten.mul,
110
+ CallFunction(
111
+ aten.mul, computation_call, computation_call
112
+ ),
113
+ computation_call,
114
+ ),
115
+ 0.044715,
116
+ ),
117
+ ),
118
+ 0.7978845608028654,
119
+ ),
120
+ ),
121
+ 1,
122
+ ),
123
+ )
124
+
125
+ def _hardswish_fusion(computation_call):
126
+ return CallFunction(
127
+ aten.div,
128
+ CallFunction(
129
+ aten.mul,
130
+ computation_call,
131
+ CallFunction(
132
+ aten.clamp_max,
133
+ CallFunction(
134
+ aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
135
+ ),
136
+ 6,
137
+ ),
138
+ ),
139
+ 6,
140
+ )
141
+
142
+ def _silu_fusion(computation_call):
143
+ return CallFunction(
144
+ aten.mul, computation_call, CallFunction(aten.sigmoid, computation_call)
145
+ )
146
+
147
+ def _hardsigmoid_fusion(computation_call):
148
+ return CallFunction(
149
+ aten.div,
150
+ CallFunction(
151
+ aten.clamp_max,
152
+ CallFunction(
153
+ aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
154
+ ),
155
+ 6,
156
+ ),
157
+ 6,
158
+ )
159
+
160
+ def _leaky_relu_fusion(computation_call):
161
+ return CallFunction(
162
+ aten.where,
163
+ CallFunction(aten.gt, computation_call, 0),
164
+ computation_call,
165
+ CallFunction(aten.mul, computation_call, KeywordArg("negative_slope")),
166
+ )
167
+
168
+ def _hardtanh_fusion(computation_call):
169
+ return CallFunction(
170
+ aten.clamp_max,
171
+ CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")),
172
+ KeywordArg("max_value"),
173
+ )
174
+
175
+ def _combined_fusion(computation_call, elementwise_op):
176
+ return CallFunction(elementwise_op, computation_call)
177
+
178
+ # binary_op(other, computation_op)
179
+ def _binary_fusion_v1(computation_call, binary_fn):
180
+ return CallFunction(binary_fn, KeywordArg("other"), computation_call)
181
+
182
+ # binary_op(computation_op, other)
183
+ def _binary_fusion_v2(computation_call, binary_fn):
184
+ return CallFunction(binary_fn, computation_call, KeywordArg("other"))
185
+
186
+ def _is_single_computation_op(computation_op):
187
+ def fn(match):
188
+ computation_nodes = filter_nodes(match.nodes, computation_op)
189
+ if len(computation_nodes) < 1:
190
+ return False
191
+ if any(n.args[-3] != "none" for n in computation_nodes):
192
+ return False
193
+ return True
194
+
195
+ return fn
196
+
197
+ def _is_valid_computation_unary_fusion(computation_op, is_bf16=False):
198
+ def fn(match):
199
+ matched = _is_single_computation_op(computation_op)(match)
200
+ computation_node = filter_nodes(match.nodes, computation_op)[0]
201
+ if is_bf16:
202
+ conversion_dtype_nodes = filter_nodes(
203
+ match.nodes, prims.convert_element_type.default
204
+ )
205
+ if len(conversion_dtype_nodes) != 2:
206
+ return False
207
+ # fusion pattern is always in the form of computation_op + to_float32 + unary_op + to_bfloat16
208
+ if computation_node == conversion_dtype_nodes[0].args[0]:
209
+ to_float = conversion_dtype_nodes[0].args[1]
210
+ to_bf16 = conversion_dtype_nodes[1].args[1]
211
+ else:
212
+ to_float = conversion_dtype_nodes[1].args[1]
213
+ to_bf16 = conversion_dtype_nodes[0].args[1]
214
+ matched = (
215
+ matched and to_float == torch.float and to_bf16 == torch.bfloat16
216
+ )
217
+ return matched
218
+
219
+ return fn
220
+
221
+ def _register_unary_fusion_lowering(
222
+ pattern, unary_attr, computation_op, is_bf16=False
223
+ ):
224
+ @register_lowering_pattern(
225
+ pattern,
226
+ extra_check=_is_valid_computation_unary_fusion(computation_op, is_bf16),
227
+ )
228
+ def fn(match, *args, **kwargs):
229
+ computation_args = list(args)[:-3] + [
230
+ unary_attr.op_name,
231
+ unary_attr.scalars_attr,
232
+ unary_attr.algorithm_attr,
233
+ ]
234
+ return L[computation_op](*computation_args)
235
+
236
+ return fn
237
+
238
+ def _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16=False):
239
+ @register_lowering_pattern(
240
+ pattern, extra_check=_is_single_computation_op(computation_op)
241
+ )
242
+ def fn(match, *args, **kwargs):
243
+ negative_slope = kwargs.get("negative_slope")
244
+ if isinstance(negative_slope, ir.TensorBox):
245
+ matched = False
246
+ else: # inp is a Number
247
+ matched = True
248
+ if is_bf16:
249
+ dtype1 = kwargs.get("to_float")
250
+ dtype2 = kwargs.get("to_bf16")
251
+ matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16
252
+ computation_args = list(args)
253
+ if matched:
254
+ computation_args = computation_args[:-3] + [
255
+ "leaky_relu",
256
+ [negative_slope],
257
+ "",
258
+ ]
259
+ return L[computation_op](*computation_args)
260
+ else:
261
+ # computation_args += ["none", [], ""]
262
+ out = L[computation_op](*computation_args)
263
+ if is_bf16:
264
+ out = L[prims.convert_element_type.default](out, dtype=torch.float)
265
+ out = L[aten.where](
266
+ L[aten.gt](out, 0),
267
+ out,
268
+ L[aten.mul](out, negative_slope),
269
+ )
270
+ if is_bf16:
271
+ out = L[prims.convert_element_type.default](
272
+ out, dtype=torch.bfloat16
273
+ )
274
+ return out
275
+
276
+ return fn
277
+
278
+ def _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16=False):
279
+ @register_lowering_pattern(
280
+ pattern, extra_check=_is_single_computation_op(computation_op)
281
+ )
282
+ def fn(match, *args, **kwargs):
283
+ min_value = kwargs.get("min_value")
284
+ max_value = kwargs.get("max_value")
285
+ if isinstance(min_value, ir.TensorBox) or isinstance(
286
+ max_value, ir.TensorBox
287
+ ):
288
+ matched = False
289
+ else: # inp is a Number
290
+ assert max_value is not None
291
+ matched = min_value <= max_value
292
+ if is_bf16:
293
+ dtype1 = kwargs.get("to_float")
294
+ dtype2 = kwargs.get("to_bf16")
295
+ matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16
296
+ computation_args = list(args)
297
+ if matched:
298
+ computation_args = computation_args[:-3] + [
299
+ "hardtanh",
300
+ [min_value, max_value],
301
+ "",
302
+ ]
303
+ return L[computation_op](*computation_args)
304
+ else:
305
+ out = L[computation_op](*computation_args)
306
+ if is_bf16:
307
+ out = L[prims.convert_element_type.default](out, dtype=torch.float)
308
+ out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value)
309
+ if is_bf16:
310
+ out = L[prims.convert_element_type.default](
311
+ out, dtype=torch.bfloat16
312
+ )
313
+ return out
314
+
315
+ return fn
316
+
317
+ _binary_attr = {
318
+ aten.add: "add",
319
+ ops.add: "add",
320
+ aten.sub: "sub",
321
+ ops.sub: "sub",
322
+ }
323
+
324
+ def _is_valid_binary(match, fn):
325
+ binary_nodes = filter_nodes(match.nodes, fn)
326
+ if len(binary_nodes) < 1:
327
+ return False
328
+ if any(
329
+ not (
330
+ hasattr(n.args[0], "meta")
331
+ and isinstance(n.args[0].meta.get("val", None), torch.Tensor)
332
+ )
333
+ or not (
334
+ hasattr(n.args[1], "meta")
335
+ and isinstance(n.args[1].meta.get("val", None), torch.Tensor)
336
+ )
337
+ for n in binary_nodes
338
+ ):
339
+ return False
340
+ # check alpha is one.
341
+ if any(
342
+ get_arg_value(n, 2, kwarg_name="alpha") != 1.0
343
+ and get_arg_value(n, 2, kwarg_name="alpha") is not None
344
+ for n in binary_nodes
345
+ ):
346
+ return False
347
+ if any(
348
+ n.args[0].meta["val"].size() != n.args[1].meta["val"].size()
349
+ or n.args[0].meta["val"].device != n.args[1].meta["val"].device
350
+ or n.args[0].meta["val"].dtype != n.args[1].meta["val"].dtype
351
+ for n in binary_nodes
352
+ ):
353
+ return False
354
+ # check args[0] and args[1] is not same
355
+ if any(n.args[0] == n.args[1] for n in binary_nodes):
356
+ return False
357
+ return True
358
+
359
+ def _is_valid_computation_binary(computation_op, binary_op, other_index=None):
360
+ def fn(match):
361
+ if not _is_single_computation_op(computation_op)(match):
362
+ return False
363
+ if not _is_valid_binary(match, binary_op):
364
+ return False
365
+ return True
366
+
367
+ return fn
368
+
369
+ def _is_valid_computation_binary_inplace(computation_op, binary_op, other_index):
370
+ def fn(match):
371
+ if not _is_valid_computation_binary(computation_op, binary_op)(match):
372
+ return False
373
+ binary_nodes = filter_nodes(match.nodes, binary_op)
374
+ if any(len(n.args[other_index].users) > 1 for n in binary_nodes):
375
+ return False
376
+ if any(
377
+ n.args[other_index].op in ["placeholder", "output"]
378
+ for n in binary_nodes
379
+ ):
380
+ return False
381
+ return True
382
+
383
+ return fn
384
+
385
+ def _register_binary_unary_fusion_lowering(
386
+ pattern,
387
+ computation_op,
388
+ binary_op,
389
+ fusion_op,
390
+ unary_attr=None,
391
+ ):
392
+ @register_lowering_pattern(
393
+ pattern, extra_check=_is_valid_computation_binary(computation_op, binary_op)
394
+ )
395
+ def fn(match, *args, **kwargs):
396
+ other = kwargs.get("other")
397
+ assert isinstance(other, ir.TensorBox)
398
+ binary_attr = _binary_attr[binary_op]
399
+ args_list = list(args)
400
+ computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
401
+ if len(args_list) > 6:
402
+ if unary_attr is not None:
403
+ computation_args += [
404
+ 1.0,
405
+ unary_attr.op_name,
406
+ unary_attr.scalars_attr,
407
+ unary_attr.algorithm_attr,
408
+ ]
409
+ else:
410
+ computation_args += [1.0, None, [], None]
411
+ return L[fusion_op](*computation_args)
412
+
413
+ return fn
414
+
415
+ def _register_binary_unary_maybe_inplace_fusion_lowering(
416
+ pattern,
417
+ computation_op,
418
+ binary_op,
419
+ inplace_fusion_op,
420
+ outplace_fusion_op,
421
+ unary_attr=None,
422
+ other_index=None,
423
+ ):
424
+ @register_lowering_pattern(
425
+ pattern,
426
+ extra_check=_is_valid_computation_binary_inplace(
427
+ computation_op, binary_op, other_index
428
+ ),
429
+ )
430
+ def fn(match, *args, **kwargs):
431
+ other = kwargs.get("other")
432
+ assert isinstance(other, ir.TensorBox)
433
+ binary_attr = _binary_attr[binary_op]
434
+ args_list = list(args)
435
+ computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
436
+ if len(args_list) > 6:
437
+ if unary_attr is not None:
438
+ computation_args += [
439
+ 1.0,
440
+ unary_attr.op_name,
441
+ unary_attr.scalars_attr,
442
+ unary_attr.algorithm_attr,
443
+ ]
444
+ else:
445
+ computation_args += [1.0, None, [], None]
446
+ # Make sure the other is not an alias or mutation (the FX side doesn't have such info).
447
+ other.realize()
448
+ can_be_inplace = not (
449
+ isinstance(other.data, ir.ReinterpretView)
450
+ or isinstance(other.get_layout(), (ir.MutationLayout, ir.AliasedLayout))
451
+ )
452
+ if not can_be_inplace:
453
+ return L[outplace_fusion_op](*computation_args)
454
+ return L[inplace_fusion_op](*computation_args)
455
+
456
+ return fn
457
+
458
+ computation_ops = [
459
+ mkldnn._convolution_pointwise.default,
460
+ mkldnn._linear_pointwise.default,
461
+ mkldnn._convolution_transpose_pointwise.default,
462
+ ]
463
+
464
+ class UnaryAttr:
465
+ def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
466
+ self.op_name = op_name
467
+ self.scalars_attr = scalars_attr if scalars_attr else []
468
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
469
+
470
+ def _register_unary_fusion():
471
+ computation_call_fns = [_conv_call, _linear_call, _conv_transpose_call]
472
+
473
+ def _unary_fusion_patterns(is_bf16):
474
+ replacement_unary_fusion_patterns = {
475
+ UnaryAttr("gelu", algorithm_attr="tanh"): [
476
+ _unary_fusion_pattern(_gelu_fusion_2, call_fn, 4, is_bf16)
477
+ for call_fn in computation_call_fns
478
+ ],
479
+ UnaryAttr("gelu", algorithm_attr="none"): [
480
+ _unary_fusion_pattern(_gelu_fusion_1, call_fn, 2, is_bf16)
481
+ for call_fn in computation_call_fns
482
+ ],
483
+ UnaryAttr("hardswish"): [
484
+ _unary_fusion_pattern(_hardswish_fusion, call_fn, 2, is_bf16)
485
+ for call_fn in computation_call_fns
486
+ ],
487
+ UnaryAttr("hardsigmoid"): [
488
+ _unary_fusion_pattern(_hardsigmoid_fusion, call_fn, 1, is_bf16)
489
+ for call_fn in computation_call_fns
490
+ ],
491
+ UnaryAttr("swish"): [
492
+ _unary_fusion_pattern(_silu_fusion, call_fn, 2, is_bf16)
493
+ for call_fn in computation_call_fns
494
+ ],
495
+ }
496
+ if not is_bf16:
497
+ call_user1 = [call_fn(users=1) for call_fn in computation_call_fns]
498
+ replacement_unary_fusion_patterns.update(
499
+ {
500
+ UnaryAttr("relu"): [
501
+ _combined_fusion(u, aten.relu) for u in call_user1
502
+ ],
503
+ UnaryAttr("sigmoid"): [
504
+ _combined_fusion(u, aten.sigmoid) for u in call_user1
505
+ ],
506
+ UnaryAttr("tanh"): [
507
+ _combined_fusion(u, aten.tanh) for u in call_user1
508
+ ],
509
+ }
510
+ )
511
+
512
+ return replacement_unary_fusion_patterns
513
+
514
+ for is_bf16 in [True, False]:
515
+ replace_patterns = _unary_fusion_patterns(is_bf16)
516
+ for unary_attr, patterns in replace_patterns.items():
517
+ _register_unary_fusion_lowering(
518
+ patterns[0], unary_attr, computation_ops[0], is_bf16
519
+ )
520
+ _register_unary_fusion_lowering(
521
+ patterns[1], unary_attr, computation_ops[1], is_bf16
522
+ )
523
+ _register_unary_fusion_lowering(
524
+ patterns[2], unary_attr, computation_ops[2], is_bf16
525
+ )
526
+ _leaky_relu_patterns = [
527
+ _unary_fusion_pattern(_leaky_relu_fusion, call_fn, 3, is_bf16)
528
+ for call_fn in computation_call_fns
529
+ ]
530
+ for pattern, computation_op in zip(_leaky_relu_patterns, computation_ops):
531
+ _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16)
532
+ hardtanh_patterns = [
533
+ _unary_fusion_pattern(_hardtanh_fusion, call_fn, 1, is_bf16)
534
+ for call_fn in computation_call_fns
535
+ ]
536
+ for pattern, computation_op in zip(hardtanh_patterns, computation_ops):
537
+ _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16)
538
+
539
+ def _register_inplace_fusion():
540
+ binary_ops = [aten.add, ops.add]
541
+ inplace_fusion_op = mkldnn._convolution_pointwise_.binary
542
+ outplace_fusion_op = mkldnn._convolution_pointwise.binary
543
+ conv_call = _conv_call(users=1)
544
+ conv_op = computation_ops[0]
545
+ for binary_op in binary_ops:
546
+ binary_v1 = _binary_fusion_v1(conv_call, binary_op)
547
+ binary_unary_v1 = _combined_fusion(binary_v1, aten.relu)
548
+ _register_binary_unary_maybe_inplace_fusion_lowering(
549
+ binary_unary_v1,
550
+ conv_op,
551
+ binary_op,
552
+ inplace_fusion_op,
553
+ outplace_fusion_op,
554
+ other_index=0,
555
+ unary_attr=UnaryAttr("relu"),
556
+ )
557
+ _register_binary_unary_maybe_inplace_fusion_lowering(
558
+ binary_v1,
559
+ conv_op,
560
+ binary_op,
561
+ inplace_fusion_op,
562
+ outplace_fusion_op,
563
+ other_index=0,
564
+ )
565
+ binary_v2 = _binary_fusion_v2(conv_call, binary_op)
566
+ binary_unary_v2 = _combined_fusion(binary_v2, aten.relu)
567
+ _register_binary_unary_maybe_inplace_fusion_lowering(
568
+ binary_unary_v2,
569
+ conv_op,
570
+ binary_op,
571
+ inplace_fusion_op,
572
+ outplace_fusion_op,
573
+ other_index=1,
574
+ unary_attr=UnaryAttr("relu"),
575
+ )
576
+ _register_binary_unary_maybe_inplace_fusion_lowering(
577
+ binary_v2,
578
+ conv_op,
579
+ binary_op,
580
+ inplace_fusion_op,
581
+ outplace_fusion_op,
582
+ other_index=1,
583
+ )
584
+
585
+ def _register_binary_fusion():
586
+ binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
587
+ fusion_ops = [
588
+ mkldnn._convolution_pointwise.binary,
589
+ mkldnn._linear_pointwise.binary,
590
+ ]
591
+ _computation_user_1 = [_conv_call(users=1), _linear_call(users=1)]
592
+ for computation_call, computation_op, fusion_op in zip(
593
+ _computation_user_1, computation_ops[:-1], fusion_ops
594
+ ):
595
+ for binary_op in binary_ops:
596
+ pattern = _binary_fusion_v2(computation_call, binary_op)
597
+ _register_binary_unary_fusion_lowering(
598
+ pattern, computation_op, binary_op, fusion_op
599
+ )
600
+
601
+ for binary_op in [aten.add, ops.add]:
602
+ pattern = _binary_fusion_v1(computation_call, binary_op)
603
+ _register_binary_unary_fusion_lowering(
604
+ pattern, computation_op, binary_op, fusion_op
605
+ )
606
+
607
+ def _register_binary_unary_fusion():
608
+ binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
609
+ fusion_ops = [mkldnn._convolution_pointwise.binary]
610
+ _computation_user_1 = [_conv_call(users=1)]
611
+ for computation_call, computation_op, fusion_op in zip(
612
+ _computation_user_1, computation_ops[:-1], fusion_ops
613
+ ):
614
+ for binary_op in binary_ops:
615
+ pattern_v1 = _combined_fusion(
616
+ _binary_fusion_v2(computation_call, binary_op), aten.relu
617
+ )
618
+ _register_binary_unary_fusion_lowering(
619
+ pattern_v1,
620
+ computation_op,
621
+ binary_op,
622
+ fusion_op,
623
+ unary_attr=UnaryAttr("relu"),
624
+ )
625
+ for binary_op in [aten.add, ops.add]:
626
+ pattern_v2 = _combined_fusion(
627
+ _binary_fusion_v1(computation_call, binary_op), aten.relu
628
+ )
629
+ _register_binary_unary_fusion_lowering(
630
+ pattern_v2,
631
+ computation_op,
632
+ binary_op,
633
+ fusion_op,
634
+ unary_attr=UnaryAttr("relu"),
635
+ )
636
+
637
+ def _recover_linear():
638
+ # convert reshape+linear+reshape to a single linear for applying fusion path.
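+ # Illustrative shape flow: (B, T, C) -> reshape to (B*T, C) -> linear -> reshape back to (B, T, C_out); the surrounding reshapes can be dropped when the static sizes line up.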
639
+ @register_freezing_graph_pattern(
640
+ CallFunction(
641
+ aten.reshape.default,
642
+ CallFunction(
643
+ mkldnn._linear_pointwise.default,
644
+ CallFunction(
645
+ aten.reshape.default,
646
+ Arg(),
647
+ KeywordArg("reshape_1"),
648
+ _users=MULTIPLE,
649
+ ),
650
+ Arg(),
651
+ Arg(),
652
+ Arg(),
653
+ Arg(),
654
+ Arg(),
655
+ ),
656
+ KeywordArg("reshape_2"),
657
+ ),
658
+ pass_number=1,
659
+ )
660
+ def reshape_linear_reshape_pattern(match, *args, **kwargs):
661
+ reshape_1 = kwargs.get("reshape_1")
662
+ reshape_2 = kwargs.get("reshape_2")
663
+ assert isinstance(reshape_1, list)
664
+ assert isinstance(reshape_2, list)
665
+ assert len(reshape_1) == 2
666
+ dynamic_shapes = not all(
667
+ isinstance(x, int) for x in ([reshape_1[0]] + reshape_2[:-1])
668
+ )
669
+
670
+ graph = match.graph
671
+ reshape_2_node = match.output_node()
672
+ linear_input_node = reshape_2_node.args[0].args[0].args[0]
673
+ # check linear's input's shape[:-1] == reshape_2[:-1]
674
+ # and check product(reshape_2[:-1]) == reshape_1[0]
675
+ if dynamic_shapes:
676
+ # TODO: Haozhe to investigate how to add a guard here
677
+ return
678
+ else:
679
+ can_remove_reshape = linear_input_node.meta.get("val").shape[
680
+ :-1
681
+ ] == torch.Size(reshape_2[:-1])
682
+ can_remove_reshape = can_remove_reshape and (
683
+ reduce(lambda x, y: x * y, reshape_2[:-1]) == reshape_1[0]
684
+ )
685
+
686
+ if can_remove_reshape:
687
+ repl = graph.call_function(mkldnn._linear_pointwise.default, args)
688
+ repl.meta.update(reshape_2_node.meta)
689
+ reshape_2_node.replace_all_uses_with(repl)
690
+ old_linear_node = reshape_2_node.args[0]
691
+ reshape_1_node = old_linear_node.args[0]
692
+ graph.erase_node(reshape_2_node)
693
+ graph.erase_node(old_linear_node)
694
+ if len(reshape_1_node.users) == 0:
695
+ graph.erase_node(reshape_1_node)
696
+
697
+ def is_linear_add_bias(match):
698
+ add_node = match.output_node()
699
+ linear_node = add_node.args[0]
700
+ weight_meta = linear_node.args[1].meta.get("val")
701
+ bias_meta = add_node.args[1].meta.get("val")
702
+ if weight_meta is None or bias_meta is None:
703
+ return False
704
+ return (
705
+ linear_node.args[2] is None
706
+ and bias_meta.dim() == 1
707
+ and bias_meta.size(0) == weight_meta.size(0)
708
+ )
709
+
710
+ # convert linear+bias to a single linear for applying fusion path.
711
+ @register_freezing_graph_pattern(
712
+ CallFunction(
713
+ aten.add.Tensor,
714
+ CallFunction(mkldnn._linear_pointwise.default, *_linear_args),
715
+ Arg(),
716
+ ),
717
+ pass_number=1,
718
+ extra_check=is_linear_add_bias,
719
+ )
720
+ def linear_bias_pattern(match, *args):
721
+ graph = match.graph
722
+ add_node = match.output_node()
723
+ linear_node = add_node.args[0]
724
+ new_args = list(linear_node.args)
725
+ new_args[2] = add_node.args[1]
726
+ repl = graph.call_function(
727
+ mkldnn._linear_pointwise.default, tuple(new_args)
728
+ )
729
+ repl.meta.update(add_node.meta)
730
+ add_node.replace_all_uses_with(repl)
731
+ match.erase_nodes(graph)
732
+
733
+ def _is_packable_mkldnn_rnn_layer(match):
734
+ lstm_node = match.output_node()
735
+ POS_WEIGHTS = [1, 2]
736
+ POS_INPUTS = [0, 5, 6]
737
+ POS_ARGS = POS_WEIGHTS + POS_INPUTS
738
+ # Weights should be Constant
739
+ if any(
740
+ lstm_node.args[POS_WEIGHT].op != "get_attr" for POS_WEIGHT in POS_WEIGHTS
741
+ ):
742
+ return False
743
+
744
+ # Meta info for weights and inputs should be available
745
+ if any(lstm_node.args[POS_ARG].meta.get("val") is None for POS_ARG in POS_ARGS):
746
+ return False
747
+
748
+ # Check device
749
+ if any(
750
+ lstm_node.args[POS_ARG].meta.get("val").device.type != "cpu"
751
+ for POS_ARG in POS_ARGS
752
+ ):
753
+ return False
754
+
755
+ # Check dtype
756
+ if any(
757
+ lstm_node.args[POS_ARG].meta.get("val").dtype == torch.bfloat16
758
+ and not mkldnn._is_mkldnn_bf16_supported()
759
+ for POS_ARG in POS_ARGS
760
+ ):
761
+ return False
762
+
763
+ return True
764
+
765
+ def _is_packable_convolution(match):
766
+ """
767
+ Check if the node is supported for MKLDNN convolution.
768
+ """
769
+ conv_node = match.output_node()
770
+ input_meta_value = conv_node.args[0].meta.get("val")
771
+ weight_meta_value = conv_node.args[1].meta.get("val")
772
+ if input_meta_value is None or weight_meta_value is None:
773
+ return False
774
+ input_size = input_meta_value.shape
775
+ if conv_node.args[1].op != "get_attr":
776
+ return False
777
+ for meta_value in [input_meta_value, weight_meta_value]:
778
+ if (
779
+ meta_value is None
780
+ or meta_value.device.type != "cpu"
781
+ or meta_value.dim() != 4
782
+ ):
783
+ return False
784
+ if (
785
+ input_meta_value.dtype == torch.bfloat16
786
+ or weight_meta_value.dtype == torch.bfloat16
787
+ ):
788
+ if not mkldnn._is_mkldnn_bf16_supported():
789
+ return False
790
+ is_transposed = conv_node.args[-3]
791
+ if is_transposed:
792
+ # TODO: Support dynamic shape case for MKLDNN conv transpose.
793
+ if has_free_symbols(input_size):
794
+ return False
795
+ groups = conv_node.args[-1]
796
+ in_channels = weight_meta_value.size(0)
797
+ # mkldnn doesn't support grouped depthwise conv transpose.
798
+ if groups > 1 and groups == in_channels:
799
+ return False
800
+ # Port from: aten/src/ATen/native/Convolution.cpp:is_output_padding_big
801
+ output_paddings = conv_node.args[-2]
802
+ strides = conv_node.args[3]
803
+ if any(
804
+ output_padding >= stride
805
+ for output_padding, stride in zip(output_paddings, strides)
806
+ ):
807
+ return False
808
+ return True
809
+
810
+ def _is_packable_linear(match):
811
+ """
812
+ Check if the node is supported for MKLDNN linear.
813
+ """
814
+ linear_node = match.output_node()
815
+ # weight_idx is 1 for aten.mm and is 2 for aten.addmm
816
+ weight_idx = 2 if linear_node.target == aten.addmm.default else 1
817
+ if linear_node.args[weight_idx].op != "get_attr":
818
+ return False
819
+ input_meta_value = linear_node.args[weight_idx - 1].meta.get("val")
820
+ weight_meta_value = linear_node.args[weight_idx].meta.get("val")
821
+ if input_meta_value is None or weight_meta_value is None:
822
+ return False
823
+ batch_size = input_meta_value.shape[0]
824
+ is_bf16_weight = weight_meta_value.dtype == torch.bfloat16
825
+ # for fp32, mkl should be enabled and batch_size should not be a free symbol.
826
+ if not is_bf16_weight and (
827
+ (not torch._C.has_mkl) or has_free_symbols(batch_size)
828
+ ):
829
+ return False
830
+ for meta_value in [input_meta_value, weight_meta_value]:
831
+ if (
832
+ meta_value is None
833
+ or meta_value.device.type != "cpu"
834
+ or meta_value.dim() != 2
835
+ ):
836
+ return False
837
+ if weight_idx == 2:
838
+ bias_meta_value = linear_node.args[0].meta.get("val")
839
+ if (
840
+ bias_meta_value is None
841
+ or bias_meta_value.device.type != "cpu"
842
+ or bias_meta_value.dim() != 1
843
+ or bias_meta_value.size(0) != weight_meta_value.size(1)
844
+ ):
845
+ return False
846
+
847
+ if (
848
+ input_meta_value.dtype == torch.bfloat16
849
+ or weight_meta_value.dtype == torch.bfloat16
850
+ ):
851
+ if not mkldnn._is_mkldnn_bf16_supported():
852
+ return False
853
+ return True
854
+
855
+ _aten_conv_args = (
856
+ Arg(),
857
+ Arg(),
858
+ Arg(),
859
+ Arg(),
860
+ Arg(),
861
+ Arg(),
862
+ KeywordArg("is_transposed"),
863
+ Arg(),
864
+ Arg(),
865
+ )
866
+
867
+ _aten_mkldnn_rnn_layer_args = (
868
+ Arg(), # input
869
+ Arg(), # weight0
870
+ Arg(), # weight1
871
+ Arg(), # weight2
872
+ Arg(), # weight3
873
+ Arg(), # hx_
874
+ Arg(), # cx_
875
+ KeywordArg("reverse"), # reverse
876
+ Arg(), # batch_sizes
877
+ Arg(), # mode
878
+ Arg(), # hidden_size
879
+ Arg(), # num_layers
880
+ Arg(), # has_biases
881
+ Arg(), # bidirectional
882
+ Arg(), # batch_first
883
+ Arg(), # train
884
+ )
885
+
886
+ def _register_weight_pack_pass():
887
+ @register_freezing_graph_pattern(
888
+ CallFunction(aten.convolution.default, *_aten_conv_args),
889
+ extra_check=_is_packable_convolution,
890
+ )
891
+ def convolution(match, *args, **kwargs):
892
+ is_transposed = kwargs.get("is_transposed")
893
+ assert isinstance(is_transposed, bool)
894
+ graph = match.graph
895
+ conv_node = match.output_node()
896
+ input_size = conv_node.args[0].meta.get("val").shape
897
+ with graph.inserting_before(conv_node):
898
+ constant_args = [args[4], args[3], args[5], args[-1]]
899
+ packed_weight_op = mkldnn._reorder_convolution_weight
900
+ packed_conv_op = mkldnn._convolution_pointwise.default
901
+ if is_transposed:
902
+ constant_args.insert(1, args[-2]) # output_padding
903
+ packed_weight_op = mkldnn._reorder_convolution_transpose_weight
904
+ packed_conv_op = mkldnn._convolution_transpose_pointwise.default
905
+ if not has_free_symbols(input_size):
906
+ packed_weight_inputs = (
907
+ (args[1],) + tuple(constant_args) + (input_size,)
908
+ )
909
+ packed_weight_node = graph.create_node(
910
+ "call_function", packed_weight_op, args=packed_weight_inputs
911
+ )
912
+ else:
913
+ assert not is_transposed
914
+ # For the dynamic shape case, we need to pack the weight at runtime.
915
+ packed_weight_node = args[1]
916
+ packed_conv_inputs = (
917
+ (args[0], packed_weight_node, args[2])
918
+ + tuple(constant_args)
919
+ + ("none", [], "")
920
+ )
921
+ packed_conv_node = graph.create_node(
922
+ "call_function", packed_conv_op, tuple(packed_conv_inputs)
923
+ )
924
+ conv_node.replace_all_uses_with(packed_conv_node)
925
+ packed_conv_node.meta.update(conv_node.meta)
926
+ graph.erase_node(conv_node)
927
+
928
+ @register_freezing_graph_pattern(
929
+ CallFunction(aten.mkldnn_rnn_layer.default, *_aten_mkldnn_rnn_layer_args),
930
+ extra_check=_is_packable_mkldnn_rnn_layer,
931
+ )
932
+ def mkldnn_rnn_layer(match, *args, **kwargs):
933
+ def get_item(graph, node, index):
934
+ return graph.call_function(operator.getitem, (node, index))
935
+
936
+ graph = match.graph
937
+ lstm_node = match.output_node()
938
+ input = args[0]
939
+ weight0, weight1 = args[1:3]
940
+ reverse = kwargs.get("reverse")
941
+ packed_lstm_op = aten.mkldnn_rnn_layer.default
942
+ hidden_size = args[9]
943
+ has_biases = args[11]
944
+ batch_first = args[13]
945
+ with graph.inserting_before(lstm_node):
946
+ packed_weight_op = mkldnn._reorder_mkldnn_rnn_layer_weight.default
947
+ packed_weight_inputs = (
948
+ weight0,
949
+ weight1,
950
+ hidden_size,
951
+ reverse,
952
+ has_biases,
953
+ batch_first,
954
+ )
955
+ packed_weight_node = graph.create_node(
956
+ "call_function", packed_weight_op, packed_weight_inputs, {}, "name"
957
+ )
958
+ packed_weight_items = [
959
+ get_item(graph, packed_weight_node, i) for i in range(2)
960
+ ]
961
+ pack_lstm_inputs = (
962
+ args[0],
963
+ *packed_weight_items,
964
+ args[3],
965
+ args[4],
966
+ args[5],
967
+ args[6],
968
+ reverse,
969
+ *args[7:],
970
+ )
971
+
972
+ packed_lstm_node = graph.create_node(
973
+ "call_function", packed_lstm_op, args=pack_lstm_inputs
974
+ )
975
+ lstm_node.replace_all_uses_with(packed_lstm_node)
976
+ packed_lstm_node.meta.update(lstm_node.meta)
977
+ graph.erase_node(lstm_node)
978
+
979
+ @register_freezing_graph_pattern(
980
+ CallFunction(aten.addmm.default, Arg(), Arg(), Arg()),
981
+ extra_check=_is_packable_linear,
982
+ )
983
+ @register_freezing_graph_pattern(
984
+ CallFunction(aten.mm.default, Arg(), Arg()),
985
+ extra_check=_is_packable_linear,
986
+ )
987
+ def linear(match, *args, **kwargs):
988
+ graph = match.graph
989
+ linear_node = match.output_node()
990
+ input = args[0] if linear_node.target == aten.mm.default else args[1]
991
+ bias = None if linear_node.target == aten.mm.default else args[0]
992
+ weight = args[1] if linear_node.target == aten.mm.default else args[2]
993
+ with graph.inserting_before(linear_node):
994
+ transpose_weight_node = graph.create_node(
995
+ "call_function", aten.permute.default, (weight, (1, 0))
996
+ )
997
+ weight_dtype = weight.meta.get("val").dtype
998
+ is_bf16_weight = weight_dtype == torch.bfloat16
999
+ batch_size = input.meta.get("val").shape[0]
1000
+ if has_free_symbols(batch_size):
1001
+ assert (
1002
+ is_bf16_weight
1003
+ ), f"only bf16 weight prepacking supports dynamic shape inputs but got {weight_dtype}"
1004
+ # For the bfloat16 dynamic shape path, use the input size hint to pack the weight for better performance.
1005
+ packed_weight_inputs = (
1006
+ transpose_weight_node,
1007
+ batch_size.node.shape_env.size_hint(batch_size.node.expr)
1008
+ if has_free_symbols(batch_size)
1009
+ else batch_size,
1010
+ )
1011
+ packed_weight_op = (
1012
+ mkldnn._reorder_linear_weight
1013
+ if is_bf16_weight
1014
+ else torch.ops.mkl._mkl_reorder_linear_weight
1015
+ )
1016
+ packed_weight_node = graph.create_node(
1017
+ "call_function", packed_weight_op, args=packed_weight_inputs
1018
+ )
1019
+
1020
+ packed_linear_inputs: Tuple[Any, ...] = (input, packed_weight_node)
1021
+ if is_bf16_weight:
1022
+ packed_linear_inputs += (bias, "none", [], "")
1023
+ packed_linear_op = mkldnn._linear_pointwise.default
1024
+ else:
1025
+ packed_linear_inputs += (transpose_weight_node, bias, batch_size)
1026
+ packed_linear_op = torch.ops.mkl._mkl_linear
1027
+ packed_linear_node = graph.create_node(
1028
+ "call_function", packed_linear_op, packed_linear_inputs
1029
+ )
1030
+ linear_node.replace_all_uses_with(packed_linear_node)
1031
+ packed_linear_node.meta.update(linear_node.meta)
1032
+ graph.erase_node(linear_node)
1033
+
1034
+ def _eliminate_duplicate_packed_nodes(gm):
1035
+ """
1036
+ Combine packed weight nodes with the same inputs to reduce memory usage.
1037
+ for example:
1038
+ class Model(nn.Module):
1039
+ def __init__(self):
1040
+ super().__init__()
1041
+ self.linear = nn.Linear(32, 32, bias=True)
1042
+
1043
+ def forward(self, x):
1044
+ return self.linear(self.linear(x))
1045
+
1046
+ the packed weight nodes above are duplicates if the two linear calls have the same input size.
1047
+ """
1048
+ if not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()):
1049
+ return gm
1050
+
1051
+ packed_weight_ops = [
1052
+ torch._C._nn.mkldnn_reorder_conv2d_weight,
1053
+ mkldnn._reorder_convolution_transpose_weight,
1054
+ mkldnn._reorder_linear_weight,
1055
+ mkldnn._reorder_mkldnn_rnn_layer_weight,
1056
+ ]
1057
+ if torch._C.has_mkl:
1058
+ packed_weight_ops.append(torch.ops.mkl._mkl_reorder_linear_weight)
1059
+
1060
+ for node in gm.graph.nodes:
1061
+ if node.target in packed_weight_ops and len(node.args[0].users) > 1:
1062
+ for user_node in list(node.args[0].users.keys()):
1063
+ if (
1064
+ user_node.target == node.target
1065
+ and user_node != node
1066
+ and user_node.args == node.args
1067
+ ):
1068
+ user_node.replace_all_uses_with(node)
1069
+ gm.graph.erase_node(user_node)
1070
+
1071
+ @functools.lru_cache(None)
1072
+ def _mkldnn_fusion_init():
1073
+ if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available():
1074
+ _register_unary_fusion()
1075
+ _register_inplace_fusion()
1076
+ _register_binary_unary_fusion()
1077
+ _register_binary_fusion()
1078
+ _register_quantization_lowerings()
1079
+
1080
+ @functools.lru_cache(None)
1081
+ def _mkldnn_weight_pack_init():
1082
+ if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available():
1083
+ _register_weight_pack_pass()
1084
+ _recover_linear()
1085
+ _register_quantization_weight_pack_pass()
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py ADDED
@@ -0,0 +1,469 @@
1
+ import functools
2
+ from itertools import chain
3
+ from typing import List, Optional
4
+
5
+ import torch
6
+ from torch import Tensor
7
+ from torch._inductor import utils
8
+ from torch.utils._mode_utils import no_dispatch
9
+ from torch.utils._triton import has_triton
10
+
11
+ from ..pattern_matcher import fwd_only, joint_fwd_bwd, Match, register_replacement
12
+
13
+ aten = torch.ops.aten
14
+
15
+
16
+ def fetch_fake_tensors(match, kwarg_names) -> List[Tensor]:
17
+ kwargs = match.kwargs
18
+ return [kwargs[name].meta["val"] for name in kwarg_names]
19
+
20
+
21
+ def unwrap_fake_args(*arg_names):
22
+ def decorator(func):
23
+ def wrapper(match):
24
+ fake_tensors = fetch_fake_tensors(match, arg_names)
25
+ return func(*fake_tensors)
26
+
27
+ return wrapper
28
+
29
+ return decorator
30
+
31
+
32
+ def get_alignment_size(x: Tensor) -> int:
33
+ if x.dtype == torch.float16 or x.dtype == torch.half or x.dtype == torch.bfloat16:
34
+ return 8
35
+ elif x.dtype == torch.float32 or x.dtype == torch.float:
36
+ return 4
37
+ else:
38
+ return 0
39
+
40
+
41
+ def check_device(a: Tensor, b: Tensor) -> bool:
42
+ return a.is_cuda and b.is_cuda
43
+
44
+
45
+ def check_dtype(a: Tensor, b: Tensor) -> bool:
46
+ return a.is_floating_point() and b.is_floating_point()
47
+
48
+
49
+ def is_symbolic(a: Optional[Tensor]) -> bool:
50
+ return a is not None and any(
51
+ isinstance(x, torch.SymInt) for x in chain(a.size(), a.stride())
52
+ )
53
+
54
+
55
+ def any_is_symbolic(*args: Optional[Tensor]) -> bool:
56
+ return any(is_symbolic(a) for a in args)
57
+
58
+
59
+ def should_pad_common(
60
+ mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
61
+ ) -> bool:
62
+ return (
63
+ torch._inductor.config.shape_padding
64
+ and check_device(mat1, mat2)
65
+ and check_dtype(mat1, mat2)
66
+ and not any_is_symbolic(mat1, mat2, input)
67
+ )
68
+
69
+
70
+ def get_padded_length(x: int, alignment_size) -> int:
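+ # e.g. get_padded_length(10, 8) == 6, since 16 is the next multiple of 8 (illustrative).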
71
+ if alignment_size == 0 or x % alignment_size == 0:
72
+ return 0
73
+ return int((x // alignment_size + 1) * alignment_size) - x
74
+
75
+
76
+ def pad_dim(x: Tensor, padded_length: int, dim: int) -> Tensor:
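+ # e.g. pad_dim(torch.ones(3, 5), 3, dim=1) yields a (3, 8) tensor whose last 3 columns are zeros (illustrative).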
77
+ if padded_length == 0:
78
+ return x
79
+ pad = x.new_zeros(*x.shape[:dim], padded_length, *x.shape[dim + 1 :])
80
+ return torch.cat([x, pad], dim=dim)
81
+
82
+
83
+ def addmm_pattern(
84
+ input: Tensor, mat1: Tensor, mat2: Tensor, beta: float, alpha: float
85
+ ) -> Tensor:
86
+ return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
87
+
88
+
89
+ def should_pad_addmm(match: Match) -> bool:
90
+ mat1, mat2, input = fetch_fake_tensors(match, ("mat1", "mat2", "input"))
91
+ return should_pad_common(mat1, mat2, input) and should_pad_bench(
92
+ mat1, mat2, torch.ops.aten.addmm, input=input
93
+ )
94
+
95
+
96
+ def addmm_replace(
97
+ input: Optional[Tensor], mat1: Tensor, mat2: Tensor, beta=1.0, alpha=1.0
98
+ ) -> Tensor:
99
+ m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
100
+ k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
101
+ n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))
102
+
103
+ if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
104
+ return pad_addmm(
105
+ input,
106
+ mat1,
107
+ mat2,
108
+ m_padded_length,
109
+ k_padded_length,
110
+ n_padded_length,
111
+ beta,
112
+ alpha,
113
+ )
114
+
115
+ return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
116
+
117
+
118
+ def pad_addmm(
119
+ input: Optional[Tensor],
120
+ mat1: Tensor,
121
+ mat2: Tensor,
122
+ m_padded_length: int,
123
+ k_padded_length: int,
124
+ n_padded_length: int,
125
+ beta=1.0,
126
+ alpha=1.0,
127
+ ):
128
+ # addmm decomp with padding will go through pad_addmm multiple times if multiple dimensions need to be padded
129
+ if k_padded_length != 0:
130
+ mat1 = pad_dim(mat1, k_padded_length, 1)
131
+ mat2 = pad_dim(mat2, k_padded_length, 0)
132
+ elif n_padded_length != 0:
133
+ mat2 = pad_dim(mat2, n_padded_length, 1)
134
+ elif m_padded_length != 0:
135
+ mat1 = pad_dim(mat1, m_padded_length, 0)
136
+
137
+ # the add broadcasts, so we only pad if the dimension != 1
138
+ if input is not None and k_padded_length == 0:
139
+ if n_padded_length != 0:
140
+ if input.dim() == 2 and input.shape[1] != 1:
141
+ input = pad_dim(input, n_padded_length, 1)
142
+ elif input.dim() == 1 and input.shape[0] != 1:
143
+ input = pad_dim(input, n_padded_length, 0)
144
+ elif m_padded_length != 0 and input.dim() == 2 and input.shape[0] != 1:
145
+ input = pad_dim(input, m_padded_length, 0)
146
+
147
+ if k_padded_length != 0:
148
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)
149
+ elif n_padded_length != 0:
150
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
151
+ :, :-n_padded_length
152
+ ]
153
+ else:
154
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
155
+ :-m_padded_length, :
156
+ ]
157
+
158
+
159
+ def is_mm_compute_bound(M: int, K: int, N: int, dtype: torch.dtype) -> bool:
160
+ denominator = M * K + N * K + M * N
161
+ if denominator == 0:
162
+ return False
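+ # M*N*K is proportional to the FLOP count and M*K + N*K + M*N to the elements read/written, so this ratio roughly measures FLOPs per element moved (illustrative rationale).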
163
+ arithmetic_intensity = (M * N * K) / denominator
164
+
165
+ # Fails with AMD
166
+ try:
167
+ machine_balance = (
168
+ 1000 * utils.get_device_tflops(dtype)
169
+ ) / utils.get_gpu_dram_gbps()
170
+ except Exception:
171
+ return True
172
+
173
+ # dram_gbps might be underestimating bandwidth because of cache.
174
+ # if we estimate machine balance too low we might miss some speedups,
175
+ # if we estimate too high there will be an unnecessary compilation time increase.
176
+ # TODO - finetune coefficient here. As a reference point, Triton mm model assumes
177
+ # 80% of reads are in cache and cache is 4x faster than dram_gbps
178
+ machine_balance = machine_balance * 0.5
179
+
180
+ return arithmetic_intensity > machine_balance
181
+
182
+
183
+ @functools.lru_cache(None)
184
+ def get_pad_cache():
185
+ return torch._inductor.codecache.LocalCache()
186
+
187
+
188
+ def get_cached_should_pad(key):
189
+ return get_pad_cache().lookup(key)
190
+
191
+
192
+ def set_cached_should_pad(key, value):
193
+ return get_pad_cache().set_value(key, value=value)
194
+
195
+
196
+ def should_pad_bench_key(
197
+ mat1: Tensor, mat2: Tensor, op, input: Optional[Tensor] = None
198
+ ) -> str:
199
+ def tensor_key(t):
200
+ return (t.shape, t.stride(), t.dtype)
201
+
202
+ tf32_key = (
203
+ None if mat1.dtype != torch.float32 else torch.backends.cuda.matmul.allow_tf32
204
+ )
205
+ key = (
206
+ tensor_key(mat1),
207
+ tensor_key(mat2),
208
+ op,
209
+ input if input is None else tensor_key(input),
210
+ tf32_key,
211
+ )
212
+
213
+ return str(key)
214
+
215
+
216
+ def should_pad_bench(
217
+ mat1: Tensor, mat2: Tensor, op, input: Optional[Tensor] = None
218
+ ) -> bool:
219
+ if not has_triton():
220
+ return False
221
+
222
+ do_bench = functools.partial(
223
+ utils.do_bench,
224
+ warmup=5,
225
+ )
226
+
227
+ with no_dispatch():
228
+ if op is torch.ops.aten.mm or op is torch.ops.aten.addmm:
229
+ m = mat1.shape[0]
230
+ k = mat1.shape[1]
231
+ n = mat2.shape[1]
232
+
233
+ m_padded_length = get_padded_length(m, get_alignment_size(mat1))
234
+ k_padded_length = get_padded_length(k, get_alignment_size(mat1))
235
+ n_padded_length = get_padded_length(n, get_alignment_size(mat2))
236
+ elif op is torch.ops.aten.bmm:
237
+ m = mat1.shape[1]
238
+ k = mat2.shape[2]
239
+ n = mat2.shape[2]
240
+
241
+ m_padded_length = get_padded_length(m, get_alignment_size(mat1))
242
+ k_padded_length = get_padded_length(k, get_alignment_size(mat1))
243
+ n_padded_length = get_padded_length(n, get_alignment_size(mat2))
244
+ else:
245
+ return False
246
+
247
+ if m_padded_length == k_padded_length == n_padded_length == 0:
248
+ return False
249
+
250
+ if not is_mm_compute_bound(m, k, n, mat1.dtype):
251
+ return False
252
+
253
+ # We don't want to look up the cache for cases that are trivially false
254
+ # since it does file io
255
+ key = should_pad_bench_key(mat1, mat2, op, input)
256
+
257
+ cached_pad = get_cached_should_pad(key)
258
+ if cached_pad is not None:
259
+ return cached_pad
260
+
261
+ mat1 = torch.randn_like(mat1)
262
+ mat2 = torch.randn_like(mat2)
263
+ if op is torch.ops.aten.bmm or op is torch.ops.aten.mm:
264
+ ori_time = do_bench(
265
+ lambda: op(mat1, mat2),
266
+ )
267
+ else:
268
+ if input is not None:
269
+ input = torch.randn_like(input)
270
+ ori_time = do_bench(
271
+ lambda: op(input, mat1, mat2),
272
+ )
273
+
274
+ mat1_pad = torch.randn_like(mat1)
275
+ mat2_pad = torch.randn_like(mat2)
276
+
277
+ if op is torch.ops.aten.addmm:
278
+ input_pad = None
279
+ if input is not None and input.is_cuda:
280
+ input_pad = torch.randn_like(input)
281
+ pad_time = do_bench(
282
+ lambda: pad_addmm(
283
+ input_pad,
284
+ mat1_pad,
285
+ mat2_pad,
286
+ m_padded_length,
287
+ k_padded_length,
288
+ n_padded_length,
289
+ ),
290
+ )
291
+ elif op is torch.ops.aten.mm:
292
+ pad_time = do_bench(
293
+ lambda: pad_mm(
294
+ mat1_pad,
295
+ mat2_pad,
296
+ m_padded_length,
297
+ k_padded_length,
298
+ n_padded_length,
299
+ ),
300
+ )
301
+ else:
302
+ pad_time = do_bench(
303
+ lambda: pad_bmm(
304
+ mat1_pad,
305
+ mat2_pad,
306
+ m_padded_length,
307
+ k_padded_length,
308
+ n_padded_length,
309
+ ),
310
+ )
311
+
312
+ # Shape padding introduces additional memory ops. Based on microbenchmarks, 1.1x represents a reasonable
313
+ # tradeoff between performance improvement from shape padding and overhead from additional memory ops
314
+ # TODO: Build a learned model which would be better than this heuristic
315
+ should_pad = ori_time > pad_time * 1.1
316
+ set_cached_should_pad(key, should_pad)
317
+
318
+ return should_pad
319
+
320
+
321
+ def mm_pattern(mat1: Tensor, mat2: Tensor) -> Tensor:
322
+ return aten.mm(mat1, mat2)
323
+
324
+
325
+ def should_pad_mm(match: Match) -> bool:
326
+ mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
327
+ return should_pad_common(mat1, mat2) and should_pad_bench(
328
+ mat1, mat2, torch.ops.aten.mm
329
+ )
330
+
331
+
332
+ def mm_replace(mat1: Tensor, mat2: Tensor) -> Tensor:
333
+ m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
334
+ k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
335
+ n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))
336
+
337
+ return pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)
338
+
339
+
340
+ def pad_mm(
341
+ mat1: Tensor,
342
+ mat2: Tensor,
343
+ m_padded_length: int,
344
+ k_padded_length: int,
345
+ n_padded_length: int,
346
+ ) -> Tensor:
347
+ # mm_replace will go through pad_mm multiple times if multiple dimensions need to be padded
348
+ if k_padded_length != 0:
349
+ mat1 = pad_dim(mat1, k_padded_length, 1)
350
+ mat2 = pad_dim(mat2, k_padded_length, 0)
351
+ return torch.ops.aten.mm(mat1, mat2)
352
+ elif n_padded_length != 0:
353
+ mat2 = pad_dim(mat2, n_padded_length, 1)
354
+ return torch.ops.aten.mm(mat1, mat2)[:, :-n_padded_length]
355
+ else:
356
+ mat1 = pad_dim(mat1, m_padded_length, 0)
357
+ return torch.ops.aten.mm(mat1, mat2)[:-m_padded_length, :]
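+
+ # --- Illustrative check (added for exposition; not part of the original file) ---
+ # Why padding is safe: zero-padding the shared K dimension only adds terms that are zero,
+ # so the product is unchanged, while padding M or N just appends rows/columns that the
+ # slicing above removes again. A small self-contained check of the K case:
+ def _example_pad_k_preserves_mm() -> bool:
+     a, b = torch.randn(3, 5), torch.randn(5, 7)
+     a_pad = torch.nn.functional.pad(a, (0, 3))        # (3, 8): pad K on mat1
+     b_pad = torch.nn.functional.pad(b, (0, 0, 0, 3))  # (8, 7): pad K on mat2
+     return torch.allclose(a @ b, a_pad @ b_pad)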
358
+
359
+
360
+ def bmm_pattern(mat1: Tensor, mat2: Tensor) -> Tensor:
361
+ return aten.bmm(mat1, mat2)
362
+
363
+
364
+ def should_pad_bmm(match: Match) -> bool:
365
+ mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
366
+ return should_pad_common(mat1, mat2) and should_pad_bench(
367
+ mat1, mat2, torch.ops.aten.bmm
368
+ )
369
+
370
+
371
+ def bmm_replace(mat1: Tensor, mat2: Tensor) -> Tensor:
372
+ m_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
373
+ k_padded_length = get_padded_length(mat1.shape[2], get_alignment_size(mat1))
374
+ n_padded_length = get_padded_length(mat2.shape[2], get_alignment_size(mat2))
375
+
376
+ if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
377
+ return pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)
378
+
379
+ return aten.bmm(mat1, mat2)
380
+
381
+
382
+ def pad_bmm(
383
+ mat1: Tensor,
384
+ mat2: Tensor,
385
+ m_padded_length: int,
386
+ k_padded_length: int,
387
+ n_padded_length: int,
388
+ ) -> Tensor:
389
+ # bmm_replace will go through pad_bmm multiple times if multiple dimensions need to be padded
390
+ if k_padded_length != 0:
391
+ mat1 = pad_dim(mat1, k_padded_length, 2)
392
+ mat2 = pad_dim(mat2, k_padded_length, 1)
393
+
394
+ return aten.bmm(mat1, mat2)
395
+ elif n_padded_length != 0:
396
+ mat2 = pad_dim(mat2, n_padded_length, 2)
397
+ return aten.bmm(mat1, mat2)[:, :, :-n_padded_length].contiguous()
398
+ else:
399
+ mat1 = pad_dim(mat1, m_padded_length, 1)
400
+ return aten.bmm(mat1, mat2)[:, :-m_padded_length, :].contiguous()
401
+
402
+
403
+ @functools.lru_cache(None)
404
+ def _pad_mm_init():
405
+ from .joint_graph import patterns
406
+
407
+ if torch.cuda.is_available():
408
+ # workaround https://github.com/pytorch/pytorch/issues/97894
409
+ device = "cuda"
410
+ else:
411
+ device = "cpu"
412
+
413
+ # sizes/values don't actually matter for initial trace
414
+ # once we get a possible match we re-trace with the actual values and verify the match still holds
415
+
416
+ dim2a = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)
417
+ dim2b = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)
418
+
419
+ dim3a = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)
420
+ dim3b = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)
421
+
422
+ dim1a = functools.partial(torch.empty, (4), device=device, requires_grad=True)
423
+
424
+ # workaround https://github.com/pytorch/pytorch/issues/97894
425
+ # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
426
+ rep = {"beta": 0.213377, "alpha": 0.113377}
427
+
428
+ for pattern, replacement, args, workaround, extra_check in [
429
+ (
430
+ mm_pattern,
431
+ mm_replace,
432
+ [dim2a(), dim2b()],
433
+ {},
434
+ should_pad_mm,
435
+ ),
436
+ (
437
+ bmm_pattern,
438
+ bmm_replace,
439
+ [dim3a(), dim3b()],
440
+ {},
441
+ should_pad_bmm,
442
+ ),
443
+ (
444
+ addmm_pattern,
445
+ addmm_replace,
446
+ [dim1a(), dim2a(), dim2b()],
447
+ rep,
448
+ should_pad_addmm,
449
+ ),
450
+ ]:
451
+ assert isinstance(workaround, dict) # mypy is unable to infer the type properly
452
+ register_replacement(
453
+ pattern,
454
+ replacement,
455
+ args,
456
+ joint_fwd_bwd,
457
+ patterns,
458
+ extra_check=extra_check,
459
+ scalar_workaround=workaround,
460
+ )
461
+ register_replacement(
462
+ pattern,
463
+ replacement,
464
+ args,
465
+ fwd_only,
466
+ patterns,
467
+ extra_check=extra_check,
468
+ scalar_workaround=workaround,
469
+ )
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py ADDED
@@ -0,0 +1,1188 @@
1
+ import functools
2
+ import itertools
3
+ import logging
4
+ import operator
5
+ from collections import Counter, defaultdict, namedtuple
6
+ from typing import Any, Dict, List, Optional, Set, Union
7
+
8
+ from sympy import Expr
9
+
10
+ import torch
11
+ import torch._inductor as inductor
12
+ import torch.utils._pytree as pytree
13
+ from torch import fx
14
+ from torch._decomp import register_decomposition
15
+
16
+ from torch._higher_order_ops.triton_kernel_wrap import triton_kernel_wrapper_functional
17
+ from torch._prims_common import is_boolean_dtype, is_expandable_to, is_integer_dtype
18
+
19
+ from torch._utils_internal import print_graph
20
+ from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq
21
+ from torch.fx.immutable_collections import immutable_dict
22
+
23
+ from .. import config, inductor_prims, ir, pattern_matcher
24
+ from ..fx_utils import FakeTensorUpdater, get_fake_args_kwargs, get_node_storage
25
+
26
+ from ..lowering import (
27
+ inplaceable_foreach_ops as inplaceable_foreach_ops_lowerings,
28
+ lowerings as L,
29
+ )
30
+ from ..pattern_matcher import (
31
+ _return_true,
32
+ Arg,
33
+ CallFunction,
34
+ filter_nodes,
35
+ get_arg_value,
36
+ Ignored,
37
+ init_once_fakemode,
38
+ KeywordArg,
39
+ ListOf,
40
+ Match,
41
+ MULTIPLE,
42
+ PatternMatcherPass,
43
+ register_graph_pattern,
44
+ stable_topological_sort,
45
+ )
46
+ from ..utils import decode_device, is_pointwise_use
47
+ from ..virtualized import V
48
+ from .group_batch_fusion import group_batch_fusion_passes
49
+
50
+ log = logging.getLogger(__name__)
51
+ aten = torch.ops.aten
52
+ prims = torch.ops.prims
53
+
54
+ # First pass_patterns[0] are applied, then [1], then [2]
55
+ pass_patterns = [
56
+ PatternMatcherPass(),
57
+ PatternMatcherPass(),
58
+ PatternMatcherPass(),
59
+ ]
60
+ # patterns applied only in inference
61
+ inference_patterns = PatternMatcherPass()
62
+
63
+
64
+ def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
65
+ """
66
+ Passes that run after grad. This is called once on the forwards
67
+ graph and once on the backwards graph.
68
+
69
+ The IR here has been normalized and functionalized.
70
+ """
71
+ if config.dce:
72
+ # has some issues with mutation in inference mode
73
+ gm.graph.eliminate_dead_code()
74
+
75
+ if is_inference and config.reorder_for_locality:
76
+ reorder_for_locality(gm.graph)
77
+
78
+ fake_tensor_updater = FakeTensorUpdater(gm.graph)
79
+
80
+ if config.post_grad_custom_pre_pass is not None:
81
+ config.post_grad_custom_pre_pass(gm.graph)
82
+
83
+ if config.pattern_matcher:
84
+ lazy_init()
85
+
86
+ group_batch_fusion_passes(gm.graph, pre_grad=False)
87
+ remove_noop_ops(gm.graph)
88
+ print_graph(gm.graph, "Before split cat in post grad pass.")
89
+ for patterns in pass_patterns:
90
+ patterns.apply(gm.graph)
91
+ print_graph(
92
+ gm.graph,
93
+ f"Apply split cat pattern matcher {patterns.__class__.__name__} in post grad.",
94
+ )
95
+ if is_inference:
96
+ inference_patterns.apply(gm.graph)
97
+
98
+ if config.post_grad_custom_post_pass is not None:
99
+ config.post_grad_custom_post_pass(gm.graph)
100
+
101
+ stable_topological_sort(gm.graph)
102
+
103
+ move_constructors_to_cuda(gm.graph)
104
+
105
+ fake_tensor_updater.incremental_update()
106
+
107
+ # Keep this last, since it introduces mutation. Look at
108
+ # ./fx_passes/README.md for a discussion of mutation invariants.
109
+ reinplace_inplaceable_ops(gm.graph)
110
+ gm.recompile()
111
+ gm.graph.lint()
112
+
113
+ print_graph(gm.graph, "Aftre recompile in post grad pass.")
114
+
115
+
116
+ @init_once_fakemode
117
+ def lazy_init():
118
+ if torch._C._has_mkldnn:
119
+ from .mkldnn_fusion import _mkldnn_fusion_init
120
+
121
+ _mkldnn_fusion_init()
122
+
123
+
124
+ def reorder_for_locality(graph: torch.fx.Graph):
125
+ def visit(other_node):
126
+ if (
127
+ other_node.op == "call_function"
128
+ and other_node.target != operator.getitem
129
+ and all((n in seen_nodes) for n in other_node.users)
130
+ ):
131
+ # move node's producers right before it
132
+ node.prepend(other_node)
133
+
134
+ seen_nodes = set()
135
+
136
+ # only reorder nodes before the first copy_ in the graph.
137
+ # copy_ will appear at the end of functionalized graphs when there is mutation on inputs,
138
+ # and this reordering doesn't work well with mutation
139
+ first_copy = next(
140
+ (
141
+ node
142
+ for node in graph.nodes
143
+ if node.op == "call_function"
144
+ and node.target == torch.ops.aten.copy_.default
145
+ ),
146
+ None,
147
+ )
148
+ past_mutating_epilogue = first_copy is None
149
+
150
+ for node in reversed(graph.nodes):
151
+ seen_nodes.add(node)
152
+ if not past_mutating_epilogue:
153
+ past_mutating_epilogue = node is first_copy
154
+ continue
155
+
156
+ torch.fx.map_arg((node.args, node.kwargs), visit)
157
+
158
+
159
+ def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1):
160
+ """
161
+ Register an aten to inductor IR replacement pattern
162
+ """
163
+ return pattern_matcher.register_lowering_pattern(
164
+ pattern, extra_check, pass_dict=pass_patterns[pass_number]
165
+ )
166
+
167
+
168
+ ################################################################################
169
+ # Actual patterns below this point.
170
+ # Priority of patterns is:
171
+ # - later output nodes first
172
+ # - order patterns are defined in
173
+ ################################################################################
174
+
175
+
176
+ @register_lowering_pattern(
177
+ CallFunction(
178
+ aten.add,
179
+ CallFunction(aten.mm, Arg(), Arg()),
180
+ CallFunction(aten.mm, Arg(), Arg()),
181
+ )
182
+ )
183
+ def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4):
184
+ return inductor.kernel.mm_plus_mm.tuned_mm_plus_mm(mat1, mat2, mat3, mat4)
185
+
186
+
187
+ def cuda_and_enabled_mixed_mm(match):
188
+ return (config.use_mixed_mm or config.force_mixed_mm) and getattr(
189
+ match.kwargs["mat1"].meta.get("val"), "is_cuda", False
190
+ )
191
+
192
+
193
+ def cuda_and_enabled_mixed_mm_and_not_int8(match):
194
+ return (
195
+ cuda_and_enabled_mixed_mm(match)
196
+ and getattr(match.kwargs["mat1"].meta.get("val"), "is_cuda", False)
197
+ and getattr(match.kwargs["mat2"].meta.get("val"), "dtype", torch.int8)
198
+ != torch.int8
199
+ ) # bitshift numerics in triton and pytorch don't match for torch.int8
200
+
201
+
202
+ """
203
+ this is intended to be used to unpack a [K,N] int4 tensor from a [K/2, N] uint4x2 tensor
204
+ (where the int4 and uint4x2 are represented with int8 and uint8 respectively)
205
+ where every other row of the int4 is packed with the row above it as:
206
+ uint4x2[k,n] = (8+int4[2*k,n])+(8+int4[2*k+1,n])<<4
207
+
208
+ unpack formulas:
209
+ int4[2*k,n]=(uint4x2[k,n] & 0xF) - 8
210
+ int4[2*k+1,n]=(uint4x2[k,n] >> 4) - 8
211
+
212
+ thus matching on unpack formula:
213
+ torch.mm(mat1, torch.cat((mat2 & 0xF, mat2>>4),1).reshape(mat2_mm_shape).to(mat2_dtype).sub(8))
214
+
215
+ note: although the unpack formula in pytorch and the triton kernel is designed for a uint8 mat2, the behavior
216
+ of the kernel matches the pytorch formula for all dtypes except torch.int8
217
+ where the bitwise numerics in triton do not match those in pytorch.
218
+ """
219
+
220
+
221
+ @register_lowering_pattern(
222
+ CallFunction(
223
+ aten.mm.default,
224
+ KeywordArg("mat1"),
225
+ CallFunction(
226
+ aten.sub.Tensor,
227
+ CallFunction(
228
+ prims.convert_element_type.default,
229
+ CallFunction(
230
+ aten.reshape.default,
231
+ CallFunction(
232
+ aten.cat.default,
233
+ ListOf(
234
+ CallFunction(
235
+ aten.bitwise_and.Scalar,
236
+ KeywordArg("mat2"),
237
+ 0xF,
238
+ ),
239
+ CallFunction(
240
+ aten.__rshift__.Scalar,
241
+ KeywordArg("mat2"),
242
+ 4,
243
+ ),
244
+ ),
245
+ 1,
246
+ ),
247
+ KeywordArg("mat2_mm_shape"),
248
+ ),
249
+ KeywordArg("mat2_dtype"),
250
+ ),
251
+ 8,
252
+ ),
253
+ ),
254
+ extra_check=cuda_and_enabled_mixed_mm_and_not_int8,
255
+ )
256
+ def uint4x2_mixed_mm(match: Match, mat1, mat2, mat2_mm_shape, mat2_dtype):
257
+ return inductor.kernel.unpack_mixed_mm.tuned_uint4x2_mixed_mm(
258
+ mat1, mat2, mat2_mm_shape, mat2_dtype
259
+ )
260
+
261
+
262
+ """
263
+ torch.mm(mat1, mat2.to(mat2_dtype))
264
+ """
265
+
266
+
267
+ @register_lowering_pattern(
268
+ CallFunction(
269
+ aten.mm,
270
+ KeywordArg("mat1"),
271
+ CallFunction(
272
+ prims.convert_element_type.default,
273
+ KeywordArg("mat2"),
274
+ KeywordArg("mat2_dtype"),
275
+ ),
276
+ ),
277
+ extra_check=cuda_and_enabled_mixed_mm,
278
+ )
279
+ def mixed_mm(match: Match, mat1, mat2, mat2_dtype):
280
+ return inductor.kernel.mm.tuned_mixed_mm(mat1, mat2, mat2_dtype)
281
+
282
+
283
+ @register_graph_pattern(
284
+ CallFunction(
285
+ aten.cumsum.default,
286
+ CallFunction(
287
+ torch.ops.aten.full.default,
288
+ KeywordArg("shape"),
289
+ KeywordArg("fill_value"),
290
+ dtype=KeywordArg("dtype"),
291
+ layout=Ignored(),
292
+ device=KeywordArg("device"),
293
+ pin_memory=False,
294
+ _users=MULTIPLE,
295
+ ),
296
+ KeywordArg("dim"),
297
+ _users=MULTIPLE,
298
+ ),
299
+ pass_dict=pass_patterns[1],
300
+ )
301
+ def pointless_cumsum_replacement(match: Match, shape, fill_value, device, dtype, dim):
302
+ """Based on a pattern in OPTForCausalLM"""
303
+
304
+ if is_integer_dtype(dtype) or is_boolean_dtype(dtype):
305
+ # cumsum promotes all integral types to int64
306
+ dtype = torch.int64
307
+
308
+ def repl(*shape):
309
+ dim_size = shape[dim]
310
+ idx = torch.arange(1, dim_size + 1, device=device, dtype=dtype)
311
+
312
+ inter_shape = [1] * len(shape)
313
+ inter_shape[dim] = dim_size
314
+ return (idx * fill_value).view(inter_shape).expand(shape)
315
+
316
+ # only replace the output node, not all nodes
317
+ match.nodes = [match.output_node()]
318
+ with V.fake_mode:
319
+ match.replace_by_example(repl, list(shape))
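+
+ # --- Illustrative check (added for exposition; not part of the original file) ---
+ # The replacement above uses the identity cumsum(full(shape, v), dim) == arange(1, n + 1) * v
+ # broadcast along dim. A small self-contained check of that identity:
+ def _example_cumsum_of_full_identity() -> bool:
+     shape, fill_value, dim = (2, 5), 3.0, 1
+     expected = torch.cumsum(torch.full(shape, fill_value), dim)
+     idx = torch.arange(1, shape[dim] + 1, dtype=expected.dtype)
+     return torch.allclose(expected, (idx * fill_value).view(1, -1).expand(shape))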
320
+
321
+
322
+ def shape_of_mm(a, b):
323
+ m, _ = a.get_size()
324
+ _, n = b.get_size()
325
+ return [m, n]
326
+
327
+
328
+ @register_lowering_pattern(
329
+ CallFunction(aten.cat, ListOf(CallFunction(aten.mm, Arg(), Arg())), Arg()),
330
+ )
331
+ def cat_mm(match, inputs, dim):
332
+ return cat_tuned_op(match, inputs, dim, op=L[aten.mm], shape_of=shape_of_mm)
333
+
334
+
335
+ @register_lowering_pattern(
336
+ CallFunction(
337
+ aten.cat, ListOf(CallFunction(aten.addmm, Arg(), Arg(), Arg())), Arg()
338
+ ),
339
+ )
340
+ def cat_addmm(match, inputs, dim):
341
+ def shape_of(bias, a, b):
342
+ m, _ = a.get_size()
343
+ _, n = b.get_size()
344
+ return [m, n]
345
+
346
+ return cat_tuned_op(match, inputs, dim, op=L[aten.addmm], shape_of=shape_of)
347
+
348
+
349
+ def cat_tuned_op(match, inputs, dim, *, op, shape_of):
350
+ """
351
+ Memory planning to remove cat. We can't use the stock memory
352
+ planner since autotuning matmuls needs to know the output layout.
353
+ """
354
+ if len(inputs) == 1:
355
+ return op(*inputs[0])
356
+
357
+ # TODO(jansel): rewrite this as a bmm?
358
+ if dim < 0:
359
+ dim += len(shape_of(*inputs[0]))
360
+ assert dim in (0, 1)
361
+ notdim = 1 - dim
362
+
363
+ new_size: Optional[Union[List[Expr], List[int]]] = None
364
+ offsets_start = []
365
+ offsets_end = []
366
+
367
+ # compute output sizes
368
+ for i in range(len(inputs)):
369
+ shape = shape_of(*inputs[i])
370
+ if new_size is None:
371
+ new_size = shape
372
+ else:
373
+ new_size[notdim] = V.graph.sizevars.guard_equals(
374
+ shape[notdim], new_size[notdim]
375
+ )
376
+ new_size[dim] += shape[dim]
377
+ offsets_start.append(new_size[dim] - shape[dim])
378
+ offsets_end.append(new_size[dim])
379
+
380
+ assert new_size is not None
381
+ dtype = functools.reduce(
382
+ torch.promote_types, [x.get_dtype() for x in itertools.chain(*inputs)]
383
+ )
384
+ device = inputs[0][0].get_device()
385
+ kernel = ir.ConcatKernel(
386
+ name=None,
387
+ layout=ir.FixedLayout(device, dtype, new_size),
388
+ inputs=[],
389
+ )
390
+ kernel_tensor = ir.TensorBox.create(kernel)
391
+
392
+ for i in range(len(inputs)):
393
+ dst = ir.SliceView.create(kernel_tensor, dim, offsets_start[i], offsets_end[i])
394
+ src = op(*inputs[i], layout=dst.get_layout()).data.data
395
+ assert isinstance(src, (ir.ExternKernelOut, ir.TemplateBuffer))
396
+ src.layout = ir.AliasedLayout(dst)
397
+ kernel.inputs.append(src)
398
+
399
+ kernel.name = V.graph.register_buffer(kernel)
400
+ kernel.inputs = ir.ConcatKernel.unwrap_storage(kernel.inputs)
401
+ return kernel_tensor
402
+
403
+
404
+ _cat_1 = CallFunction(aten.cat, Arg(), 1, _users=2)
405
+
406
+
407
+ @register_lowering_pattern(
408
+ CallFunction(
409
+ aten.cat,
410
+ [
411
+ _cat_1,
412
+ CallFunction(
413
+ aten.slice,
414
+ _cat_1,
415
+ 1,
416
+ 0,
417
+ KeywordArg("size"),
418
+ ),
419
+ ],
420
+ 1,
421
+ )
422
+ )
423
+ def cat_slice_cat(match, cat_input, size, dim=1):
424
+ """
425
+ This is an example of a more complex pattern where cat_1 is used
426
+ multiple times inside the pattern. We fold 2 calls to cat into one.
427
+
428
+ Matches:
429
+ cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1)
430
+ slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807)
431
+ slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19)
432
+ cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1)
433
+
434
+
435
+ Rewrite to:
436
+ slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19)
437
+ cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)
438
+ """
439
+ first, *rest = cat_input
440
+ # Optimization is optional, because we can just not fold the cat
441
+ # size should be within first.get_size()[dim] such that the optimization is valid.
442
+ # For negative `end`, we currently fallback to not optimizing.
443
+ if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):
444
+ # fold 2 cats into 1 cat
445
+ return L[aten.cat](
446
+ [
447
+ first,
448
+ *rest,
449
+ L[aten.slice](first, dim, 0, size),
450
+ ],
451
+ dim,
452
+ )
453
+ else:
454
+ # don't expect to hit this case, just fall back
455
+ tmp = L[aten.cat](cat_input, dim)
456
+ return L[aten.cat](
457
+ [
458
+ tmp,
459
+ L[aten.slice](tmp, dim, 0, size),
460
+ ],
461
+ dim,
462
+ )
463
+
464
+
465
+ def is_valid_splitwithsizes_cat(match):
466
+ split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
467
+ cat_nodes = filter_nodes(match.nodes, aten.cat)
468
+ get_item_nodes = filter_nodes(match.nodes, operator.getitem)
469
+ if len(split_nodes) != 1 or len(cat_nodes) != 1:
470
+ return False
471
+ split_node, cat_node = split_nodes[0], cat_nodes[0]
472
+ # The dim of split and cat should match for passthrough
473
+ if get_arg_value(split_node, 2, "dim") != get_arg_value(cat_node, 1, "dim"):
474
+ return False
475
+ get_item_args = {
476
+ get_arg_value(get_item_node, 1) for get_item_node in get_item_nodes
477
+ }
478
+ assert None not in get_item_args
479
+ split_sizes = get_arg_value(split_node, 1, "split_sizes")
480
+ # All parts of split should be included in the cat
481
+ if get_item_args != set(range(len(split_sizes))):
482
+ return False
483
+ # The order of get_item_args should be the same as the order in which cat_node uses them.
484
+ # For example, if the split_node is split_with_sizes(input, [2, 2, 3], 1),
485
+ # the cat node should be like cat([get_item(0), get_item(1), get_item(2)], 1).
486
+ cat_items_args_order = [
487
+ get_arg_value(item_node, 1) for item_node in get_arg_value(cat_node, 0)
488
+ ]
489
+ if cat_items_args_order != list(range(len(split_sizes))):
490
+ return False
491
+
492
+ return True
493
+
494
+
495
+ def same_meta(node1: torch.fx.Node, node2: torch.fx.Node):
496
+ """True if two nodes have the same metadata"""
497
+ val1 = node1.meta.get("val")
498
+ val2 = node2.meta.get("val")
499
+ return (
500
+ val1 is not None
501
+ and val2 is not None
502
+ and definitely_true(sym_eq(val1.size(), val2.size()))
503
+ and val1.layout == val2.layout
504
+ and val1.dtype == val2.dtype
505
+ and val1.device == val2.device
506
+ and (
507
+ val1.layout != torch.strided
508
+ or definitely_true(sym_eq(val1.stride(), val2.stride()))
509
+ )
510
+ )
511
+
512
+
513
+ noop_registry: Dict[Any, Any] = {}
514
+
515
+
516
+ def register_noop_decomp(targets, nop_arg=0):
517
+ def register_fun(cond):
518
+ register_decomposition(targets, registry=noop_registry, unsafe=True)(
519
+ (cond, nop_arg)
520
+ )
521
+ return cond
522
+
523
+ return register_fun
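+
+ # --- Hypothetical usage example (added for exposition; not part of the original file) ---
+ # register_noop_decomp attaches an "is this call a no-op?" predicate to one or more targets.
+ # The op/predicate below are made up to show the shape of a registration, and are left
+ # commented out so they do not actually extend the registry; the real registrations follow.
+ #
+ # @register_noop_decomp(aten.expand)
+ # def _example_expand_noop(x, sizes):
+ #     return list(x.shape) == list(sizes)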
524
+
525
+
526
+ @register_noop_decomp(aten.slice)
527
+ def slice_noop(self, dim=0, start=None, end=None, step=1):
528
+ if start is None or end is None:
529
+ return False
530
+ if start == 0 and end >= 2**63 - 1 and step == 1:
531
+ return True
532
+ return False
533
+
534
+
535
+ @register_noop_decomp(aten.slice_scatter, 1)
536
+ def slice_scatter_noop(self, src, dim=0, start=None, end=None, step=1):
537
+ if start is None:
538
+ start = 0
539
+ if end is None:
540
+ end = 2**63 - 1
541
+ if start == 0 and end >= 2**63 - 1 and step == 1:
542
+ return True
543
+ return False
544
+
545
+
546
+ @register_noop_decomp(aten.repeat)
547
+ def repeat_noop(self, repeats):
548
+ return all(r == 1 for r in repeats)
549
+
550
+
551
+ @register_noop_decomp(aten.constant_pad_nd)
552
+ def constant_pad_nd(x, padding, fill_value=0):
553
+ return all(p == 0 for p in padding)
554
+
555
+
556
+ @register_noop_decomp(torch.ops.prims.convert_element_type)
557
+ def convert_element_type_noop(x, dtype: torch.dtype):
558
+ return x.dtype == dtype
559
+
560
+
561
+ @register_noop_decomp(torch.ops.prims.device_put)
562
+ def device_put_noop(x, device):
563
+ return x.device == decode_device(device)
564
+
565
+
566
+ @register_noop_decomp([aten.ceil, aten.floor, aten.round, aten.trunc])
567
+ def int_noop(x):
568
+ return is_integer_dtype(x.dtype)
569
+
570
+
571
+ @register_noop_decomp([aten.pow])
572
+ def pow_noop(a, b):
573
+ return isinstance(b, int) and b == 1
574
+
575
+
576
+ @register_noop_decomp([aten.cat], lambda args: args[0][0])
577
+ def cat_noop(inputs, dim=0):
578
+ return len(inputs) == 1
579
+
580
+
581
+ @register_noop_decomp(aten.view)
582
+ def view_noop(arg, size):
583
+ return arg.shape == size
584
+
585
+
586
+ # Note, we also always have a check for identical metadata, which is why these
587
+ # are safe
588
+ @register_noop_decomp([aten.copy], nop_arg=1)
589
+ @register_noop_decomp([aten.alias, aten.clone])
590
+ def true_noop(*args, **kwargs):
591
+ return True
592
+
593
+
594
+ def remove_noop_ops(graph: torch.fx.Graph):
595
+ """
596
+ Removes both operations that are essentially aten.clone and operations that are essentially aten.alias from the graph.
597
+ """
598
+ input_storages = set()
599
+ output_storages = set()
600
+
601
+ for node in graph.nodes:
602
+ if node.op == "placeholder":
603
+ input_storages.add(get_node_storage(node))
604
+ else:
605
+ break
606
+
607
+ for out in next(iter(reversed(graph.nodes))).args[0]:
608
+ if isinstance(out, torch.fx.Node):
609
+ output_storages.add(get_node_storage(out))
610
+
611
+ for node in graph.nodes:
612
+ if node.target in noop_registry:
613
+ cond, src_index = noop_registry[node.target]
614
+ if isinstance(src_index, int):
615
+ src = node.args[src_index]
616
+ else:
617
+ src = src_index(node.args)
618
+ if not isinstance(src, torch.fx.Node):
619
+ continue
620
+ # See fx_passes/README.md for a discussion of why this is
621
+ # necessary.
622
+ if get_node_storage(node) in output_storages and (
623
+ get_node_storage(src) in input_storages
624
+ or get_node_storage(src) in output_storages
625
+ ):
626
+ continue
627
+ is_valid, args, kwargs = get_fake_args_kwargs(node)
628
+ if not is_valid:
629
+ continue
630
+ if same_meta(node, src) and cond(*args, **kwargs):
631
+ node.replace_all_uses_with(src)
632
+ graph.erase_node(node)
633
+
634
+
635
+ InplaceableOp = namedtuple("InplaceableOp", ["inplace_op", "mutated_arg"])
636
+
637
+ inplaceable_ops = {
638
+ aten.index_put.default: InplaceableOp(aten.index_put_.default, 0),
639
+ aten._unsafe_index_put.default: InplaceableOp(inductor_prims._unsafe_index_put_, 0),
640
+ }
641
+
642
+ try:
643
+ c10d_functional = torch.ops._c10d_functional
644
+ inplaceable_collective_ops = {
645
+ c10d_functional.all_reduce.default: InplaceableOp(
646
+ c10d_functional.all_reduce_.default, 0
647
+ ),
648
+ c10d_functional.all_reduce_coalesced.default: InplaceableOp(
649
+ c10d_functional.all_reduce_coalesced_.default, 0
650
+ ),
651
+ }
652
+ inplaceable_ops.update(inplaceable_collective_ops)
653
+ except AttributeError:
654
+ # _c10d_functional ops are only available when torch
655
+ # is built with USE_DISTRIBUTED=1.
656
+ pass
657
+
658
+ inplaceable_foreach_ops = {}
659
+ for outplace_op, inplace_op in inplaceable_foreach_ops_lowerings.items():
660
+ inplaceable_foreach_ops[outplace_op] = InplaceableOp(inplace_op, 0)
661
+
662
+
663
+ inplaceable_triton_ops = {triton_kernel_wrapper_functional}
664
+
665
+
666
+ def reinplace_inplaceable_ops(graph):
667
+ """
668
+ Reinplaces in-placeable operations.
669
+ If there are no uses of a view of the mutated arg after the current node,
670
+ it is possible to inplace the op.
671
+ The above algorithm can be justified by reasoning about side effects. While
672
+ we traverse the graph in the forward direction, only later nodes can observe the
673
+ side effects of the current node. If neither the current node nor any view of
674
+ it is used later in the graph, then it is safe to
675
+ inplace as there would be no way to observe the side effects.
676
+ This condition is slightly different for graph inputs where they can only
677
+ be inplaced if the above condition is true and there's a copy_ in the
678
+ epilogue that signals that the caller wants to observe the mutation.
679
+ """
680
+
681
+ copy_args_to_copy_nodes = {}
682
+ foreach_node_to_copy_nodes = defaultdict(list)
683
+ mutated_inputs = set()
684
+ storage_to_nodes = defaultdict(list)
685
+ node_order: Dict[Any, int] = {}
686
+ for i, node in enumerate(reversed(graph.nodes)):
687
+ node_order[node] = len(graph.nodes) - i - 1
688
+ storage_to_nodes[get_node_storage(node)].append(node)
689
+ if node.target == aten.copy_.default:
690
+ dst = node.args[0]
691
+ src = node.args[1]
692
+ # If the target is a getitem and it indexes a possible clone,
693
+ # then skip over it
694
+ if src.target == operator.getitem and (
695
+ (
696
+ src.args[0].target == triton_kernel_wrapper_functional
697
+ and src.args[0].kwargs["kwargs"][src.args[1]] == node.args[0]
698
+ )
699
+ or (src.args[0].target in inplaceable_foreach_ops)
700
+ ):
701
+ src = src.args[0]
702
+
703
+ copy_args_to_copy_nodes[(dst, src)] = node
704
+
705
+ assert node.args[0].op == "placeholder"
706
+ mutated_inputs.add(node.args[0])
707
+
708
+ def any_use_of_views_after_node(node, shared_view_nodes, *, copy_node):
709
+ node_loc = node_order[node]
710
+ for view in shared_view_nodes:
711
+ for user in view.users:
712
+ # Skip all users before node
713
+ if node_order[user] <= node_loc:
714
+ continue
715
+ # Skip over the copy_ epilogue node that could get reinplaced
716
+ if copy_node == user:
717
+ continue
718
+ return True
719
+ return False
720
+
721
+ def can_inplace(node, mutated_arg):
722
+ if isinstance(mutated_arg, (list, tuple)):
723
+ return all(can_inplace(node, arg) for arg in mutated_arg)
724
+
725
+ if get_node_storage(mutated_arg) is None:
726
+ return False
727
+ shared_view_nodes = storage_to_nodes[get_node_storage(mutated_arg)]
728
+ if mutated_arg.op == "placeholder":
729
+ if not (
730
+ copy_node := copy_args_to_copy_nodes.get((mutated_arg, node), False)
731
+ ):
732
+ return False
733
+
734
+ if any_use_of_views_after_node(
735
+ node, shared_view_nodes, copy_node=copy_node
736
+ ):
737
+ return False
738
+
739
+ return True
740
+ elif any(view.op == "placeholder" for view in shared_view_nodes):
741
+ # If mutated arg is view of any of the inputs of the graph,
742
+ # do not allow for inplacing.
743
+ # This would require more sophisticated algorithm to handle
744
+ return False
745
+ else:
746
+ return not any_use_of_views_after_node(
747
+ node, shared_view_nodes, copy_node=None
748
+ )
749
+
750
+ for node in graph.nodes:
751
+ if (inplaceable_op := inplaceable_ops.get(node.target, None)) is not None:
752
+ mutated_arg = node.args[inplaceable_op.mutated_arg]
753
+ if can_inplace(node, mutated_arg):
754
+ # TODO(yifu): this doesn't properly remove copy epilogues for
755
+ # ops that mutate multiple inputs. Need to revise the copy
756
+ # node tracking logic to support the case.
757
+ copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
758
+ if copy_node is not None:
759
+ graph.erase_node(copy_node)
760
+ node.target = inplaceable_op.inplace_op
761
+ elif node.target in inplaceable_triton_ops:
762
+ # inplaceable_triton_ops take an additional argument called
763
+ # tensors_to_clone which contain a list of tensors to clone
764
+ # This pass iterates over them and sees which ones are safe
765
+ # to eliminate (i.e. no longer need the clones)
766
+ tensors_to_clone = []
767
+ for arg in node.kwargs["tensors_to_clone"]:
768
+ assert arg in node.kwargs["kwargs"]
769
+ mutated_arg = node.kwargs["kwargs"][arg]
770
+ if can_inplace(node, mutated_arg):
771
+ copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
772
+ if copy_node is not None:
773
+ graph.erase_node(copy_node)
774
+ else:
775
+ tensors_to_clone.append(arg)
776
+ kwargs = dict(node.kwargs)
777
+ kwargs["tensors_to_clone"] = tensors_to_clone
778
+ node.kwargs = immutable_dict(kwargs)
779
+ elif (
780
+ inplaceable_op := inplaceable_foreach_ops.get(node.target, None)
781
+ ) is not None:
782
+ mutated_args = node.args[inplaceable_op.mutated_arg]
783
+
784
+ if not all((arg, node) in copy_args_to_copy_nodes for arg in mutated_args):
785
+ continue
786
+
787
+ if can_inplace(node, mutated_args):
788
+ for arg in mutated_args:
789
+ copy_node = copy_args_to_copy_nodes[(arg, node)]
790
+ graph.erase_node(copy_node)
791
+
792
+ node.target = inplaceable_op.inplace_op
793
+
794
+
795
+ @register_lowering_pattern(
796
+ CallFunction(
797
+ aten.cat,
798
+ ListOf(
799
+ CallFunction(
800
+ operator.getitem,
801
+ CallFunction(
802
+ aten.split_with_sizes,
803
+ KeywordArg("input_"),
804
+ Ignored(),
805
+ Ignored(),
806
+ _users=MULTIPLE,
807
+ ),
808
+ Ignored(),
809
+ ),
810
+ ),
811
+ Ignored(),
812
+ ),
813
+ pass_number=2,
814
+ extra_check=is_valid_splitwithsizes_cat,
815
+ )
816
+ def splitwithsizes_cat_replace(match, input_):
817
+ return input_
818
+
819
+
820
+ def is_valid_cat_splitwithsizes(match):
821
+ cat_nodes = filter_nodes(match.nodes, aten.cat)
822
+ split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
823
+ if len(split_nodes) != 1 or len(cat_nodes) != 1:
824
+ return False
825
+ split_node, cat_node = split_nodes[0], cat_nodes[0]
826
+
827
+ # the cat node has other users: can't eliminate
828
+ if len(cat_node.users) > 1:
829
+ return False
830
+
831
+ # the dim of the cat and split should match
832
+ dim = get_arg_value(split_node, 2, "dim")
833
+ if dim != get_arg_value(cat_node, 1, "dim"):
834
+ return False
835
+
836
+ cat_inputs = list(get_arg_value(cat_node, 0))
837
+ split_sizes = get_arg_value(split_node, 1, "split_sizes")
838
+ # the number of input tensors in cat and the
839
+ # length of the split sizes should match
840
+ if len(cat_inputs) != len(split_sizes):
841
+ return False
842
+
843
+ for cat_input, split_size in zip(cat_inputs, split_sizes):
844
+ # each cat input tensor's size along dim
845
+ # should match the corresponding split size
846
+ if "val" not in cat_input.meta:
847
+ return False
848
+ cat_input_size = cat_input.meta["val"].size(dim)
849
+ if cat_input_size != split_size:
850
+ return False
851
+
852
+ return True
853
+
854
+
855
+ @register_lowering_pattern(
856
+ CallFunction(
857
+ aten.split_with_sizes,
858
+ CallFunction(
859
+ aten.cat,
860
+ KeywordArg("input_"),
861
+ Ignored(),
862
+ _users=MULTIPLE,
863
+ ),
864
+ Ignored(),
865
+ Ignored(),
866
+ ),
867
+ pass_number=2,
868
+ extra_check=is_valid_cat_splitwithsizes,
869
+ )
870
+ def cat_splitwithsizes_replace(match, input_):
871
+ return input_
872
+
873
+
874
+ def view_to_reshape(gm):
875
+ """
876
+ Replace view ops in the GraphModule to reshape ops.
877
+ """
878
+ for nd in gm.graph.nodes:
879
+ if nd.target == torch.ops.aten.view.default:
880
+ nd.target = torch.ops.aten.reshape.default
881
+
882
+
883
+ def should_prefer_unfused_addmm(match):
884
+ inp = match.kwargs["inp"]
885
+ if not inp.meta["val"].is_cuda:
886
+ return False
887
+
888
+ output = match.output_node()
889
+ return all(is_pointwise_use(use) for use in output.users)
890
+
891
+
892
+ @register_graph_pattern(
893
+ CallFunction(aten.addmm, KeywordArg("inp"), Arg(), Arg()),
894
+ pass_dict=pass_patterns[2],
895
+ extra_check=should_prefer_unfused_addmm,
896
+ )
897
+ def unfuse_bias_add_to_pointwise(match: Match, mat1, mat2, *, inp):
898
+ def repl(inp, x1, x2):
899
+ return x1 @ x2 + inp
900
+
901
+ with V.fake_mode:
902
+ match.replace_by_example(repl, [inp, mat1, mat2])
903
+
904
+
905
+ def is_valid_addmm_fusion(match):
906
+ mat1, mat2 = match.args
907
+ inp = match.kwargs["inp"]
908
+
909
+ if not (
910
+ isinstance(inp, torch.fx.Node) and isinstance(inp.meta["val"], torch.Tensor)
911
+ ):
912
+ return False # Input is a number
913
+
914
+ in_shape = inp.meta["val"].shape
915
+ mm_shape = mat1.meta["val"].shape[0], mat2.meta["val"].shape[1]
916
+ matched = is_expandable_to(in_shape, mm_shape)
917
+ if not matched:
918
+ return False # Shape mismatch
919
+
920
+ return not should_prefer_unfused_addmm(match)
921
+
922
+
923
+ @register_graph_pattern(
924
+ CallFunction(
925
+ aten.add,
926
+ CallFunction(aten.mm, Arg(), Arg()),
927
+ KeywordArg("inp"),
928
+ ),
929
+ pass_dict=pass_patterns[2],
930
+ extra_check=is_valid_addmm_fusion,
931
+ )
932
+ @register_graph_pattern(
933
+ CallFunction(
934
+ aten.add,
935
+ KeywordArg("inp"),
936
+ CallFunction(aten.mm, Arg(), Arg()),
937
+ ),
938
+ pass_dict=pass_patterns[2],
939
+ extra_check=is_valid_addmm_fusion,
940
+ )
941
+ def addmm(match, mat1, mat2, *, inp):
942
+ def repl(inp, mat1, mat2):
943
+ return aten.addmm(inp, mat1, mat2)
944
+
945
+ with V.fake_mode:
946
+ match.replace_by_example(repl, [inp, mat1, mat2])
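+
+ # --- Illustrative check (added for exposition; not part of the original file) ---
+ # The fusion above is numerically a no-op because addmm(inp, mat1, mat2) computes
+ # inp + mat1 @ mat2 (with inp broadcast), matching the add(mm(...), inp) pattern:
+ def _example_addmm_equivalence() -> bool:
+     inp, a, b = torch.randn(4), torch.randn(3, 5), torch.randn(5, 4)
+     return torch.allclose(torch.addmm(inp, a, b), a @ b + inp, atol=1e-6)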
947
+
948
+
949
+ def check_shape_cuda_and_fused_int_mm_mul_enabled(match):
950
+ return (
951
+ config.force_fuse_int_mm_with_mul
952
+ and len(getattr(match.args[2].meta.get("val"), "shape", [])) == 2
953
+ and getattr(match.args[2].meta.get("val"), "is_cuda", False)
954
+ )
955
+
956
+
957
+ @register_lowering_pattern(
958
+ CallFunction(
959
+ prims.convert_element_type.default,
960
+ CallFunction(
961
+ aten.mul,
962
+ CallFunction(
963
+ aten._int_mm,
964
+ Arg(),
965
+ Arg(),
966
+ ),
967
+ Arg(),
968
+ ),
969
+ Arg(),
970
+ ),
971
+ check_shape_cuda_and_fused_int_mm_mul_enabled,
972
+ )
973
+ @register_lowering_pattern(
974
+ CallFunction(
975
+ aten.mul,
976
+ CallFunction(
977
+ aten._int_mm,
978
+ Arg(),
979
+ Arg(),
980
+ ),
981
+ Arg(),
982
+ ),
983
+ check_shape_cuda_and_fused_int_mm_mul_enabled,
984
+ )
985
+ def fused_int_mm_mul(match: Match, mat1, mat2, mat3, out_dtype=None):
986
+ return inductor.kernel.mm.tuned_fused_int_mm_mul(mat1, mat2, mat3, out_dtype)
987
+
988
+
989
+ class ConstructorMoverPass:
990
+ def __init__(self, target: str, allow_outputs: bool = False) -> None:
991
+ """
992
+ Move constructors from cpu to the target_device.
993
+
994
+ Sweeps through the module, looking for constructor nodes that can be moved
995
+ to the target_device.
996
+
997
+ A constructor node can be moved to the target_device iff all of its users
998
+ can also be moved (tested by cannot_be_moved). Otherwise, all dependent
999
+ constructor nodes won't be moved.
1000
+
1001
+ - target: target device type
1002
+ - allow_outputs: allow outputs to be moved
1003
+ """
1004
+
1005
+ self.target = target
1006
+ self.allow_outputs = allow_outputs
1007
+
1008
+ assert isinstance(target, str), (
1009
+ "target should be a string representing the device type. "
1010
+ f"Got: {type(target).__name__}"
1011
+ )
1012
+
1013
+ def allow_cpu_device(self, node: fx.Node) -> bool:
1014
+ """
1015
+ Returns whether a node that returns a tensor on the target device may have
1016
+ cpu tensors as input.
1017
+ """
1018
+ return node.target in (
1019
+ torch.ops.aten.index.Tensor,
1020
+ torch.ops.aten.index_put.default,
1021
+ torch.ops.aten.index_put_.default,
1022
+ torch.ops.aten.copy.default,
1023
+ torch.ops.aten.copy_.default,
1024
+ torch.ops.aten.slice_scatter.default,
1025
+ )
1026
+
1027
+ def cannot_be_moved(self, node: fx.Node) -> bool:
1028
+ """
1029
+ Returns whether a node can be moved to the target device.
1030
+
1031
+ If this function returns False, it means that this node and all of its users
1032
+ won't be moved into the target device.
1033
+ """
1034
+ if node.target == "output":
1035
+ return not self.allow_outputs
1036
+
1037
+ if not (
1038
+ isinstance(node.target, torch._ops.OpOverload)
1039
+ and node.target.namespace in ("prims", "aten")
1040
+ ):
1041
+ return True
1042
+
1043
+ return False
1044
+
1045
+ def get_node_device(self, node: fx.Node) -> Optional[torch.device]:
1046
+ """
1047
+ Get the device of a node.
1048
+ """
1049
+ ten = node.meta.get("val")
1050
+ return None if not isinstance(ten, torch.Tensor) else ten.device
1051
+
1052
+ def get_cpu_indeg_count(self, graph: fx.Graph) -> Dict[fx.Node, int]:
1053
+ """
1054
+ Get the number of cpu inputs to a node
1055
+ """
1056
+ cpu_indeg: Dict[fx.Node, int] = Counter()
1057
+
1058
+ for node in graph.nodes:
1059
+ cpu_count = 0
1060
+
1061
+ def add_cpu_inp(node):
1062
+ nonlocal cpu_count
1063
+ device = self.get_node_device(node)
1064
+ cpu_count += device is not None and device.type == "cpu"
1065
+
1066
+ pytree.tree_map_only(fx.Node, add_cpu_inp, (node.args, node.kwargs))
1067
+
1068
+ if cpu_count:
1069
+ cpu_indeg[node] = cpu_count
1070
+
1071
+ return cpu_indeg
1072
+
1073
+ def __call__(self, graph: fx.Graph) -> None:
1074
+ target_devices = set()
1075
+ constructors = []
1076
+
1077
+ for node in graph.nodes:
1078
+ device = self.get_node_device(node)
1079
+ if device and device.type == self.target:
1080
+ target_devices.add(device)
1081
+
1082
+ if not (
1083
+ isinstance(node.target, torch._ops.OpOverload)
1084
+ and node.target.namespace in ("prims", "aten")
1085
+ ):
1086
+ continue
1087
+
1088
+ if not torch._subclasses.fake_tensor._is_tensor_constructor(node.target):
1089
+ continue
1090
+
1091
+ if not node.kwargs.get("device") == torch.device("cpu"):
1092
+ continue
1093
+
1094
+ constructors.append(node)
1095
+
1096
+ # not handling multiple target devices initially
1097
+ if not constructors or len(target_devices) != 1:
1098
+ return
1099
+
1100
+ movable_constructors = self.find_movable_constructors(graph, constructors)
1101
+
1102
+ for node in movable_constructors:
1103
+ kwargs = node.kwargs.copy()
1104
+ kwargs["device"] = next(iter(target_devices))
1105
+ node.kwargs = kwargs
1106
+
1107
+ def find_movable_constructors(
1108
+ self, graph: fx.Graph, constructors: List[fx.Node]
1109
+ ) -> Set[fx.Node]:
1110
+ """
1111
+ Starting from the cpu constructors, iterate through the graph and test that all of their
1112
+ downstream uses can safely be moved to cpu.
1113
+ """
1114
+ cpu_indeg: Dict[fx.Node, int] = self.get_cpu_indeg_count(graph)
1115
+
1116
+ # which constructors cannot be moved to cuda
1117
+ cannot_move_to_cuda: Set[fx.Node] = set()
1118
+
1119
+ # For any node in the graph, which constructors does it have a dependency on
1120
+ constructor_dependencies: Dict[fx.Node, Set[fx.Node]] = defaultdict(set)
1121
+
1122
+ # if a cpu node has a dependency on two different cpu constructors,
1123
+ # then if either constructor cannot be moved to cuda, the other cannot as well.
1124
+ # In this case any node with a dependency on one will have a dependency on the other
1125
+ equal_constructor_sets: Dict[fx.Node, Set[fx.Node]] = {
1126
+ c: {c} for c in constructors
1127
+ }
1128
+
1129
+ def make_dependencies_equivalent(
1130
+ set1: Set[fx.Node], set2: Set[fx.Node]
1131
+ ) -> Set[fx.Node]:
1132
+ # could use union find but not worth complexity here
1133
+ set1.update(set2)
1134
+ for obj in set1:
1135
+ equal_constructor_sets[obj] = set1
1136
+ return set1
1137
+
1138
+ queue: List[fx.Node] = list(constructors)
1139
+
1140
+ for c in queue:
1141
+ constructor_dependencies[c].add(c)
1142
+
1143
+ while queue:
1144
+ node = queue.pop()
1145
+ dependencies = constructor_dependencies[node]
1146
+
1147
+ for user in node.users:
1148
+ if self.cannot_be_moved(user):
1149
+ cannot_move_to_cuda.update(dependencies)
1150
+ break
1151
+
1152
+ # this node was used by an op which takes in multiple devices and outputs a cuda
1153
+ # tensor. we can convert its cpu input to cuda without making further changes
1154
+ node_device = self.get_node_device(user)
1155
+ if (
1156
+ self.allow_cpu_device(user)
1157
+ and node_device
1158
+ and node_device.type == self.target
1159
+ ):
1160
+ del cpu_indeg[user]
1161
+ else:
1162
+ # otherwise, we should continue looking at its downstream uses
1163
+ cpu_indeg[user] -= 1
1164
+ if cpu_indeg[user] == 0:
1165
+ del cpu_indeg[user]
1166
+ queue.append(user)
1167
+
1168
+ unioned_set = make_dependencies_equivalent(
1169
+ dependencies, constructor_dependencies[user]
1170
+ )
1171
+ constructor_dependencies[user] = unioned_set
1172
+
1173
+ for node in cpu_indeg:
1174
+ if constructor_dependencies[node]:
1175
+ cannot_move_to_cuda.update(constructor_dependencies[node])
1176
+
1177
+ all_cannot_move_to_cuda = cannot_move_to_cuda.copy()
1178
+ for constructor in cannot_move_to_cuda:
1179
+ all_cannot_move_to_cuda.update(equal_constructor_sets[constructor])
1180
+
1181
+ return set(constructors) - all_cannot_move_to_cuda
1182
+
1183
+
1184
+ def move_constructors_to_cuda(graph: fx.Graph) -> None:
1185
+ """
1186
+ Moves intermediary tensors which are constructed on the cpu to cuda when safe
1187
+ """
1188
+ ConstructorMoverPass("cuda")(graph)
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py ADDED
@@ -0,0 +1,477 @@
1
+ import copy
2
+ import logging
3
+ from typing import List, Optional
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch._dynamo.utils import detect_fake_mode
8
+ from torch._utils_internal import print_graph
9
+ from torch.fx.experimental.optimization import (
10
+ matches_module_pattern,
11
+ replace_node_module,
12
+ )
13
+ from torch.fx.passes.shape_prop import ShapeProp
14
+ from torch.nn import functional as F
15
+ from torch.nn.utils.fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
16
+
17
+ from .. import config
18
+
19
+ from ..fx_utils import matches_module_function_pattern
20
+ from ..pattern_matcher import (
21
+ init_once_fakemode,
22
+ PatternMatcherPass,
23
+ stable_topological_sort,
24
+ )
25
+ from ..utils import is_cpu_device
26
+ from .group_batch_fusion import group_batch_fusion_passes
27
+ from .misc_patterns import numpy_compat_normalization
28
+
29
+ log = logging.getLogger(__name__)
30
+
31
+ normalization_pass = PatternMatcherPass(prevent_match_across_mutations=True)
32
+ merge_splits_pass = PatternMatcherPass(prevent_match_across_mutations=True)
33
+ split_cat_pass = PatternMatcherPass(prevent_match_across_mutations=True)
34
+ unbind_stack_pass = PatternMatcherPass(prevent_match_across_mutations=True)
35
+ efficient_conv_bn_eval_pass = PatternMatcherPass(prevent_match_across_mutations=True)
36
+ merge_getitem_cat_pass = PatternMatcherPass(prevent_match_across_mutations=True)
37
+
38
+ pattern_matcher_passes: List[PatternMatcherPass] = [
39
+ normalization_pass,
40
+ merge_getitem_cat_pass,
41
+ merge_splits_pass,
42
+ split_cat_pass,
43
+ unbind_stack_pass,
44
+ efficient_conv_bn_eval_pass,
45
+ ]
46
+
47
+
48
+ @init_once_fakemode
49
+ def lazy_init():
50
+ from . import efficient_conv_bn_eval, split_cat # noqa: F401
51
+
52
+ if config.is_fbcode():
53
+ from . import fb # type: ignore[attr-defined] # noqa: F401
54
+
55
+
56
+ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs):
57
+ """
58
+ Apply passes on the input FX graph using Torch IR.
59
+
60
+ WARNING:
61
+ The IR before grad is not functional or normalized, so it is harder
62
+ to write passes on this IR. Passes must be safe with respect to
63
+ aliasing and mutation and need to handle all possible arg schemas.
64
+
65
+ Consider adding a new pass to post_grad.py or joint_graph.py which
66
+ are after functionalization and normalization.
67
+ """
68
+
69
+ if config.pattern_matcher:
70
+ lazy_init()
71
+ gm = fuse_fx(gm, example_inputs)
72
+ numpy_compat_normalization(gm.graph)
73
+ group_batch_fusion_passes(gm.graph, pre_grad=True)
74
+ print_graph(gm.graph, "Before split cat in pre grad pass.")
75
+ for pattern_matcher_pass in pattern_matcher_passes:
76
+ pattern_matcher_pass.apply(gm.graph)
77
+ print_graph(
78
+ gm.graph,
79
+ f"Apply split cat pattern matcher {pattern_matcher_pass.__class__.__name__} in pre grad.",
80
+ )
81
+
82
+ if config.pre_grad_custom_pass is not None:
83
+ config.pre_grad_custom_pass(gm.graph)
84
+ stable_topological_sort(gm.graph)
85
+ gm.graph.lint()
86
+ gm.recompile()
87
+
88
+ print_graph(gm.graph, "Aftre recompile in pre grad pass.")
89
+
90
+ return gm
91
+
92
+
93
+ def fuse_fx(gm: torch.fx.GraphModule, example_inputs) -> torch.fx.GraphModule:
94
+ is_cpu = is_cpu_device(example_inputs)
95
+
96
+ fake_mode = detect_fake_mode(example_inputs)
97
+
98
+ gm = sink_cat_after_pointwise(gm)
99
+ if config.permute_fusion and not is_cpu:
100
+ # For linear permute fusion, we need to check input info to identify
101
+ # and perform proper permutation/transpose
102
+ ShapeProp(gm, fake_mode=fake_mode).propagate(*example_inputs)
103
+ gm = linear_permute_fusion(gm)
104
+ gm = permute_linear_fusion(gm)
105
+ gm = permute_matmul_fusion(gm)
106
+
107
+ # make sure autograd is disabled.
108
+ if torch.is_grad_enabled() or not is_cpu:
109
+ return gm
110
+ if config.freezing:
111
+ gm = remove_identity(gm)
112
+ gm = fuse_conv_bn(gm)
113
+ return gm
114
+
115
+
116
+ def fetch_attr(target: str, mod):
117
+ target_atoms = target.split(".")
118
+ attr_itr = mod
119
+ for i, atom in enumerate(target_atoms):
120
+ if not hasattr(attr_itr, atom):
121
+ raise RuntimeError(
122
+ f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}"
123
+ )
124
+ attr_itr = getattr(attr_itr, atom)
125
+ return attr_itr
126
+
127
+
128
+ def remove_identity(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
129
+ """
130
+ Removes all identity layers from the module.
131
+ """
132
+
133
+ class IdentityRemover(torch.fx.Transformer):
134
+ def call_module(self, target, args, kwargs):
135
+ if isinstance(self.submodules[target], nn.Identity):
136
+ assert len(args) == 1
137
+ return args[0]
138
+ else:
139
+ return super().call_module(target, args, kwargs)
140
+
141
+ return IdentityRemover(gm).transform()
142
+
143
+
144
+ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModule:
145
+ """
146
+ Fuses Convolution/BN layers for inference purposes.
147
+ """
148
+ modules_patterns = [
149
+ (torch.nn.Conv1d, torch.nn.BatchNorm1d),
150
+ (torch.nn.Conv2d, torch.nn.BatchNorm2d),
151
+ (torch.nn.Conv3d, torch.nn.BatchNorm3d),
152
+ ]
153
+ module_function_patterns = [
154
+ (torch.nn.Conv1d, F.batch_norm),
155
+ (torch.nn.Conv2d, F.batch_norm),
156
+ (torch.nn.Conv3d, F.batch_norm),
157
+ ]
158
+ modules = dict(gm.named_modules())
159
+ for pattern in modules_patterns:
160
+ for node in gm.graph.nodes:
161
+ if matches_module_pattern(pattern, node, modules):
162
+ if len(node.args[0].users) > 1: # Output of conv is used by other nodes
163
+ continue
164
+ conv = modules[node.args[0].target]
165
+ bn = modules[node.target]
166
+ eval_mode = all(not n.training for n in [conv, bn])
167
+ if not eval_mode:
168
+ continue
169
+ if not bn.track_running_stats:
170
+ continue
171
+ fused_conv = fuse_conv_bn_eval(conv, bn)
172
+ replace_node_module(node.args[0], modules, fused_conv)
173
+ node.replace_all_uses_with(node.args[0])
174
+ gm.graph.erase_node(node)
175
+ gm.graph.lint()
176
+ for pattern in module_function_patterns:
177
+ for node in gm.graph.nodes:
178
+ if matches_module_function_pattern(pattern, node, modules):
179
+ # TODO: support kwargs.
180
+ if len(node.args) != 8:
181
+ continue
182
+ conv = modules[node.args[0].target]
183
+ bn_training = node.args[5]
184
+ bn_eps = node.args[7]
185
+ if conv.training or bn_training:
186
+ continue
187
+ if type(bn_eps) is not float:
188
+ continue
189
+ bn_args_is_constant = all(
190
+ n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5]
191
+ )
192
+ if not bn_args_is_constant:
193
+ continue
194
+ bn_running_mean = fetch_attr(node.args[1].target, gm)
195
+ bn_running_var = fetch_attr(node.args[2].target, gm)
196
+ bn_weight = fetch_attr(node.args[3].target, gm)
197
+ bn_bias = fetch_attr(node.args[4].target, gm)
198
+ if bn_running_mean is None or bn_running_var is None:
199
+ continue
200
+ fused_conv = copy.deepcopy(conv)
201
+ fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(
202
+ fused_conv.weight,
203
+ fused_conv.bias,
204
+ bn_running_mean,
205
+ bn_running_var,
206
+ bn_eps,
207
+ bn_weight,
208
+ bn_bias,
209
+ )
210
+ replace_node_module(node.args[0], modules, fused_conv)
211
+ node.replace_all_uses_with(node.args[0])
212
+ gm.graph.erase_node(node)
213
+ gm.graph.lint()
214
+ gm.recompile()
215
+
216
+ return gm
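+
+ # --- Hedged usage sketch (added for exposition; not part of the original file) ---
+ # Minimal illustration of the module-pattern branch above: trace an eval-mode
+ # Conv2d + BatchNorm2d pair and fold the BN into the conv. The toy model is made up.
+ def _example_fuse_conv_bn() -> torch.fx.GraphModule:
+     model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
+     traced = torch.fx.symbolic_trace(model)
+     return fuse_conv_bn(traced)  # BN call_module node is erased; conv is swapped for a fused conv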
217
+
218
+
219
+ class NormalizedLinearNode:
220
+ def __init__(self, node: torch.fx.Node) -> None:
221
+ assert node.op == "call_function"
222
+ assert node.target in [torch.nn.functional.linear]
223
+ self.node: torch.fx.Node = node
224
+
225
+ def get_input(self) -> torch.fx.Node:
226
+ if len(self.node.args) > 0:
227
+ return self.node.args[0]
228
+ else:
229
+ return self.node.kwargs["input"]
230
+
231
+ def get_weight(self) -> torch.fx.Node:
232
+ if len(self.node.args) > 1:
233
+ return self.node.args[1]
234
+ else:
235
+ return self.node.kwargs["weight"]
236
+
237
+ def get_bias(self) -> torch.fx.Node:
238
+ if len(self.node.args) > 2:
239
+ return self.node.args[2]
240
+ else:
241
+ return self.node.kwargs["bias"] if "bias" in self.node.kwargs else None
242
+
243
+
244
+ class NormalizedMatmulNode:
245
+ def __init__(self, node: torch.fx.Node) -> None:
246
+ assert node.op == "call_function"
247
+ assert node.target in [torch.bmm, torch.matmul]
248
+ self.node: torch.fx.Node = node
249
+
250
+ def get_input(self) -> torch.fx.Node:
251
+ if len(self.node.args) > 0:
252
+ return self.node.args[0]
253
+ else:
254
+ return self.node.kwargs["input"]
255
+
256
+ def get_other(self) -> torch.fx.Node:
257
+ if len(self.node.args) > 1:
258
+ return self.node.args[1]
259
+ else:
260
+ return self.node.kwargs["other"]
261
+
262
+
263
+ def check_permute(node: torch.fx.Node) -> bool:
264
+ ranks = len(node.meta["tensor_meta"].shape)
265
+ if len(node.args) > 3:
266
+ permutation = [node.args[i] % ranks for i in range(1, ranks + 1)]
267
+ elif (
268
+ "permutation" in node.kwargs
269
+ and node.kwargs["permutation"] is not None
270
+ and len(node.kwargs["permutation"]) > 2
271
+ ):
272
+ permutation = [i % ranks for i in node.kwargs["permutation"]]
273
+ else:
274
+ return False
275
+ allowed_permutation = list(range(ranks))
276
+ allowed_permutation[-1] = ranks - 2
277
+ allowed_permutation[-2] = ranks - 1
278
+ return permutation == allowed_permutation
279
+
280
+
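To make the intent of check_permute concrete, here is a small standalone helper (purely illustrative, not used by the pass) that applies the same "only the last two dims are swapped" test to a plain permutation list instead of an FX node:

def _is_last_two_dims_swap(permutation):
    # Same test as check_permute, but on a plain list instead of an FX node.
    ranks = len(permutation)
    if ranks <= 2:
        return False  # check_permute only fires for permutations of length > 2
    allowed = list(range(ranks))
    allowed[-1], allowed[-2] = ranks - 2, ranks - 1
    return [i % ranks for i in permutation] == allowed

# _is_last_two_dims_swap([0, 2, 1]) -> True; _is_last_two_dims_swap([2, 1, 0]) -> False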
281
+ def sink_cat_after_pointwise(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
282
+ def one_user(node):
283
+ users = list(node.users)
284
+ return users[0] if len(users) == 1 else None
285
+
286
+ def is_view(node):
287
+ view = {"view"}
288
+ return node.op == "call_method" and node.target in view
289
+
290
+ def is_pointwise_unary(node):
291
+ pointwise = {torch.relu, torch.tanh, "relu", "tanh"}
292
+ return node.op in {"call_function", "call_method"} and node.target in pointwise
293
+
294
+ g = module.graph
295
+ for node in g.nodes:
296
+ if node.op != "call_function" or node.target != torch.cat:
297
+ continue
298
+
299
+ cat_or_view = node
300
+ while True:
301
+ user = one_user(cat_or_view)
302
+ if not user or not is_view(user):
303
+ break
304
+ cat_or_view = user
305
+
306
+ if user and is_pointwise_unary(user):
307
+ with g.inserting_before(node):
308
+
309
+ def cat_args(tensors, dim=0):
310
+ return tensors, dim
311
+
312
+ tensors, dim = cat_args(*node.args, **node.kwargs)
313
+ new_tensors = [
314
+ g.create_node(user.op, user.target, args=(arg,), kwargs=user.kwargs)
315
+ for arg in tensors
316
+ ]
317
+ new_cat = g.create_node(
318
+ "call_function", torch.cat, args=(new_tensors, dim)
319
+ )
320
+ user.replace_all_uses_with(cat_or_view)
321
+ node.replace_all_uses_with(new_cat)
322
+ g.erase_node(user)
323
+ g.erase_node(node)
324
+ g.lint()
325
+ module.recompile()
326
+ return module
327
+
328
+
329
+ def linear_permute_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
330
+ for node in module.graph.nodes:
331
+ if (
332
+ node.op == "call_method"
333
+ and node.target == "permute"
334
+ and check_permute(node)
335
+ ):
336
+ if len(node.args) > 0:
337
+ input_node = node.args[0]
338
+ else:
339
+ input_node = node.kwargs["input"]
340
+ if (
341
+ input_node.op == "call_function"
342
+ and input_node.target == torch.nn.functional.linear
343
+ ):
344
+ normalized = NormalizedLinearNode(input_node)
345
+ input = normalized.get_input()
346
+ weight = normalized.get_weight()
347
+ bias = normalized.get_bias()
348
+ with module.graph.inserting_before(node):
349
+ fused_node = module.graph.call_function(
350
+ linear_transpose, args=(input, weight, bias)
351
+ )
352
+ node.replace_all_uses_with(fused_node)
353
+ module.graph.erase_node(node)
354
+ if len(input_node.users) == 0:
355
+ module.graph.erase_node(input_node)
356
+
357
+ module.graph.lint()
358
+ module.recompile()
359
+ return module
360
+
361
+
362
+ # Y1 = X * W^T + bias
363
+ # Y2 = Y1.permute(0, 2, 1)
364
+ # ---->
365
+ # Y2 = W * X^T + bias.unsqueeze(-1)
366
+ def linear_transpose(
367
+ input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
368
+ ) -> torch.Tensor:
369
+ if bias is None:
370
+ return torch.matmul(weight, input.transpose(-1, -2))
371
+ return torch.matmul(weight, input.transpose(-1, -2)) + bias.unsqueeze(-1)
372
+
373
+
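A quick numerical sanity check of the identity above (an illustrative sketch; shapes are arbitrary): applying linear and then permuting the last two dims matches the fused linear_transpose form.

import torch

B, M, K, N = 2, 4, 5, 3
x, w, b = torch.randn(B, M, K), torch.randn(N, K), torch.randn(N)
y_ref = torch.nn.functional.linear(x, w, b).permute(0, 2, 1)   # Y1, then permute
y_fused = linear_transpose(x, w, b)                            # W @ X^T + b.unsqueeze(-1)
assert torch.allclose(y_ref, y_fused, atol=1e-5)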
374
+ def permute_linear_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
375
+ for node in module.graph.nodes:
376
+ if node.op == "call_function" and node.target == torch.nn.functional.linear:
377
+ if len(node.args) > 0:
378
+ input_node = node.args[0]
379
+ else:
380
+ input_node = node.kwargs["input"]
381
+ if (
382
+ input_node.op == "call_method"
383
+ and input_node.target == "permute"
384
+ and check_permute(input_node)
385
+ ):
386
+ normalized = NormalizedLinearNode(node)
387
+ if len(input_node.args) > 0:
388
+ input = input_node.args[0]
389
+ else:
390
+ input = input_node.kwargs["input"]
391
+ weight = normalized.get_weight()
392
+ bias = normalized.get_bias()
393
+ with module.graph.inserting_before(node):
394
+ fused_node = module.graph.call_function(
395
+ transpose_linear, args=(input, weight, bias)
396
+ )
397
+ node.replace_all_uses_with(fused_node)
398
+ module.graph.erase_node(node)
399
+ if len(input_node.users) == 0:
400
+ module.graph.erase_node(input_node)
401
+
402
+ module.graph.lint()
403
+ module.recompile()
404
+ return module
405
+
406
+
407
+ def permute_matmul_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
408
+ for node in module.graph.nodes:
409
+ if node.op == "call_function" and (
410
+ node.target == torch.bmm or node.target == torch.matmul
411
+ ):
412
+ normalized = NormalizedMatmulNode(node)
413
+ input_A_node = normalized.get_input()
414
+ input_B_node = normalized.get_other()
415
+ input_A = input_A_node
416
+ input_B = input_B_node
417
+ Atrans = Btrans = False
418
+ if (
419
+ input_A_node.op == "call_method"
420
+ and input_A_node.target == "permute"
421
+ and check_permute(input_A_node)
422
+ ):
423
+ Atrans = True
424
+ if len(input_A_node.args) > 0:
425
+ input_A = input_A_node.args[0]
426
+ else:
427
+ input_A = input_A_node.kwargs["input"]
428
+
429
+ if (
430
+ input_B_node.op == "call_method"
431
+ and input_B_node.target == "permute"
432
+ and check_permute(input_B_node)
433
+ ):
434
+ Btrans = True
435
+ if len(input_B_node.args) > 0:
436
+ input_B = input_B_node.args[0]
437
+ else:
438
+ input_B = input_B_node.kwargs["input"]
439
+
440
+ if Atrans or Btrans:
441
+ with module.graph.inserting_before(node):
442
+ fused_node = module.graph.call_function(
443
+ transpose_matmul,
444
+ args=(input_A, input_B, Atrans, Btrans),
445
+ )
446
+ node.replace_all_uses_with(fused_node)
447
+ module.graph.erase_node(node)
448
+ if Atrans and len(input_A_node.users) == 0:
449
+ module.graph.erase_node(input_A_node)
450
+ if Btrans and len(input_B_node.users) == 0:
451
+ module.graph.erase_node(input_B_node)
452
+
453
+ module.graph.lint()
454
+ module.recompile()
455
+ return module
456
+
457
+
458
+ # X1 = X.permute(0, 2, 1)
459
+ # Y1 = X1 * W1^T + bias1
460
+ # ---->
461
+ # Y1 = X.transpose(-1, -2) * W1^T + bias1
462
+ def transpose_linear(
463
+ input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
464
+ ) -> torch.Tensor:
465
+ if bias is None:
466
+ return torch.matmul(input.transpose(-1, -2), weight.t())
467
+ return torch.matmul(input.transpose(-1, -2), weight.t()) + bias
468
+
469
+
470
+ def transpose_matmul(
471
+ A: torch.Tensor, B: torch.Tensor, Atrans: bool, Btrans: bool
472
+ ) -> torch.Tensor:
473
+ if Atrans:
474
+ A = A.transpose(-1, -2)
475
+ if Btrans:
476
+ B = B.transpose(-1, -2)
477
+ return torch.matmul(A, B)
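Similarly, a small sanity check (illustrative only) that the two fused helpers above reproduce the original permute-then-compute results:

import torch

x, w, b = torch.randn(2, 4, 5), torch.randn(3, 4), torch.randn(3)
# permute(0, 2, 1) followed by linear == transpose_linear on the un-permuted input
ref = torch.nn.functional.linear(x.permute(0, 2, 1), w, b)
assert torch.allclose(ref, transpose_linear(x, w, b), atol=1e-5)

a, m = torch.randn(2, 4, 5), torch.randn(2, 4, 6)
# bmm with a permuted first operand == transpose_matmul with Atrans=True
ref = torch.bmm(a.permute(0, 2, 1), m)
assert torch.allclose(ref, transpose_matmul(a, m, True, False), atol=1e-5)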
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py ADDED
@@ -0,0 +1,1500 @@
1
+ import copy
2
+ import functools
3
+ import math
4
+ import operator
5
+ from typing import Any, Tuple
6
+
7
+ import torch
8
+ from torch._dynamo.utils import counters
9
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols
10
+ from ..lowering import lowerings as L, require_channels_last
11
+ from ..pattern_matcher import Arg, CallFunction, filter_nodes, KeywordArg, ListOf, Match
12
+ from ..utils import pad_listlike
13
+ from .freezing_patterns import register_freezing_graph_pattern
14
+ from .post_grad import register_lowering_pattern
15
+
16
+ aten = torch.ops.aten
17
+ prims = torch.ops.prims
18
+ quantized_decomposed = torch.ops.quantized_decomposed
19
+ quantized = torch.ops.quantized
20
+
21
+ """
22
+ The quantization.py file primarily incorporates passes related to quantization fusion
23
+ in inductor, which include:
24
+ 1. Dequant Promotion;
25
+ 2. Conv/GEMM weight prepack with oneDNN Library;
26
+ 3. Conv/GEMM quantization fusion with an output quant node (if present);
27
+ 4. Other pointwise operators' quantization fusion, such as qmaxpool2d, qcat, and more.
28
+
29
+ It also involves int8-mixed-fp32 and int8-mixed-bf16 quantization. The main difference
30
+ of patterns for int8-mixed-bf16, compared with int8-mixed-fp32, is:
31
+ 1. There is a to(dtype=torch.bfloat16) node at the activation and weight inputs of Conv/GEMM.
32
+ 2. There is a to(dtype=torch.float32) node at the output of Conv/GEMM before it feeds the next quant node.
33
+ Refer to: https://github.com/pytorch/pytorch/issues/111640 for the detailed design of int8-mixed-bf16
34
+ quantization.
35
+ """
36
+
37
+
38
+ def _may_generate_pattern_with_dtype_convert(pattern, dtype=Arg(), dtype_convert=True):
39
+ if dtype_convert:
40
+ return CallFunction(
41
+ prims.convert_element_type.default,
42
+ pattern,
43
+ dtype,
44
+ )
45
+ else:
46
+ return pattern
47
+
48
+
49
+ """
50
+ dequantize activation:
51
+ x = x.to(fp32)
52
+ x = x - zero_point
53
+ x = x * scale
54
+ """
55
+ dequantize_per_tensor_activation_pattern = CallFunction(
56
+ aten.mul.Tensor,
57
+ CallFunction(
58
+ aten.sub.Tensor,
59
+ CallFunction(
60
+ prims.convert_element_type.default,
61
+ KeywordArg("x"),
62
+ KeywordArg("x_dq_dtype"),
63
+ ),
64
+ KeywordArg("x_zp"),
65
+ ),
66
+ KeywordArg("x_scale"),
67
+ )
68
+
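For reference, the decomposed dequantize that this pattern matches corresponds to the following eager computation (an illustrative sketch; argument names mirror the KeywordArgs above, and the optional trailing bf16 cast shows the int8-mixed-bf16 variant described in the module docstring):

import torch

def _dequantize_per_tensor_ref(x, x_scale, x_zp, x_dq_dtype=torch.float32, to_bf16=False):
    out = (x.to(x_dq_dtype) - x_zp) * x_scale
    # int8-mixed-bf16 adds a convert_element_type(..., torch.bfloat16) on top
    return out.to(torch.bfloat16) if to_bf16 else out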
69
+ dequantize_per_channel_weight_pattern = CallFunction(
70
+ quantized_decomposed.dequantize_per_channel.default,
71
+ KeywordArg("q_weight"),
72
+ KeywordArg("w_scale"),
73
+ KeywordArg("w_zp"),
74
+ KeywordArg("w_axis"),
75
+ KeywordArg("w_quant_min"),
76
+ KeywordArg("w_quant_max"),
77
+ KeywordArg("w_dtype"),
78
+ )
79
+
80
+ dequantize_per_channel_to_bf16_weight_pattern = (
81
+ _may_generate_pattern_with_dtype_convert(
82
+ dequantize_per_channel_weight_pattern,
83
+ KeywordArg("autocast_wgt_dtype"),
84
+ )
85
+ )
86
+
87
+ dequantize_per_channel_clone_weight_pattern = CallFunction(
88
+ aten.clone.default,
89
+ dequantize_per_channel_weight_pattern,
90
+ memory_format=KeywordArg("memory_format"),
91
+ )
92
+
93
+ dequantize_per_channel_to_bf16_clone_weight_pattern = CallFunction(
94
+ aten.clone.default,
95
+ dequantize_per_channel_to_bf16_weight_pattern,
96
+ memory_format=KeywordArg("memory_format"),
97
+ )
98
+
99
+ dequantize_qconv_pt2e_pattern = CallFunction(
100
+ torch.ops.onednn.qconv2d_pointwise.default,
101
+ KeywordArg("x"),
102
+ KeywordArg("x_scale"), # x_scale
103
+ KeywordArg("x_zp"), # x_zp
104
+ KeywordArg("packed_weight"), # packed_weight
105
+ KeywordArg("w_scale"), # w_scale
106
+ KeywordArg("w_zp"), # w_zp
107
+ KeywordArg("b"), # bias
108
+ KeywordArg("stride"),
109
+ KeywordArg("padding"),
110
+ KeywordArg("dilation"),
111
+ KeywordArg("groups"),
112
+ KeywordArg("inv_output_scale"), # inv_output_scale = 1.0
113
+ KeywordArg("output_zero_point"), # output_zero_point = 0
114
+ KeywordArg("output_dtype"), # output_dtype = None
115
+ KeywordArg("attr"), # attr = "none"
116
+ Arg(), # scalars
117
+ Arg(), # algorithm
118
+ )
119
+
120
+ qlinear_pt2e_pattern = CallFunction(
121
+ torch.ops.onednn.qlinear_pointwise.default,
122
+ KeywordArg("x"),
123
+ KeywordArg("x_scale"),
124
+ KeywordArg("x_zp"),
125
+ KeywordArg("packed_weight"),
126
+ KeywordArg("w_scale"),
127
+ KeywordArg("w_zp"),
128
+ KeywordArg("b"),
129
+ KeywordArg("output_scale"),
130
+ KeywordArg("output_zero_point"),
131
+ KeywordArg("output_dtype"),
132
+ KeywordArg("postop_name"),
133
+ KeywordArg("postop_args"),
134
+ KeywordArg("postop_algorithm"),
135
+ )
136
+
137
+ dequantize_accum_pattern = CallFunction(
138
+ aten.mul.Tensor,
139
+ CallFunction(
140
+ aten.sub.Tensor,
141
+ CallFunction(
142
+ prims.convert_element_type.default,
143
+ KeywordArg("accum"),
144
+ KeywordArg("accum_dq_dtype"),
145
+ ),
146
+ KeywordArg("accum_zp"),
147
+ ),
148
+ KeywordArg("accum_scale"),
149
+ )
150
+
151
+
152
+ def generate_pattern_with_binary(
153
+ binary_post_op,
154
+ computation_call,
155
+ extra_input_pattern,
156
+ int8_mixed_bf16_with_inplace_add=False,
157
+ ):
158
+ binary_pattern = CallFunction(
159
+ binary_post_op,
160
+ computation_call,
161
+ extra_input_pattern,
162
+ )
163
+ return _may_generate_pattern_with_dtype_convert(
164
+ binary_pattern,
165
+ KeywordArg("convert_dtype_after_inplace_add"),
166
+ int8_mixed_bf16_with_inplace_add,
167
+ )
168
+
169
+
170
+ def generate_pattern_with_unary(computation_call, unary_post_op):
171
+ if unary_post_op is not None:
172
+ if unary_post_op == aten.hardtanh.default:
173
+ return CallFunction(
174
+ aten.clamp_max,
175
+ CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")),
176
+ KeywordArg("max_value"),
177
+ )
178
+ else:
179
+ return CallFunction(
180
+ unary_post_op,
181
+ computation_call,
182
+ )
183
+ return computation_call
184
+
185
+
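The special-casing of aten.hardtanh above reflects that, after decomposition, hardtanh shows up in the graph as a clamp_min followed by a clamp_max; a quick illustrative check:

import torch

x = torch.randn(8)
assert torch.allclose(
    torch.nn.functional.hardtanh(x, -0.5, 0.5),
    torch.clamp_max(torch.clamp_min(x, -0.5), 0.5),
)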
186
+ def generate_pattern_with_output_quant(computation_call, dtype=torch.float32):
187
+ """
188
+ quantize output:
189
+ output = round(output * o_inv_scale)
190
+ output = output + zero_point
191
+ output = clamp_min(output, 0)
192
+ output = clamp_max(output, 127)
193
+ output = output.to(uint8)
194
+ """
195
+ assert dtype in [torch.float32, torch.bfloat16]
196
+ quantized_op_output_pattern_pt2e = CallFunction(
197
+ prims.convert_element_type.default,
198
+ CallFunction(
199
+ aten.clamp_max.default,
200
+ CallFunction(
201
+ aten.clamp_min.default,
202
+ CallFunction(
203
+ aten.add.Tensor,
204
+ CallFunction(
205
+ aten.round.default,
206
+ CallFunction(
207
+ aten.mul.Tensor,
208
+ _may_generate_pattern_with_dtype_convert(
209
+ computation_call,
210
+ KeywordArg("autocast_output_quant_dtype"),
211
+ dtype != torch.float32,
212
+ ),
213
+ KeywordArg("o_inv_scale"),
214
+ ),
215
+ ),
216
+ KeywordArg("o_zp"),
217
+ ),
218
+ KeywordArg("o_qmin"),
219
+ ),
220
+ KeywordArg("o_qmax"),
221
+ ),
222
+ KeywordArg("o_dtype"),
223
+ )
224
+ return quantized_op_output_pattern_pt2e
225
+
226
+
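And the matching eager form of the quantize sequence described in the docstring above (a sketch; names mirror the KeywordArgs, and the uint8 defaults are purely illustrative):

import torch

def _quantize_output_ref(output, o_inv_scale, o_zp, o_qmin=0, o_qmax=255, o_dtype=torch.uint8):
    out = torch.round(output * o_inv_scale) + o_zp
    return torch.clamp(out, o_qmin, o_qmax).to(o_dtype)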
227
+ def _check_node_kwarg_arg_value(check_node, kwarg_name, args_index, expected_value):
228
+ if kwarg_name in check_node.kwargs:
229
+ actual_value = check_node.kwargs[kwarg_name]
230
+ return actual_value == expected_value
231
+ else:
232
+ assert len(check_node.args) >= (args_index + 1)
233
+ actual_value = check_node.args[args_index]
234
+ return actual_value == expected_value
235
+
236
+
237
+ def _is_valid_quantized_conv2d_optimization_pattern(output_dtype):
238
+ def fn(match):
239
+ if output_dtype is not None:
240
+ # Only keep matched pattern with same output_dtype
241
+ qconv_node_after_weight_prepack = filter_nodes(
242
+ match.nodes, torch.ops.onednn.qconv2d_pointwise
243
+ )[0]
244
+ return _check_node_kwarg_arg_value(
245
+ qconv_node_after_weight_prepack, "output_dtype", 13, output_dtype
246
+ )
247
+ return True
248
+
249
+ return fn
250
+
251
+
252
+ def _register_quantized_conv_lowering(
253
+ pattern,
254
+ pass_number,
255
+ computation_op,
256
+ output_dtype,
257
+ unary_attr,
258
+ original_pattern_output_dtype=torch.float32,
259
+ ):
260
+ @register_lowering_pattern(
261
+ pattern,
262
+ extra_check=_is_valid_quantized_conv2d_optimization_pattern(output_dtype),
263
+ pass_number=pass_number,
264
+ )
265
+ def qconv(match: Match, *args, **kwargs):
266
+ # Activation QParams
267
+ x, x_scale, x_zp = (
268
+ kwargs["x"],
269
+ kwargs["x_scale"],
270
+ kwargs["x_zp"],
271
+ )
272
+ # Weight QParams
273
+ packed_weight, w_scale, w_zp = (
274
+ kwargs["packed_weight"],
275
+ kwargs["w_scale"],
276
+ kwargs["w_zp"],
277
+ )
278
+ # Conv Params
279
+ b, stride, padding, dilation, groups = (
280
+ kwargs["b"],
281
+ kwargs["stride"],
282
+ kwargs["padding"],
283
+ kwargs["dilation"],
284
+ kwargs["groups"],
285
+ )
286
+ assert output_dtype in [None, torch.float32, torch.bfloat16]
287
+ # Output QParams
288
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
289
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
290
+ assert (
291
+ kwargs["output_dtype"] is original_pattern_output_dtype
292
+ ) # Expected int8-in fp32-out qconv in weight prepack phase
293
+ assert (
294
+ kwargs["attr"] == "none"
295
+ ) # Expected no post op fused in weight prepack phase
296
+ if unary_attr.op_name == "hardtanh":
297
+ min_value = kwargs.get("min_value")
298
+ max_value = kwargs.get("max_value")
299
+ unary_attr.scalars_attr = [min_value, max_value]
300
+
301
+ computation_args = (
302
+ x,
303
+ x_scale,
304
+ x_zp,
305
+ packed_weight,
306
+ w_scale,
307
+ w_zp,
308
+ b,
309
+ stride,
310
+ padding,
311
+ dilation,
312
+ groups,
313
+ o_inv_scale,
314
+ o_zero_point,
315
+ output_dtype,
316
+ unary_attr.op_name,
317
+ unary_attr.scalars_attr,
318
+ unary_attr.algorithm_attr,
319
+ )
320
+ counters["inductor"]["qconv2d_unary_matcher_count"] += 1
321
+ counters["inductor"]["qconv2d_unary_matcher_nodes"] += len(match.nodes)
322
+ return L[computation_op](*computation_args)
323
+
324
+ return qconv
325
+
326
+
327
+ def _is_valid_quantized_linear_optimization_pattern(output_dtype):
328
+ def fn(match):
329
+ if output_dtype is not None:
330
+ # Only keep matched pattern with same output_dtype
331
+ qlinear_node_after_weight_prepack = filter_nodes(
332
+ match.nodes, torch.ops.onednn.qlinear_pointwise
333
+ )[0]
334
+ return _check_node_kwarg_arg_value(
335
+ qlinear_node_after_weight_prepack, "output_dtype", 9, output_dtype
336
+ )
337
+ return True
338
+
339
+ return fn
340
+
341
+
342
+ def _register_quantized_linear_lowering(
343
+ pattern,
344
+ pass_number,
345
+ computation_op,
346
+ output_dtype,
347
+ unary_attr,
348
+ original_pattern_output_dtype=torch.float32,
349
+ ):
350
+ @register_lowering_pattern(
351
+ pattern,
352
+ extra_check=_is_valid_quantized_linear_optimization_pattern(output_dtype),
353
+ pass_number=pass_number,
354
+ )
355
+ def qlinear(match: Match, *args, **kwargs):
356
+ # Activation QParams
357
+ x, x_scale, x_zp = (
358
+ kwargs["x"],
359
+ kwargs["x_scale"],
360
+ kwargs["x_zp"],
361
+ )
362
+ # Weight QParams
363
+ packed_weight, w_scale, w_zp = (
364
+ kwargs["packed_weight"],
365
+ kwargs["w_scale"],
366
+ kwargs["w_zp"],
367
+ )
368
+
369
+ # bias
370
+ b = kwargs["b"] if "b" in kwargs else None
371
+
372
+ # Output QParams
373
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
374
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
375
+ assert (
376
+ kwargs["output_dtype"] is original_pattern_output_dtype
377
+ ) # Expected int8-in fp32/bf16-out qlinear in weight prepack phase
378
+ assert (
379
+ kwargs["postop_name"] == "none"
380
+ ) # Expected no post op fused in weight prepack phase
381
+
382
+ computation_args = (
383
+ x,
384
+ x_scale,
385
+ x_zp,
386
+ packed_weight,
387
+ w_scale,
388
+ w_zp,
389
+ b,
390
+ o_inv_scale,
391
+ o_zero_point,
392
+ output_dtype,
393
+ unary_attr.op_name,
394
+ unary_attr.scalars_attr,
395
+ unary_attr.algorithm_attr,
396
+ )
397
+ counters["inductor"]["qlinear_unary_matcher_count"] += 1
398
+ counters["inductor"]["qlinear_unary_matcher_nodes"] += len(match.nodes)
399
+ return L[computation_op](*computation_args)
400
+
401
+ return qlinear
402
+
403
+
404
+ def _is_valid_quantized_conv_binary_optimization_pattern(output_dtype):
405
+ # Check if it's a valid Conv Binary Pattern:
406
+ # * qconv2d_pointwise should only have one user
407
+ # * Extra input of binary node comes from dequant pattern
408
+ def fn(match):
409
+ qconv2d_node_after_weight_prepack = filter_nodes(
410
+ match.nodes, torch.ops.onednn.qconv2d_pointwise
411
+ )[0]
412
+ if len(qconv2d_node_after_weight_prepack.users) != 1:
413
+ return False
414
+ if output_dtype is not None:
415
+ binary_node_inputs = list(qconv2d_node_after_weight_prepack.users)[0].args
416
+ assert len(binary_node_inputs) == 2, "Expects binary node with 2 inputs"
417
+ extra_input_node = None
418
+ for arg in binary_node_inputs:
419
+ if arg != qconv2d_node_after_weight_prepack:
420
+ extra_input_node = arg
421
+ break
422
+ assert extra_input_node is not None
423
+ if (not isinstance(extra_input_node, torch.fx.Node)) or (
424
+ extra_input_node.target != aten.mul.Tensor
425
+ ):
426
+ return False
427
+ return True
428
+
429
+ return fn
430
+
431
+
432
+ def _register_quantized_conv_binary_lowering(
433
+ pattern,
434
+ pass_number,
435
+ computation_op,
436
+ output_dtype,
437
+ binary_unary_attr,
438
+ ):
439
+ @register_lowering_pattern(
440
+ pattern,
441
+ extra_check=_is_valid_quantized_conv_binary_optimization_pattern(output_dtype),
442
+ pass_number=pass_number,
443
+ )
444
+ def qconv_binary(match: Match, *args, **kwargs):
445
+ x, x_scale, x_zp = kwargs["x"], kwargs["x_scale"], kwargs["x_zp"]
446
+ accum = (
447
+ kwargs["accum"] if output_dtype is None else kwargs["accum_after_dequant"]
448
+ )
449
+ accum_scale = kwargs["accum_scale"] if output_dtype is None else 1.0
450
+ accum_zp = kwargs["accum_zp"] if output_dtype is None else 0
451
+ packed_weight, w_scale, w_zp = (
452
+ kwargs["packed_weight"],
453
+ kwargs["w_scale"],
454
+ kwargs["w_zp"],
455
+ )
456
+ b, stride, padding, dilation, groups = (
457
+ kwargs["b"],
458
+ kwargs["stride"],
459
+ kwargs["padding"],
460
+ kwargs["dilation"],
461
+ kwargs["groups"],
462
+ )
463
+ # Output QParams
464
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
465
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
466
+
467
+ computation_args = (
468
+ x,
469
+ x_scale,
470
+ x_zp,
471
+ accum,
472
+ accum_scale,
473
+ accum_zp,
474
+ packed_weight,
475
+ w_scale,
476
+ w_zp,
477
+ b,
478
+ stride,
479
+ padding,
480
+ dilation,
481
+ groups,
482
+ o_inv_scale,
483
+ o_zero_point,
484
+ output_dtype,
485
+ binary_unary_attr.binary_op_name,
486
+ binary_unary_attr.alpha,
487
+ binary_unary_attr.unary_op_name,
488
+ binary_unary_attr.scalars_attr,
489
+ binary_unary_attr.algorithm_attr,
490
+ )
491
+ counters["inductor"]["qconv2d_binary_matcher_count"] += 1
492
+ counters["inductor"]["qconv2d_binary_matcher_nodes"] += len(match.nodes)
493
+ return L[computation_op](*computation_args)
494
+
495
+ return qconv_binary
496
+
497
+
498
+ def _register_quantization_unary_fusion():
499
+ class UnaryAttr:
500
+ def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
501
+ self.op_name = op_name
502
+ self.scalars_attr = scalars_attr if scalars_attr else []
503
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
504
+
505
+ for original_pattern_output_dtype in [torch.float32, torch.bfloat16]:
506
+ # QConv2d
507
+ # Priority 1 to match: QConv2d Unary pattern with int8 output
508
+ # If pattern1 is a subset of pattern2, we should try to match pattern2 first.
509
+ # For example: pattern1 is qconv_fp32 -> relu, pattern2 is qconv_fp32 -> relu -> quant
510
+ conv_unary_replace_patterns = {
511
+ UnaryAttr("none", [], ""): generate_pattern_with_output_quant(
512
+ dequantize_qconv_pt2e_pattern,
513
+ dtype=original_pattern_output_dtype,
514
+ ),
515
+ UnaryAttr("relu", [], ""): generate_pattern_with_output_quant(
516
+ generate_pattern_with_unary(
517
+ dequantize_qconv_pt2e_pattern, aten.relu.default
518
+ ),
519
+ dtype=original_pattern_output_dtype,
520
+ ),
521
+ UnaryAttr("hardtanh", [], ""): generate_pattern_with_output_quant(
522
+ generate_pattern_with_unary(
523
+ dequantize_qconv_pt2e_pattern, aten.hardtanh.default
524
+ ),
525
+ dtype=original_pattern_output_dtype,
526
+ ),
527
+ }
528
+
529
+ for unary_attr, patterns in conv_unary_replace_patterns.items():
530
+ # Register qconv2d pattern for ExternKernel Lowering
531
+ _register_quantized_conv_lowering(
532
+ patterns,
533
+ 1, # pass_number
534
+ torch.ops.onednn.qconv2d_pointwise, # computation_op
535
+ None, # output_dtype, None is the default value for int8 output
536
+ unary_attr, # unary_attr
537
+ original_pattern_output_dtype=original_pattern_output_dtype,
538
+ )
539
+
540
+ # Priority 2 to match: QConv2d Unary pattern with fp32/bfloat16 output
541
+ conv_unary_replace_float_out_patterns = {
542
+ UnaryAttr("relu", [], ""): generate_pattern_with_unary(
543
+ dequantize_qconv_pt2e_pattern, aten.relu.default
544
+ ),
545
+ UnaryAttr("hardtanh", [], ""): generate_pattern_with_unary(
546
+ dequantize_qconv_pt2e_pattern, aten.hardtanh.default
547
+ ),
548
+ }
549
+
550
+ for unary_attr, patterns in conv_unary_replace_float_out_patterns.items():
551
+ # Register qconv2d pattern for ExternKernel Lowering
552
+ _register_quantized_conv_lowering(
553
+ patterns,
554
+ 2, # pass_number
555
+ torch.ops.onednn.qconv2d_pointwise, # computation_op
556
+ original_pattern_output_dtype, # output_dtype
557
+ unary_attr, # unary_attr
558
+ original_pattern_output_dtype=original_pattern_output_dtype,
559
+ )
560
+
561
+ # QLinear
562
+ # Priority 1 to match: QLinear Unary pattern with int8 output
563
+ linear_unary_replace_patterns = {
564
+ UnaryAttr("none", [], ""): generate_pattern_with_output_quant(
565
+ qlinear_pt2e_pattern,
566
+ dtype=original_pattern_output_dtype,
567
+ ),
568
+ UnaryAttr("relu", [], ""): generate_pattern_with_output_quant(
569
+ generate_pattern_with_unary(qlinear_pt2e_pattern, aten.relu.default),
570
+ dtype=original_pattern_output_dtype,
571
+ ),
572
+ }
573
+
574
+ for unary_attr, patterns in linear_unary_replace_patterns.items():
575
+ _register_quantized_linear_lowering(
576
+ patterns,
577
+ 1, # pass_number
578
+ torch.ops.onednn.qlinear_pointwise, # computation_op
579
+ None, # output_dtype
580
+ unary_attr, # unary_attr
581
+ original_pattern_output_dtype=original_pattern_output_dtype,
582
+ )
583
+
584
+ # Priority 2 to match: QLinear Unary pattern with FP32/BF16 output
585
+ linear_unary_replace_float_out_patterns = {
586
+ UnaryAttr("relu", [], ""): generate_pattern_with_unary(
587
+ qlinear_pt2e_pattern, aten.relu.default
588
+ ),
589
+ }
590
+
591
+ for unary_attr, patterns in linear_unary_replace_float_out_patterns.items():
592
+ _register_quantized_linear_lowering(
593
+ patterns,
594
+ 2, # pass_number
595
+ torch.ops.onednn.qlinear_pointwise, # computation_op
596
+ original_pattern_output_dtype, # output_dtype
597
+ unary_attr, # unary_attr
598
+ original_pattern_output_dtype=original_pattern_output_dtype,
599
+ )
600
+
601
+
602
+ def _register_quantization_binary_fusion():
603
+ class BinaryUnaryAttr:
604
+ def __init__(
605
+ self,
606
+ binary_op_name: str,
607
+ alpha=None,
608
+ unary_op_name: str = "none",
609
+ scalars_attr=None,
610
+ algorithm_attr=None,
611
+ ):
612
+ self.binary_op_name = binary_op_name
613
+ self.alpha = alpha if alpha else 1.0
614
+ self.unary_op_name = unary_op_name
615
+ self.scalars_attr = scalars_attr if scalars_attr else []
616
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
617
+
618
+ for int8_mixed_bf16_with_inplace_add in [False, True]:
619
+ # Priority 1 to match: QConv2d Binary or Binary-Unary pattern with int8 output
620
+ binary_replace_patterns = {
621
+ BinaryUnaryAttr(
622
+ "add", 1.0, "none", [], ""
623
+ ): generate_pattern_with_output_quant(
624
+ generate_pattern_with_binary(
625
+ aten.add.Tensor,
626
+ dequantize_qconv_pt2e_pattern,
627
+ dequantize_accum_pattern,
628
+ int8_mixed_bf16_with_inplace_add,
629
+ ),
630
+ dtype=torch.bfloat16
631
+ if int8_mixed_bf16_with_inplace_add
632
+ else torch.float32,
633
+ ),
634
+ BinaryUnaryAttr(
635
+ "add", 1.0, "relu", [], ""
636
+ ): generate_pattern_with_output_quant(
637
+ generate_pattern_with_unary(
638
+ generate_pattern_with_binary(
639
+ aten.add.Tensor,
640
+ dequantize_qconv_pt2e_pattern,
641
+ dequantize_accum_pattern,
642
+ int8_mixed_bf16_with_inplace_add,
643
+ ),
644
+ aten.relu.default,
645
+ ),
646
+ dtype=torch.bfloat16
647
+ if int8_mixed_bf16_with_inplace_add
648
+ else torch.float32,
649
+ ),
650
+ }
651
+
652
+ for binary_unary_attr, patterns in binary_replace_patterns.items():
653
+ _register_quantized_conv_binary_lowering(
654
+ patterns,
655
+ 0, # pass_number
656
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
657
+ None, # output_dtype
658
+ binary_unary_attr, # binary_unary_attr
659
+ )
660
+
661
+ # Priority 2 to match: QConv2d Binary-Unary pattern with fp32/bfloat16 output
662
+ binary_replace_float_out_patterns = {
663
+ BinaryUnaryAttr("add", 1.0, "relu", [], ""): generate_pattern_with_unary(
664
+ generate_pattern_with_binary(
665
+ aten.add.Tensor,
666
+ dequantize_qconv_pt2e_pattern,
667
+ KeywordArg("accum_after_dequant"),
668
+ int8_mixed_bf16_with_inplace_add,
669
+ ),
670
+ aten.relu.default,
671
+ ),
672
+ }
673
+
674
+ for (
675
+ binary_unary_attr,
676
+ patterns,
677
+ ) in binary_replace_float_out_patterns.items():
678
+ if int8_mixed_bf16_with_inplace_add:
679
+ _register_quantized_conv_binary_lowering(
680
+ patterns,
681
+ 0, # pass_number
682
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
683
+ # Note that for int8-mixed-bf16 with a non-inplace add, because we have
684
+ # a q-dq pair inserted at the extra input of the add, the non-inplace add has bf16 and fp32 inputs,
685
+ # so the output dtype will be float32.
686
+ # For an inplace add, there is an extra to_bf16 node at the add output, so the fusion pattern has bfloat16 output.
687
+ torch.bfloat16,
688
+ binary_unary_attr, # binary_unary_attr
689
+ )
690
+ else:
691
+ _register_quantized_conv_binary_lowering(
692
+ patterns,
693
+ 1, # pass_number
694
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
695
+ torch.float32,
696
+ binary_unary_attr, # binary_unary_attr
697
+ )
698
+
699
+ # Priority 3: QConv2d Binary pattern with fp32/bfloat16 output
700
+ binary_replace_float_out_patterns = {
701
+ BinaryUnaryAttr("add", 1.0, "none", [], ""): generate_pattern_with_binary(
702
+ aten.add.Tensor,
703
+ dequantize_qconv_pt2e_pattern,
704
+ KeywordArg("accum_after_dequant"),
705
+ int8_mixed_bf16_with_inplace_add,
706
+ ),
707
+ }
708
+
709
+ for (
710
+ binary_unary_attr,
711
+ patterns,
712
+ ) in binary_replace_float_out_patterns.items():
713
+ _register_quantized_conv_binary_lowering(
714
+ patterns,
715
+ 1 if int8_mixed_bf16_with_inplace_add else 2, # pass_number
716
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
717
+ # Same output dtype setting as conv-add-relu pattern
718
+ torch.bfloat16 if int8_mixed_bf16_with_inplace_add else torch.float32,
719
+ binary_unary_attr, # binary_unary_attr
720
+ )
721
+
722
+
723
+ def _is_valid_quantized_maxpool2d_optimization_pattern():
724
+ def fn(match):
725
+ # Only match the pattern in which max_pool2d_with_indices returns the value
726
+ # instead of the indices.
727
+ get_item_node = filter_nodes(match.nodes, operator.getitem)[0]
728
+ return get_item_node.args[1] == 0
729
+
730
+ return fn
731
+
732
+
733
+ def _register_quantized_maxpool2d_lowering(
734
+ pattern,
735
+ computation_op,
736
+ ):
737
+ @register_lowering_pattern(
738
+ pattern,
739
+ extra_check=_is_valid_quantized_maxpool2d_optimization_pattern(),
740
+ )
741
+ def qmaxpool2d(match: Match, *args, **kwargs):
742
+ x = kwargs["x"]
743
+ kernel_size = kwargs["kernel_size"]
744
+ stride = kwargs["stride"] if ("stride" in kwargs) else None
745
+ padding = kwargs["padding"] if ("padding" in kwargs) else 0
746
+ dilation = kwargs["dilation"] if ("dilation" in kwargs) else 1
747
+ ceil_mode = kwargs["ceil_mode"] if ("ceil_mode" in kwargs) else False
748
+
749
+ if padding == 0:
750
+ padding = [0, 0]
751
+ if dilation == 1:
752
+ dilation = [1, 1]
753
+ if not stride:
754
+ stride = kernel_size
755
+ kernel_size = pad_listlike(kernel_size, 2)
756
+ stride = pad_listlike(stride, 2)
757
+ padding = pad_listlike(padding, 2)
758
+ dilation = pad_listlike(dilation, 2)
759
+
760
+ assert len(kernel_size) == 2
761
+ assert len(stride) == 2
762
+ assert len(padding) == 2
763
+ assert len(dilation) == 2
764
+
765
+ computation_args = (
766
+ x,
767
+ kernel_size,
768
+ stride,
769
+ padding,
770
+ dilation,
771
+ ceil_mode,
772
+ )
773
+ computation_args, _ = require_channels_last(computation_op, *computation_args)
774
+ return L[computation_op](*computation_args)
775
+
776
+ return qmaxpool2d
777
+
778
+
779
+ def _register_quantization_maxpool2d():
780
+ # Currently, the default parameters are not present in the FX graph generated by Dynamo export.
781
+ # So, if the user defines nn.MaxPool2d with a different assignment of default parameters,
782
+ # it will generate a graph with a different number of input nodes and hence
783
+ # a different pattern to be matched.
784
+ # Refer to the issue: https://github.com/pytorch/pytorch/issues/105901
785
+ max_pool2d_args_list = [
786
+ [
787
+ KeywordArg("stride"),
788
+ ],
789
+ [
790
+ KeywordArg("stride"),
791
+ KeywordArg("padding"),
792
+ ],
793
+ [
794
+ KeywordArg("stride"),
795
+ KeywordArg("padding"),
796
+ KeywordArg("dilation"),
797
+ ],
798
+ [
799
+ KeywordArg("stride"),
800
+ KeywordArg("padding"),
801
+ KeywordArg("dilation"),
802
+ KeywordArg("ceil_mode"),
803
+ ],
804
+ ]
805
+
806
+ for max_pool2d_args in max_pool2d_args_list:
807
+ dequantize_maxpool2d_pattern = CallFunction(
808
+ aten.max_pool2d_with_indices.default,
809
+ dequantize_per_tensor_activation_pattern,
810
+ KeywordArg("kernel_size"),
811
+ *max_pool2d_args,
812
+ )
813
+ dequantize_maxpool2d_get_item_pattern = CallFunction(
814
+ operator.getitem,
815
+ dequantize_maxpool2d_pattern,
816
+ Arg(),
817
+ )
818
+ _register_quantized_maxpool2d_lowering(
819
+ generate_pattern_with_output_quant(dequantize_maxpool2d_get_item_pattern),
820
+ quantized.max_pool2d.default,
821
+ )
822
+
823
+
824
+ def _is_input_output_same_scale_zp(check_node):
825
+ def fn(match):
826
+ # Ensure all the inputs and the output have the same scale and zero point
827
+ # Step 1: Check inputs/output zero point
828
+ sub_nodes = filter_nodes(match.nodes, aten.sub.Tensor)
829
+ zero_points = [node.args[1] for node in sub_nodes]
830
+ add_nodes = filter_nodes(match.nodes, aten.add.Tensor)
831
+ assert len(add_nodes) == 1, "expect only 1 add node at output quant pattern"
832
+ zero_points.append(add_nodes[0].args[1])
833
+ if not all(zero_point == zero_points[0] for zero_point in zero_points):
834
+ return False
835
+
836
+ # Step 2: Check inputs/output scale
837
+ mul_nodes = filter_nodes(match.nodes, aten.mul.Tensor)
838
+ # We need to find the mul node at the output since its scale value is the reciprocal of the input scale.
839
+ # The mul node at the output should connect to the check_node (e.g. cat) directly.
840
+ scales = [
841
+ (
842
+ mul_node.args[1]
843
+ if mul_node.args[0].target is check_node
844
+ else 1.0 / mul_node.args[1]
845
+ )
846
+ for mul_node in mul_nodes
847
+ ]
848
+ if not all(math.isclose(scale, scales[0], rel_tol=1e-5) for scale in scales):
849
+ return False
850
+
851
+ return True
852
+
853
+ return fn
854
+
855
+
856
+ def _register_quantized_cat_lowering(
857
+ pattern,
858
+ computation_op,
859
+ ):
860
+ @register_lowering_pattern(
861
+ pattern,
862
+ extra_check=_is_input_output_same_scale_zp(aten.cat.default),
863
+ )
864
+ def qcat(match: Match, inputs, dim, **kwargs):
865
+ # inputs has the format: [[x1, x1_dq_dtype, x1_zp, x1_scale], ...]
866
+ uint8_inputs = [input[0] for input in inputs]
867
+ return L[computation_op](uint8_inputs, dim)
868
+
869
+ return qcat
870
+
871
+
872
+ _raw_dequantize_per_tensor_activation_pattern = CallFunction(
873
+ aten.mul.Tensor,
874
+ CallFunction(
875
+ aten.sub.Tensor,
876
+ CallFunction(
877
+ prims.convert_element_type.default,
878
+ Arg(),
879
+ Arg(),
880
+ ),
881
+ Arg(),
882
+ ),
883
+ Arg(),
884
+ )
885
+
886
+
887
+ def _register_quantization_cat():
888
+ dequantize_cat_pattern = CallFunction(
889
+ aten.cat.default,
890
+ ListOf(_raw_dequantize_per_tensor_activation_pattern),
891
+ KeywordArg("dim"),
892
+ )
893
+ _register_quantized_cat_lowering(
894
+ generate_pattern_with_output_quant(dequantize_cat_pattern),
895
+ aten.cat,
896
+ )
897
+
898
+
899
+ def _register_quantized_reshape_lowering(
900
+ pattern,
901
+ computation_op,
902
+ ):
903
+ @register_lowering_pattern(
904
+ pattern,
905
+ extra_check=_is_input_output_same_scale_zp(aten.reshape.default),
906
+ )
907
+ def qreshape(match: Match, *args, **kwargs):
908
+ qx = kwargs["x"]
909
+ shape = kwargs["shape"]
910
+ counters["inductor"]["qreshape_matcher_count"] += 1
911
+ counters["inductor"]["qreshape_matcher_nodes"] += len(match.nodes)
912
+ return L[computation_op](qx, shape)
913
+
914
+ return qreshape
915
+
916
+
917
+ def _register_quantization_reshape():
918
+ dequantize_reshape_pattern = CallFunction(
919
+ torch.ops.aten.reshape.default,
920
+ dequantize_per_tensor_activation_pattern,
921
+ KeywordArg("shape"),
922
+ )
923
+ _register_quantized_reshape_lowering(
924
+ generate_pattern_with_output_quant(dequantize_reshape_pattern),
925
+ aten.reshape,
926
+ )
927
+
928
+
929
+ def _register_quantization_lowerings():
930
+ _register_quantization_unary_fusion()
931
+ _register_quantization_binary_fusion()
932
+ _register_quantization_maxpool2d()
933
+ _register_quantization_cat()
934
+ _register_quantization_reshape()
935
+
936
+
937
+ def _is_valid_dequant_promotion_pattern(dtype=torch.float32):
938
+ def _inner(match):
939
+ assert dtype in [torch.float32, torch.bfloat16]
940
+ if dtype == torch.float32:
941
+ mul_node = match.output_node()
942
+ else:
943
+ convert_to_bf16_node = match.output_node()
944
+ mul_node = convert_to_bf16_node.args[0]
945
+ sub_node = mul_node.args[0]
946
+ to_fp32_node = sub_node.args[0]
947
+ if (
948
+ mul_node.target is aten.mul.Tensor
949
+ and sub_node.target is aten.sub.Tensor
950
+ and to_fp32_node.target is prims.convert_element_type.default
951
+ and len(list(mul_node.users)) > 1
952
+ if dtype == torch.float32
953
+ else len(list(convert_to_bf16_node.users)) > 1
954
+ ):
955
+ # The dequant pattern has more than 1 user, so it should be promoted
956
+ return True
957
+ return False
958
+
959
+ return _inner
960
+
961
+
962
+ def _register_dequant_promotion_pass(pattern, pass_number, dtype=torch.float32):
963
+ @register_freezing_graph_pattern(
964
+ pattern,
965
+ extra_check=_is_valid_dequant_promotion_pattern(dtype),
966
+ pass_number=pass_number,
967
+ )
968
+ def dequant_promotion(match: Match, *args, **kwargs):
969
+ # If the dequant pattern is used by multiple nodes,
970
+ # we will do dequant promotion, so that each user node gets its own separate dequant pattern.
971
+ assert dtype in [torch.float32, torch.bfloat16]
972
+
973
+ def clone_to_new_node(graph, source_node, user_node):
974
+ assert (
975
+ source_node.op == "call_function"
976
+ ), "clone_to_new_node only supports nodes with op call_function"
977
+ with graph.inserting_before(user_node):
978
+ new_node = graph.call_function(
979
+ source_node.target,
980
+ args=source_node.args,
981
+ kwargs=source_node.kwargs,
982
+ )
983
+ new_node.meta = copy.copy(source_node.meta)
984
+ user_node.replace_input_with(source_node, new_node)
985
+ return new_node
986
+
987
+ if dtype == torch.float32:
988
+ mul_node = match.output_node()
989
+ else:
990
+ convert_to_bf16_node = match.output_node()
991
+ mul_node = convert_to_bf16_node.args[0]
992
+ sub_node = mul_node.args[0]
993
+ to_fp32_node = sub_node.args[0]
994
+ assert mul_node.target is aten.mul.Tensor
995
+ assert sub_node.target is aten.sub.Tensor
996
+ assert to_fp32_node.target is prims.convert_element_type.default
997
+
998
+ graph = match.graph
999
+ user_node_list = (
1000
+ list(mul_node.users)
1001
+ if dtype == torch.float32
1002
+ else list(convert_to_bf16_node.users)
1003
+ )
1004
+ for user_node in user_node_list:
1005
+ # Step1: Duplicate the mul node
1006
+ if dtype == torch.float32:
1007
+ new_mul_node = clone_to_new_node(graph, mul_node, user_node)
1008
+ else:
1009
+ new_convert_to_bf16_node_node = clone_to_new_node(
1010
+ graph, convert_to_bf16_node, user_node
1011
+ )
1012
+ new_mul_node = clone_to_new_node(
1013
+ graph, mul_node, new_convert_to_bf16_node_node
1014
+ )
1015
+ # Step2: Duplicate the sub node
1016
+ new_sub_node = clone_to_new_node(graph, sub_node, new_mul_node)
1017
+ # Step3: Duplicate the to_fp32 node
1018
+ _ = clone_to_new_node(graph, to_fp32_node, new_sub_node)
1019
+ counters["inductor"]["dequant_promotion_matcher_count"] += 1
1020
+ counters["inductor"]["dequant_promotion_matcher_nodes"] += len(match.nodes)
1021
+
1022
+
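The core duplication idea used by dequant_promotion can be illustrated in isolation with a small, hypothetical FX helper (the real pass clones the full convert/sub/mul chain and copies node metadata):

import torch.fx

def _duplicate_node_per_user(gm: torch.fx.GraphModule, node: torch.fx.Node):
    # Give every user of `node` its own private copy of `node`.
    for user in list(node.users):
        with gm.graph.inserting_before(user):
            clone = gm.graph.node_copy(node, lambda n: n)
        user.replace_input_with(node, clone)
    gm.graph.lint()
    gm.recompile()
    return gm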
1023
+ def _is_valid_dequant_conv2d_pattern(dtype):
1024
+ def _inner(match):
1025
+ # Here we do some further checks to ensure that:
1026
+ # 1. It's a conv2d node with 4-dim input and weight, since we only support lowering of conv2d now.
1027
+ # 2. The dequant pattern has only 1 user, the conv2d node.
1028
+ # If these conditions are not met, we will not
1029
+ # insert the weight prepack node into the matched pattern.
1030
+ conv_node = match.output_node()
1031
+ assert conv_node.target is aten.convolution.default
1032
+ input_meta_value = conv_node.args[0].meta.get("val")
1033
+ weight_meta_value = conv_node.args[1].meta.get("val")
1034
+ for meta_value in [input_meta_value, weight_meta_value]:
1035
+ if (
1036
+ meta_value is None
1037
+ or meta_value.device.type != "cpu"
1038
+ or meta_value.dim() != 4
1039
+ ):
1040
+ # Only support conv2d now
1041
+ return False
1042
+
1043
+ assert dtype in [torch.float32, torch.bfloat16]
1044
+ if dtype == torch.float32:
1045
+ mul_node = conv_node.args[0]
1046
+ else:
1047
+ convert_to_bf16 = conv_node.args[0]
1048
+ mul_node = convert_to_bf16.args[0]
1049
+ sub_node = mul_node.args[0]
1050
+ to_fp32_node = sub_node.args[0]
1051
+
1052
+ assert to_fp32_node.target is prims.convert_element_type.default
1053
+ assert sub_node.target is aten.sub.Tensor
1054
+ assert mul_node.target is aten.mul.Tensor
1055
+ if (
1056
+ len(list(to_fp32_node.users)) != 1
1057
+ or len(list(sub_node.users)) != 1
1058
+ or len(list(mul_node.users)) != 1
1059
+ ):
1060
+ # Ensure the dequant pattern only has 1 user
1061
+ # since we will delete the dequant pattern here
1062
+ return False
1063
+ return True
1064
+
1065
+ return _inner
1066
+
1067
+
1068
+ def _register_qconv_weight_prepack_pass(pattern, pass_number, dtype=torch.float32):
1069
+ @register_freezing_graph_pattern(
1070
+ pattern,
1071
+ extra_check=_is_valid_dequant_conv2d_pattern(dtype),
1072
+ pass_number=pass_number,
1073
+ )
1074
+ def qconv_weight_prepack(match: Match, *args, **kwargs):
1075
+ """
1076
+ Match the pattern:
1077
+ int8 activation
1078
+ |
1079
+ dequant_per_tensor
1080
+ |
1081
+ Conv2d <- optional(aten.clone.default) <- dequant_per_channel <- int8_weight
1082
+
1083
+ Insert weight prepack node and change the pattern to:
1084
+ int8 activation
1085
+ |
1086
+ onednn.qconv2d_pointwise <- onednn.qconv_prepack <- int8_weight
1087
+ """
1088
+ assert dtype in [torch.float32, torch.bfloat16]
1089
+ conv_node = match.output_node()
1090
+ assert conv_node.target is aten.convolution.default
1091
+ if dtype == torch.float32:
1092
+ mul_node = conv_node.args[0]
1093
+ else:
1094
+ convert_to_bf16 = conv_node.args[0]
1095
+ mul_node = convert_to_bf16.args[0]
1096
+ sub_node = mul_node.args[0]
1097
+ to_fp32_node = sub_node.args[0]
1098
+ has_clone_to_channel_last_node_in_pattern = (
1099
+ conv_node.args[1].target is aten.clone.default
1100
+ )
1101
+ clone_node = (
1102
+ conv_node.args[1] if has_clone_to_channel_last_node_in_pattern else None
1103
+ )
1104
+
1105
+ if dtype == torch.float32:
1106
+ dequant_per_channel = (
1107
+ clone_node.args[0]
1108
+ if has_clone_to_channel_last_node_in_pattern
1109
+ else conv_node.args[1]
1110
+ )
1111
+ else:
1112
+ weight_to_bf16_node = (
1113
+ clone_node.args[0]
1114
+ if has_clone_to_channel_last_node_in_pattern
1115
+ else conv_node.args[1]
1116
+ )
1117
+ dequant_per_channel = weight_to_bf16_node.args[0]
1118
+
1119
+ assert (
1120
+ dequant_per_channel.target
1121
+ is quantized_decomposed.dequantize_per_channel.default
1122
+ )
1123
+
1124
+ # Activation QParams
1125
+ qx, x_zp, x_scale = (
1126
+ kwargs["x"],
1127
+ kwargs["x_zp"],
1128
+ kwargs["x_scale"],
1129
+ )
1130
+
1131
+ # Weight QParams
1132
+ qw, w_scale, w_zp = (
1133
+ kwargs["q_weight"],
1134
+ kwargs["w_scale"],
1135
+ kwargs["w_zp"],
1136
+ )
1137
+
1138
+ # Conv Params
1139
+ bias, stride, padding, dilation, groups = (
1140
+ kwargs["b"],
1141
+ kwargs["stride"],
1142
+ kwargs["padding"],
1143
+ kwargs["dilation"],
1144
+ kwargs["groups"],
1145
+ )
1146
+
1147
+ x_shape = qx.meta.get("tensor_meta").shape
1148
+ if has_free_symbols(x_shape):
1149
+ # For dynamic shape case, we can't get activation shape ahead of runtime.
1150
+ x_shape = None
1151
+ graph = match.graph
1152
+ with graph.inserting_before(conv_node):
1153
+ # Insert weight prepack node and the QConv node
1154
+ packed_weight_inputs = (
1155
+ qw,
1156
+ w_scale,
1157
+ x_scale,
1158
+ x_zp,
1159
+ stride,
1160
+ padding,
1161
+ dilation,
1162
+ groups,
1163
+ x_shape,
1164
+ )
1165
+ packed_weight_op = torch.ops.onednn.qconv_prepack
1166
+ prepack_weight_node = graph.call_function(
1167
+ packed_weight_op, args=packed_weight_inputs
1168
+ )
1169
+
1170
+ new_args: Tuple[Any, ...] = (
1171
+ qx,
1172
+ x_scale,
1173
+ x_zp,
1174
+ prepack_weight_node,
1175
+ w_scale,
1176
+ w_zp,
1177
+ bias,
1178
+ stride,
1179
+ padding,
1180
+ dilation,
1181
+ groups,
1182
+ 1.0, # inv_output_scale
1183
+ 0, # output_zero_point
1184
+ dtype, # output_dtype
1185
+ "none", # attr
1186
+ [], # scalars
1187
+ "", # algorithm
1188
+ )
1189
+ new_conv_node = graph.call_function(
1190
+ torch.ops.onednn.qconv2d_pointwise.default, args=new_args
1191
+ )
1192
+ conv_node.replace_all_uses_with(new_conv_node)
1193
+ new_conv_node.meta.update(conv_node.meta)
1194
+
1195
+ # Erase the original conv node
1196
+ graph.erase_node(conv_node)
1197
+ # Erase the dequant pattern
1198
+ if dtype == torch.bfloat16:
1199
+ graph.erase_node(convert_to_bf16)
1200
+ # Erase the dequant pattern
1201
+ graph.erase_node(mul_node)
1202
+ graph.erase_node(sub_node)
1203
+ graph.erase_node(to_fp32_node)
1204
+ # Erase the dequant per channel pattern
1205
+ if clone_node is not None:
1206
+ graph.erase_node(clone_node)
1207
+ if dtype == torch.bfloat16:
1208
+ graph.erase_node(weight_to_bf16_node)
1209
+ graph.erase_node(dequant_per_channel)
1210
+ counters["inductor"]["qconv2d_weight_prepack_matcher_count"] += 1
1211
+ counters["inductor"]["qconv2d_weight_prepack_matcher_nodes"] += len(
1212
+ match.nodes
1213
+ )
1214
+
1215
+
1216
+ def _generate_dequant_convolution_node_pattern(
1217
+ _dequant_per_channel_pattern, dtype=torch.float32
1218
+ ):
1219
+ assert dtype in [torch.float32, torch.bfloat16]
1220
+ dequant_convolution_node_pattern = CallFunction(
1221
+ aten.convolution.default,
1222
+ _may_generate_pattern_with_dtype_convert(
1223
+ dequantize_per_tensor_activation_pattern,
1224
+ KeywordArg("autocast_act_dtype"),
1225
+ dtype != torch.float32,
1226
+ ),
1227
+ _dequant_per_channel_pattern,
1228
+ KeywordArg("b"),
1229
+ KeywordArg("stride"),
1230
+ KeywordArg("padding"),
1231
+ KeywordArg("dilation"),
1232
+ KeywordArg("is_transposed"),
1233
+ KeywordArg("out_padding"),
1234
+ KeywordArg("groups"),
1235
+ )
1236
+ return dequant_convolution_node_pattern
1237
+
1238
+
1239
+ def _generate_qconv_weight_prepack_patterns(dtype=torch.float32):
1240
+ assert dtype in [torch.float32, torch.bfloat16]
1241
+ return (
1242
+ _generate_dequant_convolution_node_pattern(
1243
+ dequantize_per_channel_weight_pattern
1244
+ if dtype == torch.float32
1245
+ else dequantize_per_channel_to_bf16_weight_pattern,
1246
+ dtype,
1247
+ ),
1248
+ # There is another pattern due to the pass of convert_conv_weights_to_channels_last
1249
+ # https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/_inductor/freezing.py#L338-L362.
1250
+ # Depending on some heuristics, it may or may not insert a to(channel_last) node
1251
+ # between the convolution and the dequant_per_channel node.
1252
+ _generate_dequant_convolution_node_pattern(
1253
+ dequantize_per_channel_clone_weight_pattern
1254
+ if dtype == torch.float32
1255
+ else dequantize_per_channel_to_bf16_clone_weight_pattern,
1256
+ dtype,
1257
+ ),
1258
+ )
1259
+
1260
+
1261
+ def _is_valid_dequant_linear_pattern(dtype):
1262
+ def _inner(match):
1263
+ # Check dequant pattern has only 1 user.
1264
+ linear_node = match.output_node()
1265
+ assert linear_node.target in (aten.addmm.default, aten.mm.default)
1266
+ input_index = 0 if linear_node.target is aten.mm.default else 1
1267
+ assert dtype in [torch.float32, torch.bfloat16]
1268
+ if dtype == torch.float32:
1269
+ mul_node = linear_node.args[input_index]
1270
+ else:
1271
+ convert_to_bf16 = linear_node.args[input_index]
1272
+ mul_node = convert_to_bf16.args[0]
1273
+ sub_node = mul_node.args[0]
1274
+ to_fp32_node = sub_node.args[0]
1275
+
1276
+ assert to_fp32_node.target is prims.convert_element_type.default
1277
+ assert sub_node.target is aten.sub.Tensor
1278
+ assert mul_node.target is aten.mul.Tensor
1279
+ if (
1280
+ len(list(to_fp32_node.users)) != 1
1281
+ or len(list(sub_node.users)) != 1
1282
+ or len(list(mul_node.users)) != 1
1283
+ ):
1284
+ # Ensure the dequant pattern only has 1 user
1285
+ # since we will delete the dequant pattern here
1286
+ return False
1287
+ return True
1288
+
1289
+ return _inner
1290
+
1291
+
1292
+ def _register_qlinear_weight_prepack_pass(pattern, pass_number, dtype=torch.float32):
1293
+ @register_freezing_graph_pattern(
1294
+ pattern,
1295
+ extra_check=_is_valid_dequant_linear_pattern(dtype),
1296
+ pass_number=pass_number,
1297
+ )
1298
+ def qlinear_weight_prepack(match: Match, *args, **kwargs):
1299
+ """
1300
+ Match the pattern:
1301
+ int8 activation
1302
+ |
1303
+ dequant_per_tensor
1304
+ |
1305
+ mm/addmm <- t <- dequant_per_channel <- int8_weight
1306
+
1307
+ Insert weight prepack node and change the pattern to:
1308
+ int8 activation
1309
+ |
1310
+ onednn.qlinear_pointwise <- onednn.qlinear_prepack <- int8_weight
1311
+ """
1312
+ assert dtype in [torch.float32, torch.bfloat16]
1313
+ linear_node = match.output_node()
1314
+        assert linear_node.target in (aten.addmm.default, aten.mm.default)
+        input_index = 0 if linear_node.target is aten.mm.default else 1
+        weight_index = input_index + 1
+        if dtype == torch.float32:
+            mul_node = linear_node.args[input_index]
+        else:
+            activation_to_bf16_node = linear_node.args[input_index]
+            mul_node = activation_to_bf16_node.args[0]
+        sub_node = mul_node.args[0]
+        to_fp32_node = sub_node.args[0]
+        t_node = linear_node.args[weight_index]
+        if dtype == torch.float32:
+            dequant_per_channel = t_node.args[0]
+        else:
+            weight_to_bf16_node = t_node.args[0]
+            dequant_per_channel = weight_to_bf16_node.args[0]
+        assert (
+            dequant_per_channel.target
+            is quantized_decomposed.dequantize_per_channel.default
+        )
+
+        # Activation QParams
+        qx, x_zp, x_scale = (
+            kwargs["x"],
+            kwargs["x_zp"],
+            kwargs["x_scale"],
+        )
+
+        # Weight QParams
+        qw, w_scale, w_zp = (
+            kwargs["q_weight"],
+            kwargs["w_scale"],
+            kwargs["w_zp"],
+        )
+
+        # Params
+        bias = kwargs["b"] if "b" in kwargs else None
+
+        x_shape = qx.meta.get("tensor_meta").shape
+        if has_free_symbols(x_shape):
+            # For the dynamic shape case, we can't get the activation shape ahead of runtime.
+            x_shape = None
+        graph = match.graph
+        with graph.inserting_before(linear_node):
+            # Insert the weight prepack node and the qlinear node
+            packed_weight_inputs = (
+                qw,
+                x_shape,
+            )
+            packed_weight_op = torch.ops.onednn.qlinear_prepack
+            prepack_weight_node = graph.call_function(
+                packed_weight_op, args=packed_weight_inputs
+            )
+
+            new_args: Tuple[Any, ...] = (
+                qx,
+                x_scale,
+                x_zp,
+                prepack_weight_node,
+                w_scale,
+                w_zp,
+                bias,
+                1.0,  # output_scale
+                0,  # output_zero_point
+                dtype,  # output_dtype
+                "none",  # post op name
+                [],  # post op args
+                "",  # post op algorithm
+            )
+            new_linear_node = graph.call_function(
+                torch.ops.onednn.qlinear_pointwise.default, args=new_args
+            )
+            linear_node.replace_all_uses_with(new_linear_node)
+            new_linear_node.meta.update(linear_node.meta)
+
+            # Erase the original linear node
+            graph.erase_node(linear_node)
+            if dtype == torch.bfloat16:
+                graph.erase_node(activation_to_bf16_node)
+            # Erase the dequant pattern
+            graph.erase_node(mul_node)
+            graph.erase_node(sub_node)
+            graph.erase_node(to_fp32_node)
+            # Erase the dequant per channel pattern
+            graph.erase_node(t_node)
+            if dtype == torch.bfloat16:
+                graph.erase_node(weight_to_bf16_node)
+            graph.erase_node(dequant_per_channel)
+        counters["inductor"]["qlinear_weight_prepack_matcher_count"] += 1
+        counters["inductor"]["qlinear_weight_prepack_matcher_nodes"] += len(
+            match.nodes
+        )
+
+
+def _generate_dequant_linear_node_pattern(
+    _dequant_per_channel_pattern, dtype=torch.float32
+):
+    t_pattern = CallFunction(
+        aten.permute.default,
+        _may_generate_pattern_with_dtype_convert(
+            _dequant_per_channel_pattern,
+            KeywordArg("autocast_wgt_dtype"),
+            dtype != torch.float32,
+        ),
+        KeywordArg("permute_axes"),
+    )
+    dequant_linear_bias_pattern = CallFunction(
+        aten.addmm.default,
+        KeywordArg("b"),
+        _may_generate_pattern_with_dtype_convert(
+            dequantize_per_tensor_activation_pattern,
+            KeywordArg("autocast_act_dtype"),
+            dtype != torch.float32,
+        ),
+        t_pattern,
+    )
+    dequant_linear_no_bias_pattern = CallFunction(
+        aten.mm.default,
+        _may_generate_pattern_with_dtype_convert(
+            dequantize_per_tensor_activation_pattern,
+            KeywordArg("autocast_act_dtype"),
+            dtype != torch.float32,
+        ),
+        t_pattern,
+    )
+    return dequant_linear_bias_pattern, dequant_linear_no_bias_pattern
+
+
+def _generate_qlinear_weight_prepack_patterns(dtype=torch.float32):
+    return _generate_dequant_linear_node_pattern(
+        dequantize_per_channel_weight_pattern, dtype
+    )
+
+
+@functools.lru_cache(None)
+def _register_quantization_weight_pack_pass():
+    for dtype in [torch.float32, torch.bfloat16]:
+        # Step 1: Dequant promotion for int8-mixed-fp32/bf16
+        # Transform
+        # graph 1:
+        #            quant
+        #      + - - - | - - - +
+        #      |    dequant    |
+        #      |    /     \    |
+        #      |  node1  node2 |
+        #      + - | - - - | - +
+        #        quant   quant
+        # into:
+        # graph 2:
+        #            quant
+        #      + - - / - \ - - +
+        #      |dequant dequant|
+        #      |   |       |   |
+        #      | node1   node2 |
+        #      + - | - - - | - +
+        #        quant   quant
+        # In graph 1, the dequant node is shared by node1 and node2.
+        # As a result, neither node1 nor node2 can form an int8
+        # fusion pattern on its own.
+        # After this transformation, graph 2 can hit the int8 fusion
+        # pattern dequant-node-quant for node1 and node2 respectively.
+        _register_dequant_promotion_pass(
+            _may_generate_pattern_with_dtype_convert(
+                dequantize_per_tensor_activation_pattern,
+                KeywordArg("autocast_act_dtype"),
+                dtype != torch.float32,
+            ),
+            pass_number=0,
+            dtype=dtype,
+        )  # pass_number=0 to run before weight prepack
+
+        # Step 2: QConv weight prepack
+        weight_prepack_patterns = _generate_qconv_weight_prepack_patterns(dtype)
+        for weight_prepack_pattern in weight_prepack_patterns:
+            # Register at pass_number 1, so we can do dequant promotion in pass_number 0.
+            _register_qconv_weight_prepack_pass(
+                weight_prepack_pattern, pass_number=1, dtype=dtype
+            )
+
+        # Step 3: QLinear weight prepack
+        weight_prepack_patterns = _generate_qlinear_weight_prepack_patterns(dtype)
+        for weight_prepack_pattern in weight_prepack_patterns:
+            # Register at pass_number 1, so we can do dequant promotion in pass_number 0.
+            _register_qlinear_weight_prepack_pass(
+                weight_prepack_pattern, pass_number=1, dtype=dtype
+            )
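
The qlinear weight-prepack pass above rewrites a dequantize-per-tensor (activation) / dequantize-per-channel (weight) / permute / mm-or-addmm subgraph into torch.ops.onednn.qlinear_prepack plus torch.ops.onednn.qlinear_pointwise. As a reading aid, the sketch below spells out the plain fp32 math that the matched subgraph computes and that the fused op must reproduce. It is a minimal reference sketch, not the onednn kernel: the helper name, toy shapes, and values are illustrative assumptions, while the variable names mirror the kwargs captured by the pattern (x, x_scale, x_zp, q_weight, w_scale, w_zp, b).

import torch


def dequant_linear_reference(qx, x_scale, x_zp, qw, w_scale, w_zp, bias=None):
    # Per-tensor dequantization of the quantized activation.
    x_fp32 = (qx.to(torch.float32) - x_zp) * x_scale
    # Per-channel dequantization of the int8 weight; axis 0 is the output channel.
    w_fp32 = (qw.to(torch.float32) - w_zp.to(torch.float32)[:, None]) * w_scale[:, None]
    # The aten.mm / aten.addmm node that the pass replaces with qlinear_pointwise.
    out = x_fp32 @ w_fp32.t()
    if bias is not None:
        out = out + bias
    return out


# Toy shapes and values, purely for illustration.
qx = torch.randint(0, 256, (4, 16), dtype=torch.uint8)    # quantized activation
qw = torch.randint(-128, 128, (8, 16), dtype=torch.int8)  # quantized weight [out, in]
w_scale = torch.rand(8) * 0.1
w_zp = torch.zeros(8, dtype=torch.int64)
y = dequant_linear_reference(qx, 0.02, 128, qw, w_scale, w_zp, bias=torch.zeros(8))
print(y.shape)  # torch.Size([4, 8])

Prepacking moves the weight reordering for the onednn kernel to compile time; the optional x_shape hint is dropped (set to None) when shapes are dynamic, as the comment in the pass notes.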
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py ADDED
@@ -0,0 +1,137 @@
+import collections
+import logging
+
+import torch
+
+from torch.fx.passes.shape_prop import _extract_tensor_metadata
+from .. import config, inductor_prims
+from ..pattern_matcher import (
+    CallFunctionVarArgs,
+    Match,
+    PatternMatcherPass,
+    register_graph_pattern,
+)
+from ..virtualized import V
+
+log = logging.getLogger(__name__)
+patterns = PatternMatcherPass()
+aten = torch.ops.aten
+
+
+def replace_random_passes(gm: torch.fx.GraphModule):
+    """Modify the given FX graph to use backend-native random ops"""
+    if config.fallback_random:
+        return 0
+
+    count = patterns.apply(gm)
+    count += fuse_seed_creation_pass(gm.graph)
+
+    return count
+
+
+def fuse_seed_creation_pass(graph: torch.fx.Graph):
+    """
+    Horizontally fuse all the seed generation on each device
+
+        a = inductor_seed(dev)
+        b = inductor_seed(dev)
+
+    Becomes:
+        seeds = inductor_seeds(2, dev)
+        a = inductor_lookup_seed(seeds, 0)
+        b = inductor_lookup_seed(seeds, 1)
+
+    We do this because seed creation is entirely launch overhead bound.
+    """
+    device_seeds = collections.defaultdict(list)
+    for node in graph.nodes:
+        if CallFunctionVarArgs(inductor_prims.seed).match(node):
+            device_seeds[node.args[0]].append(node)
+
+    if not device_seeds:
+        return 0
+
+    for device, seeds in device_seeds.items():
+        with graph.inserting_before(seeds[0]):
+            combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))
+            with V.fake_mode:
+                combined.meta["val"] = torch.empty(
+                    [len(seeds)], device=device, dtype=torch.int64
+                )
+                combined.meta["tensor_meta"] = _extract_tensor_metadata(
+                    combined.meta["val"]
+                )
+
+        for idx, seed in enumerate(seeds):
+            with graph.inserting_before(seed):
+                new_seed = graph.call_function(
+                    inductor_prims.lookup_seed, (combined, idx)
+                )
+            seed.replace_all_uses_with(new_seed)
+            new_seed.meta.update(seed.meta)
+            graph.erase_node(seed)
+
+    return len(device_seeds)
+
+
+def default_kwargs(device):
+    return {}
+
+
+def get_device(device):
+    if device is not None:
+        return device
+    return torch.empty([]).device  # default device
+
+
+@register_graph_pattern(CallFunctionVarArgs(aten.rand.default), pass_dict=patterns)
+@register_graph_pattern(CallFunctionVarArgs(aten.rand.generator), pass_dict=patterns)
+@register_graph_pattern(CallFunctionVarArgs(aten.randn.default), pass_dict=patterns)
+@register_graph_pattern(CallFunctionVarArgs(aten.randn.generator), pass_dict=patterns)
+def replace_random(
+    match: Match,
+    size,
+    *,
+    generator=None,
+    dtype=None,
+    device=None,
+    layout=None,
+    pin_memory=None,
+):
+    if generator is not None:
+        return
+
+    def replacement(size):
+        result = inductor_prims.random(
+            size, inductor_prims.seed(device), mode, **default_kwargs(device)
+        )
+        if dtype is not None:
+            result = result.to(dtype)
+        return result
+
+    mode = {
+        aten.rand: "rand",
+        aten.randn: "randn",
+    }[match.output_node().target.overloadpacket]
+    device = get_device(device)
+    match.replace_by_example(replacement, [size])
+
+
+@register_graph_pattern(CallFunctionVarArgs(aten.randint.low), pass_dict=patterns)
+def replace_randint(
+    match: Match,
+    low,
+    high,
+    size,
+    *,
+    dtype=torch.int64,
+    device=None,
+    layout=None,
+    pin_memory=None,
+):
+    def replacement(size):
+        result = inductor_prims.randint(low, high, size, inductor_prims.seed(device))
+        return result.to(dtype)
+
+    device = get_device(device)
+    match.replace_by_example(replacement, [size])
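
fuse_seed_creation_pass above is a horizontal-fusion pass: N per-device inductor_prims.seed nodes become one inductor_prims.seeds buffer plus cheap lookup_seed reads, purely to cut kernel-launch overhead. The sketch below applies the same FX surgery to stand-in ops so it can run outside inductor; it is a hypothetical illustration, not part of the file. torch.arange stands in for the seed/seeds prims and operator.getitem for lookup_seed, and the helper name fuse_calls is made up.

import operator

import torch
import torch.fx as fx


def fuse_calls(graph: fx.Graph, target):
    # Collect every call site of `target`, mirroring the device_seeds scan above.
    sites = [n for n in graph.nodes if n.op == "call_function" and n.target is target]
    if len(sites) < 2:
        return 0
    with graph.inserting_before(sites[0]):
        # One batched producer instead of N tiny ones (like inductor_prims.seeds).
        batched = graph.call_function(torch.arange, (len(sites),))
    for idx, node in enumerate(sites):
        with graph.inserting_before(node):
            # Indexed read of the batched result (like inductor_prims.lookup_seed).
            lookup = graph.call_function(operator.getitem, (batched, idx))
        node.replace_all_uses_with(lookup)
        graph.erase_node(node)
    return 1


# Hand-built toy graph with two stand-in "seed" nodes.
g = fx.Graph()
a = g.call_function(torch.arange, (1,))
b = g.call_function(torch.arange, (1,))
g.output((a, b))
fuse_calls(g, torch.arange)
g.lint()
print(g)  # one arange(2) feeding two getitem reads

The real pass additionally stamps fake-tensor metadata ("val" / "tensor_meta") on the combined node under V.fake_mode, as shown in the code above, so later passes can still reason about its shape and dtype; setting torch._inductor.config.fallback_random = True skips these rewrites entirely and keeps eager ATen RNG behavior.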
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (210 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_1.cpython-310.pyc ADDED
Binary file (4.55 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_11.cpython-310.pyc ADDED
Binary file (5.38 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_12.cpython-310.pyc ADDED
Binary file (5.97 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_13.cpython-310.pyc ADDED
Binary file (3.67 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_2.cpython-310.pyc ADDED
Binary file (4.55 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_3.cpython-310.pyc ADDED
Binary file (5.17 kB).
env-llmeval/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_4.cpython-310.pyc ADDED
Binary file (5.17 kB).