applied-ai-018 committed
Commit 7a7f770 · verified · 1 Parent(s): faf39dd

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py +0 -0
  4. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/decompose_mem_bound_mm.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/dedupe_symint_uses.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/efficient_conv_bn_eval.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/misc_patterns.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/numeric_utils.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/reinplace.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py +277 -0
  24. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/decompose_mem_bound_mm.py +221 -0
  25. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/dedupe_symint_uses.py +78 -0
  26. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/efficient_conv_bn_eval.py +157 -0
  27. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py +212 -0
  28. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py +786 -0
  29. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py +1059 -0
  30. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py +341 -0
  31. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/misc_patterns.py +130 -0
  32. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py +1204 -0
  33. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/numeric_utils.py +210 -0
  34. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py +567 -0
  35. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py +1100 -0
  36. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py +611 -0
  37. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py +1980 -0
  38. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/reinplace.py +537 -0
  39. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py +139 -0
  40. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_1.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_10.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_11.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_12.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_13.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_14.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_15.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_16.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_17.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/18.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1a664e7b06e3cfe6a53020a0793c215550dbca9b303ccd74f8f89414698b1857
size 9372
ckpts/universal/global_step120/zero/22.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abaa2a3a2accaa61f5890bed2215124f9e77931e2a48de57678b6733702eb0c5
size 33555533
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc ADDED
Binary file (6.55 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/decompose_mem_bound_mm.cpython-310.pyc ADDED
Binary file (5.36 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/dedupe_symint_uses.cpython-310.pyc ADDED
Binary file (3.4 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/efficient_conv_bn_eval.cpython-310.pyc ADDED
Binary file (3.84 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc ADDED
Binary file (5.72 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc ADDED
Binary file (15.5 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc ADDED
Binary file (29.1 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc ADDED
Binary file (8.64 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/misc_patterns.cpython-310.pyc ADDED
Binary file (3.84 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc ADDED
Binary file (28.9 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/numeric_utils.cpython-310.pyc ADDED
Binary file (5.43 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc ADDED
Binary file (13.5 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc ADDED
Binary file (25.9 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc ADDED
Binary file (15.5 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc ADDED
Binary file (32.5 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/reinplace.cpython-310.pyc ADDED
Binary file (14.7 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc ADDED
Binary file (3.69 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc ADDED
Binary file (32.7 kB).
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py ADDED
@@ -0,0 +1,277 @@
import functools
import itertools

import torch
from ..._dynamo.utils import counters

from ..pattern_matcher import Arg, CallFunction, KeywordArg
from .freezing_patterns import register_binary_folding_pattern

aten = torch.ops.aten
prims = torch.ops.prims


def mark_mixed_dtype_conv(conv):
    conv_dtype = conv.meta["val"].dtype
    if conv_dtype not in (torch.float16, torch.bfloat16):
        return

    if not len(conv.users) == 1:
        return

    conv_user = next(iter(conv.users.keys()))
    if not isinstance(conv_user.meta["val"], torch.Tensor):
        return

    if not conv_user.meta["val"].dtype == torch.float32:
        return

    while conv_user.target in _binary_ops:
        if not len(conv_user.users) == 1:
            return

        conv_user = next(iter(conv_user.users.keys()))

    if not (
        conv_user.target == prims.convert_element_type.default
        and conv_user.args[1] == conv_dtype
    ):
        return

    conv.meta["_allow_conv_mixed_dtype_folding"] = conv_dtype


def mark_mixed_dtype_allowed_convs(gm):
    """
    Mark convolutions which we will binary fold even with mixed precision constants. We constant fold in the higher precision
    for better accuracy and then recover the original precision after.
    """
    for node in gm.graph.nodes:
        if node.target is aten.convolution.default:
            mark_mixed_dtype_conv(node)


def recover_original_precision_folded_convs(gm):
    """
    After binary folding conv weights and biases to a higher dtype, recover the original precision they were in.
    """
    graph = gm.graph
    convs = [node for node in graph.nodes if node.target is aten.convolution.default]
    for node in convs:
        orig_dtype = node.meta.get("_allow_conv_mixed_dtype_folding", None)
        if orig_dtype is None:
            continue

        with graph.inserting_before(node):
            for idx in [1, 2]:
                old_input = node.args[idx]
                if old_input is None:
                    continue

                new_input = graph.create_node(
                    "call_function",
                    prims.convert_element_type.default,
                    (old_input, orig_dtype),
                )
                node.replace_input_with(old_input, new_input)


_binary_ops = [aten.add.Tensor, aten.sub.Tensor, aten.mul.Tensor, aten.div.Tensor]


@functools.lru_cache(None)
def binary_folding_init():
    _conv_args = [Arg() for _ in range(9)]
    _computation_ops = [aten.convolution.default]
    _computation_calls = [CallFunction(aten.convolution.default, *_conv_args, _users=1)]

    """
    In order to fuse add/sub/mul/div with conv, the dimensions of its
    constant tensor must satisfy the following:
    - with resizing, broadcast to w/ weight/bias tensor shape
    - broadcast to the conv output shape
    It needs to have a shape that can resize to weight/bias
    tensor shape because we need to run the op with the conv
    weights/bias without changing their sizes.
    It needs to broadcast to the conv output shape so that we do
    accidentally change the shape of op output by pre-fusing it
    compared to eager.
    The only dimension value shared by weight/bias/conv output
    is they all contain a dim with value = channels-out. In the
    conv output tensor, this is in the second dimension,
    so the pointwise op tensor may have a second dimension of
    value == channels-out, but all the other dimensions have to be 1
    """

    def _op_not_broadcasting_with_conv(weight_tensor, other_tensor):
        # According to opDoesNotBroadCastWithConv of frozen_conv_folding.cpp
        weight_shape = weight_tensor.shape
        other_shape = other_tensor.shape
        if len(weight_shape) < len(other_shape):
            return False
        if len(weight_shape) == len(other_shape) + 1:
            # weight shape is [o, i, *], other_shape is [o, 1...].
            for i in reversed(range(len(other_shape))):
                if i == 0 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        else:
            # weight shape is [o, i, *], other_shape is [1, i, *]
            for i in reversed(range(len(other_shape))):
                if i == 1 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        return True

    def _check_conv_and_broadcast_op(conv_node, other):
        # According to checkConvAndBroadcastingOpPreConditions of frozen_conv_folding.cpp.
        # conv.weight
        if conv_node.args[1].op != "get_attr":
            return False
        # conv.bias
        if conv_node.args[1] is not None and conv_node.args[1].op != "get_attr":
            return False
        if (
            not isinstance(other, int)
            and not isinstance(other, float)
            and other.op != "get_attr"
        ):
            return False

        if not len(conv_node.args[1].users) == 1:
            return False

        weight_meta_value = conv_node.args[1].meta.get("val")
        if weight_meta_value is None:
            return False
        # Avoid fusing op that causes type promotion
        # restricting to float avoids int/float difficulties with scalar overload
        if not weight_meta_value.is_floating_point():
            return False
        if isinstance(other, torch.fx.Node) and other.op == "get_attr":
            other_meta_value = other.meta.get("val")
            if not other_meta_value.is_floating_point():
                return False
            if (
                torch.promote_types(other_meta_value.dtype, weight_meta_value.dtype)
                != weight_meta_value.dtype
            ):
                if not conv_node.meta.get("_allow_conv_mixed_dtype_folding", False):
                    return False

                if (
                    other_meta_value.dtype != torch.float
                    and weight_meta_value.dtype not in (torch.float16, torch.bfloat16)
                ):
                    return False

            if not _op_not_broadcasting_with_conv(weight_meta_value, other_meta_value):
                return False
        else:
            # TODO: support scalar case
            return False

        return True

    def _is_foldable_pattern(match):
        binary_node = match.output_node()
        computation_node = binary_node.args[0]
        other = binary_node.args[1]
        if binary_node.args[0].target not in _computation_ops:
            computation_node = binary_node.args[1]
            other = binary_node.args[0]
        if binary_node.args[0].target == aten.convolution.default:
            return _check_conv_and_broadcast_op(computation_node, other)

        return False

    def resize_scalar_or_tensor_to_shape(graph, other, shape):
        # TODO: support scalar case
        if other.meta.get("val").numel() == 1:
            # expand errors if the shape input has less # dims than the tensor input
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, (1,)),
            )
            res = graph.create_node(
                "call_function",
                aten.expand.default,
                (res, shape),
            )
        else:
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, shape),
            )
        return res

    def _create_new_conv_node(graph, conv_node, binary_node, other):
        assert conv_node.target == aten.convolution.default
        conv_args = list(conv_node.args)
        weight_meta_value = conv_node.args[1].meta.get("val")
        bias = conv_args[2]
        if binary_node.target in [aten.add.Tensor, aten.sub.Tensor]:
            other_reshape = resize_scalar_or_tensor_to_shape(
                graph, other, (weight_meta_value.size(0),)
            )
            new_bias = graph.create_node(
                "call_function",
                binary_node.target,
                (0 if bias is None else bias, other_reshape),
            )
            conv_args[2] = new_bias
        else:
            assert binary_node.target in [aten.mul.Tensor, aten.div.Tensor]
            weight_broadcast_shape = [1 for _ in range(len(weight_meta_value.shape))]
            weight_broadcast_shape[0] = weight_meta_value.size(0)
            other_reshape1 = resize_scalar_or_tensor_to_shape(
                graph, other, tuple(weight_broadcast_shape)
            )
            new_weight = graph.create_node(
                "call_function", binary_node.target, (conv_args[1], other_reshape1)
            )
            new_weight.meta.update(conv_args[1].meta)
            conv_args[1] = new_weight
            if bias is not None:
                other_reshape = resize_scalar_or_tensor_to_shape(
                    graph, other, (weight_meta_value.size(0),)
                )
                new_bias = graph.create_node(
                    "call_function", binary_node.target, (bias, other_reshape)
                )
                new_bias.meta.update(bias.meta)
                conv_args[2] = new_bias
        return graph.create_node("call_function", conv_node.target, tuple(conv_args))

    for _computation_call, binary_op in itertools.product(
        _computation_calls, _binary_ops
    ):

        @register_binary_folding_pattern(
            CallFunction(binary_op, _computation_call, KeywordArg("other")),
            extra_check=_is_foldable_pattern,
        )
        def folded_op(match, *args, **kwargs):
            counters["inductor"]["binary_folding"] += 1
            other = kwargs.get("other")
            binary_node = match.output_node()
            computation_node = (
                binary_node.args[0]
                if binary_node.args[0].target in _computation_ops
                else binary_node.args[1]
            )
            graph = match.graph
            with graph.inserting_before(binary_node):
                # TODO: support linear?
                assert computation_node.target == aten.convolution.default
                new_computation_node = _create_new_conv_node(
                    graph, computation_node, binary_node, other
                )
                binary_node.replace_all_uses_with(new_computation_node)
                new_computation_node.meta.update(computation_node.meta)
                graph.erase_node(binary_node)
                graph.erase_node(computation_node)
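
The pass above folds an elementwise add/sub/mul/div with a constant into the preceding convolution's weight and bias, which is what lets freezing later erase the standalone pointwise op. A minimal standalone sketch of the identity it relies on (illustrative shapes and names, not part of this commit):

import torch

# Folding y = conv(x, W, b) * s into y = conv(x, W * s, b * s), where s has
# one entry per output channel and broadcasts over the weight as (C_out, 1, 1, 1).
x = torch.randn(1, 3, 8, 8)
W = torch.randn(4, 3, 3, 3)
b = torch.randn(4)
s = torch.randn(4)

eager = torch.nn.functional.conv2d(x, W, b) * s.view(1, -1, 1, 1)
folded = torch.nn.functional.conv2d(x, W * s.view(-1, 1, 1, 1), b * s)
torch.testing.assert_close(eager, folded)
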
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/decompose_mem_bound_mm.py ADDED
@@ -0,0 +1,221 @@
import logging
from typing import List, Optional

import torch
from torch import Tensor
from torch._dynamo.utils import counters
from torch._inductor import utils

from ..pattern_matcher import (
    Arg,
    CallFunction,
    config_flag,
    Ignored,
    Match,
    register_graph_pattern,
)
from .post_grad import decompose_mm_pass

aten = torch.ops.aten
log = logging.getLogger(__name__)

# TODO: need a better strategy for decomposing mm
MIN_FIRST_DIMENSION_DECOMPOSITION = 10240
MAX_OTHER_DIMENSION_DECOMPOSITION = 32


def check_device(a: Tensor, b: Tensor) -> bool:
    return a.is_cuda and b.is_cuda


def should_decompose_common(
    mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
) -> bool:
    return (
        torch._inductor.config.decompose_mem_bound_mm
        and check_device(mat1, mat2)
        and not utils.any_is_symbolic(mat1, mat2, input)
    )


def should_decompose_bmm(mat1, mat2) -> bool:
    if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
        mat1 = mat1.meta["val"]
        mat2 = mat2.meta["val"]
    else:
        return False
    if not should_decompose_common(mat1, mat2):
        return False
    else:
        if len(mat1.shape) != 3 or len(mat2.shape) != 3:
            return False
        if mat1.shape[0] < MIN_FIRST_DIMENSION_DECOMPOSITION:
            return False
        # 2 of m, n, k must be <= MAX_OTHER_DIMENSION_DECOMPOSITION
        if (mat1.shape[1] < MAX_OTHER_DIMENSION_DECOMPOSITION) + (
            mat1.shape[2] < MAX_OTHER_DIMENSION_DECOMPOSITION
        ) + (mat2.shape[2] < MAX_OTHER_DIMENSION_DECOMPOSITION) < 2:
            return False
        return True


def should_decompose_mm(mat1, mat2) -> bool:
    if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
        mat1 = mat1.meta["val"]
        mat2 = mat2.meta["val"]
    else:
        return False
    return (
        should_decompose_common(mat1, mat2)
        and len(mat1.shape) == 2
        and len(mat2.shape) == 2
        and mat1.shape[0] >= MIN_FIRST_DIMENSION_DECOMPOSITION
        and mat2.shape[0] < MAX_OTHER_DIMENSION_DECOMPOSITION
        and mat2.shape[1] < MAX_OTHER_DIMENSION_DECOMPOSITION
    )


def should_decompose_mmt(mat1, mat2) -> bool:
    if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
        mat1 = mat1.meta["val"]
        mat2 = mat2.meta["val"]
    else:
        return False
    return (
        should_decompose_common(mat1, mat2)
        and len(mat1.shape) == 2
        and len(mat2.shape) == 2
        and mat1.shape[0] >= MIN_FIRST_DIMENSION_DECOMPOSITION
        and mat1.shape[1] < MAX_OTHER_DIMENSION_DECOMPOSITION
        and mat2.shape[1] < MAX_OTHER_DIMENSION_DECOMPOSITION
    )


def should_decompose_mm_largek(mat1, mat2) -> bool:
    if is_node_meta_valid(mat1) and is_node_meta_valid(mat2):
        mat1 = mat1.meta["val"]
        mat2 = mat2.meta["val"]
    else:
        return False
    return (
        should_decompose_common(mat1, mat2)
        and len(mat1.shape) == 2
        and len(mat2.shape) == 2
        and mat1.shape[1] >= MIN_FIRST_DIMENSION_DECOMPOSITION
        and mat1.shape[0] < MAX_OTHER_DIMENSION_DECOMPOSITION
        and mat2.shape[1] < MAX_OTHER_DIMENSION_DECOMPOSITION
    )


def is_node_meta_valid(node: torch.fx.Node):
    return "val" in node.meta


def print_decompose_pattern(match: Match, inputs: List[torch.fx.Node]):
    node = match.nodes[-1]
    log.debug(
        "Decompose %s with input shape: %s",
        node.target,
        ", ".join(
            str(input.meta["val"].shape) if "val" in input.meta else "None"
            for input in inputs
        ),
    )


@register_graph_pattern(
    CallFunction(aten.bmm, Arg(), Arg()),
    pass_dict=decompose_mm_pass,
    extra_check=config_flag("decompose_mem_bound_mm"),
)
def decompose_bmm(match: Match, mat1: torch.fx.Node, mat2: torch.fx.Node):
    def repl(mat1, mat2):
        return torch.sum(mat1[:, :, :, None] * mat2[:, None, :, :], dim=-2)

    if should_decompose_bmm(mat1, mat2):
        counters["inductor"]["decompose_bmm"] += 1
        match.replace_by_example(repl, [mat1, mat2])
        print_decompose_pattern(match, [mat1, mat2])
    return


@register_graph_pattern(
    CallFunction(aten.addmm, Arg(), Arg(), Arg()),
    pass_dict=decompose_mm_pass,
    extra_check=config_flag("decompose_mem_bound_mm"),
)
def decompose_addmm(
    match: Match,
    mat1: torch.fx.Node,
    mat2: torch.fx.Node,
    mat3: torch.fx.Node,
):
    def repl(mat1, mat2, mat3):
        return torch.sum(mat2[:, :, None] * mat3[None, :, :], dim=-2) + mat1

    if should_decompose_mm(mat2, mat3):
        counters["inductor"]["decompose_addmm"] += 1
        match.replace_by_example(repl, [mat1, mat2, mat3])
        print_decompose_pattern(match, [mat1, mat2, mat3])
    return


@register_graph_pattern(
    CallFunction(aten.mm, CallFunction(aten.permute, Arg(), Ignored()), Arg()),
    pass_dict=decompose_mm_pass,
    extra_check=config_flag("decompose_mem_bound_mm"),
)
def decompose_mmt(
    match: Match,
    mat1: torch.fx.Node,
    mat2: torch.fx.Node,
):
    def repl(mat1, mat2):
        return torch.sum(mat1[:, :, None] * mat2[:, None, :], dim=0)

    if should_decompose_mmt(mat1, mat2):
        counters["inductor"]["decompose_mmt"] += 1
        match.replace_by_example(repl, [mat1, mat2])
        print_decompose_pattern(match, [mat1, mat2])
    return


@register_graph_pattern(
    CallFunction(aten.mm, Arg(), Arg()),
    pass_dict=decompose_mm_pass,
    extra_check=config_flag("decompose_mem_bound_mm"),
)
def decompose_mm(
    match: Match,
    mat1: torch.fx.Node,
    mat2: torch.fx.Node,
):
    def repl(mat1, mat2):
        return torch.sum(mat1[:, :, None] * mat2[None, :, :], dim=-2)

    if should_decompose_mm(mat1, mat2):
        counters["inductor"]["decompose_mm"] += 1
        match.replace_by_example(repl, [mat1, mat2])
        print_decompose_pattern(match, [mat1, mat2])
    return


@register_graph_pattern(
    CallFunction(aten.mm, Arg(), Arg()),
    pass_dict=decompose_mm_pass,
    extra_check=config_flag("decompose_mem_bound_mm"),
)
def decompose_mm_large_k(
    match: Match,
    mat1: torch.fx.Node,
    mat2: torch.fx.Node,
):
    def repl(mat1, mat2):
        mat1 = mat1.permute(1, 0)
        return torch.sum(mat1[:, :, None] * mat2[:, None, :], dim=0)

    if should_decompose_mm_largek(mat1, mat2):
        counters["inductor"]["decompose_mm_large_k"] += 1
        match.replace_by_example(repl, [mat1, mat2])
        print_decompose_pattern(match, [mat1, mat2])
    return
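
Each repl above trades a GEMM for a broadcasted multiply followed by a sum, which Inductor can then fuse with surrounding pointwise ops when the matmul is memory bound. A quick sanity sketch of the equivalence (illustrative shapes, not from this commit):

import torch

a = torch.randn(16, 4)      # (m, k)
b = torch.randn(4, 8)       # (k, n)
# decompose_mm's repl: broadcast to (m, k, n), then reduce over k
decomposed_mm = torch.sum(a[:, :, None] * b[None, :, :], dim=-2)
torch.testing.assert_close(decomposed_mm, a @ b)

x = torch.randn(32, 5, 4)   # (B, m, k)
y = torch.randn(32, 4, 6)   # (B, k, n)
# decompose_bmm's repl: broadcast to (B, m, k, n), then reduce over k
decomposed_bmm = torch.sum(x[:, :, :, None] * y[:, None, :, :], dim=-2)
torch.testing.assert_close(decomposed_bmm, torch.bmm(x, y))
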
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/dedupe_symint_uses.py ADDED
@@ -0,0 +1,78 @@
from dataclasses import dataclass
from typing import Union

import torch
from torch.fx.experimental.proxy_tensor import py_sym_types, SymBool, SymFloat, SymInt


@dataclass
class _SymExprHash:
    """
    Hash for a py_sym_types that will use the underlying sympy expression
    """

    sym_obj: Union[SymInt, SymFloat, SymBool]

    def __hash__(self) -> int:
        return hash((type(self.sym_obj), self.sym_obj.node.expr))

    def __eq__(self, value) -> bool:
        if not isinstance(value, _SymExprHash):
            return False
        return self.sym_obj.node.expr == value.sym_obj.node.expr


class _SymHashingDict:
    """
    Wrapper around a dictionary that will convert sym types to hash with _SymExprHash and reuse
    existing sym proxies.

    SymPy hash is not always reliable so optimistically hash sympy expression, and if those fail,
    fallback to symnodes.
    """

    def __init__(self):
        self.sym_hash_dict = {}

    def __setitem__(self, key, value):
        self.sym_hash_dict.__setitem__(self._wrap_to_sym_expr_hash(key), value)

    def __getitem__(self, key):
        return self.sym_hash_dict[self._wrap_to_sym_expr_hash(key)]

    def __contains__(self, key):
        return self._wrap_to_sym_expr_hash(key) in self.sym_hash_dict

    def get(self, key, default=None):
        return self.sym_hash_dict.get(self._wrap_to_sym_expr_hash(key), default)

    def _wrap_to_sym_expr_hash(self, key):
        return _SymExprHash(key) if isinstance(key, py_sym_types) else key


def dedupe_symints(graph: torch.fx.Graph):
    """
    Dedupes sym ints in the graph to nodes are resolvable to symint graph inputs.

    We only dedupe from graph inputs to avoid adding a potential dependency in the forward
    from the backward.

    """

    sym_dict = _SymHashingDict()
    resolvable_from_input_symints = set()

    for node in graph.nodes:
        val = node.meta.get("val", None)
        if val is None or not isinstance(val, py_sym_types):
            continue

        if node.op == "placeholder":
            resolvable_from_input_symints.add(node)
            sym_dict[val] = node
        elif existing_node := sym_dict.get(val):
            node.replace_all_uses_with(existing_node)
            graph.erase_node(node)
        elif all(n in resolvable_from_input_symints for n in node.all_input_nodes):
            sym_dict[val] = node
            resolvable_from_input_symints.add(node)
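
_SymExprHash keys the dictionary by the underlying sympy expression rather than by object identity, so two distinct symbolic proxies carrying the same expression resolve to a single graph node. A toy illustration of that keying strategy, using plain stand-in objects instead of real SymInt nodes (hypothetical names, not part of this commit):

from dataclasses import dataclass

@dataclass(frozen=True)
class FakeNode:
    expr: str  # stands in for a sympy expression

class FakeSym:
    def __init__(self, expr):  # stands in for a SymInt proxy
        self.node = FakeNode(expr)

class ExprKey:
    def __init__(self, sym):
        self.sym = sym
    def __hash__(self):
        return hash(self.sym.node.expr)
    def __eq__(self, other):
        return isinstance(other, ExprKey) and self.sym.node.expr == other.sym.node.expr

table = {}
a, b = FakeSym("s0*2"), FakeSym("s0*2")  # distinct objects, same expression
table[ExprKey(a)] = "first proxy"
print(ExprKey(b) in table)               # True: b maps to the entry created for a
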
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/efficient_conv_bn_eval.py ADDED
@@ -0,0 +1,157 @@
import torch
import torch.nn as nn

from torch._dynamo.utils import counters
from torch._inductor import config as inductor_config
from torch.func import functional_call

from ..pattern_matcher import CallModuleVarArgs, Match, register_graph_pattern

from .pre_grad import efficient_conv_bn_eval_pass


def efficient_conv_bn_eval(
    bn: nn.modules.batchnorm._BatchNorm, conv: nn.modules.conv._ConvNd, x: torch.Tensor
):
    """
    Implementation based on https://arxiv.org/abs/2305.11624
    "Tune-Mode ConvBN Blocks For Efficient Transfer Learning"
    It leverages the associative law between convolution and affine transform,
    i.e., normalize (weight conv feature) = (normalize weight) conv feature.
    It works for Eval mode of ConvBN blocks during validation, and can be used
    for **training** as well, but only if one sets `bn.training=False`. It
    reduces memory footprint and computation cost, at the cost of slightly
    reduced numerical stability.
    Args:
        bn (nn.modules.batchnorm._BatchNorm): a BatchNorm module.
        conv (nn.modules.conv._ConvNd): a conv module
        x (torch.Tensor): Input feature map.
    """

    assert bn.running_var is not None

    # These lines of code are designed to deal with various cases
    # like bn without affine transform, and conv without bias
    weight_on_the_fly = conv.weight
    if conv.bias is not None:
        bias_on_the_fly = conv.bias
    else:
        bias_on_the_fly = torch.zeros_like(bn.running_var)

    if bn.weight is not None:
        bn_weight = bn.weight
    else:
        bn_weight = torch.ones_like(bn.running_var)

    if bn.bias is not None:
        bn_bias = bn.bias
    else:
        bn_bias = torch.zeros_like(bn.running_var)

    # shape of [C_out, 1, 1, 1] in Conv2d
    target_shape = [-1] + [1] * (conv.weight.ndim - 1)
    if isinstance(conv, nn.modules.conv._ConvTransposeNd):
        # for transposed conv, the C_out dimension should at index 1.
        target_shape[:2] = [target_shape[1], target_shape[0]]
    weight_coeff = torch.rsqrt(bn.running_var + bn.eps).reshape(target_shape)
    # shape of [C_out, 1, 1, 1] in Conv2d
    coefff_on_the_fly = bn_weight.view_as(weight_coeff) * weight_coeff

    # shape of [C_out, C_in, k, k] in Conv2d
    weight_on_the_fly = weight_on_the_fly * coefff_on_the_fly
    # shape of [C_out] in Conv2d
    bias_on_the_fly = bn_bias + coefff_on_the_fly.flatten() * (
        bias_on_the_fly - bn.running_mean
    )

    input = x
    params = {"weight": weight_on_the_fly, "bias": bias_on_the_fly}
    output = functional_call(conv, params, input)
    return output


@register_graph_pattern(
    CallModuleVarArgs(
        [
            nn.modules.batchnorm._BatchNorm,
            nn.BatchNorm1d,
            nn.BatchNorm2d,
            nn.BatchNorm3d,
            nn.SyncBatchNorm,
        ],
    ),
    pass_dict=efficient_conv_bn_eval_pass,
    extra_check=lambda match: not inductor_config.freezing
    and inductor_config.efficient_conv_bn_eval_fx_passes,
)
def efficient_conv_bn_eval_graph_transform(match: Match, *args, **kwargs):
    # We matched a BN node
    bn_node = match.nodes[0]
    graph = match.graph
    gm = graph.owning_module
    bn_mod = getattr(gm, bn_node.target)  # type: ignore[arg-type]

    # We can only use efficient conv-bn for eval mode with track_running_stats
    if not bn_mod.track_running_stats or bn_mod.training:
        return

    # Check if the input is Conv
    if bn_node.args:
        input_node = bn_node.args[0]
    else:
        input_node = bn_node.kwargs["input"]
    if input_node.op != "call_module":  # type: ignore[union-attr]
        return
    if not hasattr(gm, input_node.target):  # type: ignore[arg-type, union-attr]
        return
    input_mod = getattr(gm, input_node.target)  # type: ignore[arg-type, union-attr]
    supported_convs = [
        nn.Linear,
        nn.Conv1d,
        nn.Conv2d,
        nn.Conv3d,
        nn.ConvTranspose1d,
        nn.ConvTranspose2d,
        nn.ConvTranspose3d,
    ]
    if not any(isinstance(input_mod, cls) for cls in supported_convs):
        return
    conv_node = input_node
    # Output of conv is used by other nodes, cannot optimize
    if len(conv_node.users) > 1:  # type: ignore[union-attr]
        return

    # Find a pair of conv and bn computation nodes to optimize.
    counters["inductor"]["efficient_conv_bn_eval"] += 1

    with graph.inserting_before(conv_node):
        # create `get_attr` node to access modules
        # note that we directly call `create_node` to fill the `name`
        # argument. `graph.get_attr` and
        # `graph.call_function` does not allow the `name` argument.
        conv_get_node = graph.create_node(
            op="get_attr", target=conv_node.target, name="get_conv"  # type: ignore[union-attr]
        )
        bn_get_node = graph.create_node(
            op="get_attr", target=bn_node.target, name="get_bn"
        )
        if conv_node.args:  # type: ignore[union-attr]
            conv_input = conv_node.args[0]  # type: ignore[union-attr]
        else:
            conv_input = conv_node.kwargs["input"]  # type: ignore[union-attr]
        # prepare args for the fused function
        args = (bn_get_node, conv_get_node, conv_input)
        # create a new node
        new_node = graph.create_node(
            op="call_function",
            target=efficient_conv_bn_eval,
            args=args,
            name="efficient_conv_bn_eval",
        )
        # this node replaces the original conv + bn, and therefore
        # should replace the uses of bn_node
        bn_node.replace_all_uses_with(new_node)
        # take care of the deletion order:
        # delete bn_node first, and then conv_node
        graph.erase_node(bn_node)
        graph.erase_node(conv_node)
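
The transform above computes the folded weight and bias on the fly at call time, but the arithmetic is the standard eval-mode Conv-BN fold. A small sketch verifying that fold numerically (random statistics and illustrative sizes, not part of this commit):

import torch
import torch.nn as nn

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, 3, bias=True).eval()
bn = nn.BatchNorm2d(8).eval()
# give the BN non-trivial running statistics and affine parameters
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-1, 1)

x = torch.randn(2, 3, 16, 16)
with torch.no_grad():
    reference = bn(conv(x))
    # coeff = gamma / sqrt(running_var + eps), one entry per output channel
    coeff = bn.weight * torch.rsqrt(bn.running_var + bn.eps)
    w = conv.weight * coeff.view(-1, 1, 1, 1)
    b = bn.bias + coeff * (conv.bias - bn.running_mean)
    folded = torch.nn.functional.conv2d(x, w, b)
torch.testing.assert_close(reference, folded, rtol=1e-4, atol=1e-5)
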
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py ADDED
@@ -0,0 +1,212 @@
import functools

import torch
from torch._inductor.compile_fx import fake_tensor_prop
from ..._dynamo.utils import counters

from .. import config
from ..pattern_matcher import (
    _return_true,
    CallFunction,
    fwd_only,
    Ignored,
    init_once_fakemode,
    KeywordArg,
    Match,
    PatternMatcherPass,
    register_graph_pattern,
    register_replacement,
    stable_topological_sort,
)

aten = torch.ops.aten

# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
    PatternMatcherPass(),
    PatternMatcherPass(),
    PatternMatcherPass(),
]

binary_folding_pass = PatternMatcherPass()


def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):
    """
    Passes that are applied to the graph to freeze pass.
    """

    from ..freezing import constant_fold

    lazy_init()
    # We need a few rounds of binary folding to get rid of all the
    # unnecessary nodes, but may need a good method to chose the rounds number.
    # works like: conv+binary+binary.
    binary_folding = counters["inductor"]["binary_folding"]
    fake_tensor_prop(gm, aot_example_inputs, True)

    torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_convs(gm)
    for _ in range(4):
        constant_fold(gm)
        # Make sure meta['val'] is properly set for all nodes
        fake_tensor_prop(gm, aot_example_inputs, True)
        binary_folding_pass.apply(gm.graph)  # type: ignore[arg-type]
        # If we don't have binary folding, we don't need to run the pass again.
        # TODO: remove the need to run fake_tensor_prop on the whole model.
        if counters["inductor"]["binary_folding"] == binary_folding:
            break
        binary_folding = counters["inductor"]["binary_folding"]

    torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_convs(gm)

    constant_fold(gm)
    fake_tensor_prop(gm, aot_example_inputs, True)

    for pattern in pass_patterns:
        pattern.apply(gm.graph)  # type: ignore[arg-type]

    # The CPU weight packing always assume the conv's weight is channels last,
    # So make sure the layout_optimization is on when doing it.
    if (
        torch._C._has_mkldnn
        and config.cpp.weight_prepack
        and config.layout_optimization
    ):
        from .mkldnn_fusion import _eliminate_duplicate_packed_nodes

        _eliminate_duplicate_packed_nodes(gm)

    stable_topological_sort(gm.graph)
    gm.recompile()
    gm.graph.lint()


@init_once_fakemode
def lazy_init():
    if torch._C._has_mkldnn and config.cpp.weight_prepack:
        from .mkldnn_fusion import _mkldnn_weight_pack_init

        _mkldnn_weight_pack_init()

    from .binary_folding import binary_folding_init

    addmm_patterns_init()
    binary_folding_init()


def register_freezing_graph_pattern(pattern, extra_check=_return_true, pass_number=0):
    return register_graph_pattern(
        pattern,
        extra_check=extra_check,
        pass_dict=pass_patterns[pass_number],
    )


def register_binary_folding_pattern(pattern, extra_check=_return_true):
    return register_graph_pattern(
        pattern,
        extra_check=extra_check,
        pass_dict=binary_folding_pass,
    )


@functools.lru_cache(None)
def addmm_patterns_init():
    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"
    val = functools.partial(torch.empty, (10, 10), device=device, requires_grad=False)

    def check_concat_weights(match):
        weights = [
            match.kwargs["w1"],
            match.kwargs["w2"],
        ]
        if "w3" in match.kwargs:
            weights.append(match.kwargs["w3"])

        return all(
            w.op == "get_attr" and w.meta["val"].shape == weights[0].meta["val"].shape
            for w in weights
        )

    def matmul_fuse_pattern(inp, w1, w2, w3):
        return (inp @ w1, inp @ w2, inp @ w3)

    def matmul_replacement(inp, w1, w2, w3):
        cat_t = torch.cat((w1, w2, w3), dim=1)
        mm = inp @ cat_t
        return mm.chunk(3, dim=1)

    register_replacement(
        matmul_fuse_pattern,
        matmul_replacement,
        [val(), val(), val(), val()],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3"),
    )

    def matmul_fuse_pattern_two(inp, w1, w2):
        return (inp @ w1, inp @ w2)

    def matmul_replacement_two(inp, w1, w2):
        cat_t = torch.cat((w1, w2), dim=1)
        mm = inp @ cat_t
        return mm.chunk(2, dim=1)

    register_replacement(
        matmul_fuse_pattern_two,
        matmul_replacement_two,
        [val(), val(), val()],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2"),
    )

    def addmm_fuse_pattern_second(inp, w1, w2, w3, b1, b2, b3):
        return (
            aten.addmm(b1, inp, w1),
            aten.addmm(b2, inp, w2),
            aten.addmm(b3, inp, w3),
        )

    def addmm_fuse_replacement_second(inp, w1, w2, w3, b1, b2, b3):
        cat_w = torch.cat((w1, w2, w3), dim=1)
        cat_b = torch.cat((b1, b2, b3))
        return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)

    register_replacement(
        addmm_fuse_pattern_second,
        addmm_fuse_replacement_second,
        [val() for _ in range(7)],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3", "b1", "b2", "b3"),
    )


def same_dtype(match):
    return match.output_node().args[0].meta["val"].dtype == match.kwargs["dtype"]


@register_graph_pattern(
    CallFunction(
        torch.ops.prims.convert_element_type.default,
        Ignored(),
        KeywordArg("dtype"),
    ),
    pass_dict=pass_patterns[0],
    extra_check=same_dtype,
)
def unnecessary_dtype_convert(match: Match, **kwargs):
    """Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding"""
    graph = match.graph
    node = match.output_node()
    node.replace_all_uses_with(node.args[0])
    graph.erase_node(node)
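
addmm_patterns_init registers replacements that collapse several GEMMs sharing one input into a single GEMM over concatenated weights, followed by chunk. A quick sketch of why the rewrite is exact (illustrative sizes, not from this commit):

import torch

inp = torch.randn(6, 10)
w1, w2, w3 = (torch.randn(10, 10) for _ in range(3))

separate = (inp @ w1, inp @ w2, inp @ w3)
# one matmul against the column-wise concatenation, then split the columns back
fused = (inp @ torch.cat((w1, w2, w3), dim=1)).chunk(3, dim=1)
for a, b in zip(separate, fused):
    torch.testing.assert_close(a, b)
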
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py ADDED
@@ -0,0 +1,786 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import inspect
3
+ import logging
4
+ import math
5
+
6
+ import torch
7
+ from ..._dynamo.utils import counters
8
+ from ..pattern_matcher import (
9
+ filter_nodes,
10
+ fwd_only,
11
+ joint_fwd_bwd,
12
+ register_replacement,
13
+ )
14
+
15
+ log = logging.getLogger(__name__)
16
+ aten = torch.ops.aten
17
+
18
+
19
+ def _sfdp_pattern_1(query, key, value, inv_scale):
20
+ return (
21
+ torch.matmul(query, key.transpose(-2, -1))
22
+ .div(inv_scale)
23
+ .softmax(dim=-1)
24
+ .matmul(value)
25
+ )
26
+
27
+
28
+ def _sfdp_replacement_1(query, key, value, inv_scale):
29
+ counters["inductor"]["fuse_attention"] += 1
30
+ return aten.scaled_dot_product_attention(
31
+ query.contiguous(),
32
+ key.contiguous(),
33
+ value.contiguous(),
34
+ attn_mask=None,
35
+ dropout_p=0.0,
36
+ is_causal=False,
37
+ scale=1.0 / inv_scale,
38
+ )
39
+
40
+
41
+ def _sfdp_pattern_2(query, key, value, scale_factor):
42
+ return (
43
+ torch.matmul(query, key.transpose(-2, -1))
44
+ .mul(scale_factor)
45
+ .softmax(dim=-1)
46
+ .matmul(value)
47
+ )
48
+
49
+
50
+ def _sfdp_replacement_2(query, key, value, scale_factor):
51
+ counters["inductor"]["fuse_attention"] += 1
52
+ return aten.scaled_dot_product_attention(
53
+ query.contiguous(),
54
+ key.contiguous(),
55
+ value.contiguous(),
56
+ attn_mask=None,
57
+ dropout_p=0.0,
58
+ is_causal=False,
59
+ scale=scale_factor,
60
+ )
61
+
62
+
63
+ def _sfdp_pattern_3(query, key, value, inv_scale_factor, dropout_p):
64
+ return torch.nn.functional.dropout(
65
+ torch.matmul(query, key.transpose(-2, -1))
66
+ .div(inv_scale_factor)
67
+ .softmax(dim=-1),
68
+ p=dropout_p,
69
+ ).matmul(value)
70
+
71
+
72
+ def _sfdp_replacement_3(query, key, value, inv_scale_factor, dropout_p):
73
+ counters["inductor"]["fuse_attention"] += 1
74
+ return aten.scaled_dot_product_attention(
75
+ query.contiguous(),
76
+ key.contiguous(),
77
+ value.contiguous(),
78
+ attn_mask=None,
79
+ dropout_p=dropout_p,
80
+ is_causal=False,
81
+ scale=1.0 / inv_scale_factor,
82
+ )
83
+
84
+
85
+ def _sfdp_pattern_4(query, key, value, scale_factor, dropout_p):
86
+ return torch.nn.functional.dropout(
87
+ torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1),
88
+ p=dropout_p,
89
+ ).matmul(value)
90
+
91
+
92
+ def _sfdp_replacement_4(query, key, value, scale_factor, dropout_p):
93
+ counters["inductor"]["fuse_attention"] += 1
94
+ return aten.scaled_dot_product_attention(
95
+ query.contiguous(),
96
+ key.contiguous(),
97
+ value.contiguous(),
98
+ attn_mask=None,
99
+ dropout_p=dropout_p,
100
+ is_causal=False,
101
+ scale=scale_factor,
102
+ )
103
+
104
+
105
+ def _sfdp_pattern_5(query, key, value, attn_mask):
106
+ attn_weight = torch.softmax(
107
+ (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
108
+ )
109
+ # attn_weight = torch.dropout(attn_weight, dropout_p)
110
+ return attn_weight @ value
111
+
112
+
113
+ def _sfdp_replacement_5(query, key, value, attn_mask):
114
+ counters["inductor"]["fuse_attention"] += 1
115
+ return aten.scaled_dot_product_attention(
116
+ query.contiguous(),
117
+ key.contiguous(),
118
+ value.contiguous(),
119
+ attn_mask=attn_mask.to(dtype=query.dtype),
120
+ dropout_p=0.0,
121
+ is_causal=False,
122
+ )
123
+
124
+
125
+ def _sfdp_pattern_6(query, key, value, attn_mask, dropout_p):
126
+ attn_weight = torch.softmax(
127
+ (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
128
+ )
129
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
130
+ return attn_weight @ value
131
+
132
+
133
+ def _sfdp_replacement_6(query, key, value, attn_mask, dropout_p):
134
+ counters["inductor"]["fuse_attention"] += 1
135
+ return aten.scaled_dot_product_attention(
136
+ query.contiguous(),
137
+ key.contiguous(),
138
+ value.contiguous(),
139
+ attn_mask=attn_mask.to(dtype=query.dtype),
140
+ dropout_p=dropout_p,
141
+ is_causal=False,
142
+ )
143
+
144
+
145
+ def _sfdp_pattern_7(query, key, value, dropout_p):
146
+ # in real workloads inputs to matmul are permuted
147
+ # causing matmul to expand to a series of expand and clone calls
148
+ # we want the same to happen during pattern tracing
149
+ q = query.permute(0, 2, 1, 3)
150
+ k = key.permute(0, 2, 1, 3)
151
+ v = value.permute(0, 2, 1, 3)
152
+ div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
153
+ div = div.to(torch.float32)
154
+ attn_weight = torch.softmax(div, dim=-1)
155
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
156
+ attn_weight = attn_weight.to(torch.float16)
157
+ return attn_weight @ v
158
+
159
+
160
+ def _sfdp_replacement_7(query, key, value, dropout_p):
161
+ # sdpa prefers inputs in permuted format
162
+ # it makes a copy to put them in this format
163
+ # if they aren't already
164
+ # to make replacement efficient ensure that inputs to sdpa
165
+ # are in required order
166
+ counters["inductor"]["fuse_attention"] += 1
167
+ q = query.permute(0, 2, 1, 3)
168
+ k = key.permute(0, 2, 1, 3)
169
+ v = value.permute(0, 2, 1, 3)
170
+ return aten.scaled_dot_product_attention(
171
+ q,
172
+ k,
173
+ v,
174
+ attn_mask=None, # attn_mask,
175
+ dropout_p=dropout_p,
176
+ is_causal=False,
177
+ )
178
+
179
+
180
+ def _sfdp_pattern_8(query, key, value):
181
+ # no dropout version of pattern 7
182
+ q = query.permute(0, 2, 1, 3)
183
+ k = key.permute(0, 2, 1, 3)
184
+ v = value.permute(0, 2, 1, 3)
185
+ div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
186
+ div = div.to(torch.float32)
187
+ attn_weight = torch.softmax(div, dim=-1)
188
+ attn_weight = attn_weight.to(torch.float16)
189
+ return attn_weight @ v
190
+
191
+
192
+ def _sfdp_replacement_8(query, key, value):
193
+ counters["inductor"]["fuse_attention"] += 1
194
+ q = query.permute(0, 2, 1, 3)
195
+ k = key.permute(0, 2, 1, 3)
196
+ v = value.permute(0, 2, 1, 3)
197
+ return aten.scaled_dot_product_attention(
198
+ q,
199
+ k,
200
+ v,
201
+ attn_mask=None, # attn_mask,
202
+ dropout_p=0.0,
203
+ is_causal=False,
204
+ )
205
+
206
+
207
+ def _sfdp_pattern_9(query, key, value, dropout_p):
208
+ q = query.permute(0, 2, 1, 3)
209
+ k = key.permute(0, 2, 1, 3)
210
+ v = value.permute(0, 2, 1, 3)
211
+ q = q / math.sqrt(q.size(-1))
212
+ div = q @ k.transpose(-2, -1)
213
+ div = div.to(torch.float32)
214
+ attn_weight = torch.softmax(div, dim=-1)
215
+ attn_weight = torch.dropout(attn_weight, dropout_p, True)
216
+ attn_weight = attn_weight.to(torch.float16)
217
+ return attn_weight @ v
218
+
219
+
220
+ def _sfdp_replacement_9(query, key, value, dropout_p):
221
+ counters["inductor"]["fuse_attention"] += 1
222
+ q = query.permute(0, 2, 1, 3)
223
+ k = key.permute(0, 2, 1, 3)
224
+ v = value.permute(0, 2, 1, 3)
225
+ return aten.scaled_dot_product_attention(
226
+ q,
227
+ k,
228
+ v,
229
+ attn_mask=None, # attn_mask,
230
+ dropout_p=dropout_p,
231
+ is_causal=False,
232
+ )
233
+
234
+
235
+ def _sfdp_pattern_10(query, key, value):
236
+ # no dropout version of 9
237
+ q = query.permute(0, 2, 1, 3)
238
+ k = key.permute(0, 2, 1, 3)
239
+ v = value.permute(0, 2, 1, 3)
240
+ q = q / math.sqrt(q.size(-1))
241
+ div = q @ k.transpose(-2, -1)
242
+ div = div.to(torch.float32)
243
+ attn_weight = torch.softmax(div, dim=-1)
244
+ attn_weight = attn_weight.to(torch.float16)
245
+ return attn_weight @ v
246
+
247
+
248
+ def _sfdp_replacement_10(query, key, value):
249
+ counters["inductor"]["fuse_attention"] += 1
250
+ q = query.permute(0, 2, 1, 3)
251
+ k = key.permute(0, 2, 1, 3)
252
+ v = value.permute(0, 2, 1, 3)
253
+ return aten.scaled_dot_product_attention(
254
+ q,
255
+ k,
256
+ v,
257
+ attn_mask=None, # attn_mask,
258
+ dropout_p=0.0,
259
+ is_causal=False,
260
+ )
261
+
262
+
263
+ def _sfdp_pattern_11(query, key, value, inv_scale):
264
+ # Mainly for huggingface models
265
+ q = query.permute(0, 2, 1, 3)
266
+ k = key.permute(0, 2, 1, 3)
267
+ v = value.permute(0, 2, 1, 3)
268
+ return torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v)
269
+
270
+
271
+ def _sfdp_replacement_11(query, key, value, inv_scale):
272
+ counters["inductor"]["fuse_attention"] += 1
273
+ return aten.scaled_dot_product_attention(
274
+ query.transpose(1, 2),
275
+ key.transpose(1, 2),
276
+ value.transpose(1, 2),
277
+ attn_mask=None,
278
+ dropout_p=0.0,
279
+ is_causal=False,
280
+ scale=1.0 / inv_scale,
281
+ )
282
+
283
+
284
+ def _sfdp_pattern_12(query, key, value, inv_scale_factor, dropout_p):
285
+ q = query.permute(0, 2, 1, 3)
286
+ k = key.permute(0, 2, 1, 3)
287
+ v = value.permute(0, 2, 1, 3)
288
+ return torch.nn.functional.dropout(
289
+ torch.matmul(q, k.transpose(-2, -1)).div(inv_scale_factor).softmax(dim=-1),
290
+ p=dropout_p,
291
+ ).matmul(v)
292
+
293
+
294
+ def _sfdp_replacement_12(query, key, value, inv_scale_factor, dropout_p):
295
+ counters["inductor"]["fuse_attention"] += 1
296
+ return aten.scaled_dot_product_attention(
297
+ query.transpose(1, 2),
298
+ key.transpose(1, 2),
299
+ value.transpose(1, 2),
300
+ attn_mask=None,
301
+ dropout_p=dropout_p,
302
+ is_causal=False,
303
+ scale=1.0 / inv_scale_factor,
304
+ )
305
+
306
+
307
+ def _sfdp_pattern_13(query, key, value, dropout_p):
308
+ attn_weight = torch.bmm(query, key.transpose(1, 2)).softmax(dim=-1)
309
+ attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p)
310
+ return torch.bmm(attn_weight, value)
311
+
312
+
313
+ def _sfdp_replacement_13(query, key, value, dropout_p):
314
+ counters["inductor"]["fuse_attention"] += 1
315
+ return aten.scaled_dot_product_attention(
316
+ query.unsqueeze(0),
317
+ key.unsqueeze(0),
318
+ value.unsqueeze(0),
319
+ dropout_p=dropout_p,
320
+ scale=1.0,
321
+ ).squeeze(0)
322
+
323
+
324
+ def _sfdp_pattern_14(query, key, value, attn_mask, inv_scale):
325
+ # for BertLarge
326
+ # Permutations are needed to create clones in graph.
327
+ q = query.permute([0, 2, 1, 3])
328
+ k = key.permute([0, 2, 1, 3])
329
+ v = value.permute([0, 2, 1, 3])
330
+ return (
331
+ (torch.matmul(q, k.transpose(-2, -1)).div(inv_scale) + attn_mask)
332
+ .softmax(dim=-1)
333
+ .matmul(v)
334
+ )
335
+
336
+
337
+ def _sfdp_replacement_14(query, key, value, attn_mask, inv_scale):
338
+ counters["inductor"]["fuse_attention"] += 1
339
+ return aten.scaled_dot_product_attention(
340
+ query.transpose(1, 2),
341
+ key.transpose(1, 2),
342
+ value.transpose(1, 2),
343
+ attn_mask=attn_mask.to(dtype=query.dtype),
344
+ dropout_p=0.0,
345
+ is_causal=False,
346
+ scale=1.0 / inv_scale,
347
+ )
348
+
349
+
350
+ def _sfdp_pattern_15(query, key, value, attn_mask, inv_scale):
351
+ # for DistilBert
352
+ # Permutations are needed to create clones in graph.
353
+ q = query.permute([0, 2, 1, 3])
354
+ k = key.permute([0, 2, 1, 3])
355
+ v = value.permute([0, 2, 1, 3])
356
+ bs = q.size(0)
357
+ k_len = k.size(-2)
358
+ scores = q @ k.transpose(-2, -1)
359
+ scores = scores.div(inv_scale)
360
+ fill_value = torch.full((), -float("inf"), dtype=query.dtype, device=query.device)
361
+ attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
362
+ return torch.softmax(scores.masked_fill(attn_mask, fill_value), dim=-1) @ v
363
+
364
+
365
+ def _sfdp_replacement_15(query, key, value, attn_mask, inv_scale):
366
+ counters["inductor"]["fuse_attention"] += 1
367
+ bs = query.size(0)
368
+ n_head = query.size(2)
369
+ q_len = query.size(1)
370
+ k_len = key.size(1)
371
+ # do attn_mask->logical_not() in aten.scaled_dot_product_attention
372
+ attn_mask = (
373
+ (attn_mask == 1).view((bs, 1, 1, k_len)).expand((bs, n_head, q_len, k_len))
374
+ )
375
+ return aten.scaled_dot_product_attention(
376
+ query.transpose(1, 2),
377
+ key.transpose(1, 2),
378
+ value.transpose(1, 2),
379
+ attn_mask=attn_mask.to(dtype=torch.bool),
380
+ dropout_p=0.0,
381
+ is_causal=False,
382
+ scale=1.0 / inv_scale,
383
+ )
384
+
385
+
386
+ def _sfdp_pattern_16(query, key, value, attn_mask, inv_scale, dropout_p):
387
+ # for BertLarge with dropout
388
+ q = query.permute([0, 2, 1, 3])
389
+ k = key.permute([0, 2, 1, 3])
390
+ v = value.permute([0, 2, 1, 3])
391
+ return (
392
+ torch.nn.functional.dropout(
393
+ (torch.matmul(q, k.transpose(-2, -1)).div(inv_scale) + attn_mask).softmax(
394
+ dim=-1
395
+ ),
396
+ dropout_p,
397
+ )
398
+ .to(dtype=query.dtype)
399
+ .matmul(v)
400
+ )
401
+
402
+
403
+ def _sfdp_replacement_16(query, key, value, attn_mask, inv_scale, dropout_p):
404
+ counters["inductor"]["fuse_attention"] += 1
405
+ return aten.scaled_dot_product_attention(
406
+ query.transpose(1, 2),
407
+ key.transpose(1, 2),
408
+ value.transpose(1, 2),
409
+ attn_mask=attn_mask.to(dtype=query.dtype),
410
+ dropout_p=dropout_p,
411
+ is_causal=False,
412
+ scale=1.0 / inv_scale,
413
+ )
414
+
415
+
416
+ def _sfdp_pattern_17(query, key, value, attn_mask, inv_scale, dropout_p):
417
+ # for DistilBert with dropout
418
+ q = query.permute([0, 2, 1, 3])
419
+ k = key.permute([0, 2, 1, 3])
420
+ v = value.permute([0, 2, 1, 3])
421
+ bs = q.size(0)
422
+ k_len = k.size(-2)
423
+ scores = q @ k.transpose(-2, -1)
424
+ scores = scores.div(inv_scale)
425
+ fill_value = torch.full((), -float("inf"), dtype=query.dtype, device=query.device)
426
+ attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
427
+ return (
428
+ torch.nn.functional.dropout(
429
+ torch.softmax(scores.masked_fill(attn_mask, fill_value), dim=-1), dropout_p
430
+ )
431
+ @ v
432
+ )
433
+
434
+
435
+ def _sfdp_replacement_17(query, key, value, attn_mask, inv_scale, dropout_p):
436
+ counters["inductor"]["fuse_attention"] += 1
437
+ bs = query.size(0)
438
+ n_head = query.size(2)
439
+ q_len = query.size(1)
440
+ k_len = key.size(1)
441
+ # do attn_mask->logical_not() in aten.scaled_dot_product_attention
442
+ attn_mask = (
443
+ (attn_mask == 1).view((bs, 1, 1, k_len)).expand((bs, n_head, q_len, k_len))
444
+ )
445
+ return aten.scaled_dot_product_attention(
446
+ query.transpose(1, 2),
447
+ key.transpose(1, 2),
448
+ value.transpose(1, 2),
449
+ attn_mask=attn_mask.to(dtype=torch.bool),
450
+ dropout_p=dropout_p,
451
+ is_causal=False,
452
+ scale=1.0 / inv_scale,
453
+ )
454
+
455
+
456
+ def _sfdp_params_check(match):
457
+ assert all(k in match.kwargs for k in ("query", "key", "value"))
458
+ query = match.kwargs["query"].meta["val"]
459
+ key = match.kwargs["key"].meta["val"]
460
+ value = match.kwargs["value"].meta["val"]
461
+ if not (query.dtype == key.dtype == value.dtype) or not (
462
+ query.device == key.device == value.device
463
+ ):
464
+ return False
465
+ add_mask_node = filter_nodes(match.nodes, aten.add.Tensor)
466
+ # Has attn_mask add.
467
+ if len(add_mask_node) > 0:
468
+ attn_mask_node = add_mask_node[0].args[1]
469
+ # attn_mask_node may be a float/int number.
470
+ if not hasattr(attn_mask_node, "meta"):
471
+ return False
472
+ attn_mask = attn_mask_node.meta["val"] # type: ignore[union-attr]
473
+ # Make sure attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool
474
+ # attn_mask.dtype == torch.float for models like albert.
475
+ if (
476
+ not isinstance(attn_mask, torch.Tensor)
477
+ or not (
478
+ attn_mask.dtype == query.dtype
479
+ or attn_mask.dtype == torch.bool
480
+ or attn_mask.dtype == torch.float
481
+ )
482
+ or query.device != attn_mask.device
483
+ ):
484
+ return False
485
+ return True
486
+
487
+
488
+ def _sfdp_extra_check(scale_factor_op, disable_cuda=False):
489
+ def fn(match):
490
+ scale_factor_node = filter_nodes(match.nodes, scale_factor_op)[0]
491
+ # Note: args[1] of the scale_factor_node is always the scale_factor for the current patterns.
492
+ scale_factor = scale_factor_node.args[1]
493
+ # make sure the scale_factor is a float/int. SymInt?
494
+ if not isinstance(scale_factor, (float, int)):
495
+ return False
496
+ if (
497
+ disable_cuda
498
+ and "query" in match.kwargs
499
+ and "cuda" in str(match.kwargs["query"].meta["val"].device)
500
+ ):
501
+ return False
502
+ return _sfdp_params_check(match)
503
+
504
+ return fn
505
+
506
+
507
+ def partialize_and_update_signature(func, **kwargs):
508
+ """
509
+ Equivalent to functools.partial but also updates the signature on returned function
510
+ """
511
+ original_sig = inspect.signature(func)
512
+ parameters = original_sig.parameters
513
+
514
+ new_parameters = {
515
+ key: value for key, value in parameters.items() if key not in kwargs
516
+ }
517
+ new_sig = inspect.Signature(parameters=list(new_parameters.values()))
518
+
519
+ partial_func = functools.partial(func, **kwargs)
520
+
521
+ def wrapper(*args, **kwargs):
522
+ return partial_func(*args, **kwargs)
523
+
524
+ wrapper.__signature__ = new_sig # type: ignore[attr-defined]
525
+ wrapper.__name__ = func.__name__
526
+
527
+ return wrapper
528
+
529
+
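A quick aside on the helper above: downstream pattern tracing inspects the callable's signature, so the bound parameter has to disappear from it, which a plain functools.partial does not do. A minimal sketch (not part of the diffed file) of the difference, using the partialize_and_update_signature helper defined above:

import functools
import inspect

def f(x, y, dropout_p):
    return x + y + dropout_p

plain = functools.partial(f, dropout_p=0.0)
wrapped = partialize_and_update_signature(f, dropout_p=0.0)

# functools.partial still reports the bound parameter (as keyword-only with a
# default), while the helper drops it from the reported signature entirely.
print(inspect.signature(plain))    # (x, y, *, dropout_p=0.0)
print(inspect.signature(wrapped))  # (x, y)
assert wrapped(1, 2) == plain(1, 2) == 3.0
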
530
+ def _get_sfdp_patterns():
531
+ from .joint_graph import patterns
532
+
533
+ if torch.cuda.is_available():
534
+ # workaround https://github.com/pytorch/pytorch/issues/97894
535
+ device = "cuda"
536
+ else:
537
+ device = "cpu"
538
+
539
+ # sizes/values don't actually matter for initial trace
540
+ # once we get a possible match we re-trace with the actual values and verify the match still holds
541
+ g_inp = functools.partial(
542
+ torch.empty, (2, 4, 8, 16), device=device, requires_grad=True
543
+ )
544
+ # attn_mask
545
+ b_inp = functools.partial(torch.empty, (1, 1, 8, 8), device=device)
546
+ m_inp = functools.partial(torch.empty, (2, 1, 1, 4), device=device)
547
+ # inv_scale
548
+ c_inp = functools.partial(torch.tensor, 2.0, device=device)
549
+ # workaround https://github.com/pytorch/pytorch/issues/97894
550
+ # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
551
+ d = {"dropout_p": 0.113377}
552
+
553
+ # we could also generate all these patterns in 3d.. TODO
554
+ g_3d_inp = functools.partial(
555
+ torch.empty, (1024, 128, 128), device=device, requires_grad=True
556
+ )
557
+
558
+ # reshape in matmul decomposition generates a clone when batch_size>1 due to the memory layout change.
559
+ # however when batch_size=1, reshape does not change the memory layout, so clone would not be generated.
560
+ # here we need to trace with input of batch_size=1 to generate a pattern graph without clone.
561
+ g_bs1_inp = functools.partial(
562
+ torch.empty, (1, 4, 8, 16), device=device, requires_grad=True
563
+ )
564
+ m_bs1_inp = functools.partial(torch.empty, (1, 1, 1, 4), device=device)
565
+
566
+ # softmax will generate a dtype conversion on inputs if they are in half,
567
+ # but will not in float, so we generate a pattern for both
568
+ for dtype in [torch.float, torch.half]:
569
+ g = functools.partial(g_inp, dtype=dtype)
570
+ b = functools.partial(b_inp, dtype=dtype)
571
+ m = functools.partial(m_inp, dtype=dtype)
572
+ m_float = functools.partial(m_inp, dtype=torch.float)
573
+ c = functools.partial(c_inp, dtype=dtype)
574
+ g_3d = functools.partial(g_3d_inp, dtype=dtype)
575
+ g_bs1 = functools.partial(g_bs1_inp, dtype=dtype)
576
+ m_bs1 = functools.partial(m_bs1_inp, dtype=dtype)
577
+ m_bs1_float = functools.partial(m_bs1_inp, dtype=torch.float)
578
+
579
+ candidates = [
580
+ (
581
+ _sfdp_pattern_1,
582
+ _sfdp_replacement_1,
583
+ [g(), g(), g(), c()],
584
+ {},
585
+ _sfdp_extra_check(aten.div.Tensor),
586
+ ),
587
+ (
588
+ _sfdp_pattern_2,
589
+ _sfdp_replacement_2,
590
+ [g(), g(), g(), c()],
591
+ {},
592
+ _sfdp_extra_check(aten.mul.Tensor),
593
+ ),
594
+ (
595
+ _sfdp_pattern_3,
596
+ _sfdp_replacement_3,
597
+ [g(), g(), g(), c()],
598
+ d,
599
+ _sfdp_extra_check(aten.div.Tensor),
600
+ ),
601
+ (
602
+ _sfdp_pattern_4,
603
+ _sfdp_replacement_4,
604
+ [g(), g(), g(), c()],
605
+ d,
606
+ _sfdp_extra_check(aten.mul.Tensor),
607
+ ),
608
+ (
609
+ _sfdp_pattern_5,
610
+ _sfdp_replacement_5,
611
+ [g(), g(), g(), b()],
612
+ {},
613
+ _sfdp_params_check,
614
+ ),
615
+ (
616
+ _sfdp_pattern_6,
617
+ _sfdp_replacement_6,
618
+ [g(), g(), g(), b()],
619
+ d,
620
+ _sfdp_params_check,
621
+ ),
622
+ (
623
+ _sfdp_pattern_7,
624
+ _sfdp_replacement_7,
625
+ [g(), g(), g()],
626
+ d,
627
+ _sfdp_params_check,
628
+ ),
629
+ (
630
+ _sfdp_pattern_8,
631
+ _sfdp_replacement_8,
632
+ [g(), g(), g()],
633
+ {},
634
+ _sfdp_params_check,
635
+ ),
636
+ (
637
+ _sfdp_pattern_9,
638
+ _sfdp_replacement_9,
639
+ [g(), g(), g()],
640
+ d,
641
+ _sfdp_params_check,
642
+ ),
643
+ (
644
+ _sfdp_pattern_10,
645
+ _sfdp_replacement_10,
646
+ [g(), g(), g()],
647
+ {},
648
+ _sfdp_params_check,
649
+ ),
650
+ (
651
+ _sfdp_pattern_11,
652
+ _sfdp_replacement_11,
653
+ [g(), g(), g(), c()],
654
+ {},
655
+ _sfdp_extra_check(aten.div.Tensor),
656
+ ),
657
+ (
658
+ _sfdp_pattern_12,
659
+ _sfdp_replacement_12,
660
+ [g(), g(), g(), c()],
661
+ d,
662
+ _sfdp_extra_check(aten.div.Tensor),
663
+ ),
664
+ (
665
+ _sfdp_pattern_13,
666
+ _sfdp_replacement_13,
667
+ [g_3d(), g_3d(), g_3d()],
668
+ d,
669
+ _sfdp_params_check,
670
+ ),
671
+ (
672
+ _sfdp_pattern_14,
673
+ _sfdp_replacement_14,
674
+ [g(), g(), g(), m(), c()],
675
+ {},
676
+ _sfdp_extra_check(aten.div.Tensor),
677
+ ),
678
+ (
679
+ _sfdp_pattern_15,
680
+ _sfdp_replacement_15,
681
+ [g(), g(), g(), m(), c()],
682
+ {},
683
+ _sfdp_extra_check(aten.div.Tensor),
684
+ ),
685
+ # TODO: Enable CUDA after solving Bert accuracy issue of calling efficient attention
686
+ (
687
+ _sfdp_pattern_16,
688
+ _sfdp_replacement_16,
689
+ [g(), g(), g(), m(), c()],
690
+ d,
691
+ _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
692
+ ),
693
+ (
694
+ _sfdp_pattern_16,
695
+ _sfdp_replacement_16,
696
+ [g_bs1(), g_bs1(), g_bs1(), m_bs1(), c()],
697
+ d,
698
+ _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
699
+ ),
700
+ (
701
+ _sfdp_pattern_17,
702
+ _sfdp_replacement_17,
703
+ [g(), g(), g(), m(), c()],
704
+ d,
705
+ _sfdp_extra_check(aten.div.Tensor),
706
+ ),
707
+ ]
708
+ mask_fp32_patterns = ["pattern_16"]
709
+ if dtype == torch.half:
710
+ # Add inputs of bf16 q/k/v and fp32 mask, for models like albert.
711
+ candidates.append(
712
+ (
713
+ _sfdp_pattern_16,
714
+ _sfdp_replacement_16,
715
+ [g(), g(), g(), m_float(), c()],
716
+ d,
717
+ _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
718
+ )
719
+ )
720
+ candidates.append(
721
+ (
722
+ _sfdp_pattern_16,
723
+ _sfdp_replacement_16,
724
+ [g_bs1(), g_bs1(), g_bs1(), m_bs1_float(), c()],
725
+ d,
726
+ _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
727
+ )
728
+ )
729
+
730
+ for pattern, replacement, args, workaround, extra_check in candidates:
731
+ # XXX: when adding a new pattern, re-run `gen_attention_patterns` so the pattern
732
+ # gets serialized to a python file and does not require tracing at runtime.
733
+ assert isinstance(workaround, dict)
734
+ name = pattern.__name__
735
+
736
+ if dtype != torch.float:
737
+ name += "_half"
738
+ if (
739
+ any(p in name for p in mask_fp32_patterns)
740
+ and args[3].dtype == torch.float32
741
+ ):
742
+ name += "_mask_fp32"
743
+ if args[0].size(0) == 1:
744
+ name += "_bs1"
745
+
746
+ training_name = name + "_training"
747
+ yield training_name, {
748
+ "search_fn": pattern,
749
+ "replace_fn": replacement,
750
+ "example_inputs": args,
751
+ "trace_fn": joint_fwd_bwd,
752
+ "pass_dicts": patterns,
753
+ "extra_check": extra_check,
754
+ "scalar_workaround": workaround,
755
+ }
756
+
757
+ if workaround:
758
+ assert len(workaround) == 1 and "dropout_p" in workaround
759
+ # functools.partial insufficient because we look at signature downstream
760
+ pattern = partialize_and_update_signature(pattern, dropout_p=0.0)
761
+ replacement = partialize_and_update_signature(
762
+ replacement, dropout_p=0.0
763
+ )
764
+ workaround = {}
765
+
766
+ inference_name = name + "_inference"
767
+ yield inference_name, {
768
+ "search_fn": pattern,
769
+ "replace_fn": replacement,
770
+ "example_inputs": args,
771
+ "trace_fn": fwd_only,
772
+ "pass_dicts": patterns,
773
+ "extra_check": extra_check,
774
+ "scalar_workaround": workaround,
775
+ }
776
+
777
+
778
+ @functools.lru_cache(None)
779
+ def _sfdp_init():
780
+ from .serialized_patterns.central_index import get_serialized_pattern
781
+
782
+ for key, register_replacement_kwargs in _get_sfdp_patterns():
783
+ search_fn_pattern = get_serialized_pattern(key)
784
+ register_replacement(
785
+ **register_replacement_kwargs, search_fn_pattern=search_fn_pattern
786
+ )
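
To make the intent of these pattern/replacement pairs concrete, here is a small eager-mode sketch (not part of the diffed file) checking the identity that _sfdp_pattern_14 / _sfdp_replacement_14 rely on. It assumes a PyTorch build where scaled_dot_product_attention accepts the scale keyword, as the replacement above does.

import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
bs, seq, heads, head_dim = 2, 4, 8, 16
inv_scale = math.sqrt(head_dim)

# layout matches the pattern: (bs, seq, heads, head_dim), mask broadcastable over scores
query, key, value = (torch.randn(bs, seq, heads, head_dim) for _ in range(3))
attn_mask = torch.randn(bs, 1, 1, seq)

# eager pattern: permute to (bs, heads, seq, head_dim), scale, add mask, softmax, matmul
q, k, v = (t.permute(0, 2, 1, 3) for t in (query, key, value))
eager = ((q @ k.transpose(-2, -1)).div(inv_scale) + attn_mask).softmax(dim=-1) @ v

# fused replacement
fused = F.scaled_dot_product_attention(
    query.transpose(1, 2),
    key.transpose(1, 2),
    value.transpose(1, 2),
    attn_mask=attn_mask,
    dropout_p=0.0,
    scale=1.0 / inv_scale,
)
torch.testing.assert_close(eager, fused, rtol=1e-4, atol=1e-4)
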
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py ADDED
@@ -0,0 +1,1059 @@
1
+ import collections
2
+ import logging
3
+ import operator
4
+ from collections import OrderedDict
5
+ from typing import (
6
+ Any,
7
+ DefaultDict,
8
+ Deque,
9
+ Dict,
10
+ Iterable,
11
+ Iterator,
12
+ List,
13
+ Optional,
14
+ Set,
15
+ Tuple,
16
+ )
17
+
18
+ import torch
19
+ from torch._dynamo.utils import counters
20
+
21
+ from .. import config
22
+ from ..pattern_matcher import (
23
+ CallFunctionVarArgs,
24
+ get_arg_value,
25
+ stable_topological_sort,
26
+ )
27
+
28
+ try:
29
+ # importing this will register fbgemm lowerings for inductor
30
+ import deeplearning.fbgemm.fbgemm_gpu.fb.inductor_lowerings # noqa: F401
31
+
32
+ has_fbgemm = True
33
+ except Exception:
34
+ has_fbgemm = False
35
+ pass
36
+
37
+ aten = torch.ops.aten
38
+
39
+ log = logging.getLogger(__name__)
40
+
41
+ MIN_FUSE_SET_SIZE = 5
42
+ MAX_FUSE_SET_SIZE = 300
43
+ MAX_FUSE_SEARCH_DEPTH = 5
44
+ # The maximum tensor size that can go into the fusion group
45
+ MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR = 4096
46
+
47
+ # exclude these nodes from BFS
48
+ # excluding get item improves optimizer compilation time by 60s
49
+ SEARCH_EXCLUSIONS = {operator.getitem}
50
+
51
+
52
+ default_graph_search_options = {
53
+ "min_fuse_set_size": MIN_FUSE_SET_SIZE,
54
+ "max_fuse_set_size": MAX_FUSE_SET_SIZE,
55
+ "max_fuse_search_depth": MAX_FUSE_SEARCH_DEPTH,
56
+ "max_fuse_tensor_size_group_linear": MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR,
57
+ }
58
+
59
+ graph_search_options = default_graph_search_options
60
+
61
+
62
+ def update_stack_example_value(node, metadata, dim=0, op=torch.stack):
63
+ """
64
+ Update the example value of the node in the graph to enable followup split cat opt.
65
+ """
66
+ if node is not None and hasattr(node, "meta"):
67
+ if op == torch.stack:
68
+ example_value = torch.stack(metadata, dim=dim)
69
+ elif op == torch.unbind:
70
+ example_value = torch.unbind(metadata, dim=dim) # type: ignore[assignment]
71
+ else:
72
+ return
73
+ node.meta["example_value"] = example_value
74
+
75
+
76
+ def update_pointwise_example_value(pointwise_node, input, other, op):
77
+ """
78
+ Update the example value of the add node in the graph to enable followup split cat opt.
79
+ """
80
+ if pointwise_node is not None and hasattr(pointwise_node, "meta"):
81
+ if op == torch.add:
82
+ example_value = torch.add(input, other)
83
+ elif op == torch.mul:
84
+ example_value = torch.mul(input, other)
85
+ else:
86
+ return
87
+ pointwise_node.meta["example_value"] = example_value
88
+
89
+
90
+ class GroupBatchFusionBase:
91
+ def __init__(self, **kwargs):
92
+ self.graph_search_options = kwargs.pop(
93
+ "graph_search_options", default_graph_search_options
94
+ )
95
+
96
+ def match(self, node):
97
+ raise NotImplementedError("match called on base")
98
+
99
+ def fuse(self, graph, subset):
100
+ raise NotImplementedError("fuse called on base")
101
+
102
+
103
+ PRE_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
104
+ POST_GRAD_FUSIONS: Dict[str, GroupBatchFusionBase] = dict()
105
+
106
+
107
+ def register_fusion(name: str, pre_grad=True):
108
+ def decorator(fusion_cls: GroupBatchFusionBase):
109
+ if pre_grad:
110
+ PRE_GRAD_FUSIONS[name] = fusion_cls
111
+ else:
112
+ POST_GRAD_FUSIONS[name] = fusion_cls
113
+ return fusion_cls
114
+
115
+ return decorator
116
+
117
+
118
+ def list_group_batch_fusions(pre_grad=True) -> List[str]:
119
+ if pre_grad:
120
+ return list(PRE_GRAD_FUSIONS.keys())
121
+ else:
122
+ return list(POST_GRAD_FUSIONS.keys())
123
+
124
+
125
+ def decompose_stack(graph: torch.fx.GraphModule, input_tensors: List[Any]) -> Any:
126
+ unsqueezed_inputs = []
127
+ for input_tensor in input_tensors:
128
+ unsqueezed_input = graph.call_function(
129
+ aten.unsqueeze, args=(input_tensor,), kwargs={"dim": 0}
130
+ )
131
+ unsqueezed_inputs.append(unsqueezed_input)
132
+ stacked_inputs = graph.call_function(
133
+ aten.cat, args=(unsqueezed_inputs,), kwargs={"dim": 0}
134
+ )
135
+ return stacked_inputs
136
+
137
+
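decompose_stack exists because the post-grad graph works at the aten level, where torch.stack is expressed as unsqueeze followed by cat. A one-assert eager sketch (not part of the diffed file) of the identity it builds in graph form:

import torch

tensors = [torch.randn(3, 4) for _ in range(5)]
assert torch.equal(
    torch.stack(tensors, dim=0),
    torch.cat([t.unsqueeze(0) for t in tensors], dim=0),
)
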
138
+ class GroupFusion(GroupBatchFusionBase):
139
+ """
140
+ Fuse ops in a group way, e.g., fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.
141
+ """
142
+
143
+ pass
144
+
145
+
146
+ class BatchFusion(GroupBatchFusionBase):
147
+ """
148
+ Fuse ops in a batch way, e.g., fuse mm/addmm of the same input shapes with bmm.
149
+ """
150
+
151
+ pass
152
+
153
+
154
+ class BatchPointwiseOpsFusionFactory(BatchFusion):
155
+ def __init__(self, op, **kwargs):
156
+ super().__init__(**kwargs)
157
+ self.op = op
158
+
159
+
160
+ @register_fusion("batch_linear_post_grad", pre_grad=False)
161
+ class PostGradBatchLinearFusion(BatchFusion):
162
+ """
163
+ Fuse ops in a batch way in post grad (aten level).
164
+ """
165
+
166
+ def _addmm_node_can_be_fused(self, node: torch.fx.Node) -> bool:
167
+ return (
168
+ node.kwargs.get("beta", 1.0) == 1.0 and node.kwargs.get("alpha", 1.0) == 1.0 # type: ignore[return-value]
169
+ )
170
+
171
+ def _is_input_2d(self, input: torch.fx.Node) -> bool:
172
+ input_shapes = input.meta["tensor_meta"].shape
173
+ return (
174
+ len(input_shapes) == 2
175
+ and isinstance(input_shapes[0], int)
176
+ and isinstance(input_shapes[1], int)
177
+ )
178
+
179
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, int, int, int, bool]]:
180
+ if CallFunctionVarArgs(aten.mm).match(node):
181
+ input_m, weight_m = node.args
182
+ bias_m = None
183
+
184
+ elif CallFunctionVarArgs(aten.addmm.default).match(
185
+ node
186
+ ) and self._addmm_node_can_be_fused(node):
187
+ bias_m, input_m, weight_m = node.args
188
+ else:
189
+ return None
190
+
191
+ # only handle the cases where inputs are 2D tensors
192
+ if not self._is_input_2d(input_m) or not self._is_input_2d(weight_m): # type: ignore[arg-type]
193
+ return None
194
+ m, k = input_m.meta["tensor_meta"].shape # type: ignore[union-attr]
195
+ n = weight_m.meta["tensor_meta"].shape[1] # type: ignore[union-attr]
196
+ batch_key = ("batch_linear", m, k, n, bias_m is not None)
197
+ return batch_key
198
+
199
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
200
+ batch_inputs = []
201
+ batch_weights = []
202
+ batch_biases = []
203
+ batch_nodes = []
204
+
205
+ for node in subset:
206
+ if CallFunctionVarArgs(aten.addmm.default).match(node):
207
+ bias, input, weight = node.args
208
+ elif CallFunctionVarArgs(aten.mm.default).match(node):
209
+ input, weight = node.args
210
+ bias = None
211
+ batch_nodes.append(node)
212
+ batch_inputs.append(input) # type: ignore[possibly-undefined]
213
+ batch_weights.append(weight) # type: ignore[possibly-undefined]
214
+ batch_biases.append(bias) # type: ignore[possibly-undefined]
215
+
216
+ with graph.inserting_before(subset[-1]):
217
+ fused_inputs = decompose_stack(graph, batch_inputs)
218
+ fused_weights = decompose_stack(graph, batch_weights)
219
+ fused_bmm = graph.call_function(
220
+ aten.bmm,
221
+ args=(fused_inputs, fused_weights),
222
+ )
223
+
224
+ for i, original_mm in enumerate(batch_nodes):
225
+ has_bias = False
226
+ with graph.inserting_after(fused_bmm):
227
+ new_mm = graph.call_function(aten.select, args=((fused_bmm, 0, i)))
228
+ if batch_biases[i]:
229
+ has_bias = True
230
+ new_bias_add = graph.call_function(
231
+ aten.add, args=((batch_biases[i], new_mm))
232
+ )
233
+ new_mm_cont = new_bias_add if has_bias else new_mm # type: ignore[possibly-undefined]
234
+ original_mm.replace_all_uses_with(new_mm_cont)
235
+ new_mm_cont.meta.update(original_mm.meta)
236
+ graph.erase_node(original_mm)
237
+
238
+
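In eager terms, the rewrite PostGradBatchLinearFusion performs is: several mm/addmm calls with identical (m, k) x (k, n) shapes become a single bmm over stacked operands, unpacked afterwards with select plus a per-slice bias add. A small sketch (not part of the diffed file):

import torch

m, k, n = 8, 16, 4
inputs = [torch.randn(m, k) for _ in range(3)]
weights = [torch.randn(k, n) for _ in range(3)]
biases = [torch.randn(n) for _ in range(3)]

# original graph: one addmm per (bias, input, weight) triple
originals = [torch.addmm(b, x, w) for b, x, w in zip(biases, inputs, weights)]

# fused form: a single bmm, bias added back per slice
fused = torch.bmm(torch.stack(inputs), torch.stack(weights))
for i, orig in enumerate(originals):
    torch.testing.assert_close(biases[i] + fused[i], orig)
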
239
+ @register_fusion("group_linear", pre_grad=False)
240
+ class GroupLinearFusion(GroupFusion):
241
+ def _addmm_node_can_be_fused(self, node: torch.fx.Node):
242
+ input_shape = node.args[1].meta["tensor_meta"].shape # type: ignore[union-attr]
243
+ weight_shape = node.args[2].meta["tensor_meta"].shape # type: ignore[union-attr]
244
+ return (
245
+ node.kwargs.get("beta", 1.0) == 1.0
246
+ and node.kwargs.get("alpha", 1.0) == 1.0
247
+ and len(input_shape) == 2
248
+ and len(weight_shape) == 2
249
+ and all(x % 2 == 0 for x in input_shape + weight_shape)
250
+ and all(
251
+ shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
252
+ for shape in input_shape + weight_shape
253
+ )
254
+ )
255
+
256
+ def _mm_node_can_be_fused(self, node: torch.fx.Node):
257
+ input_shape = node.args[0].meta["tensor_meta"].shape # type: ignore[union-attr]
258
+ weight_shape = node.args[1].meta["tensor_meta"].shape # type: ignore[union-attr]
259
+ return (
260
+ len(input_shape) == 2
261
+ and len(weight_shape) == 2
262
+ and all(x % 2 == 0 for x in input_shape + weight_shape)
263
+ and all(
264
+ shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
265
+ for shape in input_shape + weight_shape
266
+ )
267
+ )
268
+
269
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, bool]]:
270
+ if CallFunctionVarArgs(aten.mm.default).match(
271
+ node
272
+ ) and self._mm_node_can_be_fused(node):
273
+ group_key = ("group_linear", True)
274
+ elif CallFunctionVarArgs(aten.addmm.default).match(
275
+ node
276
+ ) and self._addmm_node_can_be_fused(node):
277
+ bias = node.args[0]
278
+ group_key = ("group_linear", bias is None)
279
+ else:
280
+ group_key = None
281
+ return group_key
282
+
283
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
284
+ group_inputs = []
285
+ group_weights = []
286
+ group_biases = []
287
+ group_nodes = []
288
+ for node in subset:
289
+ if CallFunctionVarArgs(aten.addmm.default).match(node):
290
+ bias, input, weight = node.args
291
+ else:
292
+ assert CallFunctionVarArgs(aten.mm.default).match(node)
293
+ input, weight = node.args
294
+ bias = None
295
+
296
+ group_nodes.append(node)
297
+ group_inputs.append(input)
298
+ group_weights.append(weight)
299
+ group_biases.append(bias)
300
+
301
+ if all(bias is None for bias in group_biases):
302
+ group_biases = None # type: ignore[assignment]
303
+ group_biases: Optional[List[Any]]
304
+
305
+ with graph.inserting_before(subset[0]):
306
+ fused_mm = graph.call_function(
307
+ torch.ops.fbgemm.gmm.default,
308
+ args=(group_inputs, group_weights, group_biases),
309
+ kwargs={"smart_fused": True},
310
+ )
311
+
312
+ for i, original_mm in enumerate(group_nodes):
313
+ with graph.inserting_after(fused_mm):
314
+ new_mm = graph.call_function(operator.getitem, args=(fused_mm, i))
315
+ original_mm.replace_all_uses_with(new_mm)
316
+ new_mm.meta.update(original_mm.meta)
317
+ graph.erase_node(original_mm)
318
+
319
+
320
+ class BatchPointwiseOpsPostGradFusion(BatchPointwiseOpsFusionFactory):
321
+ """
322
+ Batch pointwise operator (e.g., add, mul) in post grad pass.
323
+ """
324
+
325
+ def __init__(self, op, **kwargs):
326
+ super().__init__(op, **kwargs)
327
+ self.op = op
328
+
329
+ def _pointwise_node_can_be_fused(self, node: torch.fx.Node):
330
+ # note: we only consider the case where the inputs are tensors
331
+ # for mixed precision training, we need to make sure the inputs
332
+ # of the aten.cat used to do the stack have the same dtype;
333
+ # otherwise, the output of the aten.cat may not be the same as
334
+ # its inputs and cause a dtype mismatch error in mm or addmm
335
+ input, other = node.args
336
+ return (
337
+ input.meta["tensor_meta"].shape == other.meta["tensor_meta"].shape # type: ignore[union-attr]
338
+ if hasattr(input, "meta")
339
+ and hasattr(other, "meta")
340
+ and "tensor_meta" in input.meta # type: ignore[union-attr]
341
+ and "tensor_meta" in other.meta # type: ignore[union-attr]
342
+ else False
343
+ )
344
+
345
+ def match(self, node: torch.fx.Node):
346
+ if CallFunctionVarArgs(self.op).match(
347
+ node
348
+ ) and self._pointwise_node_can_be_fused(node):
349
+ alpha = node.kwargs.get("alpha", 1.0)
350
+ rounding_mode = node.kwargs.get("rounding_mode", None)
351
+ input, other = node.args
352
+ shape = list(input.meta["tensor_meta"].shape) # type: ignore[union-attr]
353
+ group_key = (
354
+ "batch_" + self.op.__name__.lower() + "_post_grad",
355
+ str(shape),
356
+ str(input.meta["tensor_meta"].dtype), # type: ignore[union-attr]
357
+ str(other.meta["tensor_meta"].dtype), # type: ignore[union-attr]
358
+ str(alpha),
359
+ str(rounding_mode),
360
+ )
361
+ else:
362
+ group_key = None
363
+ return group_key
364
+
365
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
366
+ batch_inputs, batch_others = [], []
367
+ alpha = subset[0].kwargs.get("alpha", 1.0)
368
+
369
+ for node in subset:
370
+ input, other = node.args
371
+ batch_inputs.append(input)
372
+ batch_others.append(other)
373
+
374
+ with graph.inserting_before(subset[0]):
375
+ stack_inputs = decompose_stack(graph, batch_inputs)
376
+ stack_others = decompose_stack(graph, batch_others)
377
+
378
+ batch_op = graph.call_function(
379
+ self.op,
380
+ args=(stack_inputs, stack_others),
381
+ kwargs={"alpha": alpha} if self.op == aten.add.Tensor else {},
382
+ )
383
+ for i, original_add in enumerate(subset):
384
+ with graph.inserting_after(batch_op):
385
+ new_add = graph.call_function(
386
+ torch.ops.aten.select, args=((batch_op, 0, i))
387
+ )
388
+ original_add.replace_all_uses_with(new_add)
389
+ new_add.meta.update(original_add.meta)
390
+ graph.erase_node(original_add)
391
+
392
+
393
+ @register_fusion("batch_linear_lhs")
394
+ class BatchLinearLHSFusion(BatchFusion):
395
+ """
396
+ Batch linear left-hand side fusion. This pass tries to fuse the following patterns:
397
+
398
+ torch.nn.functional.linear(x, w1), linear(x, w2),... * linear(x, wn)
399
+ -> torch.mm(x, torch.cat([w1, w2,... * wn]).transpose(0, 1))
400
+
401
+ We have a separate pass to eliminate contiguous transpose in a generic way.
402
+ """
403
+
404
+ def match(self, node: torch.fx.Node) -> Optional[Tuple[str, bool, Any]]:
405
+ if CallFunctionVarArgs(torch.nn.functional.linear).match(
406
+ node
407
+ ) and is_linear_node_can_be_fused(node):
408
+ input = get_arg_value(node, 0, "input")
409
+ bias = get_arg_value(node, 2, "bias")
410
+ group_key = ("batch_linear_lhs", bias is None, input)
411
+ else:
412
+ group_key = None
413
+ return group_key
414
+
415
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
416
+ batch_nodes = []
417
+ batch_input = None
418
+ batch_weights = []
419
+ batch_biases = []
420
+ split_sections = []
421
+ for node in subset:
422
+ input = get_arg_value(node, 0, "input")
423
+ weight = get_arg_value(node, 1, "weight")
424
+ bias = get_arg_value(node, 2, "bias")
425
+ batch_nodes.append(node)
426
+ if batch_input is None:
427
+ batch_input = input
428
+ else:
429
+ assert batch_input is input
430
+ batch_weights.append(weight)
431
+ if bias:
432
+ batch_biases.append(bias)
433
+ split_sections.append(weight.meta["example_value"].shape[0])
434
+
435
+ with graph.inserting_before(subset[0]):
436
+ cat_weights = graph.call_function(
437
+ torch.cat, args=(batch_weights,), kwargs={"dim": 0}
438
+ )
439
+ transposed_weights = graph.call_function(
440
+ torch.transpose, args=(cat_weights, 0, 1)
441
+ )
442
+ if len(batch_biases) > 0:
443
+ cat_biases = graph.call_function(
444
+ torch.cat, args=(batch_biases,), kwargs={"dim": 0}
445
+ )
446
+ fused_lhs = graph.call_function(
447
+ torch.addmm,
448
+ args=(cat_biases, batch_input, transposed_weights),
449
+ )
450
+ else:
451
+ fused_lhs = graph.call_function(
452
+ torch.mm,
453
+ args=(batch_input, transposed_weights),
454
+ )
455
+ fused_lhs_list = graph.call_function(
456
+ torch.split, args=(fused_lhs, split_sections), kwargs={"dim": 1}
457
+ )
458
+
459
+ for i, node in enumerate(batch_nodes):
460
+ with graph.inserting_after(fused_lhs_list):
461
+ new_node = graph.call_function(
462
+ operator.getitem, args=(fused_lhs_list, i)
463
+ )
464
+ node.replace_all_uses_with(new_node)
465
+ new_node.meta.update(node.meta)
466
+ graph.erase_node(node)
467
+
468
+
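The docstring of BatchLinearLHSFusion states the rewrite; the eager identity behind it (several linears sharing the same left-hand side collapse into one addmm against the concatenated weights, split back per layer) can be checked directly. A sketch (not part of the diffed file):

import torch
import torch.nn.functional as F

x = torch.randn(6, 16)                                   # shared LHS input
weights = [torch.randn(8, 16), torch.randn(4, 16), torch.randn(8, 16)]
biases = [torch.randn(8), torch.randn(4), torch.randn(8)]

originals = [F.linear(x, w, b) for w, b in zip(weights, biases)]

cat_w = torch.cat(weights, dim=0)                        # (20, 16)
cat_b = torch.cat(biases, dim=0)                         # (20,)
fused = torch.addmm(cat_b, x, cat_w.transpose(0, 1))     # (6, 20)
split_sections = [w.shape[0] for w in weights]
for orig, piece in zip(originals, torch.split(fused, split_sections, dim=1)):
    torch.testing.assert_close(orig, piece)
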
469
+ def is_node_meta_valid(node: Optional[torch.fx.Node]):
470
+ if node is None:
471
+ return True
472
+ if "example_value" not in node.meta:
473
+ return False
474
+ return True
475
+
476
+
477
+ def is_linear_node_can_be_fused(node: torch.fx.Node):
478
+ input = get_arg_value(node, 0, "input")
479
+ weight = get_arg_value(node, 1, "weight")
480
+ return (
481
+ is_node_meta_valid(node)
482
+ and is_node_meta_valid(input)
483
+ and is_node_meta_valid(weight)
484
+ and len(input.meta["example_value"].shape) == 2
485
+ and len(weight.meta["example_value"].shape) == 2
486
+ )
487
+
488
+
489
+ @register_fusion("batch_linear")
490
+ class PreGradBatchLinearFusion(BatchFusion):
491
+ """
492
+ Batch linear fusion in pre grad pass.
493
+ Fuse linear with same size with torch.baddmm
494
+ """
495
+
496
+ def _getitem_args(self, getitem_node: torch.fx.Node):
497
+ if getitem_node.target != operator.__getitem__ or (
498
+ getitem_node.op != "call_function"
499
+ ):
500
+ return None
501
+ return getitem_node.args[0]
502
+
503
+ def match(self, node: torch.fx.Node):
504
+ if CallFunctionVarArgs(torch.nn.functional.linear).match(
505
+ node
506
+ ) and is_linear_node_can_be_fused(node):
507
+ input = get_arg_value(node, 0, "input")
508
+ weight = get_arg_value(node, 1, "weight")
509
+ bias = get_arg_value(node, 2, "bias")
510
+ group_key = (
511
+ "batch_linear_pre_grad",
512
+ self._getitem_args(input),
513
+ str(input.meta["example_value"].shape),
514
+ str(weight.meta["example_value"].shape),
515
+ bias is None,
516
+ )
517
+ else:
518
+ group_key = None
519
+ return group_key
520
+
521
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
522
+ batch_nodes = []
523
+ batch_inputs = []
524
+ batch_weights = []
525
+ batch_biases = []
526
+ batch_inputs_metadata = []
527
+ batch_weights_metadata = []
528
+ batch_biases_metadata = []
529
+ for node in subset:
530
+ batch_nodes.append(node)
531
+ input = get_arg_value(node, 0, "input")
532
+ batch_inputs.append(input)
533
+ batch_inputs_metadata.append(input.meta["example_value"])
534
+ weight = get_arg_value(node, 1, "weight")
535
+ batch_weights.append(weight)
536
+ batch_weights_metadata.append(weight.meta["example_value"])
537
+ bias = get_arg_value(node, 2, "bias")
538
+ batch_biases.append(bias)
539
+ if bias is not None and hasattr(bias, "meta"):
540
+ batch_biases_metadata.append(bias.meta["example_value"])
541
+
542
+ with graph.inserting_before(subset[0]):
543
+ stack_inputs = graph.call_function(
544
+ torch.stack, args=(batch_inputs,), kwargs={"dim": 0}
545
+ )
546
+ update_stack_example_value(stack_inputs, batch_inputs_metadata)
547
+ stack_weights = graph.call_function(
548
+ torch.stack, args=(batch_weights,), kwargs={"dim": 0}
549
+ )
550
+ update_stack_example_value(stack_weights, batch_weights_metadata)
551
+ transpose_weight = graph.call_function(
552
+ torch.transpose, args=(stack_weights, 1, 2)
553
+ )
554
+ if all(bias is None for bias in batch_biases):
555
+ bmm = graph.call_function(
556
+ torch.bmm,
557
+ args=(stack_inputs, transpose_weight),
558
+ )
559
+ else:
560
+ stack_biases = graph.call_function(
561
+ torch.stack, args=(batch_biases,), kwargs={"dim": 0}
562
+ )
563
+ update_stack_example_value(stack_biases, batch_biases_metadata)
564
+ unsqueeze_biases = graph.call_function(
565
+ torch.unsqueeze, args=(stack_biases, 1)
566
+ )
567
+ bmm = graph.call_function(
568
+ torch.baddbmm,
569
+ args=(unsqueeze_biases, stack_inputs, transpose_weight),
570
+ )
571
+
572
+ bmm = graph.call_function(torch.unbind, args=(bmm,), kwargs={"dim": 0})
573
+ for i, linear in enumerate(batch_nodes):
574
+ with graph.inserting_after(bmm):
575
+ getitem = graph.call_function(operator.getitem, args=(bmm, i))
576
+ linear.replace_all_uses_with(getitem)
577
+ getitem.meta.update(linear.meta)
578
+ graph.erase_node(linear)
579
+
580
+
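PreGradBatchLinearFusion relies on the fact that linears with identical input and weight shapes can be evaluated as one baddbmm over stacked operands and unbound back into per-layer results. A small eager sketch (not part of the diffed file):

import torch
import torch.nn.functional as F

xs = [torch.randn(6, 16) for _ in range(3)]
ws = [torch.randn(8, 16) for _ in range(3)]
bs = [torch.randn(8) for _ in range(3)]

originals = [F.linear(x, w, b) for x, w, b in zip(xs, ws, bs)]

stacked = torch.baddbmm(
    torch.stack(bs).unsqueeze(1),       # (3, 1, 8), broadcast over the rows
    torch.stack(xs),                    # (3, 6, 16)
    torch.stack(ws).transpose(1, 2),    # (3, 16, 8)
)
for orig, fused in zip(originals, torch.unbind(stacked, dim=0)):
    torch.testing.assert_close(orig, fused)
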
581
+ @register_fusion("batch_layernorm")
582
+ class BatchLayernormFusion(BatchFusion):
583
+ """
584
+ Batch layer norm fusion in pre grad pass
585
+ """
586
+
587
+ def match(self, node: torch.fx.Node):
588
+ if CallFunctionVarArgs(torch.nn.functional.layer_norm).match(node):
589
+ input = get_arg_value(node, 0, "input")
590
+ weight = get_arg_value(node, 2, "weight")
591
+ bias = get_arg_value(node, 3, "bias")
592
+ group_key = (
593
+ (
594
+ "batch_layernorm",
595
+ str(input.meta["example_value"].shape),
596
+ str(weight.meta["example_value"].shape)
597
+ if weight is not None
598
+ else "",
599
+ str(bias.meta["example_value"].shape) if bias is not None else "",
600
+ str(get_arg_value(node, 1, "normalized_shape")),
601
+ str(get_arg_value(node, 4, "eps")),
602
+ )
603
+ if "example_value" in input.meta
604
+ and is_node_meta_valid(weight)
605
+ and is_node_meta_valid(bias)
606
+ else None
607
+ )
608
+ else:
609
+ group_key = None
610
+ return group_key
611
+
612
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
613
+ group_inputs = []
614
+ group_shapes = []
615
+ group_weights = []
616
+ group_biases = []
617
+ group_epss = []
618
+ group_nodes = []
619
+ group_inputs_metadata = []
620
+ group_biases_metadata = []
621
+ group_weights_metadata = []
622
+ for node in subset:
623
+ group_nodes.append(node)
624
+ input = get_arg_value(node, 0, "input")
625
+ group_inputs.append(input)
626
+ group_inputs_metadata.append(input.meta["example_value"])
627
+ group_shapes.append(get_arg_value(node, 1, "normalized_shape"))
628
+ weight = get_arg_value(node, 2, "weight")
629
+ group_weights.append(weight)
630
+ if weight is not None and hasattr(weight, "meta"):
631
+ group_weights_metadata.append(weight.meta["example_value"])
632
+ bias = get_arg_value(node, 3, "bias")
633
+ group_biases.append(bias)
634
+ if bias is not None and hasattr(bias, "meta"):
635
+ group_biases_metadata.append(bias.meta["example_value"])
636
+ eps = get_arg_value(node, 4, "eps")
637
+ if eps is None:
638
+ eps = 1e-5
639
+ group_epss.append(eps)
640
+ stack_dim = -1 - len(group_shapes[-1])
641
+
642
+ if all(bias is None for bias in group_biases):
643
+ group_biases = None # type: ignore[assignment]
644
+ group_biases: Optional[List[Any]]
645
+ if all(weight is None for weight in group_weights):
646
+ group_weights = None # type: ignore[assignment]
647
+ group_weights: Optional[List[Any]]
648
+ assert all(
649
+ eps == group_epss[0] for eps in group_epss
650
+ ), "all epsilon values must be equal"
651
+
652
+ with graph.inserting_before(subset[0]):
653
+ stack_input = graph.call_function(
654
+ torch.stack, args=(group_inputs,), kwargs={"dim": stack_dim}
655
+ )
656
+ update_stack_example_value(stack_input, group_inputs_metadata, stack_dim)
657
+ if group_weights is not None:
658
+ stack_weight = graph.call_function(
659
+ torch.stack, args=(group_weights,), kwargs={"dim": 0}
660
+ )
661
+ update_stack_example_value(stack_weight, group_weights_metadata)
662
+ else:
663
+ stack_weight = None
664
+ if group_biases is not None:
665
+ stack_bias = graph.call_function(
666
+ torch.stack, args=(group_biases,), kwargs={"dim": 0}
667
+ )
668
+ update_stack_example_value(stack_bias, group_biases_metadata)
669
+ else:
670
+ stack_bias = None
671
+
672
+ batch_layer_norm = graph.call_function(
673
+ torch.nn.functional.layer_norm,
674
+ args=(stack_input, group_shapes[-1]),
675
+ kwargs={"eps": group_epss[-1]},
676
+ )
677
+ batch_layer_norm.meta["example_value"] = stack_input.meta["example_value"]
678
+
679
+ if group_weights is not None and group_biases is not None:
680
+ previous_batch_layer_norm_meta = batch_layer_norm.meta["example_value"]
681
+ batch_layer_norm = graph.call_function(
682
+ torch.mul, args=(stack_weight, batch_layer_norm)
683
+ )
684
+ update_pointwise_example_value(
685
+ batch_layer_norm,
686
+ stack_weight.meta["example_value"],
687
+ previous_batch_layer_norm_meta,
688
+ torch.mul,
689
+ )
690
+ previous_batch_layer_norm_meta = batch_layer_norm.meta["example_value"]
691
+ batch_layer_norm = graph.call_function(
692
+ torch.add, args=(stack_bias, batch_layer_norm)
693
+ )
694
+ update_pointwise_example_value(
695
+ batch_layer_norm,
696
+ stack_bias.meta["example_value"],
697
+ previous_batch_layer_norm_meta,
698
+ torch.add,
699
+ )
700
+ elif group_weights is not None and group_biases is None:
701
+ previous_batch_layer_norm_meta = batch_layer_norm.meta["example_value"]
702
+ batch_layer_norm = graph.call_function(
703
+ torch.mul, args=(stack_weight, batch_layer_norm)
704
+ )
705
+ update_pointwise_example_value(
706
+ batch_layer_norm,
707
+ stack_weight.meta["example_value"],
708
+ previous_batch_layer_norm_meta,
709
+ torch.mul,
710
+ )
711
+ elif group_weights is None and group_biases is not None:
712
+ previous_batch_layer_norm_meta = batch_layer_norm.meta["example_value"]
713
+ batch_layer_norm = graph.call_function(
714
+ torch.add, args=(stack_bias, batch_layer_norm)
715
+ )
716
+ update_pointwise_example_value(
717
+ batch_layer_norm,
718
+ stack_bias.meta["example_value"],
719
+ previous_batch_layer_norm_meta,
720
+ torch.add,
721
+ )
722
+
723
+ batch_layer_norm_unbind = graph.call_function(
724
+ torch.unbind,
725
+ args=(batch_layer_norm,),
726
+ kwargs={"dim": stack_dim},
727
+ )
728
+ update_stack_example_value(
729
+ batch_layer_norm_unbind,
730
+ batch_layer_norm.meta["example_value"],
731
+ op=torch.unbind,
732
+ dim=stack_dim,
733
+ )
734
+
735
+ for i, node in enumerate(group_nodes):
736
+ with graph.inserting_after(batch_layer_norm_unbind):
737
+ new_node = graph.call_function(
738
+ operator.getitem, args=(batch_layer_norm_unbind, i)
739
+ )
740
+ node.replace_all_uses_with(new_node)
741
+ new_node.meta.update(node.meta)
742
+ graph.erase_node(node)
743
+
744
+
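The fusion above leans on a simple identity: layer_norm of the stacked inputs without weight/bias, followed by a broadcast mul/add with the stacked weights and biases, matches the per-input affine layer_norm. A sketch (not part of the diffed file):

import torch
import torch.nn.functional as F

normalized_shape = (16,)
xs = [torch.randn(6, 16) for _ in range(3)]
ws = [torch.randn(16) for _ in range(3)]
bs = [torch.randn(16) for _ in range(3)]

originals = [
    F.layer_norm(x, normalized_shape, w, b, eps=1e-5)
    for x, w, b in zip(xs, ws, bs)
]

stack_dim = -1 - len(normalized_shape)   # stack just in front of the normalized dims
stacked = F.layer_norm(torch.stack(xs, dim=stack_dim), normalized_shape, eps=1e-5)
stacked = torch.stack(ws) * stacked + torch.stack(bs)
for orig, fused in zip(originals, torch.unbind(stacked, dim=stack_dim)):
    torch.testing.assert_close(orig, fused)
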
745
+ class BatchPointwiseOpsPreGradFusion(BatchPointwiseOpsFusionFactory):
746
+ """
747
+ Batch poinwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
748
+ We fuse it in random place, and the introduced stack node may be merged in split cat.
749
+ """
750
+
751
+ def __init__(self, op, **kwargs):
752
+ super().__init__(op, **kwargs)
753
+ self.op = op
754
+
755
+ def match(self, node: torch.fx.Node):
756
+ input = get_arg_value(node, 0, "input")
757
+ if CallFunctionVarArgs(self.op).match(node) and is_node_meta_valid(node):
758
+ # for the relu op, we also use the inplace flag to construct the key
759
+ group_key = (
760
+ "batch_" + self.op.__name__.lower() + "_pre_grad",
761
+ str(input.meta["example_value"].shape),
762
+ str(node.kwargs.get("inplace", False)),
763
+ )
764
+ else:
765
+ group_key = None
766
+ return group_key
767
+
768
+ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
769
+ batch_nodes = []
770
+ batch_inputs = []
771
+ batch_inputs_metadata = []
772
+
773
+ for node in subset:
774
+ batch_nodes.append(node)
775
+ input = get_arg_value(node, 0, "input")
776
+ batch_inputs.append(input)
777
+ batch_inputs_metadata.append(input.meta["example_value"])
778
+
779
+ with graph.inserting_before(subset[0]):
780
+ stack_inputs = graph.call_function(
781
+ torch.stack, args=(batch_inputs,), kwargs={"dim": 0}
782
+ )
783
+ update_stack_example_value(stack_inputs, batch_inputs_metadata)
784
+ if self.op == torch.nn.functional.relu:
785
+ batch_op = graph.call_function(
786
+ self.op,
787
+ args=(stack_inputs,),
788
+ kwargs={"inplace": subset[0].kwargs.get("inplace", False)},
789
+ )
790
+ else:
791
+ batch_op = graph.call_function(
792
+ self.op,
793
+ args=(stack_inputs,),
794
+ )
795
+ unbind_op = graph.call_function(
796
+ torch.unbind, args=(batch_op,), kwargs={"dim": 0}
797
+ )
798
+ for i, node in enumerate(batch_nodes):
799
+ with graph.inserting_after(unbind_op):
800
+ getitem = graph.call_function(operator.getitem, args=(unbind_op, i))
801
+ node.replace_all_uses_with(getitem)
802
+ getitem.meta.update(node.meta)
803
+ graph.erase_node(node)
804
+
805
+
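The pre-grad pointwise fusion is the same trick for unary ops: apply the op once to a stacked tensor, then unbind. A one-assert sketch (not part of the diffed file), using sigmoid as the example op:

import torch

inputs = [torch.randn(2, 3) for _ in range(4)]
batched = torch.unbind(torch.sigmoid(torch.stack(inputs, dim=0)), dim=0)
for single, from_batch in zip(map(torch.sigmoid, inputs), batched):
    torch.testing.assert_close(single, from_batch)
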
806
+ @register_fusion("batch_tanh")
807
+ class BatchTanhPreGradFusion(BatchPointwiseOpsPreGradFusion):
808
+ def __init__(self, **kwargs):
809
+ super().__init__(torch.tanh, **kwargs)
810
+
811
+
812
+ @register_fusion("batch_sigmoid")
813
+ class BatchSigmoidPreGradFusion(BatchPointwiseOpsPreGradFusion):
814
+ def __init__(self, **kwargs):
815
+ super().__init__(torch.sigmoid, **kwargs)
816
+
817
+
818
+ @register_fusion("batch_relu")
819
+ class BatchReLuPreGradFusion(BatchPointwiseOpsPreGradFusion):
820
+ def __init__(self, **kwargs):
821
+ super().__init__(torch.nn.functional.relu, **kwargs)
822
+
823
+
824
+ @register_fusion("batch_aten_add", pre_grad=False)
825
+ class BatchAddPostGradFusion(BatchPointwiseOpsPostGradFusion):
826
+ def __init__(self, **kwargs):
827
+ super().__init__(aten.add.Tensor, **kwargs)
828
+
829
+
830
+ @register_fusion("batch_aten_sub", pre_grad=False)
831
+ class BatchSubPostGradFusion(BatchPointwiseOpsPostGradFusion):
832
+ def __init__(self, **kwargs):
833
+ super().__init__(aten.sub.Tensor, **kwargs)
834
+
835
+
836
+ @register_fusion("batch_aten_div", pre_grad=False)
837
+ class BatchDivPostGradFusion(BatchPointwiseOpsPostGradFusion):
838
+ def __init__(self, **kwargs):
839
+ super().__init__(aten.div.Tensor, **kwargs)
840
+
841
+
842
+ @register_fusion("batch_aten_mul", pre_grad=False)
843
+ class BatchMulPostGradFusion(BatchPointwiseOpsPostGradFusion):
844
+ def __init__(self, **kwargs):
845
+ super().__init__(aten.mul.Tensor, **kwargs)
846
+
847
+
848
+ class _OrderedSet:
849
+ def __init__(self, param=None):
850
+ if param:
851
+ self.rep = OrderedDict({k: None for k in param})
852
+ else:
853
+ self.rep = OrderedDict()
854
+
855
+ def __contains__(self, o):
856
+ return o in self.rep
857
+
858
+ def __len__(self):
859
+ return self.rep.__len__()
860
+
861
+ def append(self, o):
862
+ self.rep[o] = None
863
+
864
+ def __iter__(self):
865
+ return self.rep.keys().__iter__()
866
+
867
+
868
+ def find_independent_subset_greedy(
869
+ node_list: Iterable[torch.fx.Node],
870
+ graph_search_options: Dict[str, Any],
871
+ ) -> Iterator[Iterable[torch.fx.Node]]:
872
+ """
873
+ Yields a list of subsets of `node_list` where no element in the subset
874
+ depends on any other element in the subset. This results in a set of
875
+ independent nodes which can be fused together.
876
+
877
+ The order of `node_list` is preserved within each subset so we can benefit
878
+ from split-cat elimination in later passes.
879
+
880
+ During iteration it is only safe to mutate the graph by changing the nodes
881
+ that have been returned.
882
+
883
+ graph_search_options:
884
+ - min_fuse_set_size: Minimum size of the subset to consider. Subsets below
885
+ this size will be ignored.
886
+ - max_fuse_set_size: Maximum size of the subset to consider. Subsets will
887
+ be broken to be at most this size.
888
+ """
889
+
890
+ # Compute all the children of `node` which are members of
891
+ # `interesting_nodes`.
892
+ def find_dependent_nodes(node, interesting_nodes):
893
+ visited_node_set: Set[torch.fx.Node] = {node}
894
+ dep_set: Set[torch.fx.Node] = set()
895
+
896
+ work = [node]
897
+ while work:
898
+ node = work.pop()
899
+ for input_node in node.all_input_nodes:
900
+ if input_node in interesting_nodes:
901
+ dep_set.add(input_node)
902
+
903
+ if input_node not in visited_node_set:
904
+ visited_node_set.add(input_node)
905
+ work.append(input_node)
906
+
907
+ return dep_set
908
+
909
+ min_fuse_set_size = graph_search_options["min_fuse_set_size"]
910
+ max_fuse_set_size = graph_search_options["max_fuse_set_size"]
911
+
912
+ # node_list needs to be a set because we only track the nodes that are left
913
+ # in it (and we want to do the `in` on a set, not a list). But we want to
914
+ # keep the correct order.
915
+ node_list = _OrderedSet(node_list)
916
+
917
+ cache: Dict[torch.fx.Node, Set[torch.fx.Node]] = {}
918
+ while node_list:
919
+ subset: List[torch.fx.Node] = []
920
+ subset_deps: Set[torch.fx.Node] = set()
921
+
922
+ next_round_node_list = _OrderedSet()
923
+ for node in node_list:
924
+ if len(subset) >= max_fuse_set_size or node in subset_deps:
925
+ next_round_node_list.append(node)
926
+ continue
927
+
928
+ dep_set = cache.pop(node, None)
929
+ if dep_set is None:
930
+ dep_set = find_dependent_nodes(node, node_list)
931
+
932
+ if not dep_set.intersection(subset):
933
+ subset.append(node)
934
+ subset_deps.update(dep_set)
935
+ else:
936
+ next_round_node_list.append(node)
937
+ cache[node] = dep_set
938
+
939
+ if len(subset) >= min_fuse_set_size:
940
+ # Careful here - the caller uses the subsets to fuse nodes together
941
+ # so we need to clear any cache entry that contains one of the
942
+ # returned nodes because the dependency list could be different
943
+ # (larger) after the merge.
944
+ cache = {k: v for k, v in cache.items() if v.isdisjoint(subset)}
945
+ yield subset
946
+
947
+ node_list = next_round_node_list
948
+
949
+
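For intuition about the grouping logic, here is a simplified pure-Python analogue (not part of the diffed file) of find_independent_subset_greedy. It works on a plain {node: direct inputs} mapping instead of torch.fx nodes and ignores the min/max size limits and the dependency cache, but follows the same greedy rule: defer any candidate that depends on, or is depended on by, a node already placed in the current subset.

def greedy_independent_subsets(candidates, inputs_of):
    remaining = list(candidates)
    while remaining:
        subset, subset_deps, deferred = [], set(), []
        for node in remaining:
            # transitive inputs of `node`, restricted to the remaining candidates
            deps, seen, work = set(), {node}, [node]
            while work:
                for inp in inputs_of.get(work.pop(), ()):
                    if inp in remaining:
                        deps.add(inp)
                    if inp not in seen:
                        seen.add(inp)
                        work.append(inp)
            if node not in subset_deps and deps.isdisjoint(subset):
                subset.append(node)
                subset_deps |= deps
            else:
                deferred.append(node)
        yield subset
        remaining = deferred

# "c" consumes "a", so the two can never be fused into the same subset
deps = {"a": [], "b": [], "c": ["a"], "d": []}
assert list(greedy_independent_subsets(["a", "b", "c", "d"], deps)) == [
    ["a", "b", "d"],
    ["c"],
]
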
950
+ def get_fusion_candidates(
951
+ rule: GroupBatchFusionBase, root_node: torch.fx.Node, fused_set: Set[torch.fx.Node]
952
+ ) -> DefaultDict[Any, List[torch.fx.Node]]:
953
+ """
954
+ Search fusion candidates for a specific rule using BFS starting from the root node.
955
+ We only search the subgraph within graph_search_options["max_fuse_search_depth"].
956
+ """
957
+ q: Deque[Tuple[int, torch.fx.Node]] = collections.deque()
958
+
959
+ candidate_dict: DefaultDict[Any, List[torch.fx.Node]] = collections.defaultdict(
960
+ list
961
+ )
962
+
963
+ if root_node.target in SEARCH_EXCLUSIONS:
964
+ return candidate_dict
965
+
966
+ visited_set: Set[torch.fx.Node] = set()
967
+
968
+ for next_node in root_node.all_input_nodes:
969
+ q.append((1, next_node))
970
+ visited_set.add(next_node)
971
+
972
+ while len(q) > 0:
973
+ depth, node = q.popleft()
974
+
975
+ if node in fused_set:
976
+ continue
977
+
978
+ key = rule.match(node)
979
+ if key is not None:
980
+ candidate_nodes = candidate_dict[key]
981
+ if node not in candidate_nodes:
982
+ candidate_nodes.append(node)
983
+ else:
984
+ if depth < rule.graph_search_options["max_fuse_search_depth"]:
985
+ for next_node in node.all_input_nodes:
986
+ if next_node not in visited_set:
987
+ visited_set.add(next_node)
988
+ q.append((depth + 1, next_node))
989
+
990
+ return candidate_dict
991
+
992
+
993
+ def apply_group_batch_fusion(graph: torch.fx.GraphModule, rule: GroupBatchFusionBase):
994
+ stable_topological_sort(graph) # type: ignore[arg-type]
995
+ fused_set: Set[torch.fx.Node] = set()
996
+
997
+ for node in reversed(graph.nodes):
998
+ candidates = get_fusion_candidates(rule, node, fused_set)
999
+
1000
+ for key, candidate_nodes in candidates.items():
1001
+ if len(candidate_nodes) < rule.graph_search_options["min_fuse_set_size"]:
1002
+ continue
1003
+
1004
+ for subset in find_independent_subset_greedy(
1005
+ candidate_nodes, rule.graph_search_options
1006
+ ):
1007
+ rule.fuse(graph, subset)
1008
+ fused_set.update(subset)
1009
+ if isinstance(rule, GroupFusion):
1010
+ counters["inductor"]["group_fusion"] += 1
1011
+ elif isinstance(rule, BatchFusion):
1012
+ counters["inductor"]["batch_fusion"] += 1
1013
+ else:
1014
+ counters["inductor"]["unknown_group_batch_fusion"] += 1
1015
+
1016
+ log.debug(
1017
+ f"{rule.__class__.__name__}: key = {key}; subset size = {len(list(subset))}" # noqa: G004
1018
+ )
1019
+
1020
+
1021
+ def generate_fusion_from_config(config_options: Dict[str, Any], pre_grad=True):
1022
+ fusions: List[GroupBatchFusionBase] = []
1023
+ for name, options in config_options.items():
1024
+ fusion_cls = PRE_GRAD_FUSIONS[name] if pre_grad else POST_GRAD_FUSIONS[name]
1025
+ _options = graph_search_options.copy()
1026
+ _options.update(options)
1027
+ fusions.append(fusion_cls(graph_search_options=_options)) # type: ignore[operator]
1028
+ return fusions
1029
+
1030
+
1031
+ def group_batch_fusion_passes(graph: torch.fx.Graph, pre_grad=True):
1032
+ fusions: List[GroupBatchFusionBase] = []
1033
+ # we keep all current pre grad fusions to preserve the
1034
+ # current implementation; this will be removed later
1035
+ if pre_grad:
1036
+ fusions += generate_fusion_from_config(
1037
+ config.pre_grad_fusion_options, pre_grad=True
1038
+ )
1039
+ else:
1040
+ fbgemm_fusion_keys = [
1041
+ x
1042
+ for x in config.post_grad_fusion_options
1043
+ if config.post_grad_fusion_options[x].get("require_fbgemm", False)
1044
+ ]
1045
+ fbgemm_fusions = {
1046
+ fusion: config.post_grad_fusion_options[fusion]
1047
+ for fusion in fbgemm_fusion_keys
1048
+ }
1049
+ non_fbgemm_fusions = {
1050
+ fusion: config.post_grad_fusion_options[fusion]
1051
+ for fusion in config.post_grad_fusion_options.keys()
1052
+ if fusion not in fbgemm_fusion_keys
1053
+ }
1054
+ fusions += generate_fusion_from_config(non_fbgemm_fusions, pre_grad=False)
1055
+ if has_fbgemm:
1056
+ fusions += generate_fusion_from_config(fbgemm_fusions, pre_grad=False)
1057
+
1058
+ for rule in fusions:
1059
+ apply_group_batch_fusion(graph, rule) # type: ignore[arg-type]
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py ADDED
@@ -0,0 +1,341 @@
1
+ import logging
2
+ import typing
3
+ from collections import Counter
4
+ from typing import Dict, List, Set
5
+
6
+ import torch
7
+ import torch._guards
8
+ from torch._inductor.constant_folding import ConstantFolder
9
+ from torch.multiprocessing.reductions import StorageWeakRef
10
+
11
+ from .. import config
12
+ from ..pattern_matcher import (
13
+ CallFunction,
14
+ init_once_fakemode,
15
+ KeywordArg,
16
+ Match,
17
+ PatternMatcherPass,
18
+ register_graph_pattern,
19
+ stable_topological_sort,
20
+ )
21
+ from .replace_random import replace_random_passes
22
+
23
+ log = logging.getLogger(__name__)
24
+ patterns = PatternMatcherPass()
25
+
26
+
27
+ @init_once_fakemode
28
+ def lazy_init():
29
+ from .fuse_attention import _sfdp_init
30
+ from .misc_patterns import _misc_patterns_init
31
+ from .pad_mm import _pad_mm_init
32
+
33
+ _pad_mm_init()
34
+ _sfdp_init()
35
+ _misc_patterns_init()
36
+
37
+
38
+ @torch.utils._python_dispatch._disable_current_modes()
39
+ def remove_no_ops(
40
+ gm: torch.fx.GraphModule, zeros: Set[torch.fx.Node], ones: Set[torch.fx.Node]
41
+ ):
42
+ "Removes no-ops: (+ 0, - 0, * 1, / 1)"
43
+ aten = torch.ops.aten
44
+ graph = gm.graph
45
+
46
+ def fake_tensors_eq(t1, t2, fields=("shape", "dtype", "device")):
47
+ if any(not isinstance(t, torch.Tensor) for t in (t1, t2)):
48
+ return False
49
+ for field in fields:
50
+ if getattr(t1, field) != getattr(t2, field):
51
+ return False
52
+ return True
53
+
54
+ def replace_no_op(node, replace_input_index):
55
+ replacement = node.args[replace_input_index]
56
+
57
+ # https://github.com/pytorch/pytorch/issues/86128 causes
58
+ # non-Tensor inputs even for ops with only Tensor inputs.
59
+ # TODO - decompose/type promote to avoid this
60
+ if not all(isinstance(arg, torch.fx.Node) for arg in node.args):
61
+ return
62
+
63
+ if not fake_tensors_eq(node.meta["val"], replacement.meta["val"]):
64
+ if fake_tensors_eq(
65
+ node.meta["val"],
66
+ replacement.meta["val"],
67
+ ("shape", "device"),
68
+ ):
69
+ with graph.inserting_after(node):
70
+ replacement = graph.call_function(
71
+ torch.ops.prims.convert_element_type.default,
72
+ args=(replacement, node.meta["val"].dtype),
73
+ )
74
+ else:
75
+ return
76
+
77
+ node.replace_all_uses_with(replacement)
78
+ replacement.meta.update(node.meta)
79
+ graph.erase_node(node)
80
+
81
+ for node in graph.nodes:
82
+ if node.op != "call_function":
83
+ continue
84
+
85
+ # TODO handle Tensor-Scalar adds, it's a different schema
86
+ if node.target == aten.add.Tensor and len(node.args) == 2:
87
+ if (
88
+ not any(e in zeros for e in node.args)
89
+ or node.kwargs.get("alpha", 1) != 1
90
+ ):
91
+ continue
92
+
93
+ replace_index = 1 if node.args[0] in zeros else 0
94
+ replace_no_op(node, replace_index)
95
+
96
+ elif node.target == aten.sub.Tensor and len(node.args) == 2:
97
+ if node.args[1] not in zeros or node.kwargs.get("alpha", 1) != 1:
98
+ continue
99
+
100
+ replace_no_op(node, 0)
101
+
102
+ elif node.target == aten.mul.Tensor and len(node.args) == 2:
103
+ if not any(e in ones for e in node.args):
104
+ continue
105
+
106
+ replace_input_index = 1 if node.args[0] in ones else 0
107
+ replace_no_op(node, replace_input_index)
108
+
109
+ elif (
110
+ node.target == aten.div.Tensor
111
+ and len(node.args) == 2
112
+ and node.args[1] in ones
113
+ ):
114
+ replace_no_op(node, 0)
115
+
116
+
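remove_no_ops is pure algebra: an add/sub with a zero operand (and alpha == 1) or a mul/div by one is replaced with its surviving input, provided shape/dtype/device line up. The eager identities it relies on, as a sketch (not part of the diffed file):

import torch

x = torch.randn(4, 4)
zero, one = torch.zeros_like(x), torch.ones_like(x)
for no_op in (x + zero, x - zero, x * one, x / one):
    assert torch.equal(no_op, x)
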
117
+ @torch.utils._python_dispatch._disable_current_modes()
118
+ def remove_redundant_views(gm: torch.fx.GraphModule):
119
+ """
120
+ Removes redundant views by reusing existing ones.
121
+ """
122
+
123
+ # A dictionary mapping a tensor to all aliased views.
124
+ views: Dict[torch.fx.Node, Dict[torch.dtype, torch.fx.Node]] = {}
125
+ graph = gm.graph
126
+
127
+ for node in graph.nodes:
128
+ if node.op != "call_function":
129
+ continue
130
+
131
+ if node.target != torch.ops.aten.view.dtype:
132
+ continue
133
+
134
+ src = node.args[0]
135
+ to_type = node.args[1]
136
+ existing_views = views.get(src)
137
+ is_needed = True
138
+
139
+ if existing_views:
140
+ # Replace the view with an existing view if available.
141
+ alias = existing_views.get(to_type)
142
+ if alias:
143
+ is_needed = False
144
+ node.replace_all_uses_with(alias)
145
+ alias.meta.update(node.meta)
146
+ graph.erase_node(node)
147
+ else:
148
+ from_type = src.meta["val"].dtype
149
+ existing_views = {from_type: src}
150
+ views[src] = existing_views
151
+
152
+ if is_needed:
153
+ # Save the new alias but do not replace existing one.
154
+ existing_views.setdefault(to_type, node)
155
+ views[node] = existing_views
156
+
157
+ # Clean up unused views.
158
+ while True:
159
+ unused_views = [alias for alias in views if not alias.users]
160
+ if len(unused_views) == 0:
161
+ break
162
+ for unused in unused_views:
163
+ views.pop(unused)
164
+ graph.erase_node(unused)
165
+
166
+
167
+ class UniformValueConstantFolder(ConstantFolder):
168
+ """
169
+ Runs constant folding and replaces tensors that have a unifrom value
170
+ with a tensor constructor call: aten.full([shape], value, ...)
171
+ """
172
+
173
+ def __init__(self, gm, skip_constructors=False):
174
+ super().__init__(gm, skip_constructors)
175
+ self.node_storages_ptrs: Dict[torch.fx.Node, int] = {}
176
+ self.constant_data_ptrs: Dict[torch.fx.Node, StorageWeakRef] = {}
177
+ # we may constant fold a tensor which in the graph has a sym size
178
+ # see: [constant folding refining of symints]
179
+ self.node_replacements_shapes: Dict[torch.fx.Node, List[int]] = {}
180
+
181
+ def insertable_tensor_check(self, t: torch.Tensor) -> bool:
182
+         # TODO - we could also handle Tensors which get replaced with arange here
183
+ return (
184
+ t.numel() != 0
185
+ and bool((t == t.flatten()[0]).all())
186
+ and torch._C._has_storage(t)
187
+ and t.layout == torch.strided
188
+ )
189
+
190
+ def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
191
+ self.node_replacements[node] = tensor.flatten()[0].item()
192
+ self.constant_data_ptrs[node] = StorageWeakRef(tensor.untyped_storage())
193
+ shape = list(tensor.shape)
194
+ assert all(type(dim) is int for dim in shape)
195
+ self.node_replacements_shapes[node] = shape
196
+
197
+
198
+ @torch.utils._python_dispatch._disable_current_modes()
199
+ def constant_fold_uniform_value(gm: torch.fx.GraphModule):
200
+ "Runs constant folding and replaces constants which can be constructed with a single `full` call. Calls into remove_no_ops."
201
+ aten = torch.ops.aten
202
+
203
+ # Constant folding can leak memory, especially with repeated compilation, so we are only going to
204
+ # remove constants which can be replaced with a constructor.
205
+ cf = UniformValueConstantFolder(gm)
206
+ cf.run()
207
+
208
+ node_replacements = cf.node_replacements
209
+
210
+ # note: [constant folding refining of symints]
211
+ # constant folding will partially evaluate a graph such that values which have dependencies which
212
+ # are entirely known at compile time may also become compile time constants. in some cases,
213
+ # this will include symints which we had not yet previously deduced are guaranteed a
214
+ # constant value and is then deduced in constant folding. an example is:
215
+ # unbacked_symint_eq_11 = torch.full((), 11).item()
216
+ # torch.full((unbacked_symint_eq_11,), 0)
217
+ node_replacements_shapes = cf.node_replacements_shapes
218
+
219
+ graph = gm.graph
220
+
221
+ zeros = set()
222
+ ones = set()
223
+
224
+ # Got failures in `test_is_set_to_cuda` if we change aliasing on constants,
225
+ # so just constant-ify if a Tensor is unaliased
226
+ constant_data_ptr_count: typing.Counter[StorageWeakRef] = Counter()
227
+
228
+ for node in cf.node_replacements:
229
+ constant_data_ptr_count[cf.constant_data_ptrs[node]] += 1
230
+
231
+ for node, value in node_replacements.items():
232
+         # we don't currently have a functional way of instantiating a non-contiguous tensor with full/zeros/ones;
233
+ # hasn't shown up to be important yet
234
+ fake_tensor = node.meta["val"]
235
+ if not fake_tensor.is_contiguous(memory_format=torch.contiguous_format):
236
+ continue
237
+
238
+ if constant_data_ptr_count[cf.constant_data_ptrs[node]] > 1:
239
+ continue
240
+
241
+ with graph.inserting_after(node):
242
+             # the conversion to a tensor and back to a scalar value can be lossy; just use the original full ctor value
243
+ if (
244
+ node.op == "call_function"
245
+ and node.target == aten.full.default
246
+ and len(node.args) == 2
247
+ ):
248
+ value = node.args[1]
249
+
250
+ # refines symints, see [constant folding refining of symints] above
251
+ for runtime_size, compile_time_size in zip(
252
+ node_replacements_shapes[node], fake_tensor.shape
253
+ ):
254
+ torch._check(runtime_size == compile_time_size)
255
+
256
+             # zeros and ones just get traced into full, so we insert those
257
+ new_node = graph.call_function(
258
+ aten.full.default,
259
+ args=(node_replacements_shapes[node], value),
260
+ kwargs={
261
+ "dtype": fake_tensor.dtype,
262
+ "layout": torch.strided,
263
+ "device": fake_tensor.device,
264
+ "pin_memory": False,
265
+ },
266
+ )
267
+
268
+ new_node.meta.update(node.meta)
269
+ node.replace_all_uses_with(new_node)
270
+ graph.erase_node(node)
271
+
272
+ if value == 0:
273
+ zeros.add(new_node)
274
+ elif value == 1:
275
+ ones.add(new_node)
276
+
277
+ remove_no_ops(gm, zeros, ones)
278
+ remove_redundant_views(gm)
279
+
280
+
281
+ def joint_graph_passes(graph: torch.fx.GraphModule):
282
+ """
283
+ Run FX transformations on the joint forwards+backwards graph.
284
+ """
285
+ lazy_init()
286
+ count = 0
287
+
288
+ if config.joint_graph_constant_folding:
289
+ constant_fold_uniform_value(graph)
290
+
291
+ if config.pattern_matcher:
292
+ count += patterns.apply(graph.graph) # type: ignore[arg-type]
293
+
294
+ if not config.fallback_random:
295
+ count += replace_random_passes(graph)
296
+
297
+ if count:
298
+ stable_topological_sort(graph.graph)
299
+ graph.graph.lint()
300
+ graph.recompile()
301
+ return graph
302
+
303
+
304
+ @register_graph_pattern(
305
+ CallFunction(
306
+ torch.ops.prims.convert_element_type.default,
307
+ CallFunction(
308
+ torch.ops.prims.convert_element_type.default,
309
+ KeywordArg("arg"),
310
+ KeywordArg("dtype1"),
311
+ ),
312
+ KeywordArg("dtype2"),
313
+ ),
314
+ pass_dict=patterns,
315
+ )
316
+ def pointless_convert(match: Match, arg, dtype1: torch.dtype, dtype2: torch.dtype):
317
+ """Remove chain of dtype conversions often created by AMP"""
318
+ graph = match.graph
319
+ node = match.output_node()
320
+ allowed = {torch.float16, torch.bfloat16, torch.float32, torch.float64}
321
+ if dtype1 in allowed and dtype2 in allowed:
322
+ repl = graph.call_function(
323
+ torch.ops.prims.convert_element_type.default, (arg, dtype2)
324
+ )
325
+ repl.meta.update(node.meta)
326
+ node.replace_all_uses_with(repl)
327
+ match.erase_nodes(graph)
328
+
329
+
330
+ @register_graph_pattern(
331
+ CallFunction(torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")),
332
+ pass_dict=patterns,
333
+ )
334
+ def pointless_view(match: Match, arg, size):
335
+ """Remove no-op view"""
336
+ graph = match.graph
337
+ node = match.output_node()
338
+ arg_size = list(node.args[0].meta["val"].shape) # type: ignore[union-attr]
339
+ if size == arg_size:
340
+ node.replace_all_uses_with(node.args[0])
341
+ match.erase_nodes(graph)
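A minimal, self-contained sketch of the no-op-elimination idea implemented by remove_no_ops above. This snippet is editorial and not part of the uploaded joint_graph.py; the names f and toy_remove_add_zero are hypothetical. It rewires users of an `x + zeros_like(x)` node to `x` on a traced FX graph and lets dead-code elimination drop the leftover constructor.

import operator

import torch
import torch.fx


def f(x):
    zero = torch.zeros_like(x)
    return (x + zero) * 1.0


def toy_remove_add_zero(gm: torch.fx.GraphModule) -> None:
    graph = gm.graph
    for node in list(graph.nodes):
        # match add(x, zeros_like(x)) and rewire its users to x
        if node.op == "call_function" and node.target is operator.add:
            lhs, rhs = node.args
            if (
                isinstance(rhs, torch.fx.Node)
                and rhs.op == "call_function"
                and rhs.target is torch.zeros_like
            ):
                node.replace_all_uses_with(lhs)
                graph.erase_node(node)
    # the now-unused zeros_like constructor is removed here
    graph.eliminate_dead_code()
    graph.lint()
    gm.recompile()


gm = torch.fx.symbolic_trace(f)
toy_remove_add_zero(gm)
x = torch.randn(4)
assert torch.equal(gm(x), f(x))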
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/misc_patterns.py ADDED
@@ -0,0 +1,130 @@
1
+ import functools
2
+
3
+ from typing import Dict, Set, Tuple
4
+
5
+ import torch
6
+ from torch._dynamo.utils import counters
7
+
8
+ from torch._ops import OpOverload, OpOverloadPacket
9
+ from ..pattern_matcher import fwd_only, register_replacement
10
+
11
+ aten = torch.ops.aten
12
+
13
+
14
+ @functools.lru_cache(None)
15
+ def _misc_patterns_init():
16
+ from .joint_graph import patterns as joint_graph_patterns
17
+ from .post_grad import pass_patterns as post_grad_patterns_all
18
+
19
+ post_grad_patterns = post_grad_patterns_all[1] # medium priority
20
+
21
+ if torch.cuda.is_available():
22
+ # workaround https://github.com/pytorch/pytorch/issues/97894
23
+ device = "cuda"
24
+ else:
25
+ device = "cpu"
26
+
27
+ # These patterns do 2 things
28
+ # 1. Since we know that index is completely unique, we can codegen it using
29
+ # stores instead of atomic adds, which is quite a bit faster.
30
+ # 2. Also, since we are guaranteed that they are completely within bounds,
31
+ # we can use unsafe indexing and skip debug asserts
32
+ def randperm_index_add_pattern(x, y):
33
+ index = torch.randperm(x.shape[0], device=x.device)[: y.shape[0]]
34
+ return torch.index_add(x, dim=0, source=y, index=index), index
35
+
36
+ def randperm_index_add_replacement(x, y):
37
+ index = torch.randperm(x.shape[0], device=x.device)[: y.shape[0]]
38
+ return (
39
+ torch.ops.aten._unsafe_index_put(
40
+ x, (index,), aten._unsafe_index(x, (index,)) + y, accumulate=False
41
+ ),
42
+ index,
43
+ )
44
+
45
+ register_replacement(
46
+ randperm_index_add_pattern,
47
+ randperm_index_add_replacement,
48
+ [torch.empty(4, 8, device=device), torch.empty(2, 8, device=device)],
49
+ fwd_only,
50
+ [post_grad_patterns, joint_graph_patterns],
51
+ )
52
+
53
+ def randperm_index_pattern(x, slice_shape):
54
+ index = torch.randperm(x.shape[0], device=x.device)[:slice_shape]
55
+ return torch.ops.aten.index(x, (index,)), index
56
+
57
+ def randperm_index_replacement(x, slice_shape):
58
+ index = torch.randperm(x.shape[0], device=x.device)[:slice_shape]
59
+ return torch.ops.aten._unsafe_index(x, (index,)), index
60
+
61
+ pattern = register_replacement(
62
+ randperm_index_pattern,
63
+ randperm_index_replacement,
64
+ [torch.empty(4, 8, device=device)],
65
+ fwd_only,
66
+ [post_grad_patterns, joint_graph_patterns],
67
+ scalar_workaround={"slice_shape": 42},
68
+ )
69
+
70
+
71
+ class NumpyCompatNormalization:
72
+ numpy_compat: Dict[str, Tuple[str, ...]] = {
73
+ "dim": ("axis",),
74
+ "keepdim": ("keepdims",),
75
+ "input": ("x", "a", "x1"),
76
+ "other": ("x2",),
77
+ }
78
+ inverse_mapping: Dict[str, str]
79
+ cache: Dict["torch.fx.graph.Target", Set[str]]
80
+
81
+ def __init__(self):
82
+ self.cache = {} # callable -> tuple of replaceable args e.g. ["axis"]
83
+ self.inverse_mapping = {}
84
+ for actual_kwarg, numpy_kwargs in self.numpy_compat.items():
85
+ for numpy_kwarg in numpy_kwargs:
86
+ assert numpy_kwarg not in self.inverse_mapping
87
+ self.inverse_mapping[numpy_kwarg] = actual_kwarg
88
+
89
+ def __call__(self, graph: torch.fx.Graph):
90
+ for node in graph.nodes:
91
+ if node.op != "call_function":
92
+ continue
93
+ if isinstance(node.target, (OpOverload, OpOverloadPacket)):
94
+ # only applies to torch ops; e.g. torch.stack(axis=1) works, torch.ops.aten.stack(axis=1) doesn't.
95
+ continue
96
+ kwargs = node.kwargs
97
+
98
+ if node.target in self.cache:
99
+ replaceable_kwargs = self.cache[node.target]
100
+ else:
101
+ signatures = torch.fx.operator_schemas.get_signature_for_torch_op(
102
+ node.target
103
+ )
104
+ signatures = () if signatures is None else signatures
105
+ replaceable_kwargs = set()
106
+ for sig in signatures:
107
+ for param_name in sig.parameters.keys():
108
+ if param_name in self.numpy_compat:
109
+ replaceable_kwargs.update(self.numpy_compat[param_name])
110
+
111
+ self.cache[node.target] = replaceable_kwargs
112
+
113
+ if not replaceable_kwargs:
114
+ continue
115
+
116
+ new_kwargs = {}
117
+ kwargs_changed = False
118
+ for k, v in kwargs.items():
119
+ if k in replaceable_kwargs:
120
+ kwargs_changed = True
121
+ new_kwargs[self.inverse_mapping[k]] = v
122
+ else:
123
+ new_kwargs[k] = v
124
+
125
+ if kwargs_changed:
126
+ node.kwargs = torch.fx.immutable_collections.immutable_dict(new_kwargs)
127
+ counters["inductor"]["numpy_compat_normalization"] += 1
128
+
129
+
130
+ numpy_compat_normalization = NumpyCompatNormalization()
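A short usage sketch of the normalization pass defined above, editorial and not part of the uploaded file. It assumes this checkout exposes the module at torch._inductor.fx_passes.misc_patterns; the traced function f is hypothetical. numpy-style kwargs (axis/keepdims) on a traced torch.sum call are rewritten to dim/keepdim.

import torch
import torch.fx

from torch._inductor.fx_passes.misc_patterns import numpy_compat_normalization


def f(x):
    # numpy-style keyword names on a torch op
    return torch.sum(x, axis=1, keepdims=True)


gm = torch.fx.symbolic_trace(f)
numpy_compat_normalization(gm.graph)
gm.recompile()

x = torch.randn(2, 3)
print(gm.code)  # the regenerated code now passes dim=1, keepdim=True
assert torch.equal(gm(x), torch.sum(x, dim=1, keepdim=True))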
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py ADDED
@@ -0,0 +1,1204 @@
1
+ import functools
2
+ import operator
3
+ from functools import reduce
4
+ from typing import Any, Tuple
5
+
6
+ import torch
7
+
8
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols
9
+
10
+ from .. import ir
11
+
12
+ from ..lowering import lowerings as L
13
+ from ..pattern_matcher import (
14
+ Arg,
15
+ CallFunction,
16
+ filter_nodes,
17
+ get_arg_value,
18
+ KeywordArg,
19
+ MULTIPLE,
20
+ )
21
+ from ..virtualized import ops
22
+ from .freezing_patterns import register_freezing_graph_pattern
23
+ from .post_grad import register_lowering_pattern
24
+ from .quantization import (
25
+ _register_quantization_lowerings,
26
+ _register_quantization_weight_pack_pass,
27
+ )
28
+
29
+ if torch._C._has_mkldnn:
30
+ aten = torch.ops.aten
31
+ mkldnn = torch.ops.mkldnn
32
+ prims = torch.ops.prims
33
+
34
+ _conv_args = [Arg() for _ in range(10)]
35
+ _linear_args = [Arg() for _ in range(6)]
36
+ _conv_transpose_args = [Arg() for _ in range(11)]
37
+
38
+ def _conv_call(users=1):
39
+ return CallFunction(
40
+ mkldnn._convolution_pointwise.default, *_conv_args, _users=users
41
+ )
42
+
43
+ def _linear_call(users=1):
44
+ return CallFunction(
45
+ mkldnn._linear_pointwise.default, *_linear_args, _users=users
46
+ )
47
+
48
+ def _conv_transpose_call(users=1):
49
+ return CallFunction(
50
+ mkldnn._convolution_transpose_pointwise.default,
51
+ *_conv_transpose_args,
52
+ _users=users,
53
+ )
54
+
55
+ def _to_float(input_call, users=1):
56
+ return CallFunction(
57
+ prims.convert_element_type.default,
58
+ input_call,
59
+ KeywordArg("to_float"),
60
+ _users=users,
61
+ )
62
+
63
+ def _to_bf16(input_call):
64
+ return CallFunction(
65
+ prims.convert_element_type.default,
66
+ input_call,
67
+ KeywordArg("to_bf16"),
68
+ _users=1,
69
+ )
70
+
71
+ def _to_fp16(input_call):
72
+ return CallFunction(
73
+ prims.convert_element_type.default,
74
+ input_call,
75
+ KeywordArg("to_fp16"),
76
+ _users=1,
77
+ )
78
+
79
+ def _unary_fusion_pattern(unary_fusion, call_fn, users, lowp_dtype):
80
+         # only insert to_dtype if lowp_dtype is set
81
+ computation_call = (
82
+ _to_float(call_fn(), users=users) if lowp_dtype else call_fn(users=users)
83
+ )
84
+ out = unary_fusion(computation_call)
85
+ if lowp_dtype == torch.bfloat16:
86
+ return _to_bf16(out)
87
+ elif lowp_dtype == torch.float16:
88
+ return _to_fp16(out)
89
+ else:
90
+ return out
91
+
92
+ def _gelu_fusion_1(computation_call):
93
+ return CallFunction(
94
+ aten.mul,
95
+ CallFunction(aten.mul, computation_call, 0.5),
96
+ CallFunction(
97
+ aten.add,
98
+ CallFunction(
99
+ aten.erf,
100
+ CallFunction(aten.mul, computation_call, 0.7071067811865476),
101
+ ),
102
+ 1,
103
+ ),
104
+ )
105
+
106
+ def _gelu_fusion_2(computation_call):
107
+ return CallFunction(
108
+ aten.mul,
109
+ CallFunction(aten.mul, computation_call, 0.5),
110
+ CallFunction(
111
+ aten.add,
112
+ CallFunction(
113
+ aten.tanh,
114
+ CallFunction(
115
+ aten.mul,
116
+ CallFunction(
117
+ aten.add,
118
+ computation_call,
119
+ CallFunction(
120
+ aten.mul,
121
+ CallFunction(
122
+ aten.mul,
123
+ CallFunction(
124
+ aten.mul, computation_call, computation_call
125
+ ),
126
+ computation_call,
127
+ ),
128
+ 0.044715,
129
+ ),
130
+ ),
131
+ 0.7978845608028654,
132
+ ),
133
+ ),
134
+ 1,
135
+ ),
136
+ )
137
+
138
+ def _hardswish_fusion(computation_call):
139
+ return CallFunction(
140
+ aten.div,
141
+ CallFunction(
142
+ aten.mul,
143
+ computation_call,
144
+ CallFunction(
145
+ aten.clamp_max,
146
+ CallFunction(
147
+ aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
148
+ ),
149
+ 6,
150
+ ),
151
+ ),
152
+ 6,
153
+ )
154
+
155
+ def _silu_fusion(computation_call):
156
+ return CallFunction(
157
+ aten.mul, computation_call, CallFunction(aten.sigmoid, computation_call)
158
+ )
159
+
160
+ def _hardsigmoid_fusion(computation_call):
161
+ return CallFunction(
162
+ aten.div,
163
+ CallFunction(
164
+ aten.clamp_max,
165
+ CallFunction(
166
+ aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
167
+ ),
168
+ 6,
169
+ ),
170
+ 6,
171
+ )
172
+
173
+ def _leaky_relu_fusion(computation_call):
174
+ return CallFunction(
175
+ aten.where,
176
+ CallFunction(aten.gt, computation_call, 0),
177
+ computation_call,
178
+ CallFunction(aten.mul, computation_call, KeywordArg("negative_slope")),
179
+ )
180
+
181
+ def _hardtanh_fusion(computation_call):
182
+ return CallFunction(
183
+ aten.clamp_max,
184
+ CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")),
185
+ KeywordArg("max_value"),
186
+ )
187
+
188
+ def _combined_fusion(computation_call, elementwise_op):
189
+ return CallFunction(elementwise_op, computation_call)
190
+
191
+ # binary_op(other, computation_op)
192
+ def _binary_fusion_v1(computation_call, binary_fn):
193
+ return CallFunction(binary_fn, KeywordArg("other"), computation_call)
194
+
195
+ # binary_op(computation_op, other)
196
+ def _binary_fusion_v2(computation_call, binary_fn):
197
+ return CallFunction(binary_fn, computation_call, KeywordArg("other"))
198
+
199
+ def _is_single_computation_op(computation_op):
200
+ def fn(match):
201
+ computation_nodes = filter_nodes(match.nodes, computation_op)
202
+ if len(computation_nodes) < 1:
203
+ return False
204
+ if any(n.args[-3] != "none" for n in computation_nodes):
205
+ return False
206
+ return True
207
+
208
+ return fn
209
+
210
+ def _is_valid_computation_unary_fusion(computation_op, lowp_dtype=None):
211
+ def fn(match):
212
+ matched = _is_single_computation_op(computation_op)(match)
213
+ computation_node = filter_nodes(match.nodes, computation_op)[0]
214
+ if lowp_dtype:
215
+ conversion_dtype_nodes = filter_nodes(
216
+ match.nodes, prims.convert_element_type.default
217
+ )
218
+ if len(conversion_dtype_nodes) != 2:
219
+ return False
220
+ # fusion pattern is always in the form of computation_op + to_float32 + unary_op + to_bfloat16
221
+ if computation_node == conversion_dtype_nodes[0].args[0]:
222
+ to_float = conversion_dtype_nodes[0].args[1]
223
+ to_lp = conversion_dtype_nodes[1].args[1]
224
+ else:
225
+ to_float = conversion_dtype_nodes[1].args[1]
226
+ to_lp = conversion_dtype_nodes[0].args[1]
227
+ matched = matched and to_float == torch.float and to_lp == lowp_dtype
228
+ return matched
229
+
230
+ return fn
231
+
232
+ def _register_unary_fusion_lowering(
233
+ pattern, unary_attr, computation_op, lowp_dtype=None
234
+ ):
235
+ @register_lowering_pattern(
236
+ pattern,
237
+ extra_check=_is_valid_computation_unary_fusion(computation_op, lowp_dtype),
238
+ )
239
+ def fn(match, *args, **kwargs):
240
+ computation_args = list(args)[:-3] + [
241
+ unary_attr.op_name,
242
+ unary_attr.scalars_attr,
243
+ unary_attr.algorithm_attr,
244
+ ]
245
+ return L[computation_op](*computation_args)
246
+
247
+ return fn
248
+
249
+ def _register_leaky_relu_fusion_lowering(pattern, computation_op, lowp_dtype=None):
250
+ @register_lowering_pattern(
251
+ pattern, extra_check=_is_single_computation_op(computation_op)
252
+ )
253
+ def fn(match, *args, **kwargs):
254
+ negative_slope = kwargs.get("negative_slope")
255
+ if isinstance(negative_slope, ir.TensorBox):
256
+ matched = False
257
+ else: # inp is a Number
258
+ matched = True
259
+ if lowp_dtype:
260
+ dtype1 = kwargs.get("to_float")
261
+ dtype2 = (
262
+ kwargs.get("to_bf16")
263
+ if lowp_dtype == torch.bfloat16
264
+ else kwargs.get("to_fp16")
265
+ )
266
+ matched = matched and dtype1 == torch.float and dtype2 == lowp_dtype
267
+ computation_args = list(args)
268
+ if matched:
269
+ computation_args = computation_args[:-3] + [
270
+ "leaky_relu",
271
+ [negative_slope],
272
+ "",
273
+ ]
274
+ return L[computation_op](*computation_args)
275
+ else:
276
+ # computation_args += ["none", [], ""]
277
+ out = L[computation_op](*computation_args)
278
+ if lowp_dtype:
279
+ out = L[prims.convert_element_type.default](out, dtype=torch.float)
280
+ out = L[aten.where](
281
+ L[aten.gt](out, 0),
282
+ out,
283
+ L[aten.mul](out, negative_slope),
284
+ )
285
+ if lowp_dtype:
286
+ out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
287
+ return out
288
+
289
+ return fn
290
+
291
+ def _register_hardtanh_fusion_lowering(pattern, computation_op, lowp_dtype=None):
292
+ @register_lowering_pattern(
293
+ pattern, extra_check=_is_single_computation_op(computation_op)
294
+ )
295
+ def fn(match, *args, **kwargs):
296
+ min_value = kwargs.get("min_value")
297
+ max_value = kwargs.get("max_value")
298
+ if isinstance(min_value, ir.TensorBox) or isinstance(
299
+ max_value, ir.TensorBox
300
+ ):
301
+ matched = False
302
+ else: # inp is a Number
303
+ assert max_value is not None
304
+ matched = min_value <= max_value
305
+ if lowp_dtype:
306
+ dtype1 = kwargs.get("to_float")
307
+ dtype2 = (
308
+ kwargs.get("to_bf16")
309
+ if lowp_dtype == torch.bfloat16
310
+ else kwargs.get("to_fp16")
311
+ )
312
+ matched = matched and dtype1 == torch.float and dtype2 == lowp_dtype
313
+ computation_args = list(args)
314
+ if matched:
315
+ computation_args = computation_args[:-3] + [
316
+ "hardtanh",
317
+ [min_value, max_value],
318
+ "",
319
+ ]
320
+ return L[computation_op](*computation_args)
321
+ else:
322
+ out = L[computation_op](*computation_args)
323
+ if lowp_dtype:
324
+ out = L[prims.convert_element_type.default](out, dtype=torch.float)
325
+ out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value)
326
+ if lowp_dtype:
327
+ out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
328
+ return out
329
+
330
+ return fn
331
+
332
+ _binary_attr = {
333
+ aten.add: "add",
334
+ ops.add: "add",
335
+ aten.sub: "sub",
336
+ ops.sub: "sub",
337
+ }
338
+
339
+ def _is_valid_binary(match, fn):
340
+ binary_nodes = filter_nodes(match.nodes, fn)
341
+ if len(binary_nodes) < 1:
342
+ return False
343
+
344
+ def get_meta_value(argument: torch.fx.node.Argument):
345
+ # Only torch.fx.Node is expected to have meta.
346
+ if isinstance(argument, torch.fx.Node):
347
+ return argument.meta.get("val", None)
348
+ return None
349
+
350
+ if any(
351
+ not isinstance(get_meta_value(n.args[0]), torch.Tensor)
352
+ or not isinstance(get_meta_value(n.args[1]), torch.Tensor)
353
+ for n in binary_nodes
354
+ ):
355
+ return False
356
+ # check alpha is one.
357
+ if any(
358
+ get_arg_value(n, 2, kwarg_name="alpha") != 1.0
359
+ and get_arg_value(n, 2, kwarg_name="alpha") is not None
360
+ for n in binary_nodes
361
+ ):
362
+ return False
363
+ if any(
364
+ get_meta_value(n.args[0]).size() != get_meta_value(n.args[1]).size()
365
+ or get_meta_value(n.args[0]).device != get_meta_value(n.args[1]).device
366
+ or get_meta_value(n.args[0]).dtype != get_meta_value(n.args[1]).dtype
367
+ for n in binary_nodes
368
+ ):
369
+ return False
370
+ # check args[0] and args[1] is not same
371
+ if any(n.args[0] == n.args[1] for n in binary_nodes):
372
+ return False
373
+ return True
374
+
375
+ def _is_valid_computation_binary(computation_op, binary_op, other_index=None):
376
+ def fn(match):
377
+ if not _is_single_computation_op(computation_op)(match):
378
+ return False
379
+ if not _is_valid_binary(match, binary_op):
380
+ return False
381
+ return True
382
+
383
+ return fn
384
+
385
+ def _get_remaining_users(extra_input_node, compute_node):
386
+ # Think about this pattern:
387
+ # ReLU
388
+ # / \
389
+ # Conv1
390
+ # / \
391
+ # Conv2
392
+ # \ /
393
+ # Add
394
+         # Although the extra input node (ReLU) has more than 1 user: Conv1 and Add.
395
+ # The Conv1 is the ancestor node of the current compute node (Conv2).
396
+ # This indicates that the buffer of ReLU has completed all its usage,
397
+ # So we can safely make changes to it now by doing Conv2->Add inplace fusion.
398
+ # Take above case as example:
399
+ # * extra_input_node: ReLU
400
+ # * compute_node: Conv2
401
+ # _get_remaining_users will return the users of extra_input_node which are not
402
+ # ancestor node of compute_node.
403
+ def _is_ancestor_node(_current_node, _ancestor_node):
404
+ # Check whether _ancestor_node is the ancestor node of _current_node
405
+ _node_list = [_current_node]
406
+ _visited_nodes = set()
407
+ while len(_node_list) != 0:
408
+ _current_node = _node_list.pop(0)
409
+ if _current_node not in _visited_nodes:
410
+ _visited_nodes.add(_current_node)
411
+ if _current_node == _ancestor_node:
412
+ return True
413
+ elif isinstance(
414
+ _current_node, torch.fx.Node
415
+ ) and _current_node.op not in ["placeholder", "output", "get_attr"]:
416
+ for input in _current_node.all_input_nodes:
417
+ _node_list.append(input) # noqa: PERF402
418
+ return False
419
+
420
+ return [
421
+ user
422
+ for user in list(extra_input_node.users)
423
+ if not _is_ancestor_node(compute_node, user)
424
+ ]
425
+
426
+ def _is_valid_computation_binary_inplace(computation_op, binary_op, other_index):
427
+ def fn(match):
428
+ if not _is_valid_computation_binary(computation_op, binary_op)(match):
429
+ return False
430
+ binary_nodes = filter_nodes(match.nodes, binary_op)
431
+
432
+ def _get_compute_node(_binary_node, _other_index):
433
+ assert (
434
+ len(_binary_node.all_input_nodes) == 2
435
+ ), "Binary node should have 2 input nodes."
436
+ _compute_index = 1 if (_other_index == 0) else 0
437
+ return _binary_node.args[_compute_index]
438
+
439
+ def _other_input_not_inplaceable(_binary_node, _other_index):
440
+ _compute_node = _get_compute_node(_binary_node, _other_index)
441
+ return (
442
+ len(
443
+ _get_remaining_users(
444
+ _binary_node.args[_other_index], _compute_node
445
+ )
446
+ )
447
+ > 1
448
+ or _binary_node.args[_other_index] == _compute_node.args[0]
449
+ )
450
+
451
+ if any(_other_input_not_inplaceable(n, other_index) for n in binary_nodes):
452
+ return False
453
+ if any(
454
+ n.args[other_index].op in ["placeholder", "output"]
455
+ for n in binary_nodes
456
+ ):
457
+ return False
458
+ return True
459
+
460
+ return fn
461
+
462
+ def _register_binary_unary_fusion_lowering(
463
+ pattern,
464
+ computation_op,
465
+ binary_op,
466
+ fusion_op,
467
+ unary_attr=None,
468
+ ):
469
+ @register_lowering_pattern(
470
+ pattern, extra_check=_is_valid_computation_binary(computation_op, binary_op)
471
+ )
472
+ def fn(match, *args, **kwargs):
473
+ other = kwargs.get("other")
474
+ assert isinstance(other, ir.TensorBox)
475
+ binary_attr = _binary_attr[binary_op]
476
+ args_list = list(args)
477
+ computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
478
+ if len(args_list) > 6:
479
+ if unary_attr is not None:
480
+ computation_args += [
481
+ 1.0,
482
+ unary_attr.op_name,
483
+ unary_attr.scalars_attr,
484
+ unary_attr.algorithm_attr,
485
+ ]
486
+ else:
487
+ computation_args += [1.0, None, [], None]
488
+ return L[fusion_op](*computation_args)
489
+
490
+ return fn
491
+
492
+ def _can_be_inplace(_other):
493
+ if isinstance(_other.data, ir.View):
494
+ return _can_be_inplace(_other.data)
495
+ else:
496
+ return not (
497
+ isinstance(_other.data, ir.ReinterpretView)
498
+ or isinstance(
499
+ _other.get_layout(), (ir.MutationLayout, ir.AliasedLayout)
500
+ )
501
+ )
502
+
503
+ def _register_binary_unary_maybe_inplace_fusion_lowering(
504
+ pattern,
505
+ computation_op,
506
+ binary_op,
507
+ inplace_fusion_op,
508
+ outplace_fusion_op,
509
+ unary_attr=None,
510
+ other_index=None,
511
+ ):
512
+ @register_lowering_pattern(
513
+ pattern,
514
+ extra_check=_is_valid_computation_binary_inplace(
515
+ computation_op, binary_op, other_index
516
+ ),
517
+ )
518
+ def fn(match, *args, **kwargs):
519
+ other = kwargs.get("other")
520
+ assert isinstance(other, ir.TensorBox)
521
+ binary_attr = _binary_attr[binary_op]
522
+ args_list = list(args)
523
+ computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
524
+ if len(args_list) > 6:
525
+ if unary_attr is not None:
526
+ computation_args += [
527
+ 1.0,
528
+ unary_attr.op_name,
529
+ unary_attr.scalars_attr,
530
+ unary_attr.algorithm_attr,
531
+ ]
532
+ else:
533
+ computation_args += [1.0, None, [], None]
534
+             # Make sure the other is not an alias or mutation (the fx side doesn't have such info).
535
+ other.realize()
536
+ if not _can_be_inplace(other):
537
+ return L[outplace_fusion_op](*computation_args)
538
+ return L[inplace_fusion_op](*computation_args)
539
+
540
+ return fn
541
+
542
+ computation_ops = [
543
+ mkldnn._convolution_pointwise.default,
544
+ mkldnn._linear_pointwise.default,
545
+ mkldnn._convolution_transpose_pointwise.default,
546
+ ]
547
+
548
+ class UnaryAttr:
549
+ def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
550
+ self.op_name = op_name
551
+ self.scalars_attr = scalars_attr if scalars_attr else []
552
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
553
+
554
+ def _register_unary_fusion():
555
+ computation_call_fns = [_conv_call, _linear_call, _conv_transpose_call]
556
+
557
+ def _unary_fusion_patterns(lowp_dtype):
558
+ replacement_unary_fusion_patterns = {
559
+ UnaryAttr("gelu", algorithm_attr="tanh"): [
560
+ _unary_fusion_pattern(_gelu_fusion_2, call_fn, 4, lowp_dtype)
561
+ for call_fn in computation_call_fns
562
+ ],
563
+ UnaryAttr("gelu", algorithm_attr="none"): [
564
+ _unary_fusion_pattern(_gelu_fusion_1, call_fn, 2, lowp_dtype)
565
+ for call_fn in computation_call_fns
566
+ ],
567
+ UnaryAttr("hardswish"): [
568
+ _unary_fusion_pattern(_hardswish_fusion, call_fn, 2, lowp_dtype)
569
+ for call_fn in computation_call_fns
570
+ ],
571
+ UnaryAttr("hardsigmoid"): [
572
+ _unary_fusion_pattern(_hardsigmoid_fusion, call_fn, 1, lowp_dtype)
573
+ for call_fn in computation_call_fns
574
+ ],
575
+ UnaryAttr("swish"): [
576
+ _unary_fusion_pattern(_silu_fusion, call_fn, 2, lowp_dtype)
577
+ for call_fn in computation_call_fns
578
+ ],
579
+ }
580
+ if not lowp_dtype:
581
+ call_user1 = [call_fn(users=1) for call_fn in computation_call_fns]
582
+ replacement_unary_fusion_patterns.update(
583
+ {
584
+ UnaryAttr("relu"): [
585
+ _combined_fusion(u, aten.relu) for u in call_user1
586
+ ],
587
+ UnaryAttr("sigmoid"): [
588
+ _combined_fusion(u, aten.sigmoid) for u in call_user1
589
+ ],
590
+ UnaryAttr("tanh"): [
591
+ _combined_fusion(u, aten.tanh) for u in call_user1
592
+ ],
593
+ }
594
+ )
595
+
596
+ return replacement_unary_fusion_patterns
597
+
598
+ for lowp_dtype in [torch.bfloat16, torch.float16, None]:
599
+ replace_patterns = _unary_fusion_patterns(lowp_dtype)
600
+ for unary_attr, patterns in replace_patterns.items():
601
+ _register_unary_fusion_lowering(
602
+ patterns[0], unary_attr, computation_ops[0], lowp_dtype
603
+ )
604
+ _register_unary_fusion_lowering(
605
+ patterns[1], unary_attr, computation_ops[1], lowp_dtype
606
+ )
607
+ _register_unary_fusion_lowering(
608
+ patterns[2], unary_attr, computation_ops[2], lowp_dtype
609
+ )
610
+ _leaky_relu_patterns = [
611
+ _unary_fusion_pattern(_leaky_relu_fusion, call_fn, 3, lowp_dtype)
612
+ for call_fn in computation_call_fns
613
+ ]
614
+ for pattern, computation_op in zip(_leaky_relu_patterns, computation_ops):
615
+ _register_leaky_relu_fusion_lowering(
616
+ pattern, computation_op, lowp_dtype
617
+ )
618
+ hardtanh_patterns = [
619
+ _unary_fusion_pattern(_hardtanh_fusion, call_fn, 1, lowp_dtype)
620
+ for call_fn in computation_call_fns
621
+ ]
622
+ for pattern, computation_op in zip(hardtanh_patterns, computation_ops):
623
+ _register_hardtanh_fusion_lowering(pattern, computation_op, lowp_dtype)
624
+
625
+ def _register_inplace_fusion():
626
+ binary_ops = [aten.add, ops.add]
627
+ inplace_fusion_op = mkldnn._convolution_pointwise_.binary
628
+ outplace_fusion_op = mkldnn._convolution_pointwise.binary
629
+ conv_call = _conv_call(users=1)
630
+ conv_op = computation_ops[0]
631
+ for binary_op in binary_ops:
632
+ binary_v1 = _binary_fusion_v1(conv_call, binary_op)
633
+ binary_unary_v1 = _combined_fusion(binary_v1, aten.relu)
634
+ _register_binary_unary_maybe_inplace_fusion_lowering(
635
+ binary_unary_v1,
636
+ conv_op,
637
+ binary_op,
638
+ inplace_fusion_op,
639
+ outplace_fusion_op,
640
+ other_index=0,
641
+ unary_attr=UnaryAttr("relu"),
642
+ )
643
+ _register_binary_unary_maybe_inplace_fusion_lowering(
644
+ binary_v1,
645
+ conv_op,
646
+ binary_op,
647
+ inplace_fusion_op,
648
+ outplace_fusion_op,
649
+ other_index=0,
650
+ )
651
+ binary_v2 = _binary_fusion_v2(conv_call, binary_op)
652
+ binary_unary_v2 = _combined_fusion(binary_v2, aten.relu)
653
+ _register_binary_unary_maybe_inplace_fusion_lowering(
654
+ binary_unary_v2,
655
+ conv_op,
656
+ binary_op,
657
+ inplace_fusion_op,
658
+ outplace_fusion_op,
659
+ other_index=1,
660
+ unary_attr=UnaryAttr("relu"),
661
+ )
662
+ _register_binary_unary_maybe_inplace_fusion_lowering(
663
+ binary_v2,
664
+ conv_op,
665
+ binary_op,
666
+ inplace_fusion_op,
667
+ outplace_fusion_op,
668
+ other_index=1,
669
+ )
670
+
671
+ def _register_binary_fusion():
672
+ binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
673
+ fusion_ops = [
674
+ mkldnn._convolution_pointwise.binary,
675
+ mkldnn._linear_pointwise.binary,
676
+ ]
677
+ _computation_user_1 = [_conv_call(users=1), _linear_call(users=1)]
678
+ for computation_call, computation_op, fusion_op in zip(
679
+ _computation_user_1, computation_ops[:-1], fusion_ops
680
+ ):
681
+ for binary_op in binary_ops:
682
+ pattern = _binary_fusion_v2(computation_call, binary_op)
683
+ _register_binary_unary_fusion_lowering(
684
+ pattern, computation_op, binary_op, fusion_op
685
+ )
686
+
687
+ for binary_op in [aten.add, ops.add]:
688
+ pattern = _binary_fusion_v1(computation_call, binary_op)
689
+ _register_binary_unary_fusion_lowering(
690
+ pattern, computation_op, binary_op, fusion_op
691
+ )
692
+
693
+ def _register_binary_unary_fusion():
694
+ binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
695
+ fusion_ops = [mkldnn._convolution_pointwise.binary]
696
+ _computation_user_1 = [_conv_call(users=1)]
697
+ for computation_call, computation_op, fusion_op in zip(
698
+ _computation_user_1, computation_ops[:-1], fusion_ops
699
+ ):
700
+ for binary_op in binary_ops:
701
+ pattern_v1 = _combined_fusion(
702
+ _binary_fusion_v2(computation_call, binary_op), aten.relu
703
+ )
704
+ _register_binary_unary_fusion_lowering(
705
+ pattern_v1,
706
+ computation_op,
707
+ binary_op,
708
+ fusion_op,
709
+ unary_attr=UnaryAttr("relu"),
710
+ )
711
+ for binary_op in [aten.add, ops.add]:
712
+ pattern_v2 = _combined_fusion(
713
+ _binary_fusion_v1(computation_call, binary_op), aten.relu
714
+ )
715
+ _register_binary_unary_fusion_lowering(
716
+ pattern_v2,
717
+ computation_op,
718
+ binary_op,
719
+ fusion_op,
720
+ unary_attr=UnaryAttr("relu"),
721
+ )
722
+
723
+ def _recover_linear():
724
+         # convert reshape+linear+reshape to a single linear so the fusion path can be applied.
725
+ @register_freezing_graph_pattern(
726
+ CallFunction(
727
+ aten.reshape.default,
728
+ CallFunction(
729
+ mkldnn._linear_pointwise.default,
730
+ CallFunction(
731
+ aten.reshape.default,
732
+ Arg(),
733
+ KeywordArg("reshape_1"),
734
+ _users=MULTIPLE,
735
+ ),
736
+ Arg(),
737
+ Arg(),
738
+ Arg(),
739
+ Arg(),
740
+ Arg(),
741
+ ),
742
+ KeywordArg("reshape_2"),
743
+ ),
744
+ pass_number=1,
745
+ )
746
+ def reshape_linear_reshape_pattern(match, *args, **kwargs):
747
+ reshape_1 = kwargs.get("reshape_1")
748
+ reshape_2 = kwargs.get("reshape_2")
749
+ assert isinstance(reshape_1, list)
750
+ assert isinstance(reshape_2, list)
751
+ assert len(reshape_1) == 2
752
+ dynamic_shapes = not all(
753
+ isinstance(x, int) for x in ([reshape_1[0]] + reshape_2[:-1])
754
+ )
755
+
756
+ graph = match.graph
757
+ reshape_2_node = match.output_node()
758
+ linear_input_node = reshape_2_node.args[0].args[0].args[0]
759
+ # check linear's input's shape[:-1] == reshape_2[:-1]
760
+ # and check product(reshape_2[:-1]) == reshape_1[0]
761
+ if dynamic_shapes:
762
+                 # TODO(Haozhe): investigate how to add a guard here
763
+ return
764
+ else:
765
+ can_remove_reshape = linear_input_node.meta.get("val").shape[
766
+ :-1
767
+ ] == torch.Size(reshape_2[:-1])
768
+ can_remove_reshape = can_remove_reshape and (
769
+ reduce(operator.mul, reshape_2[:-1]) == reshape_1[0]
770
+ )
771
+
772
+ if can_remove_reshape:
773
+ repl = graph.call_function(mkldnn._linear_pointwise.default, args)
774
+ repl.meta.update(reshape_2_node.meta)
775
+ reshape_2_node.replace_all_uses_with(repl)
776
+ old_linear_node = reshape_2_node.args[0]
777
+ reshape_1_node = old_linear_node.args[0]
778
+ graph.erase_node(reshape_2_node)
779
+ graph.erase_node(old_linear_node)
780
+ if len(reshape_1_node.users) == 0:
781
+ graph.erase_node(reshape_1_node)
782
+
783
+ def is_linear_add_bias(match):
784
+ add_node = match.output_node()
785
+ linear_node = add_node.args[0]
786
+ weight_meta = linear_node.args[1].meta.get("val")
787
+ bias_meta = add_node.args[1].meta.get("val")
788
+ if weight_meta is None or bias_meta is None:
789
+ return False
790
+ return (
791
+ linear_node.args[2] is None
792
+ and bias_meta.dim() == 1
793
+ and bias_meta.size(0) == weight_meta.size(0)
794
+ )
795
+
796
+         # convert linear+bias to a single linear so the fusion path can be applied.
797
+ @register_freezing_graph_pattern(
798
+ CallFunction(
799
+ aten.add.Tensor,
800
+ CallFunction(mkldnn._linear_pointwise.default, *_linear_args),
801
+ Arg(),
802
+ ),
803
+ pass_number=1,
804
+ extra_check=is_linear_add_bias,
805
+ )
806
+ def linear_bias_pattern(match, *args):
807
+ graph = match.graph
808
+ add_node = match.output_node()
809
+ linear_node = add_node.args[0]
810
+ new_args = list(linear_node.args)
811
+ new_args[2] = add_node.args[1]
812
+ repl = graph.call_function(
813
+ mkldnn._linear_pointwise.default, tuple(new_args)
814
+ )
815
+ repl.meta.update(add_node.meta)
816
+ add_node.replace_all_uses_with(repl)
817
+ match.erase_nodes(graph)
818
+
819
+ def _is_packable_mkldnn_rnn_layer(match):
820
+ lstm_node = match.output_node()
821
+ POS_WEIGHTS = [1, 2]
822
+ POS_INPUTS = [0, 5, 6]
823
+ POS_ARGS = POS_WEIGHTS + POS_INPUTS
824
+ # Weights should be Constant
825
+ if any(
826
+ lstm_node.args[POS_WEIGHT].op != "get_attr" for POS_WEIGHT in POS_WEIGHTS
827
+ ):
828
+ return False
829
+
830
+ # Meta info for weights and inputs should be available
831
+ if any(lstm_node.args[POS_ARG].meta.get("val") is None for POS_ARG in POS_ARGS):
832
+ return False
833
+
834
+ # Check device
835
+ if any(
836
+ lstm_node.args[POS_ARG].meta.get("val").device.type != "cpu"
837
+ for POS_ARG in POS_ARGS
838
+ ):
839
+ return False
840
+
841
+ # Check dtype
842
+ if any(
843
+ lstm_node.args[POS_ARG].meta.get("val").dtype == torch.bfloat16
844
+ and not mkldnn._is_mkldnn_bf16_supported()
845
+ for POS_ARG in POS_ARGS
846
+ ):
847
+ return False
848
+ if any(
849
+ lstm_node.args[POS_ARG].meta.get("val").dtype == torch.float16
850
+ and not mkldnn._is_mkldnn_fp16_supported()
851
+ for POS_ARG in POS_ARGS
852
+ ):
853
+ return False
854
+
855
+ return True
856
+
857
+ def _is_packable_convolution(match):
858
+ """
859
+ Check if the node is supported for MKLDNN convolution.
860
+ """
861
+ conv_node = match.output_node()
862
+ input_meta_value = conv_node.args[0].meta.get("val")
863
+ weight_meta_value = conv_node.args[1].meta.get("val")
864
+ if input_meta_value is None or weight_meta_value is None:
865
+ return False
866
+ input_size = input_meta_value.shape
867
+ if conv_node.args[1].op != "get_attr":
868
+ return False
869
+ for meta_value in [input_meta_value, weight_meta_value]:
870
+ if (
871
+ meta_value is None
872
+ or meta_value.device.type != "cpu"
873
+ or meta_value.dim() != 4
874
+ ):
875
+ return False
876
+ if (
877
+ input_meta_value.dtype == torch.bfloat16
878
+ or weight_meta_value.dtype == torch.bfloat16
879
+ ):
880
+ if not mkldnn._is_mkldnn_bf16_supported():
881
+ return False
882
+ if (
883
+ input_meta_value.dtype == torch.float16
884
+ or weight_meta_value.dtype == torch.float16
885
+ ):
886
+ if not mkldnn._is_mkldnn_fp16_supported():
887
+ return False
888
+ is_transposed = conv_node.args[-3]
889
+ if is_transposed:
890
+ # TODO: Support dynamic shape case for MKLDNN conv transpose.
891
+ if has_free_symbols(input_size):
892
+ return False
893
+ groups = conv_node.args[-1]
894
+ in_channels = weight_meta_value.size(0)
895
+ # doesn't support group_depthwise_conv_transpose.
896
+ if groups > 1 and groups == in_channels:
897
+ return False
898
+ # Port from: aten/src/ATen/native/Convolution.cpp:is_output_padding_big
899
+ output_paddings = conv_node.args[-2]
900
+ strides = conv_node.args[3]
901
+ if any(
902
+ output_padding >= stride
903
+ for output_padding, stride in zip(output_paddings, strides)
904
+ ):
905
+ return False
906
+ return True
907
+
908
+ def _is_packable_linear(match):
909
+ """
910
+ Check if the node is supported for MKLDNN linear.
911
+ """
912
+ linear_node = match.output_node()
913
+ # weight_idx is 1 for aten.mm and is 2 for aten.addmm
914
+ weight_idx = 2 if linear_node.target == aten.addmm.default else 1
915
+ if linear_node.args[weight_idx].op != "get_attr":
916
+ return False
917
+ input_meta_value = linear_node.args[weight_idx - 1].meta.get("val")
918
+ weight_meta_value = linear_node.args[weight_idx].meta.get("val")
919
+ if input_meta_value is None or weight_meta_value is None:
920
+ return False
921
+ batch_size = input_meta_value.shape[0]
922
+ is_lp_weight = weight_meta_value.dtype in (
923
+ torch.bfloat16,
924
+ torch.float16,
925
+ )
926
+ # on x86, for fp32, mkl should be enabled and batch_size should not be a free symbol.
927
+ # on aarch64, use mkldnn op for fp32 as well if acl is enabled
928
+ if (
929
+ not is_lp_weight
930
+ and not mkldnn._is_mkldnn_acl_supported()
931
+ and ((not torch._C.has_mkl) or has_free_symbols(batch_size))
932
+ ):
933
+ return False
934
+ for meta_value in [input_meta_value, weight_meta_value]:
935
+ if (
936
+ meta_value is None
937
+ or meta_value.device.type != "cpu"
938
+ or meta_value.dim() != 2
939
+ ):
940
+ return False
941
+ if weight_idx == 2:
942
+ bias_meta_value = linear_node.args[0].meta.get("val")
943
+ if (
944
+ bias_meta_value is None
945
+ or meta_value.device.type != "cpu"
946
+ or bias_meta_value.dim() != 1
947
+ or bias_meta_value.size(0) != weight_meta_value.size(1)
948
+ ):
949
+ return False
950
+
951
+ if (
952
+ input_meta_value.dtype == torch.bfloat16
953
+ or weight_meta_value.dtype == torch.bfloat16
954
+ ):
955
+ if not mkldnn._is_mkldnn_bf16_supported():
956
+ return False
957
+ if (
958
+ input_meta_value.dtype == torch.float16
959
+ or weight_meta_value.dtype == torch.float16
960
+ ):
961
+ if not mkldnn._is_mkldnn_fp16_supported():
962
+ return False
963
+ return True
964
+
965
+ _aten_conv_args = (
966
+ Arg(),
967
+ Arg(),
968
+ Arg(),
969
+ Arg(),
970
+ Arg(),
971
+ Arg(),
972
+ KeywordArg("is_transposed"),
973
+ Arg(),
974
+ Arg(),
975
+ )
976
+
977
+ _aten_mkldnn_rnn_layer_args = (
978
+ Arg(), # input
979
+ Arg(), # weight0
980
+ Arg(), # weight1
981
+ Arg(), # weight2
982
+ Arg(), # weight3
983
+ Arg(), # hx_
984
+ Arg(), # cx_
985
+ KeywordArg("reverse"), # reverse
986
+ Arg(), # batch_sizes
987
+ Arg(), # mode
988
+ Arg(), # hidden_size
989
+ Arg(), # num_layers
990
+ Arg(), # has_biases
991
+ Arg(), # bidirectional
992
+ Arg(), # batch_first
993
+ Arg(), # train
994
+ )
995
+
996
+ def _register_weight_pack_pass():
997
+ @register_freezing_graph_pattern(
998
+ CallFunction(aten.convolution.default, *_aten_conv_args),
999
+ extra_check=_is_packable_convolution,
1000
+ )
1001
+ def convolution(match, *args, **kwargs):
1002
+ is_transposed = kwargs.get("is_transposed")
1003
+ assert isinstance(is_transposed, bool)
1004
+ graph = match.graph
1005
+ conv_node = match.output_node()
1006
+ input_size = conv_node.args[0].meta.get("val").shape
1007
+ with graph.inserting_before(conv_node):
1008
+ constant_args = [args[4], args[3], args[5], args[-1]]
1009
+ packed_weight_op = mkldnn._reorder_convolution_weight
1010
+ packed_conv_op = mkldnn._convolution_pointwise.default
1011
+ if is_transposed:
1012
+ constant_args.insert(1, args[-2]) # output_padding
1013
+ packed_weight_op = mkldnn._reorder_convolution_transpose_weight
1014
+ packed_conv_op = mkldnn._convolution_transpose_pointwise.default
1015
+ if not has_free_symbols(input_size):
1016
+ packed_weight_inputs = (
1017
+ (args[1],) + tuple(constant_args) + (input_size,)
1018
+ )
1019
+ packed_weight_node = graph.create_node(
1020
+ "call_function", packed_weight_op, args=packed_weight_inputs
1021
+ )
1022
+ else:
1023
+ assert not is_transposed
1024
+ # For dynamic shape case, we need to pack weight in runtime.
1025
+ packed_weight_node = args[1]
1026
+ packed_conv_inputs = (
1027
+ (args[0], packed_weight_node, args[2])
1028
+ + tuple(constant_args)
1029
+ + ("none", [], "")
1030
+ )
1031
+ packed_conv_node = graph.create_node(
1032
+ "call_function", packed_conv_op, tuple(packed_conv_inputs)
1033
+ )
1034
+ conv_node.replace_all_uses_with(packed_conv_node)
1035
+ packed_conv_node.meta.update(conv_node.meta)
1036
+ graph.erase_node(conv_node)
1037
+
1038
+ @register_freezing_graph_pattern(
1039
+ CallFunction(aten.mkldnn_rnn_layer.default, *_aten_mkldnn_rnn_layer_args),
1040
+ extra_check=_is_packable_mkldnn_rnn_layer,
1041
+ )
1042
+ def mkldnn_rnn_layer(match, *args, **kwargs):
1043
+ def get_item(graph, node, index):
1044
+ return graph.call_function(operator.getitem, (node, index))
1045
+
1046
+ graph = match.graph
1047
+ lstm_node = match.output_node()
1048
+ input = args[0]
1049
+ weight0, weight1 = args[1:3]
1050
+ reverse = kwargs.get("reverse")
1051
+ packed_lstm_op = aten.mkldnn_rnn_layer.default
1052
+ hidden_size = args[9]
1053
+ has_biases = args[11]
1054
+ batch_first = args[13]
1055
+ with graph.inserting_before(lstm_node):
1056
+ packed_weight_op = mkldnn._reorder_mkldnn_rnn_layer_weight.default
1057
+ packed_weight_inputs = (
1058
+ weight0,
1059
+ weight1,
1060
+ hidden_size,
1061
+ reverse,
1062
+ has_biases,
1063
+ batch_first,
1064
+ )
1065
+ packed_weight_node = graph.create_node(
1066
+ "call_function", packed_weight_op, packed_weight_inputs, {}, "name"
1067
+ )
1068
+ packed_weight_items = [
1069
+ get_item(graph, packed_weight_node, i) for i in range(2)
1070
+ ]
1071
+ pack_lstm_inputs = (
1072
+ args[0],
1073
+ *packed_weight_items,
1074
+ args[3],
1075
+ args[4],
1076
+ args[5],
1077
+ args[6],
1078
+ reverse,
1079
+ *args[7:],
1080
+ )
1081
+
1082
+ packed_lstm_node = graph.create_node(
1083
+ "call_function", packed_lstm_op, args=pack_lstm_inputs
1084
+ )
1085
+ lstm_node.replace_all_uses_with(packed_lstm_node)
1086
+ packed_lstm_node.meta.update(lstm_node.meta)
1087
+ graph.erase_node(lstm_node)
1088
+
1089
+ @register_freezing_graph_pattern(
1090
+ CallFunction(aten.addmm.default, Arg(), Arg(), Arg()),
1091
+ extra_check=_is_packable_linear,
1092
+ )
1093
+ @register_freezing_graph_pattern(
1094
+ CallFunction(aten.mm.default, Arg(), Arg()),
1095
+ extra_check=_is_packable_linear,
1096
+ )
1097
+ def linear(match, *args, **kwargs):
1098
+ graph = match.graph
1099
+ linear_node = match.output_node()
1100
+ input = args[0] if linear_node.target == aten.mm.default else args[1]
1101
+ bias = None if linear_node.target == aten.mm.default else args[0]
1102
+ weight = args[1] if linear_node.target == aten.mm.default else args[2]
1103
+ with graph.inserting_before(linear_node):
1104
+ transpose_weight_node = graph.create_node(
1105
+ "call_function", aten.permute.default, (weight, (1, 0))
1106
+ )
1107
+ weight_dtype = weight.meta.get("val").dtype
1108
+ is_lp_weight = weight_dtype in (
1109
+ torch.bfloat16,
1110
+ torch.float16,
1111
+ )
1112
+ batch_size = input.meta.get("val").shape[0]
1113
+ if has_free_symbols(batch_size):
1114
+ assert (
1115
+ is_lp_weight or mkldnn._is_mkldnn_acl_supported()
1116
+ ), f"only bf16/fp16 weight prepacking supports dynamic shape inputs but got {weight_dtype}"
1117
+                 # For the bfloat16 dynamic shape path, use the input size hint to pack the weight for better performance.
1118
+ packed_weight_inputs = (
1119
+ transpose_weight_node,
1120
+ batch_size.node.shape_env.size_hint(batch_size.node.expr)
1121
+ if has_free_symbols(batch_size)
1122
+ else batch_size,
1123
+ )
1124
+ packed_weight_op = (
1125
+ mkldnn._reorder_linear_weight
1126
+ if (is_lp_weight or mkldnn._is_mkldnn_acl_supported())
1127
+ else torch.ops.mkl._mkl_reorder_linear_weight
1128
+ )
1129
+ packed_weight_node = graph.create_node(
1130
+ "call_function", packed_weight_op, args=packed_weight_inputs
1131
+ )
1132
+
1133
+ packed_linear_inputs: Tuple[Any, ...] = (input, packed_weight_node)
1134
+ if is_lp_weight or mkldnn._is_mkldnn_acl_supported():
1135
+ packed_linear_inputs += (bias, "none", [], "")
1136
+ packed_linear_op = mkldnn._linear_pointwise.default
1137
+ else:
1138
+ packed_linear_inputs += (transpose_weight_node, bias, batch_size)
1139
+ packed_linear_op = torch.ops.mkl._mkl_linear
1140
+ packed_linear_node = graph.create_node(
1141
+ "call_function", packed_linear_op, packed_linear_inputs
1142
+ )
1143
+ linear_node.replace_all_uses_with(packed_linear_node)
1144
+ packed_linear_node.meta.update(linear_node.meta)
1145
+ graph.erase_node(linear_node)
1146
+
1147
+ def _eliminate_duplicate_packed_nodes(gm):
1148
+ """
1149
+ Combine packed weight nodes with the same inputs to reduce memory usage.
1150
+ for example:
1151
+ class Model(nn.Module):
1152
+ def __init__(self):
1153
+ super().__init__()
1154
+ self.linear = nn.Linear(32, 32, bias=True)
1155
+
1156
+ def forward(self, x):
1157
+ return self.linear(self.linear(x))
1158
+
1159
+         the packed weight nodes above are duplicates if the two linear calls have the same input size.
1160
+ """
1161
+ if not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()):
1162
+ return gm
1163
+
1164
+ packed_weight_ops = [
1165
+ torch._C._nn.mkldnn_reorder_conv2d_weight,
1166
+ mkldnn._reorder_convolution_transpose_weight,
1167
+ mkldnn._reorder_linear_weight,
1168
+ mkldnn._reorder_mkldnn_rnn_layer_weight,
1169
+ ]
1170
+ if torch._C.has_mkl:
1171
+ packed_weight_ops.append(torch.ops.mkl._mkl_reorder_linear_weight)
1172
+
1173
+ for node in gm.graph.nodes:
1174
+ if node.target in packed_weight_ops and len(node.args[0].users) > 1:
1175
+ for user_node in list(node.args[0].users.keys()):
1176
+ if (
1177
+ user_node.target == node.target
1178
+ and user_node != node
1179
+ and user_node.args == node.args
1180
+ ):
1181
+ user_node.replace_all_uses_with(node)
1182
+ gm.graph.erase_node(user_node)
1183
+
1184
+ @functools.lru_cache(None)
1185
+ def _mkldnn_fusion_init():
1186
+ # TODO: aarch64: enable op fusion for acl once it supports fused operators. Disabling it for now.
1187
+         # Otherwise even the matmul or inner product cannot be accelerated with acl
1188
+ if (
1189
+ torch.backends.mkldnn.enabled
1190
+ and torch.backends.mkldnn.is_available()
1191
+ and not torch.ops.mkldnn._is_mkldnn_acl_supported()
1192
+ ):
1193
+ _register_unary_fusion()
1194
+ _register_inplace_fusion()
1195
+ _register_binary_unary_fusion()
1196
+ _register_binary_fusion()
1197
+ _register_quantization_lowerings()
1198
+
1199
+ @functools.lru_cache(None)
1200
+ def _mkldnn_weight_pack_init():
1201
+ if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available():
1202
+ _register_weight_pack_pass()
1203
+ _recover_linear()
1204
+ _register_quantization_weight_pack_pass()
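For reference, the decomposed pointwise chain that _hardswish_fusion above is written to match, div(mul(x, clamp_max(clamp_min(add(x, 3), 0), 6)), 6), is numerically the same as F.hardswish, which is why the whole chain can be folded into a single fused "hardswish" attribute on the mkldnn op. A small editorial check (not part of the uploaded file; decomposed_hardswish is a hypothetical helper):

import torch
import torch.nn.functional as F


def decomposed_hardswish(x):
    # div(mul(x, clamp_max(clamp_min(x + 3, 0), 6)), 6)
    return x * torch.clamp_max(torch.clamp_min(x + 3, 0), 6) / 6


x = torch.randn(8, 16)
torch.testing.assert_close(decomposed_hardswish(x), F.hardswish(x))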
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/numeric_utils.py ADDED
@@ -0,0 +1,210 @@
1
+ import gc
2
+ import logging
3
+ import os
4
+ import random
5
+ import traceback
6
+
7
+ import numpy
8
+
9
+ import torch
10
+ import torch.optim as optim
11
+
12
+ from .. import config
13
+
14
+ logger: logging.Logger = logging.getLogger(__name__)
15
+
16
+ MAIN_RANDOM_SEED = 1337
17
+
18
+ # Set the CUBLAS_WORKSPACE_CONFIG environment variable
19
+ os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
20
+
21
+
22
+ # If the two forward functions involve any non-deterministic operations,
23
+ # such as certain types of parallelism or asynchronous execution,
24
+ # this can also lead to different outputs.
25
+ def set_deterministic() -> None:
26
+ """Make torch manual seed deterministic."""
27
+
28
+ torch.manual_seed(MAIN_RANDOM_SEED)
29
+ random.seed(MAIN_RANDOM_SEED)
30
+ numpy.random.seed(MAIN_RANDOM_SEED)
31
+ torch.use_deterministic_algorithms(True)
32
+
33
+
34
+ def clean_memory() -> None:
35
+ """Clean memory to avoid OOM."""
36
+ gc.collect()
37
+ torch.cuda.empty_cache()
38
+
39
+
40
+ # We compare the numerical results before and after pre/post grad fx passes
41
+ # transformation to make sure the numerical results are the same.
42
+ def compare_dict_tensors(dict_base, dict_control, precision):
43
+ if len(set(dict_base.keys())) != len(set(dict_control.keys())):
44
+ logger.warning("Mismatch keys found before and after pre/post grad fx passes.")
45
+ logger.debug("keys before pre/post grad fx passes %s", dict_base.keys())
46
+ logger.debug("keys after pre/post grad fx passes %s", dict_control.keys())
47
+ return False
48
+ is_allclose = True
49
+ for key in dict_base.keys():
50
+ if key not in dict_control:
51
+ logger.warning(
52
+ "Mismatch parameter name %s does not exist after pre/post grad fx passes",
53
+ key,
54
+ )
55
+         # Some parameters have `None` and not every param has a valid .grad field; we skip them
56
+ if dict_base[key] is None or dict_control[key] is None:
57
+ continue
58
+ if not torch.allclose(
59
+ dict_base[key],
60
+ dict_control[key],
61
+ rtol=precision,
62
+ atol=precision,
63
+ equal_nan=True,
64
+ ):
65
+ logger.warning(
66
+ "Mismatch parameter values found before and after pre/post grad fx passes."
67
+ )
68
+ logger.debug("value before pre/post grad fx passes %s", dict_base[key])
69
+ logger.debug("value after pre/post grad fx passes %s", dict_control[key])
70
+ is_allclose = False
71
+ return is_allclose
72
+
73
+
74
+ def compare_tuple_tensors(tuple_base, tuple_control, precision):
75
+ if len(tuple_base) != len(tuple_control):
76
+ logger.warning(
77
+ "Mismatch fw output length. before transformation: %s, after transformation: %s",
78
+ len(tuple_base),
79
+ len(tuple_control),
80
+ )
81
+ return False
82
+ is_allclose = True
83
+ for i in range(len(tuple_base)):
84
+ # Some parameters have `None`, we skip them
85
+ if tuple_base[i] is None or tuple_control[i] is None:
86
+ continue
87
+ if not torch.allclose(
88
+ tuple_base[i],
89
+ tuple_control[i],
90
+ rtol=precision,
91
+ atol=precision,
92
+ equal_nan=True,
93
+ ):
94
+ logger.debug(
95
+ "forward output before pre/post grad fx passes %s", tuple_base[i]
96
+ )
97
+ logger.debug(
98
+ "forward output after pre/post grad fx passes %s", tuple_control[i]
99
+ )
100
+ is_allclose = False
101
+ return is_allclose
102
+
103
+
104
+ def compare_parameters(model_base, model_control, precision):
105
+ return compare_dict_tensors(
106
+ dict(model_base.named_parameters()),
107
+ dict(model_control.named_parameters()),
108
+ precision,
109
+ )
110
+
111
+
112
+ def compare_forward_output(pred_base, pred_control, precision):
113
+ return compare_tuple_tensors(
114
+ pred_base,
115
+ pred_control,
116
+ precision,
117
+ )
118
+
119
+
120
+ def compare_gradients(model_base, model_control, precision):
121
+ grad_base = {key: param.grad for key, param in model_base.named_parameters()}
122
+ grad_pt2 = {key: param.grad for key, param in model_control.named_parameters()}
123
+ return compare_dict_tensors(
124
+ grad_base,
125
+ grad_pt2,
126
+ precision,
127
+ )
128
+
129
+
130
+ def run_model(
131
+ model_base, model_control, model_input, num_iterations=10, precision=1e-4
132
+ ):
133
+ clean_memory()
134
+ for i in range(num_iterations):
135
+ logger.info("start %s iteration", i)
136
+ set_deterministic()
137
+ pred_base = model_base(*model_input)
138
+ set_deterministic()
139
+ pred_control = model_control(*model_input)
140
+
141
+ res = compare_parameters(model_base, model_control, precision)
142
+ logger.info("compare parameters. Numerical result : %s", res)
143
+
144
+ res = compare_forward_output(pred_base, pred_control, precision)
145
+ logger.info("compare loss/predict. Numerical result : %s", res)
146
+ # tensor may not have a grad_fn
147
+ try:
148
+ _ = pred_base[0].sum().backward(retain_graph=True)
149
+ _ = pred_control[0].sum().backward(retain_graph=True)
150
+ res = compare_gradients(model_base, model_control, precision)
151
+ logger.info("compare param grad. Numerical result : %s", res)
152
+ except Exception as e:
153
+ logger.exception("Exception %s when comparing gradients", e)
154
+ traceback.print_exc()
155
+
156
+ if config.fx_passes_numeric_check["requires_optimizer"]:
157
+ try:
158
+ optimizer_base = optim.SGD(
159
+ [param for name, param in model_base.named_parameters()], lr=0.01
160
+ )
161
+ optimizer_base.step()
162
+
163
+ optimizer_control = optim.SGD(
164
+ [param for name, param in model_control.named_parameters()], lr=0.01
165
+ )
166
+ optimizer_control.step()
167
+
168
+ res = compare_parameters(model_base, model_control, precision)
169
+ logger.info(
170
+ "compare parameters with optimizer added. Numerical result : %s",
171
+ res,
172
+ )
173
+ except Exception as e:
174
+ logger.exception(
175
+ "Exception %s when optimizer is added to check parameter names", e
176
+ )
177
+ traceback.print_exc()
178
+ else:
179
+ logger.warning(
180
+ "no parameter with optimizer to compare with length %s before transformation"
181
+ " and the length %s after transformation",
182
+ len(dict(model_base.named_parameters())),
183
+ len(dict(model_control.named_parameters())),
184
+ )
185
+
186
+
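+ # Entry point for the runtime numeric check. A hypothetical call site would look
+ # like numeric_check_if_enabled(gm_before, gm_after, example_inputs,
+ # num_iterations=2, precision=1e-4); failures are logged, never raised, so the
+ # check cannot block the model run.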
187
+ def numeric_check_if_enabled(
188
+ gm_before_fx_passes,
189
+ gm_after_fx_passes,
190
+ example_inputs,
191
+ num_iterations,
192
+ precision,
193
+ ):
194
+ # need to topo-sort graphmodule before we run the model,
195
+ # otherwise it may fail with a reference-before-definition error.
+ # Fail silently in order not to block the model run.
197
+ try:
198
+ with torch.autograd.set_detect_anomaly(True):
199
+ run_model(
200
+ gm_before_fx_passes,
201
+ gm_after_fx_passes,
202
+ example_inputs,
203
+ num_iterations=num_iterations,
204
+ precision=precision,
205
+ )
206
+ except Exception as e:
207
+ logger.warning(
208
+ "Runtime numeric check failed in pre grad fx passes with error: %s", e
209
+ )
210
+ traceback.print_exc()
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py ADDED
@@ -0,0 +1,567 @@
1
+ import functools
2
+ from typing import List, Optional, Set, Union
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch._inductor import utils
7
+ from torch._subclasses.fake_tensor import FakeTensor
8
+ from torch.utils._mode_utils import no_dispatch
9
+ from torch.utils._triton import has_triton
10
+
11
+ from ..pattern_matcher import (
12
+ fwd_only,
13
+ joint_fwd_bwd,
14
+ Match,
15
+ MatchContext,
16
+ register_replacement,
17
+ )
18
+ from ..utils import is_view
19
+
20
+ aten = torch.ops.aten
21
+
22
+
23
+ # This flag is only used for testing purposes.
+ # Setting it to True skips the comparison of do_bench times
+ # between the original pattern and the padded one.
26
+ _skip_do_bench_times = False
27
+
28
+
29
+ def fetch_fake_tensors(match, kwarg_names) -> List[Tensor]:
30
+ kwargs = match.kwargs
31
+ return [kwargs[name].meta["val"] for name in kwarg_names]
32
+
33
+
34
+ def unwrap_fake_args(*arg_names):
35
+ def decorator(func):
36
+ def wrapper(match):
37
+ fake_tensors = fetch_fake_tensors(match, arg_names)
38
+ return func(*fake_tensors)
39
+
40
+ return wrapper
41
+
42
+ return decorator
43
+
44
+
45
+ def get_alignment_size(x: Tensor) -> int:
46
+ if x.dtype == torch.float16 or x.dtype == torch.half or x.dtype == torch.bfloat16:
47
+ return 8
48
+ elif x.dtype == torch.float32 or x.dtype == torch.float:
49
+ return 4
50
+ else:
51
+ return 0
52
+
53
+
54
+ def check_device(a: Tensor, b: Tensor) -> bool:
55
+ return a.is_cuda and b.is_cuda
56
+
57
+
58
+ def check_dtype(a: Tensor, b: Tensor) -> bool:
59
+ return a.is_floating_point() and b.is_floating_point()
60
+
61
+
62
+ def _result_layout_affects_graph_output(match: Match) -> bool:
63
+ """
64
+ Check if the matched GEMM operation potentially affects the graph output strides.
65
+ Returns True if the matched op's output buffer does not pass through functions which certainly
+ redefine the memory layout before becoming part of the graph output.
67
+ """
68
+
69
+ if match.ctx is not None:
70
+ assert isinstance(match.ctx, MatchContext)
71
+ search_node: torch.fx.Node = match.output_node()
72
+ else:
73
+ return True
74
+
75
+ assert search_node is not None
76
+ seen: Set[torch.fx.Node] = set()
77
+
78
+ def find_output(node: torch.fx.Node, is_start_node=False):
79
+ if not isinstance(node, torch.fx.Node):
80
+ return False
81
+ if node in seen:
82
+ return False
83
+ seen.add(node)
84
+ if node.op == "output":
85
+ return True
86
+ if node.op != "call_function":
87
+ return False
88
+ if not is_start_node and (
89
+ (not isinstance(node.target, torch._ops.OpOverload))
90
+ or (not is_view(node.target))
91
+ ):
92
+ return False
93
+ if node.users is not None and len(node.users) > 0:
94
+ for n in node.users:
95
+ if find_output(n):
96
+ return True
97
+ return False
98
+
99
+ return find_output(search_node, True)
100
+
101
+
102
+ def should_pad_common(
103
+ mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
104
+ ) -> bool:
105
+ # It's fine if we have symbolic shapes or strides as long as they
106
+ # have hints. Later, we will make sure we only pad non-symbolic dimensions.
107
+ def valid_shape_and_stride(t: Optional[Tensor]) -> bool:
108
+ if t is None:
109
+ return True
110
+
111
+ symbolic_cnt = 0
112
+ for x in t.size():
113
+ if isinstance(x, int):
114
+ continue
115
+ elif utils.is_symbolic(x):
116
+ if not x.node.has_hint():
117
+ return False
118
+ symbolic_cnt += 1
119
+ else:
120
+ return False
121
+ # filter out cases where all dimensions are symbolic
122
+ if symbolic_cnt == len(t.size()):
123
+ return False
124
+ return all(
125
+ isinstance(x, int) or (utils.is_symbolic(x) and x.node.has_hint())
126
+ for x in t.stride()
127
+ )
128
+
129
+ return (
130
+ torch._inductor.config.shape_padding
131
+ and check_device(mat1, mat2)
132
+ and check_dtype(mat1, mat2)
133
+ and all(valid_shape_and_stride(t) for t in (mat1, mat2, input))
134
+ )
135
+
136
+
137
+ def get_padded_length(x: Union[int, torch.SymInt], alignment_size) -> int:
138
+ # we don't pad x if it is symbolic
139
+ if isinstance(x, torch.SymInt) or alignment_size == 0 or x % alignment_size == 0:
140
+ return 0
141
+ return int((x // alignment_size + 1) * alignment_size) - x
142
+
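+ # For get_padded_length above (illustrative): with a float16 operand,
+ # get_alignment_size() is 8, so get_padded_length(1003, 8) == (1003 // 8 + 1) * 8 - 1003 == 5,
+ # while get_padded_length(1000, 8) == 0 because 1000 is already a multiple of 8.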
143
+
144
+ def pad_dim(x: Tensor, padded_length: int, dim: int) -> Tensor:
145
+ if padded_length == 0:
146
+ return x
147
+ pad = x.new_zeros(*x.shape[:dim], padded_length, *x.shape[dim + 1 :])
148
+ return torch.cat([x, pad], dim=dim)
149
+
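+ # For pad_dim above: pad_dim(torch.randn(3, 5), 3, dim=1) returns a (3, 8) tensor
+ # whose last three columns are zeros.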
150
+
151
+ def addmm_pattern(
152
+ input: Tensor, mat1: Tensor, mat2: Tensor, beta: float, alpha: float
153
+ ) -> Tensor:
154
+ return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
155
+
156
+
157
+ def should_pad_addmm(match: Match) -> bool:
158
+ if (
159
+ torch._inductor.config.keep_output_stride
160
+ and _result_layout_affects_graph_output(match)
161
+ ):
162
+ return False
163
+ mat1, mat2, input = fetch_fake_tensors(match, ("mat1", "mat2", "input"))
164
+ return should_pad_common(mat1, mat2, input) and should_pad_bench(
165
+ mat1, mat2, torch.ops.aten.addmm, input=input
166
+ )
167
+
168
+
169
+ def addmm_replace(
170
+ input: Optional[Tensor], mat1: Tensor, mat2: Tensor, beta=1.0, alpha=1.0
171
+ ) -> Tensor:
172
+ m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
173
+ k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
174
+ n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))
175
+
176
+ if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
177
+ return pad_addmm(
178
+ input,
179
+ mat1,
180
+ mat2,
181
+ m_padded_length,
182
+ k_padded_length,
183
+ n_padded_length,
184
+ beta,
185
+ alpha,
186
+ )
187
+
188
+ return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
189
+
190
+
191
+ def pad_addmm(
192
+ input: Optional[Tensor],
193
+ mat1: Tensor,
194
+ mat2: Tensor,
195
+ m_padded_length: int,
196
+ k_padded_length: int,
197
+ n_padded_length: int,
198
+ beta=1.0,
199
+ alpha=1.0,
200
+ ):
201
+ # addmm decomp with padding will go through pad_addmm multiple times if multiple dimensions need to be padded
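+ # (e.g. if both K and N need padding, this call pads K and then re-enters
+ # addmm_replace, which recomputes the padded lengths on the K-padded operands
+ # and pads N on a second pass through pad_addmm)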
202
+ if k_padded_length != 0:
203
+ mat1 = pad_dim(mat1, k_padded_length, 1)
204
+ mat2 = pad_dim(mat2, k_padded_length, 0)
205
+ elif n_padded_length != 0:
206
+ mat2 = pad_dim(mat2, n_padded_length, 1)
207
+ elif m_padded_length != 0:
208
+ mat1 = pad_dim(mat1, m_padded_length, 0)
209
+
210
+ # the add broadcasts, so we only pad if the dimension != 1
211
+ if input is not None and k_padded_length == 0:
212
+ if n_padded_length != 0:
213
+ if input.dim() == 2 and input.shape[1] != 1:
214
+ input = pad_dim(input, n_padded_length, 1)
215
+ elif input.dim() == 1 and input.shape[0] != 1:
216
+ input = pad_dim(input, n_padded_length, 0)
217
+ elif m_padded_length != 0 and input.dim() == 2 and input.shape[0] != 1:
218
+ input = pad_dim(input, m_padded_length, 0)
219
+
220
+ if k_padded_length != 0:
221
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)
222
+ elif n_padded_length != 0:
223
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
224
+ :, :-n_padded_length
225
+ ]
226
+ else:
227
+ return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
228
+ :-m_padded_length, :
229
+ ]
230
+
231
+
232
+ def is_mm_compute_bound(M: int, K: int, N: int, dtype: torch.dtype) -> bool:
233
+ denominator = M * K + N * K + M * N
234
+ if denominator == 0:
235
+ return False
236
+ arithmetic_intensity = (M * N * K) / denominator
237
+
238
+ # get_device_tflops / get_gpu_dram_gbps can fail on AMD; assume compute bound in that case
239
+ try:
240
+ machine_balance = (
241
+ 1000 * utils.get_device_tflops(dtype)
242
+ ) / utils.get_gpu_dram_gbps()
243
+ except Exception:
244
+ return True
245
+
246
+ # dram_gbps might be underestimating bandwidth because of cache.
+ # If we estimate machine balance too low we might miss some speedups;
+ # if we estimate it too high there will be an unnecessary increase in compilation time.
+ # TODO - finetune the coefficient here. As a reference point, the Triton mm model assumes
+ # 80% of reads are in cache and cache is 4x faster than dram_gbps
251
+ machine_balance = machine_balance * 0.5
252
+
253
+ return arithmetic_intensity > machine_balance
254
+
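+ # Rough illustration of the check above (device numbers are assumptions, not
+ # measured by this code): a square fp16 GEMM with M = K = N = 2048 has arithmetic
+ # intensity 2048**3 / (3 * 2048**2) ~= 683. If get_device_tflops() reported ~312
+ # and get_gpu_dram_gbps() reported ~2000, machine_balance would be
+ # 1000 * 312 / 2000 * 0.5 ~= 78, so such a GEMM would be treated as compute bound.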
255
+
256
+ @functools.lru_cache(None)
257
+ def get_pad_cache():
258
+ return torch._inductor.codecache.LocalCache()
259
+
260
+
261
+ def get_cached_should_pad(key):
262
+ return get_pad_cache().lookup(key)
263
+
264
+
265
+ def set_cached_should_pad(key, value):
266
+ return get_pad_cache().set_value(key, value=value)
267
+
268
+
269
+ def should_pad_bench_key(
270
+ mat1: Tensor, mat2: Tensor, op, input: Optional[Tensor] = None
271
+ ) -> str:
272
+ def tensor_key(t):
273
+ return (t.shape, t.stride(), t.dtype)
274
+
275
+ tf32_key = (
276
+ None if mat1.dtype != torch.float32 else torch.backends.cuda.matmul.allow_tf32
277
+ )
278
+ key = (
279
+ tensor_key(mat1),
280
+ tensor_key(mat2),
281
+ op,
282
+ input if input is None else tensor_key(input),
283
+ tf32_key,
284
+ )
285
+
286
+ return str(key)
287
+
288
+
289
+ def should_pad_bench(
290
+ mat1: Tensor, mat2: Tensor, op, input: Optional[Tensor] = None
291
+ ) -> bool:
292
+ if not has_triton():
293
+ return False
294
+
295
+ do_bench = functools.partial(
296
+ utils.do_bench,
297
+ warmup=5,
298
+ )
299
+
300
+ with no_dispatch():
301
+ if op is torch.ops.aten.mm or op is torch.ops.aten.addmm:
302
+ m = mat1.shape[0]
303
+ k = mat1.shape[1]
304
+ n = mat2.shape[1]
305
+
306
+ m_padded_length = get_padded_length(m, get_alignment_size(mat1))
307
+ k_padded_length = get_padded_length(k, get_alignment_size(mat1))
308
+ n_padded_length = get_padded_length(n, get_alignment_size(mat2))
309
+ elif op is torch.ops.aten.bmm:
310
+ m = mat1.shape[1]
311
+ k = mat1.shape[2]
312
+ n = mat2.shape[2]
313
+
314
+ m_padded_length = get_padded_length(m, get_alignment_size(mat1))
315
+ k_padded_length = get_padded_length(k, get_alignment_size(mat1))
316
+ n_padded_length = get_padded_length(n, get_alignment_size(mat2))
317
+ else:
318
+ return False
319
+
320
+ if m_padded_length == k_padded_length == n_padded_length == 0:
321
+ return False
322
+
323
+ if not is_mm_compute_bound(m, k, n, mat1.dtype):
324
+ return False
325
+
326
+ # We don't want to look up the cache for cases that are trivially false
327
+ # since it does file io
328
+ key = should_pad_bench_key(mat1, mat2, op, input)
329
+
330
+ cached_pad = get_cached_should_pad(key)
331
+ if cached_pad is not None:
332
+ return cached_pad
333
+
334
+ def realize_symbols(ds):
335
+ return [d if isinstance(d, int) else d.node.hint for d in ds]
336
+
337
+ def realize_tensor(t):
338
+ if isinstance(t, FakeTensor):
339
+ size_hints = realize_symbols(t.size())
340
+ stride_hint = realize_symbols(t.stride())
341
+ real_size = (
342
+ sum((d - 1) * s for d, s in zip(size_hints, stride_hint)) + 1
343
+ )
344
+ real_t = torch.randn(real_size, dtype=t.dtype, device=t.device)
345
+ return torch.as_strided(real_t, size_hints, stride_hint)
346
+ else:
347
+ return torch.randn_like(t)
348
+
349
+ mat1 = realize_tensor(mat1)
350
+ mat2 = realize_tensor(mat2)
351
+ if op is torch.ops.aten.bmm or op is torch.ops.aten.mm:
352
+ ori_time = do_bench(
353
+ lambda: op(mat1, mat2),
354
+ )
355
+ else:
356
+ if input is not None:
357
+ input = realize_tensor(input)
358
+ ori_time = do_bench(
359
+ lambda: op(input, mat1, mat2),
360
+ )
361
+
362
+ mat1_pad = torch.randn_like(mat1)
363
+ mat2_pad = torch.randn_like(mat2)
364
+
365
+ if op is torch.ops.aten.addmm:
366
+ input_pad = None
367
+ if input is not None and input.is_cuda:
368
+ input_pad = torch.randn_like(input)
369
+ pad_time = do_bench(
370
+ lambda: pad_addmm(
371
+ input_pad,
372
+ mat1_pad,
373
+ mat2_pad,
374
+ m_padded_length,
375
+ k_padded_length,
376
+ n_padded_length,
377
+ ),
378
+ )
379
+ elif op is torch.ops.aten.mm:
380
+ pad_time = do_bench(
381
+ lambda: pad_mm(
382
+ mat1_pad,
383
+ mat2_pad,
384
+ m_padded_length,
385
+ k_padded_length,
386
+ n_padded_length,
387
+ ),
388
+ )
389
+ else:
390
+ pad_time = do_bench(
391
+ lambda: pad_bmm(
392
+ mat1_pad,
393
+ mat2_pad,
394
+ m_padded_length,
395
+ k_padded_length,
396
+ n_padded_length,
397
+ ),
398
+ )
399
+
400
+ # Shape padding introduces additional memory ops. Based on microbenchmarks, 1.1x represents a reasonable
401
+ # tradeoff between performance improvement from shape padding and overhead from additional memory ops
402
+ # TODO: Build a learned model which would be better than this heuristic
403
+ should_pad = _skip_do_bench_times or ori_time > pad_time * 1.1
404
+ set_cached_should_pad(key, should_pad)
405
+
406
+ return should_pad
407
+
408
+
409
+ def mm_pattern(mat1: Tensor, mat2: Tensor) -> Tensor:
410
+ return aten.mm(mat1, mat2)
411
+
412
+
413
+ def should_pad_mm(match: Match) -> bool:
414
+ if (
415
+ torch._inductor.config.keep_output_stride
416
+ and _result_layout_affects_graph_output(match)
417
+ ):
418
+ return False
419
+ mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
420
+ return should_pad_common(mat1, mat2) and should_pad_bench(
421
+ mat1, mat2, torch.ops.aten.mm
422
+ )
423
+
424
+
425
+ def mm_replace(mat1: Tensor, mat2: Tensor) -> Tensor:
426
+ m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
427
+ k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
428
+ n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))
429
+
430
+ return pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)
431
+
432
+
433
+ def pad_mm(
434
+ mat1: Tensor,
435
+ mat2: Tensor,
436
+ m_padded_length: int,
437
+ k_padded_length: int,
438
+ n_padded_length: int,
439
+ ) -> Tensor:
440
+ # mm_replace will go through pad_mm multiple times if multiple dimensions need to be padded
441
+ if k_padded_length != 0:
442
+ mat1 = pad_dim(mat1, k_padded_length, 1)
443
+ mat2 = pad_dim(mat2, k_padded_length, 0)
444
+ return torch.ops.aten.mm(mat1, mat2)
445
+ elif n_padded_length != 0:
446
+ mat2 = pad_dim(mat2, n_padded_length, 1)
447
+ return torch.ops.aten.mm(mat1, mat2)[:, :-n_padded_length]
448
+ else:
449
+ mat1 = pad_dim(mat1, m_padded_length, 0)
450
+ return torch.ops.aten.mm(mat1, mat2)[:-m_padded_length, :]
451
+
452
+
453
+ def bmm_pattern(mat1: Tensor, mat2: Tensor) -> Tensor:
454
+ return aten.bmm(mat1, mat2)
455
+
456
+
457
+ def should_pad_bmm(match: Match) -> bool:
458
+ if (
459
+ torch._inductor.config.keep_output_stride
460
+ and _result_layout_affects_graph_output(match)
461
+ ):
462
+ return False
463
+ mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
464
+ return should_pad_common(mat1, mat2) and should_pad_bench(
465
+ mat1, mat2, torch.ops.aten.bmm
466
+ )
467
+
468
+
469
+ def bmm_replace(mat1: Tensor, mat2: Tensor) -> Tensor:
470
+ m_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
471
+ k_padded_length = get_padded_length(mat1.shape[2], get_alignment_size(mat1))
472
+ n_padded_length = get_padded_length(mat2.shape[2], get_alignment_size(mat2))
473
+
474
+ if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
475
+ return pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)
476
+
477
+ return aten.bmm(mat1, mat2)
478
+
479
+
480
+ def pad_bmm(
481
+ mat1: Tensor,
482
+ mat2: Tensor,
483
+ m_padded_length: int,
484
+ k_padded_length: int,
485
+ n_padded_length: int,
486
+ ) -> Tensor:
487
+ # bmm_replace will go through pad_bmm multiple times if multiple dimensions need to be padded
488
+ if k_padded_length != 0:
489
+ mat1 = pad_dim(mat1, k_padded_length, 2)
490
+ mat2 = pad_dim(mat2, k_padded_length, 1)
491
+
492
+ return aten.bmm(mat1, mat2)
493
+ elif n_padded_length != 0:
494
+ mat2 = pad_dim(mat2, n_padded_length, 2)
495
+ return aten.bmm(mat1, mat2)[:, :, :-n_padded_length].contiguous()
496
+ else:
497
+ mat1 = pad_dim(mat1, m_padded_length, 1)
498
+ return aten.bmm(mat1, mat2)[:, :-m_padded_length, :].contiguous()
499
+
500
+
501
+ @functools.lru_cache(None)
502
+ def _pad_mm_init():
503
+ from .joint_graph import patterns
504
+
505
+ if torch.cuda.is_available():
506
+ # workaround https://github.com/pytorch/pytorch/issues/97894
507
+ device = "cuda"
508
+ else:
509
+ device = "cpu"
510
+
511
+ # sizes/values don't actually matter for the initial trace
512
+ # once we get a possible match we re-trace with the actual values and verify the match still holds
513
+
514
+ dim2a = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)
515
+ dim2b = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)
516
+
517
+ dim3a = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)
518
+ dim3b = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)
519
+
520
+ dim1a = functools.partial(torch.empty, (4), device=device, requires_grad=True)
521
+
522
+ # workaround https://github.com/pytorch/pytorch/issues/97894
523
+ # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
524
+ rep = {"beta": 0.213377, "alpha": 0.113377}
525
+
526
+ for pattern, replacement, args, workaround, extra_check in [
527
+ (
528
+ mm_pattern,
529
+ mm_replace,
530
+ [dim2a(), dim2b()],
531
+ {},
532
+ should_pad_mm,
533
+ ),
534
+ (
535
+ bmm_pattern,
536
+ bmm_replace,
537
+ [dim3a(), dim3b()],
538
+ {},
539
+ should_pad_bmm,
540
+ ),
541
+ (
542
+ addmm_pattern,
543
+ addmm_replace,
544
+ [dim1a(), dim2a(), dim2b()],
545
+ rep,
546
+ should_pad_addmm,
547
+ ),
548
+ ]:
549
+ assert isinstance(workaround, dict) # mypy is unable to infer the type properly
550
+ register_replacement(
551
+ pattern,
552
+ replacement,
553
+ args,
554
+ joint_fwd_bwd,
555
+ patterns,
556
+ extra_check=extra_check,
557
+ scalar_workaround=workaround,
558
+ )
559
+ register_replacement(
560
+ pattern,
561
+ replacement,
562
+ args,
563
+ fwd_only,
564
+ patterns,
565
+ extra_check=extra_check,
566
+ scalar_workaround=workaround,
567
+ )
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py ADDED
@@ -0,0 +1,1100 @@
1
+ import copy
2
+ import functools
3
+ import itertools
4
+ import logging
5
+ import operator
6
+ from collections import Counter, defaultdict
7
+ from typing import Any, Dict, List, Optional, Set, Union
8
+
9
+ from sympy import Expr
10
+
11
+ import torch
12
+ import torch._inductor as inductor
13
+ import torch.utils._pytree as pytree
14
+ from torch import fx
15
+ from torch._decomp import register_decomposition
16
+ from torch._dynamo.utils import counters, optimus_scuba_log
17
+
18
+ from torch._prims_common import is_boolean_dtype, is_expandable_to, is_integer_dtype
19
+
20
+ from torch._utils_internal import upload_graph
21
+ from torch.fx.experimental.symbolic_shapes import statically_known_true, sym_eq
22
+
23
+ from .. import config, ir, pattern_matcher
24
+ from ..fx_utils import FakeTensorUpdater, get_fake_args_kwargs, get_node_storage
25
+
26
+ from ..lowering import lowerings as L
27
+ from ..pattern_matcher import (
28
+ _return_true,
29
+ Arg,
30
+ CallFunction,
31
+ CallFunctionVarArgs,
32
+ filter_nodes,
33
+ get_arg_value,
34
+ get_mutation_region_id,
35
+ Ignored,
36
+ init_once_fakemode,
37
+ KeywordArg,
38
+ ListOf,
39
+ Match,
40
+ MULTIPLE,
41
+ PatternMatcherPass,
42
+ register_graph_pattern,
43
+ stable_topological_sort,
44
+ )
45
+ from ..utils import decode_device, is_pointwise_use
46
+ from ..virtualized import V
47
+ from .group_batch_fusion import group_batch_fusion_passes
48
+ from .reinplace import reinplace_inplaceable_ops
49
+
50
+ log = logging.getLogger(__name__)
51
+ aten = torch.ops.aten
52
+ prims = torch.ops.prims
53
+
54
+ # First pass_patterns[0] are applied, then [1], then [2]
55
+ pass_patterns = [
56
+ PatternMatcherPass(),
57
+ PatternMatcherPass(),
58
+ PatternMatcherPass(),
59
+ ]
60
+ # patterns applied only in inference
61
+ inference_patterns = PatternMatcherPass()
62
+ decompose_mm_pass = PatternMatcherPass()
63
+
64
+
65
+ def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
66
+ """
67
+ Passes that run on after grad. This is called once on the forwards
68
+ graph and once on the backwards graph.
69
+
70
+ The IR here has been normalized and functionalized.
71
+ """
72
+ if config.dce:
73
+ # has some issues with mutation in inference mode
74
+ gm.graph.eliminate_dead_code()
75
+
76
+ if is_inference and config.reorder_for_locality:
77
+ reorder_for_locality(gm.graph)
78
+
79
+ fake_tensor_updater = FakeTensorUpdater(gm.graph)
80
+
81
+ if config.post_grad_custom_pre_pass is not None:
82
+ config.post_grad_custom_pre_pass(gm.graph)
83
+
84
+ if config.pattern_matcher:
85
+ lazy_init()
86
+ inductor_before_change = copy.deepcopy(counters["inductor"])
87
+ group_batch_fusion_passes(gm.graph, pre_grad=False)
88
+ if counters["inductor"] != inductor_before_change:
89
+ optimus_scuba_log["group_batch_fusion_post_grad"] = upload_graph(gm.graph)
90
+ remove_noop_ops(gm.graph)
91
+ for patterns in pass_patterns:
92
+ patterns.apply(gm.graph) # type: ignore[arg-type]
93
+ if is_inference:
94
+ inference_patterns.apply(gm.graph) # type: ignore[arg-type]
95
+ decompose_mm_pass.apply(gm.graph) # type: ignore[arg-type]
96
+
97
+ if config.post_grad_custom_post_pass is not None:
98
+ config.post_grad_custom_post_pass(gm.graph)
99
+
100
+ stable_topological_sort(gm.graph)
101
+
102
+ move_constructors_to_cuda(gm.graph)
103
+
104
+ fake_tensor_updater.incremental_update()
105
+
106
+ # Keep these last, since they introduces mutation. Look at
107
+ # ./fx_passes/README.md for a discussion of mutation invariants.
108
+ reinplace_inplaceable_ops(gm.graph)
109
+ decompose_auto_functionalized(gm.graph)
110
+
111
+ gm.recompile()
112
+ gm.graph.lint()
113
+
114
+
115
+ @init_once_fakemode
116
+ def lazy_init():
117
+ if torch._C._has_mkldnn:
118
+ from . import decompose_mem_bound_mm # noqa: F401
119
+ from .mkldnn_fusion import _mkldnn_fusion_init
120
+
121
+ _mkldnn_fusion_init()
122
+
123
+
124
+ def reorder_for_locality(graph: torch.fx.Graph):
125
+ def visit(other_node):
126
+ if (
127
+ other_node.op == "call_function"
128
+ and other_node.target != operator.getitem
129
+ and all((n in seen_nodes) for n in other_node.users)
130
+ and get_mutation_region_id(graph, node)
131
+ == get_mutation_region_id(graph, other_node)
132
+ ):
133
+ # move node's producers right before it
134
+ node.prepend(other_node)
135
+
136
+ seen_nodes = set()
137
+
138
+ # only reorder nodes before the first copy_ in the graph.
139
+ # copy_ will appear at the end of functionalized graphs when there is mutation on inputs,
140
+ # and this reordering doesnt work well with mutation
141
+ first_copy = next(
142
+ (
143
+ node
144
+ for node in graph.nodes
145
+ if node.op == "call_function"
146
+ and node.target == torch.ops.aten.copy_.default
147
+ ),
148
+ None,
149
+ )
150
+ past_mutating_epilogue = True if first_copy is None else False
151
+
152
+ for node in reversed(graph.nodes):
153
+ seen_nodes.add(node)
154
+ if not past_mutating_epilogue:
155
+ past_mutating_epilogue = node is first_copy
156
+ continue
157
+
158
+ torch.fx.map_arg((node.args, node.kwargs), visit)
159
+
160
+
161
+ def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1):
162
+ """
163
+ Register an aten to inductor IR replacement pattern
164
+ """
165
+ return pattern_matcher.register_lowering_pattern(
166
+ pattern, extra_check, pass_dict=pass_patterns[pass_number]
167
+ )
168
+
169
+
170
+ ################################################################################
171
+ # Actual patterns below this point.
172
+ # Priority of patterns is:
173
+ # - later output nodes first
174
+ # - order patterns are defined in
175
+ ################################################################################
176
+
177
+
178
+ def is_valid_mm_plus_mm(match: Match):
179
+ *b1, m1, k1 = match.kwargs["mat1"].meta.get("tensor_meta").shape
180
+ *b2, k2, n1 = match.kwargs["mat2"].meta.get("tensor_meta").shape
181
+ if k1 != k2:
182
+ return False
183
+
184
+ *b1, m2, k3 = match.kwargs["mat3"].meta.get("tensor_meta").shape
185
+ *b2, k4, n2 = match.kwargs["mat4"].meta.get("tensor_meta").shape
186
+ if k3 != k4:
187
+ return False
188
+
189
+ if m1 != m2 or n1 != n2:
190
+ return False
191
+
192
+ return True
193
+
194
+
195
+ @register_lowering_pattern(
196
+ CallFunction(
197
+ aten.add,
198
+ CallFunction(aten.mm, KeywordArg("mat1"), KeywordArg("mat2")),
199
+ CallFunction(aten.mm, KeywordArg("mat3"), KeywordArg("mat4")),
200
+ ),
201
+ extra_check=is_valid_mm_plus_mm,
202
+ )
203
+ def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4):
204
+ return inductor.kernel.mm_plus_mm.tuned_mm_plus_mm(mat1, mat2, mat3, mat4)
205
+
206
+
207
+ def cuda_and_enabled_mixed_mm(match):
208
+ return (config.use_mixed_mm or config.force_mixed_mm) and getattr(
209
+ match.kwargs["mat1"].meta.get("val"), "is_cuda", False
210
+ )
211
+
212
+
213
+ def cuda_and_enabled_mixed_mm_and_not_int8(match):
214
+ return (
215
+ cuda_and_enabled_mixed_mm(match)
216
+ and getattr(match.kwargs["mat1"].meta.get("val"), "is_cuda", False)
217
+ and getattr(match.kwargs["mat2"].meta.get("val"), "dtype", torch.int8)
218
+ != torch.int8
219
+ ) # bitshift numerics in triton and pytorch don't match for torch.int8
220
+
221
+
222
+ """
223
+ this is intended to be used to unpack a [K,N] int4 tensor from a [K/2, N] uint4x2 tensor
224
+ (where the int4 and uint4x2 are represented with int8 and uint8 respectively)
225
+ where every other row of the int4 is packed with the row above it as:
226
+ uint4x2[k,n] = (8+int4[2*k,n])+(8+int4[2*k+1,n])<<4
227
+
228
+ unpack formulas:
229
+ int4[2*k,n]=(uint4x2[k,n] & 0xF) - 8
230
+ int4[2*k+1,n]=(uint4x2[k,n] >> 4) - 8
231
+
232
+ thus matching on unpack formula:
233
+ torch.mm(mat1, torch.cat((mat2 & 0xF, mat2>>4),1).reshape(mat2_mm_shape).to(mat2_dtype).sub(8))
234
+
235
+ note: although the unpack formula in pytorch and the triton kernel is designed for a uint8 mat2, the behavior
236
+ of the kernel matches the pytorch formula for all dtypes except torch.int8
237
+ where the bitwise numerics in triton do not match those in pytorch.
238
+ """
239
+
240
+
241
+ @register_lowering_pattern(
242
+ CallFunction(
243
+ aten.mm.default,
244
+ KeywordArg("mat1"),
245
+ CallFunction(
246
+ aten.sub.Tensor,
247
+ CallFunction(
248
+ prims.convert_element_type.default,
249
+ CallFunction(
250
+ aten.reshape.default,
251
+ CallFunction(
252
+ aten.cat.default,
253
+ ListOf(
254
+ CallFunction(
255
+ aten.bitwise_and.Scalar,
256
+ KeywordArg("mat2"),
257
+ 0xF,
258
+ ),
259
+ CallFunction(
260
+ aten.__rshift__.Scalar,
261
+ KeywordArg("mat2"),
262
+ 4,
263
+ ),
264
+ ),
265
+ 1,
266
+ ),
267
+ KeywordArg("mat2_mm_shape"),
268
+ ),
269
+ KeywordArg("mat2_dtype"),
270
+ ),
271
+ 8,
272
+ ),
273
+ ),
274
+ extra_check=cuda_and_enabled_mixed_mm_and_not_int8,
275
+ )
276
+ def uint4x2_mixed_mm(match: Match, mat1, mat2, mat2_mm_shape, mat2_dtype):
277
+ return inductor.kernel.unpack_mixed_mm.tuned_uint4x2_mixed_mm(
278
+ mat1, mat2, mat2_mm_shape, mat2_dtype
279
+ )
280
+
281
+
282
+ """
283
+ torch.mm(mat1, mat2.to(mat2_dtype))
284
+ """
285
+
286
+
287
+ @register_lowering_pattern(
288
+ CallFunction(
289
+ aten.mm,
290
+ KeywordArg("mat1"),
291
+ CallFunction(
292
+ prims.convert_element_type.default,
293
+ KeywordArg("mat2"),
294
+ KeywordArg("mat2_dtype"),
295
+ ),
296
+ ),
297
+ extra_check=cuda_and_enabled_mixed_mm,
298
+ )
299
+ def mixed_mm(match: Match, mat1, mat2, mat2_dtype):
300
+ return inductor.kernel.mm.tuned_mixed_mm(mat1, mat2, mat2_dtype)
301
+
302
+
303
+ @register_graph_pattern(
304
+ CallFunction(
305
+ aten.cumsum.default,
306
+ CallFunction(
307
+ torch.ops.aten.full.default,
308
+ KeywordArg("shape"),
309
+ KeywordArg("fill_value"),
310
+ dtype=KeywordArg("dtype"),
311
+ layout=Ignored(),
312
+ device=KeywordArg("device"),
313
+ pin_memory=False,
314
+ _users=MULTIPLE,
315
+ ),
316
+ KeywordArg("dim"),
317
+ _users=MULTIPLE,
318
+ ),
319
+ pass_dict=pass_patterns[1],
320
+ )
321
+ def pointless_cumsum_replacement(match: Match, shape, fill_value, device, dtype, dim):
322
+ """Based on a pattern in OPTForCausalLM"""
323
+
324
+ if is_integer_dtype(dtype) or is_boolean_dtype(dtype):
325
+ # cumsum promotes all integral types to int64
326
+ dtype = torch.int64
327
+
328
+ def repl(*shape):
329
+ dim_size = shape[dim]
330
+ idx = torch.arange(1, dim_size + 1, device=device, dtype=dtype)
331
+
332
+ inter_shape = [1] * len(shape)
333
+ inter_shape[dim] = dim_size
334
+ return (idx * fill_value).view(inter_shape).expand(shape)
335
+
336
+ # only replace the output node, not all nodes
337
+ match.nodes = [match.output_node()]
338
+ with V.fake_mode:
339
+ match.replace_by_example(repl, list(shape))
340
+
341
+
342
+ def shape_of_mm(a, b):
343
+ m, _ = a.get_size()
344
+ _, n = b.get_size()
345
+ return [m, n]
346
+
347
+
348
+ @register_lowering_pattern(
349
+ CallFunction(aten.cat, ListOf(CallFunction(aten.mm, Arg(), Arg())), Arg()),
350
+ )
351
+ def cat_mm(match, inputs, dim):
352
+ return cat_tuned_op(match, inputs, dim, op=L[aten.mm], shape_of=shape_of_mm)
353
+
354
+
355
+ @register_lowering_pattern(
356
+ CallFunction(
357
+ aten.cat, ListOf(CallFunction(aten.addmm, Arg(), Arg(), Arg())), Arg()
358
+ ),
359
+ )
360
+ def cat_addmm(match, inputs, dim):
361
+ def shape_of(bias, a, b):
362
+ m, _ = a.get_size()
363
+ _, n = b.get_size()
364
+ return [m, n]
365
+
366
+ return cat_tuned_op(match, inputs, dim, op=L[aten.addmm], shape_of=shape_of)
367
+
368
+
369
+ def cat_tuned_op(match, inputs, dim, *, op, shape_of):
370
+ """
371
+ Memory planning to remove cat. We can't use the stock memory
372
+ planner since autotuning matmuls needs to know the output layout.
373
+ """
374
+ if len(inputs) == 1:
375
+ return op(*inputs[0])
376
+
377
+ # TODO(jansel): rewrite this as a bmm?
378
+ if dim < 0:
379
+ dim += len(shape_of(*inputs[0]))
380
+ assert dim in (0, 1)
381
+ notdim = 1 - dim
382
+
383
+ new_size: Optional[Union[List[Expr], List[int]]] = None
384
+ offsets_start = []
385
+ offsets_end = []
386
+
387
+ # compute output sizes
388
+ for i in range(len(inputs)):
389
+ shape = shape_of(*inputs[i])
390
+ if new_size is None:
391
+ new_size = shape
392
+ else:
393
+ new_size[notdim] = V.graph.sizevars.guard_equals( # type: ignore[call-overload]
394
+ shape[notdim], new_size[notdim]
395
+ )
396
+ new_size[dim] += shape[dim]
397
+ offsets_start.append(new_size[dim] - shape[dim])
398
+ offsets_end.append(new_size[dim])
399
+
400
+ assert new_size is not None
401
+ dtype = functools.reduce(
402
+ torch.promote_types,
403
+ [x.get_dtype() for x in itertools.chain.from_iterable(inputs)],
404
+ )
405
+ device = inputs[0][0].get_device()
406
+ kernel = ir.ConcatKernel(
407
+ name=None,
408
+ layout=ir.FixedLayout(device, dtype, new_size),
409
+ inputs=[],
410
+ )
411
+ kernel_tensor = ir.TensorBox.create(kernel)
412
+
413
+ for i in range(len(inputs)):
414
+ dst = ir.SliceView.create(kernel_tensor, dim, offsets_start[i], offsets_end[i])
415
+ src = op(*inputs[i], layout=dst.get_layout()).data.data
416
+ assert isinstance(src, (ir.ExternKernelOut, ir.TemplateBuffer))
417
+ src.layout = ir.AliasedLayout(dst)
418
+ kernel.inputs.append(src)
419
+
420
+ kernel.name = V.graph.register_buffer(kernel)
421
+ kernel.inputs = ir.ConcatKernel.unwrap_storage(kernel.inputs)
422
+ return kernel_tensor
423
+
424
+
425
+ _cat_1 = CallFunction(aten.cat, Arg(), 1, _users=2)
426
+
427
+
428
+ @register_lowering_pattern(
429
+ CallFunction(
430
+ aten.cat,
431
+ [
432
+ _cat_1,
433
+ CallFunction(
434
+ aten.slice,
435
+ _cat_1,
436
+ 1,
437
+ 0,
438
+ KeywordArg("size"),
439
+ ),
440
+ ],
441
+ 1,
442
+ )
443
+ )
444
+ def cat_slice_cat(match, cat_input, size, dim=1):
445
+ """
446
+ This is an example of a more complex pattern where cat_1 is used
447
+ multiple times inside the pattern. We fold 2 calls to cat into one.
448
+
449
+ Matches:
450
+ cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1)
451
+ slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807)
452
+ slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19)
453
+ cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1)
454
+
455
+
456
+ Rewrite to:
457
+ slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19)
458
+ cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)
459
+ """
460
+ first, *rest = cat_input
461
+ # Optimization is optional, because we can just not fold the cat
462
+ # size should be within first.get_size()[dim] such that the optimization is valid.
463
+ # For negative `end`, we currently fallback to not optimizing.
464
+ if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):
465
+ # fold 2 cats into 1 cat
466
+ return L[aten.cat](
467
+ [
468
+ first,
469
+ *rest,
470
+ L[aten.slice](first, dim, 0, size),
471
+ ],
472
+ dim,
473
+ )
474
+ else:
475
+ # don't expect to hit this case, just fall back
476
+ tmp = L[aten.cat](cat_input, dim)
477
+ return L[aten.cat](
478
+ [
479
+ tmp,
480
+ L[aten.slice](tmp, dim, 0, size),
481
+ ],
482
+ dim,
483
+ )
484
+
485
+
486
+ def is_valid_splitwithsizes_cat(match):
487
+ split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
488
+ cat_nodes = filter_nodes(match.nodes, aten.cat)
489
+ get_item_nodes = filter_nodes(match.nodes, operator.getitem)
490
+ if len(split_nodes) != 1 or len(cat_nodes) != 1:
491
+ return False
492
+ split_node, cat_node = split_nodes[0], cat_nodes[0]
493
+ # The dim of split and cat should match for passthrough
494
+ if get_arg_value(split_node, 2, "dim") != get_arg_value(cat_node, 1, "dim"):
495
+ return False
496
+ get_item_args = {
497
+ get_arg_value(get_item_node, 1) for get_item_node in get_item_nodes
498
+ }
499
+ assert None not in get_item_args
500
+ split_sizes = get_arg_value(split_node, 1, "split_sizes")
501
+ # All parts of split should be included in the cat
502
+ if get_item_args != set(range(len(split_sizes))):
503
+ return False
504
+ # The order of get_item_args should same with cat_node used.
505
+ # For example, if the split_node like split_with_sizes(input, [2, 2, 3], 1),
506
+ # the cat node should be like cat([get_item(0), get_item(1), get_item(2)], 1).
507
+ cat_items_args_order = [
508
+ get_arg_value(item_node, 1) for item_node in get_arg_value(cat_node, 0)
509
+ ]
510
+ if cat_items_args_order != list(range(len(split_sizes))):
511
+ return False
512
+
513
+ return True
514
+
515
+
516
+ def same_meta(node1: torch.fx.Node, node2: torch.fx.Node):
517
+ """True if two nodes have the same metadata"""
518
+ val1 = node1.meta.get("val")
519
+ val2 = node2.meta.get("val")
520
+ return (
521
+ val1 is not None
522
+ and val2 is not None
523
+ and statically_known_true(sym_eq(val1.size(), val2.size()))
524
+ and val1.layout == val2.layout
525
+ and val1.dtype == val2.dtype
526
+ and val1.device == val2.device
527
+ and (
528
+ val1.layout != torch.strided
529
+ or statically_known_true(sym_eq(val1.stride(), val2.stride()))
530
+ )
531
+ )
532
+
533
+
534
+ noop_registry: Dict[Any, Any] = {}
535
+
536
+
537
+ def register_noop_decomp(targets, nop_arg=0):
538
+ def register_fun(cond):
539
+ register_decomposition(targets, registry=noop_registry, unsafe=True)(
540
+ (cond, nop_arg)
541
+ )
542
+ return cond
543
+
544
+ return register_fun
545
+
546
+
547
+ @register_noop_decomp(aten.slice)
548
+ def slice_noop(self, dim=0, start=None, end=None, step=1):
549
+ if start is None or end is None:
550
+ return False
551
+ if start == 0 and end >= 2**63 - 1 and step == 1:
552
+ return True
553
+ return False
554
+
555
+
556
+ @register_noop_decomp(aten.slice_scatter, 1)
557
+ def slice_scatter_noop(self, src, dim=0, start=None, end=None, step=1):
558
+ if start is None:
559
+ start = 0
560
+ if end is None:
561
+ end = 2**63 - 1
562
+ if start == 0 and end >= 2**63 - 1 and step == 1:
563
+ return True
564
+ return False
565
+
566
+
567
+ @register_noop_decomp(aten.repeat)
568
+ def repeat_noop(self, repeats):
569
+ return all(r == 1 for r in repeats)
570
+
571
+
572
+ @register_noop_decomp(aten.constant_pad_nd)
573
+ def constant_pad_nd(x, padding, fill_value=0):
574
+ return all(p == 0 for p in padding)
575
+
576
+
577
+ @register_noop_decomp(torch.ops.prims.convert_element_type)
578
+ def convert_element_type_noop(x, dtype: torch.dtype):
579
+ return x.dtype == dtype
580
+
581
+
582
+ @register_noop_decomp(torch.ops.prims.device_put)
583
+ def device_put_noop(x, device):
584
+ return x.device == decode_device(device)
585
+
586
+
587
+ @register_noop_decomp([aten.ceil, aten.floor, aten.round, aten.trunc])
588
+ def int_noop(x):
589
+ return is_integer_dtype(x.dtype)
590
+
591
+
592
+ @register_noop_decomp([aten.pow])
593
+ def pow_noop(a, b):
594
+ return isinstance(b, int) and b == 1
595
+
596
+
597
+ @register_noop_decomp([aten.cat], lambda args: args[0][0])
598
+ def cat_noop(inputs, dim=0):
599
+ return len(inputs) == 1
600
+
601
+
602
+ @register_noop_decomp(aten.view)
603
+ def view_noop(arg, size):
604
+ return arg.shape == size
605
+
606
+
607
+ # Note, we also always have a check for identical metadata, which is why these
608
+ # are safe
609
+ @register_noop_decomp([aten.copy], nop_arg=1)
610
+ @register_noop_decomp([aten.alias, aten.clone])
611
+ def true_noop(*args, **kwargs):
612
+ return True
613
+
614
+
615
+ def remove_noop_ops(graph: torch.fx.Graph):
616
+ """
617
+ Removes both operations that are essentially aten.clone and operations that are essentially aten.alias from the graph.
618
+ """
619
+ inputs = set()
620
+ input_storages = set()
621
+ output_storages = set()
622
+
623
+ for node in graph.nodes:
624
+ if node.op == "placeholder":
625
+ inputs.add(node)
626
+ input_storages.add(get_node_storage(node))
627
+ else:
628
+ break
629
+
630
+ output_node = next(iter(reversed(graph.nodes)))
631
+ assert output_node.op == "output"
632
+ for out in output_node.args[0]:
633
+ if isinstance(out, torch.fx.Node):
634
+ output_storages.add(get_node_storage(out))
635
+
636
+ for node in graph.nodes:
637
+ if node.target in noop_registry:
638
+ cond, src_index = noop_registry[node.target]
639
+ if isinstance(src_index, int):
640
+ src = node.args[src_index]
641
+ else:
642
+ src = src_index(node.args)
643
+ if not isinstance(src, torch.fx.Node):
644
+ continue
645
+ # Don't introduce new aliasing between inputs and outputs.
646
+ # See fx_passes/README.md for a discussion of why this is
647
+ # necessary.
648
+ node_storage = get_node_storage(node)
649
+ src_storage = get_node_storage(src)
650
+ node_is_view = node_storage == src_storage
651
+ if (
652
+ not node_is_view
653
+ and node_storage in output_storages
654
+ and (src_storage in input_storages or src_storage in output_storages)
655
+ ):
656
+ continue
657
+
658
+ # Even if input and outputs are expected to alias,
659
+ # don't make "node is src" True
660
+ if (
661
+ node_is_view
662
+ and node in output_node.args
663
+ and (src in inputs or src in output_node.args)
664
+ ):
665
+ continue
666
+
667
+ is_valid, args, kwargs = get_fake_args_kwargs(node)
668
+ if not is_valid:
669
+ continue
670
+ if same_meta(node, src) and cond(*args, **kwargs):
671
+ node.replace_all_uses_with(src)
672
+ graph.erase_node(node)
673
+
674
+
675
+ def decompose_auto_functionalized(graph):
676
+ graph_pass = PatternMatcherPass()
677
+
678
+ @register_graph_pattern(
679
+ CallFunctionVarArgs(torch.ops.higher_order.auto_functionalized),
680
+ pass_dict=graph_pass,
681
+ )
682
+ def replacement(match: Match, *args, **kwargs):
683
+ from torch._higher_order_ops.auto_functionalize import auto_functionalized_dense
684
+
685
+ only_clone_these_tensors = tuple(
686
+ match.nodes[0].meta.get("only_clone_these_tensors", [])
687
+ )
688
+
689
+ flat_args, spec = pytree.tree_flatten((args, kwargs))
690
+
691
+ # NB: we combine (args, kwargs) into flat args for replacing.
692
+ # This is replace_by_example uses make_fx which does not support
693
+ # tracing a function with kwargs.
694
+ def decomp(*flat_args):
695
+ args, kwargs = pytree.tree_unflatten(flat_args, spec)
696
+ return auto_functionalized_dense(*args, only_clone_these_tensors, **kwargs)
697
+
698
+ with V.fake_mode:
699
+ match.replace_by_example(decomp, flat_args, run_dce=False)
700
+
701
+ graph_pass.apply(graph)
702
+ for node in graph.nodes:
703
+ if node.target is torch.ops.higher_order.auto_functionalized:
704
+ raise AssertionError("auto_functionalized was not removed")
705
+
706
+
707
+ @register_lowering_pattern(
708
+ CallFunction(
709
+ aten.cat,
710
+ ListOf(
711
+ CallFunction(
712
+ operator.getitem,
713
+ CallFunction(
714
+ aten.split_with_sizes,
715
+ KeywordArg("input_"),
716
+ Ignored(),
717
+ Ignored(),
718
+ _users=MULTIPLE,
719
+ ),
720
+ Ignored(),
721
+ ),
722
+ ),
723
+ Ignored(),
724
+ ),
725
+ pass_number=2,
726
+ extra_check=is_valid_splitwithsizes_cat,
727
+ )
728
+ def splitwithsizes_cat_replace(match, input_):
729
+ return input_
730
+
731
+
732
+ def is_valid_cat_splitwithsizes(match):
733
+ cat_nodes = filter_nodes(match.nodes, aten.cat)
734
+ split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
735
+ if len(split_nodes) != 1 or len(cat_nodes) != 1:
736
+ return False
737
+ split_node, cat_node = split_nodes[0], cat_nodes[0]
738
+
739
+ # the cat node has other users: can't eliminate
740
+ if len(cat_node.users) > 1:
741
+ return False
742
+
743
+ # the dim of the cat and split should match
744
+ dim = get_arg_value(split_node, 2, "dim")
745
+ if dim != get_arg_value(cat_node, 1, "dim"):
746
+ return False
747
+
748
+ cat_inputs = list(get_arg_value(cat_node, 0))
749
+ split_sizes = get_arg_value(split_node, 1, "split_sizes")
750
+ # the number of input tensors in cat and the
751
+ # length of the split sizes should match
752
+ if len(cat_inputs) != len(split_sizes):
753
+ return False
754
+
755
+ for cat_input, split_size in zip(cat_inputs, split_sizes):
756
+ # each cat input tensor's size along dim
757
+ # should match the corresponding split size
758
+ if "val" not in cat_input.meta:
759
+ return False
760
+ cat_input_size = cat_input.meta["val"].size(dim)
761
+ if cat_input_size != split_size:
762
+ return False
763
+
764
+ return True
765
+
766
+
767
+ @register_lowering_pattern(
768
+ CallFunction(
769
+ aten.split_with_sizes,
770
+ CallFunction(
771
+ aten.cat,
772
+ KeywordArg("input_"),
773
+ Ignored(),
774
+ _users=MULTIPLE,
775
+ ),
776
+ Ignored(),
777
+ Ignored(),
778
+ ),
779
+ pass_number=2,
780
+ extra_check=is_valid_cat_splitwithsizes,
781
+ )
782
+ def cat_splitwithsizes_replace(match, input_):
783
+ return input_
784
+
785
+
786
+ def view_to_reshape(gm):
787
+ """
788
+ Replace view ops in the GraphModule to reshape ops.
789
+ """
790
+ for nd in gm.graph.nodes:
791
+ if nd.target == torch.ops.aten.view.default:
792
+ nd.target = torch.ops.aten.reshape.default
793
+
794
+
795
+ def should_prefer_unfused_addmm(match):
796
+ inp = match.kwargs["inp"]
797
+ if not inp.meta["val"].is_cuda:
798
+ return False
799
+
800
+ output = match.output_node()
801
+ return all(is_pointwise_use(use) for use in output.users)
802
+
803
+
804
+ @register_graph_pattern(
805
+ CallFunction(aten.addmm, KeywordArg("inp"), Arg(), Arg()),
806
+ pass_dict=pass_patterns[2],
807
+ extra_check=should_prefer_unfused_addmm,
808
+ )
809
+ def unfuse_bias_add_to_pointwise(match: Match, mat1, mat2, *, inp):
810
+ def repl(inp, x1, x2):
811
+ return x1 @ x2 + inp
812
+
813
+ with V.fake_mode:
814
+ match.replace_by_example(repl, [inp, mat1, mat2])
815
+
816
+
817
+ def is_valid_addmm_fusion(match):
818
+ mat1, mat2 = match.args
819
+ inp = match.kwargs["inp"]
820
+
821
+ if not (
822
+ isinstance(inp, torch.fx.Node) and isinstance(inp.meta["val"], torch.Tensor)
823
+ ):
824
+ return False # Input is a number
825
+
826
+ in_shape = inp.meta["val"].shape
827
+ mm_shape = mat1.meta["val"].shape[0], mat2.meta["val"].shape[1]
828
+ matched = is_expandable_to(in_shape, mm_shape)
829
+ if not matched:
830
+ return False # Shape mismatch
831
+
832
+ return not should_prefer_unfused_addmm(match)
833
+
834
+
835
+ @register_graph_pattern(
836
+ CallFunction(
837
+ aten.add,
838
+ CallFunction(aten.mm, Arg(), Arg()),
839
+ KeywordArg("inp"),
840
+ ),
841
+ pass_dict=pass_patterns[2],
842
+ extra_check=is_valid_addmm_fusion,
843
+ )
844
+ @register_graph_pattern(
845
+ CallFunction(
846
+ aten.add,
847
+ KeywordArg("inp"),
848
+ CallFunction(aten.mm, Arg(), Arg()),
849
+ ),
850
+ pass_dict=pass_patterns[2],
851
+ extra_check=is_valid_addmm_fusion,
852
+ )
853
+ def addmm(match, mat1, mat2, *, inp):
854
+ def repl(inp, mat1, mat2):
855
+ return aten.addmm(inp, mat1, mat2)
856
+
857
+ with V.fake_mode:
858
+ match.replace_by_example(repl, [inp, mat1, mat2])
859
+
860
+
861
+ def check_shape_cuda_and_fused_int_mm_mul_enabled(match):
862
+ return (
863
+ config.force_fuse_int_mm_with_mul
864
+ and len(getattr(match.args[2].meta.get("val"), "shape", [])) == 2
865
+ and getattr(match.args[2].meta.get("val"), "is_cuda", False)
866
+ )
867
+
868
+
869
+ @register_lowering_pattern(
870
+ CallFunction(
871
+ prims.convert_element_type.default,
872
+ CallFunction(
873
+ aten.mul,
874
+ CallFunction(
875
+ aten._int_mm,
876
+ Arg(),
877
+ Arg(),
878
+ ),
879
+ Arg(),
880
+ ),
881
+ Arg(),
882
+ ),
883
+ check_shape_cuda_and_fused_int_mm_mul_enabled,
884
+ )
885
+ @register_lowering_pattern(
886
+ CallFunction(
887
+ aten.mul,
888
+ CallFunction(
889
+ aten._int_mm,
890
+ Arg(),
891
+ Arg(),
892
+ ),
893
+ Arg(),
894
+ ),
895
+ check_shape_cuda_and_fused_int_mm_mul_enabled,
896
+ )
897
+ def fused_int_mm_mul(match: Match, mat1, mat2, mat3, out_dtype=None):
898
+ return inductor.kernel.mm.tuned_fused_int_mm_mul(mat1, mat2, mat3, out_dtype)
899
+
900
+
901
+ class ConstructorMoverPass:
902
+ def __init__(self, target: str, allow_outputs: bool = False) -> None:
903
+ """
904
+ Move constructors from cpu to the target_device.
905
+
906
+ Sweeps through the module, looking for constructor nodes that can be moved
907
+ to the target_device.
908
+
909
+ A constructor node can be moved to the target_device iff all of its users
910
+ can also be moved (tested by cannot_be_moved). Otherwise, all dependent
911
+ constructor nodes won't be moved.
912
+
913
+ - target: target device type
914
+ - allow_outputs: allow outputs to be moved
915
+ """
916
+
917
+ self.target = target
918
+ self.allow_outputs = allow_outputs
919
+
920
+ assert isinstance(target, str), (
921
+ "target should be a string representing the device type. "
922
+ f"Got: {type(target).__name__}"
923
+ )
924
+
925
+ def allow_cpu_device(self, node: fx.Node) -> bool:
926
+ """
927
+ Returns whether a node that returns a tensor on the target device may have
928
+ cpu tensors as input.
929
+ """
930
+ return node.target in (
931
+ torch.ops.aten.index.Tensor,
932
+ torch.ops.aten.index_put.default,
933
+ torch.ops.aten.index_put_.default,
934
+ torch.ops.aten.copy.default,
935
+ torch.ops.aten.copy_.default,
936
+ torch.ops.aten.slice_scatter.default,
937
+ )
938
+
939
+ def cannot_be_moved(self, node: fx.Node) -> bool:
940
+ """
941
+ Returns whether a node can be moved to the target device.
942
+
943
+ If this function returns False, it means that this node and all of its users
944
+ won't be moved into the target device.
945
+ """
946
+ if node.target == "output":
947
+ return not self.allow_outputs
948
+
949
+ if not (
950
+ isinstance(node.target, torch._ops.OpOverload)
951
+ and node.target.namespace in ("prims", "aten")
952
+ ):
953
+ return True
954
+
955
+ return False
956
+
957
+ def get_node_device(self, node: fx.Node) -> Optional[torch.device]:
958
+ """
959
+ Get the device of a node.
960
+ """
961
+ ten = node.meta.get("val")
962
+ return None if not isinstance(ten, torch.Tensor) else ten.device
963
+
964
+ def get_cpu_indeg_count(self, graph: fx.Graph) -> Dict[fx.Node, int]:
965
+ """
966
+ Get the number of cpu inputs to a node
967
+ """
968
+ cpu_indeg: Dict[fx.Node, int] = Counter()
969
+
970
+ for node in graph.nodes:
971
+ cpu_count = 0
972
+
973
+ def add_cpu_inp(node):
974
+ nonlocal cpu_count
975
+ device = self.get_node_device(node)
976
+ cpu_count += device is not None and device.type == "cpu"
977
+
978
+ pytree.tree_map_only(fx.Node, add_cpu_inp, (node.args, node.kwargs))
979
+
980
+ if cpu_count:
981
+ cpu_indeg[node] = cpu_count
982
+
983
+ return cpu_indeg
984
+
985
+ def __call__(self, graph: fx.Graph) -> None:
986
+ target_devices = set()
987
+ constructors = []
988
+
989
+ for node in graph.nodes:
990
+ device = self.get_node_device(node)
991
+ if device and device.type == self.target:
992
+ target_devices.add(device)
993
+
994
+ if not (
995
+ isinstance(node.target, torch._ops.OpOverload)
996
+ and node.target.namespace in ("prims", "aten")
997
+ ):
998
+ continue
999
+
1000
+ if not torch._subclasses.fake_tensor._is_tensor_constructor(node.target):
1001
+ continue
1002
+
1003
+ if not node.kwargs.get("device") == torch.device("cpu"):
1004
+ continue
1005
+
1006
+ constructors.append(node)
1007
+
1008
+ # not handling multiple target devices initially
1009
+ if not constructors or len(target_devices) != 1:
1010
+ return
1011
+
1012
+ movable_constructors = self.find_movable_constructors(graph, constructors)
1013
+
1014
+ for node in movable_constructors:
1015
+ kwargs = node.kwargs.copy()
1016
+ kwargs["device"] = next(iter(target_devices))
1017
+ node.kwargs = kwargs
1018
+
1019
+ def find_movable_constructors(
1020
+ self, graph: fx.Graph, constructors: List[fx.Node]
1021
+ ) -> Set[fx.Node]:
1022
+ """
1023
+ Starting from the cpu constructors, iterate through the graph and test that all of their
1024
+ downstream uses can safely be moved to cpu.
1025
+ """
1026
+ cpu_indeg: Dict[fx.Node, int] = self.get_cpu_indeg_count(graph)
1027
+
1028
+ # which constructors cannot be moved to cuda
1029
+ cannot_move_to_cuda: Set[fx.Node] = set()
1030
+
1031
+ # For any node in the graph, which constructors does it have a dependency on
1032
+ constructor_dependencies: Dict[fx.Node, Set[fx.Node]] = defaultdict(set)
1033
+
1034
+ # if a cpu node has a dependency on two different cpu constructors,
1035
+ # then if either constructor cannot be moved to cuda, the other cannot as well.
1036
+ # In this case any node with a dependency on one will have a dependency on the other
1037
+ equal_constructor_sets: Dict[fx.Node, Set[fx.Node]] = {
1038
+ c: {c} for c in constructors
1039
+ }
1040
+
1041
+ def make_dependencies_equivalent(
1042
+ set1: Set[fx.Node], set2: Set[fx.Node]
1043
+ ) -> Set[fx.Node]:
1044
+ # could use union find but not worth complexity here
1045
+ set1.update(set2)
1046
+ for obj in set1:
1047
+ equal_constructor_sets[obj] = set1
1048
+ return set1
1049
+
1050
+ queue: List[fx.Node] = list(constructors)
1051
+
1052
+ for c in queue:
1053
+ constructor_dependencies[c].add(c)
1054
+
1055
+ while queue:
1056
+ node = queue.pop()
1057
+ dependencies = constructor_dependencies[node]
1058
+
1059
+ for user in node.users:
1060
+ if self.cannot_be_moved(user):
1061
+ cannot_move_to_cuda.update(dependencies)
1062
+ break
1063
+
1064
+ # this node was used in an op which takes in multiple devices and outputs a cuda
1065
+ # tensor. we can convert its cpu input to cuda without making further changes
1066
+ node_device = self.get_node_device(user)
1067
+ if (
1068
+ self.allow_cpu_device(user)
1069
+ and node_device
1070
+ and node_device.type == self.target
1071
+ ):
1072
+ del cpu_indeg[user]
1073
+ else:
1074
+ # otherwise, we should continue looking at its downstream uses
1075
+ cpu_indeg[user] -= 1
1076
+ if cpu_indeg[user] == 0:
1077
+ del cpu_indeg[user]
1078
+ queue.append(user)
1079
+
1080
+ unioned_set = make_dependencies_equivalent(
1081
+ dependencies, constructor_dependencies[user]
1082
+ )
1083
+ constructor_dependencies[user] = unioned_set
1084
+
1085
+ for node in cpu_indeg:
1086
+ if constructor_dependencies[node]:
1087
+ cannot_move_to_cuda.update(constructor_dependencies[node])
1088
+
1089
+ all_cannot_move_to_cuda = cannot_move_to_cuda.copy()
1090
+ for constructor in cannot_move_to_cuda:
1091
+ all_cannot_move_to_cuda.update(equal_constructor_sets[constructor])
1092
+
1093
+ return set(constructors) - all_cannot_move_to_cuda
1094
+
1095
+
1096
+ def move_constructors_to_cuda(graph: fx.Graph) -> None:
1097
+ """
1098
+ Moves intermediary tensors which are constructed on the cpu to cuda when safe
1099
+ """
1100
+ ConstructorMoverPass("cuda")(graph)
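+
+ # A minimal usage sketch (illustrative, not from the upstream file): the pass expects
+ # an fx.Graph whose nodes carry FakeTensor "val" metadata. Given a hypothetical graph
+ # containing
+ #     full = aten.full.default([8], 1, device=torch.device("cpu"))
+ #     out  = aten.add.Tensor(cuda_tensor, full)
+ # calling move_constructors_to_cuda(graph) rewrites the constructor's device kwarg in
+ # place to the cuda device found in the graph, so the intermediate tensor is never
+ # materialized on the host.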
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py ADDED
@@ -0,0 +1,611 @@
1
+ import copy
2
+ import logging
3
+ from typing import List, Optional
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch._dynamo.utils import counters, detect_fake_mode, optimus_scuba_log
8
+ from torch._utils_internal import upload_graph
9
+ from torch.fx.experimental.optimization import (
10
+ matches_module_pattern,
11
+ replace_node_module,
12
+ )
13
+ from torch.fx.passes.shape_prop import ShapeProp
14
+ from torch.nn import functional as F
15
+ from torch.nn.utils.fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
16
+
17
+ from .. import config
18
+
19
+ from ..fx_utils import matches_module_function_pattern
20
+ from ..pattern_matcher import (
21
+ init_once_fakemode,
22
+ PatternMatcherPass,
23
+ stable_topological_sort,
24
+ )
25
+ from ..utils import is_cpu_device, pass_execution_and_save
26
+ from .group_batch_fusion import group_batch_fusion_passes
27
+ from .misc_patterns import numpy_compat_normalization
28
+
29
+ log = logging.getLogger(__name__)
30
+
31
+ normalization_pass = PatternMatcherPass(
32
+ prevent_match_across_mutations=True, pass_name="normalization_pass"
33
+ )
34
+ merge_splits_pass = PatternMatcherPass(
35
+ prevent_match_across_mutations=True, pass_name="merge_splits_pass"
36
+ )
37
+ split_cat_pass = PatternMatcherPass(
38
+ prevent_match_across_mutations=True, pass_name="split_cat_pass"
39
+ )
40
+ unbind_stack_pass = PatternMatcherPass(
41
+ prevent_match_across_mutations=True, pass_name="unbind_stack_pass"
42
+ )
43
+ efficient_conv_bn_eval_pass = PatternMatcherPass(
44
+ prevent_match_across_mutations=True, pass_name="efficient_conv_bn_eval_pass"
45
+ )
46
+ merge_getitem_cat_pass = PatternMatcherPass(
47
+ prevent_match_across_mutations=True, pass_name="merge_getitem_cat_pass"
48
+ )
49
+
50
+ fuse_split_linear_add_pass = PatternMatcherPass(
51
+ prevent_match_across_mutations=True,
52
+ pass_name="fuse_split_linear_add_pass",
53
+ )
54
+ fuse_chunk_squeeze_cat_pass = PatternMatcherPass(
55
+ prevent_match_across_mutations=True,
56
+ pass_name="fuse_chunk_squeeze_cat_pass",
57
+ )
58
+ remove_reshape_pass = PatternMatcherPass(
59
+ prevent_match_across_mutations=True,
60
+ pass_name="remove_reshape_pass",
61
+ )
62
+
63
+ # based on predispatch aten IR
64
+ normalization_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
65
+ merge_splits_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
66
+ split_cat_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
67
+ unbind_stack_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
68
+ merge_getitem_cat_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
69
+
70
+
71
+ def fuse_parallel_linear_pass(graph):
72
+ return None
73
+
74
+
75
+ def remove_split_ops(graph, shape_prop):
76
+ return None
77
+
78
+
79
+ pattern_matcher_passes: List[PatternMatcherPass] = [
80
+ normalization_pass,
81
+ merge_getitem_cat_pass,
82
+ merge_splits_pass,
83
+ split_cat_pass,
84
+ unbind_stack_pass,
85
+ efficient_conv_bn_eval_pass,
86
+ ]
87
+ pattern_matcher_passes_aten: List[PatternMatcherPass] = [
88
+ merge_getitem_cat_pass_aten,
89
+ merge_splits_pass_aten,
90
+ split_cat_pass_aten,
91
+ unbind_stack_pass_aten,
92
+ ]
93
+
94
+
95
+ @init_once_fakemode
96
+ def lazy_init():
97
+ from . import efficient_conv_bn_eval, split_cat # noqa: F401
98
+
99
+ if config.is_fbcode():
100
+ from . import fb # type: ignore[attr-defined] # noqa: F401
101
+
102
+
103
+ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
104
+ """
105
+ Apply passes on the input FX graph using Torch IR.
106
+
107
+ WARNING:
108
+ The IR before grad is not functional or normalized, so it is harder
109
+ to write passes on this IR. Passes must be safe with respect to
110
+ aliasing and mutation and need to handle all possible arg schemas.
111
+
112
+ Consider adding a new pass to post_grad.py or joint_graph.py which
113
+ are after functionalization and normalization.
114
+ """
115
+ if config.pattern_matcher:
116
+ lazy_init()
117
+ if hasattr(
118
+ config, "fx_passes_numeric_check"
119
+ ) and config.fx_passes_numeric_check.get("pre_grad", False):
120
+ gm_before_fx_passes = gm.__copy__()
121
+ # explicitly run with predispatch aten IR based passes
122
+ if config.is_predispatch:
123
+
124
+ def shape_prop(mod) -> None:
125
+ ShapeProp(
126
+ gm=mod,
127
+ fake_mode=detect_fake_mode(example_inputs),
128
+ ).propagate(*example_inputs)
129
+
130
+ # normalization pass
131
+ pass_execution_and_save(
132
+ normalization_pass_aten.apply,
133
+ gm,
134
+ "[Pre grad(predispatch IR)]Apply normalization pass",
135
+ )
136
+ pass_execution_and_save(
137
+ group_batch_fusion_passes,
138
+ gm,
139
+ "[Pre grad(predispatch IR)] Apply group_batch_fusion",
140
+ )
141
+ pass_execution_and_save(
142
+ fuse_chunk_squeeze_cat_pass.apply,
143
+ gm,
144
+ "[Pre grad(predispatch IR)] Apply fuse_chunk_squeeze_cat_pass",
145
+ )
146
+ pass_execution_and_save(
147
+ fuse_split_linear_add_pass.apply,
148
+ gm,
149
+ "[Pre grad(predispatch IR)] Apply fuse_split_linear_add_pass",
150
+ )
151
+
152
+ log.debug(
153
+ "[Pre grad(predispatch IR)]Before split cat in pre grad pass. graph: %s",
154
+ gm.graph,
155
+ )
156
+ for ind, pattern_matcher_pass_aten in enumerate(
157
+ pattern_matcher_passes_aten
158
+ ):
159
+ pass_execution_and_save(
160
+ pattern_matcher_pass_aten.apply,
161
+ gm,
162
+ f"[Pre grad(predispatch IR)]Apply split_cat, index: {ind}",
163
+ )
164
+ pass_execution_and_save(
165
+ remove_reshape_pass.apply,
166
+ gm,
167
+ "[Pre grad(predispatch IR)] Apply remove_reshape_pass",
168
+ )
169
+ pass_execution_and_save(
170
+ fuse_parallel_linear_pass,
171
+ gm,
172
+ "[Pre grad(predispatch IR)] Apply fuse_parallel_linear_pass",
173
+ )
174
+ pass_execution_and_save(
175
+ lambda graph: remove_split_ops(graph.owning_module, shape_prop),
176
+ gm,
177
+ "[Pre grad(predispatch IR)] Apply remove_split_ops",
178
+ )
179
+ shape_prop(gm)
180
+
181
+ else:
182
+ # We only log the graph with changes to avoid excessive compilation time
183
+ # https://fb.workplace.com/groups/257735836456307/permalink/633533465543207/
184
+ if example_inputs is not None:
185
+ gm = fuse_fx(gm, example_inputs)
186
+ numpy_compat_normalization(gm.graph)
187
+ inductor_before_change = copy.deepcopy(counters["inductor"])
188
+ group_batch_fusion_passes(gm.graph, pre_grad=True)
189
+ if counters["inductor"] != inductor_before_change:
190
+ optimus_scuba_log["group_batch_fusion_pre_grad"] = upload_graph(
191
+ gm.graph
192
+ )
193
+ for pattern_matcher_pass in pattern_matcher_passes:
194
+ inductor_before_change = copy.deepcopy(counters["inductor"])
195
+ pattern_matcher_pass.apply(gm.graph) # type: ignore[arg-type]
196
+ if counters["inductor"] != inductor_before_change:
197
+ optimus_scuba_log[
198
+ f"split_cat_pattern_{pattern_matcher_pass.pass_name}_pre_grad"
199
+ ] = upload_graph(gm.graph)
200
+
201
+ if config.pre_grad_custom_pass is not None:
202
+ config.pre_grad_custom_pass(gm.graph)
203
+ stable_topological_sort(gm.graph)
204
+ gm.graph.lint()
205
+ gm.recompile()
206
+
207
+ if (
208
+ config.pattern_matcher
209
+ and hasattr(config, "fx_passes_numeric_check")
210
+ and config.fx_passes_numeric_check.get("pre_grad", False)
211
+ and example_inputs is not None
212
+ ):
213
+ from .numeric_utils import numeric_check_if_enabled
214
+
215
+ gm_after_fx_passes = gm.__copy__()
216
+ numeric_check_if_enabled(
217
+ gm_before_fx_passes, # type: ignore[possibly-undefined]
218
+ gm_after_fx_passes,
219
+ example_inputs,
220
+ config.fx_passes_numeric_check.get("num_iterations", 1),
221
+ config.fx_passes_numeric_check.get("precision", 1e-4),
222
+ )
223
+
224
+ return gm
225
+
226
+
227
+ def fuse_fx(gm: torch.fx.GraphModule, example_inputs) -> torch.fx.GraphModule:
228
+ is_cpu = is_cpu_device(example_inputs)
229
+
230
+ fake_mode = detect_fake_mode(example_inputs)
231
+
232
+ gm = sink_cat_after_pointwise(gm)
233
+ if config.permute_fusion and not is_cpu:
234
+ # For linear permute fusion, we need to check input info to identify
235
+ # and perform proper permutation/transpose
236
+ ShapeProp(gm, fake_mode=fake_mode).propagate(*example_inputs)
237
+ gm = linear_permute_fusion(gm)
238
+ gm = permute_linear_fusion(gm)
239
+ gm = permute_matmul_fusion(gm)
240
+
241
+ # make sure autograd is disabled.
242
+ if torch.is_grad_enabled() or not is_cpu:
243
+ return gm
244
+ if config.freezing:
245
+ gm = remove_identity(gm)
246
+ gm = fuse_conv_bn(gm)
247
+ return gm
248
+
249
+
250
+ def fetch_attr(target: str, mod):
251
+ target_atoms = target.split(".")
252
+ attr_itr = mod
253
+ for i, atom in enumerate(target_atoms):
254
+ if not hasattr(attr_itr, atom):
255
+ raise RuntimeError(
256
+ f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}"
257
+ )
258
+ attr_itr = getattr(attr_itr, atom)
259
+ return attr_itr
260
+
261
+
262
+ def remove_identity(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
263
+ """
264
+ Removes all identity layers from the module.
265
+ """
266
+
267
+ class IdentityRemover(torch.fx.Transformer):
268
+ def call_module(self, target, args, kwargs):
269
+ if isinstance(self.submodules[target], nn.Identity):
270
+ assert len(args) == 1
271
+ return args[0]
272
+ else:
273
+ return super().call_module(target, args, kwargs)
274
+
275
+ return IdentityRemover(gm).transform()
276
+
277
+
278
+ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModule:
279
+ """
280
+ Fuses Convolution/BN layers for inference purposes.
281
+ """
282
+ modules_patterns = [
283
+ (torch.nn.Conv1d, torch.nn.BatchNorm1d),
284
+ (torch.nn.Conv2d, torch.nn.BatchNorm2d),
285
+ (torch.nn.Conv3d, torch.nn.BatchNorm3d),
286
+ ]
287
+ module_function_patterns = [
288
+ (torch.nn.Conv1d, F.batch_norm),
289
+ (torch.nn.Conv2d, F.batch_norm),
290
+ (torch.nn.Conv3d, F.batch_norm),
291
+ ]
292
+ modules = dict(gm.named_modules())
293
+ for pattern in modules_patterns:
294
+ for node in gm.graph.nodes:
295
+ if matches_module_pattern(pattern, node, modules):
296
+ if len(node.args[0].users) > 1: # Output of conv is used by other nodes
297
+ continue
298
+ conv = modules[node.args[0].target]
299
+ bn = modules[node.target]
300
+ eval_mode = all(not n.training for n in [conv, bn])
301
+ if not eval_mode:
302
+ continue
303
+ if not bn.track_running_stats:
304
+ continue
305
+ fused_conv = fuse_conv_bn_eval(conv, bn)
306
+ replace_node_module(node.args[0], modules, fused_conv)
307
+ node.replace_all_uses_with(node.args[0])
308
+ gm.graph.erase_node(node)
309
+ gm.graph.lint()
310
+ for pattern in module_function_patterns:
311
+ for node in gm.graph.nodes:
312
+ if matches_module_function_pattern(pattern, node, modules):
313
+ # TODO: support kwargs.
314
+ if len(node.args) != 8:
315
+ continue
316
+ conv = modules[node.args[0].target]
317
+ bn_training = node.args[5]
318
+ bn_eps = node.args[7]
319
+ if conv.training or bn_training:
320
+ continue
321
+ if type(bn_eps) is not float:
322
+ continue
323
+ bn_args_is_constant = all(
324
+ n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5]
325
+ )
326
+ if not bn_args_is_constant:
327
+ continue
328
+ bn_running_mean = fetch_attr(node.args[1].target, gm)
329
+ bn_running_var = fetch_attr(node.args[2].target, gm)
330
+ bn_weight = fetch_attr(node.args[3].target, gm)
331
+ bn_bias = fetch_attr(node.args[4].target, gm)
332
+ if bn_running_mean is None or bn_running_var is None:
333
+ continue
334
+ fused_conv = copy.deepcopy(conv)
335
+ fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(
336
+ fused_conv.weight,
337
+ fused_conv.bias,
338
+ bn_running_mean,
339
+ bn_running_var,
340
+ bn_eps,
341
+ bn_weight,
342
+ bn_bias,
343
+ )
344
+ replace_node_module(node.args[0], modules, fused_conv)
345
+ node.replace_all_uses_with(node.args[0])
346
+ gm.graph.erase_node(node)
347
+ gm.graph.lint()
348
+ gm.recompile()
349
+
350
+ return gm
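+
+ # For reference, the fold performed above uses the standard eval-mode conv-bn identity
+ # (a sketch of the math, not new functionality): with gamma/beta the BN affine params,
+ #     w_fused = w * (gamma / sqrt(running_var + eps))   # broadcast over output channels
+ #     b_fused = (b - running_mean) * gamma / sqrt(running_var + eps) + beta
+ # which is what fuse_conv_bn_eval / fuse_conv_bn_weights compute, allowing the BN
+ # module or functional batch_norm call to be erased from the graph.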
351
+
352
+
353
+ class NormalizedLinearNode:
354
+ def __init__(self, node: torch.fx.Node) -> None:
355
+ assert node.op == "call_function"
356
+ assert node.target in [torch.nn.functional.linear]
357
+ self.node: torch.fx.Node = node
358
+
359
+ def get_input(self) -> torch.fx.Node:
360
+ if len(self.node.args) > 0:
361
+ return self.node.args[0] # type: ignore[return-value]
362
+ else:
363
+ return self.node.kwargs["input"] # type: ignore[return-value]
364
+
365
+ def get_weight(self) -> torch.fx.Node:
366
+ if len(self.node.args) > 1:
367
+ return self.node.args[1] # type: ignore[return-value]
368
+ else:
369
+ return self.node.kwargs["weight"] # type: ignore[return-value]
370
+
371
+ def get_bias(self) -> torch.fx.Node:
372
+ if len(self.node.args) > 2:
373
+ return self.node.args[2] # type: ignore[return-value]
374
+ else:
375
+ return self.node.kwargs["bias"] if "bias" in self.node.kwargs else None # type: ignore[return-value]
376
+
377
+
378
+ class NormalizedMatmulNode:
379
+ def __init__(self, node: torch.fx.Node) -> None:
380
+ assert node.op == "call_function"
381
+ assert node.target in [torch.bmm, torch.matmul]
382
+ self.node: torch.fx.Node = node
383
+
384
+ def get_input(self) -> torch.fx.Node:
385
+ if len(self.node.args) > 0:
386
+ return self.node.args[0] # type: ignore[return-value]
387
+ else:
388
+ return self.node.kwargs["input"] # type: ignore[return-value]
389
+
390
+ def get_other(self) -> torch.fx.Node:
391
+ if len(self.node.args) > 1:
392
+ return self.node.args[1] # type: ignore[return-value]
393
+ else:
394
+ return self.node.kwargs["other"] # type: ignore[return-value]
395
+
396
+
397
+ def check_permute(node: torch.fx.Node) -> bool:
398
+ ranks = len(node.meta["tensor_meta"].shape)
399
+ if len(node.args) > 3:
400
+ permutation = [node.args[i] % ranks for i in range(1, ranks + 1)] # type: ignore[operator]
401
+ elif (
402
+ "permutation" in node.kwargs
403
+ and node.kwargs["permutation"] is not None
404
+ and len(node.kwargs["permutation"]) > 2 # type: ignore[arg-type]
405
+ ):
406
+ permutation = [i % ranks for i in node.kwargs["permutation"]] # type: ignore[union-attr]
407
+ else:
408
+ return False
409
+ allowed_permutation = list(range(ranks))
410
+ allowed_permutation[-1] = ranks - 2
411
+ allowed_permutation[-2] = ranks - 1
412
+ return permutation == allowed_permutation
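+
+ # Note: check_permute only accepts permutations that swap the last two dimensions
+ # (e.g. x.permute(0, 2, 1) on a rank-3 tensor); anything else returns False, so only
+ # transpose-like permutes feed the fusions below.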
413
+
414
+
415
+ def sink_cat_after_pointwise(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
416
+ def one_user(node):
417
+ users = list(node.users)
418
+ return users[0] if len(users) == 1 else None
419
+
420
+ def is_view(node):
421
+ view = {"view"}
422
+ return node.op == "call_method" and node.target in view
423
+
424
+ def is_pointwise_unary(node):
425
+ pointwise = {torch.relu, torch.tanh, "relu", "tanh"}
426
+ return node.op in {"call_function", "call_method"} and node.target in pointwise
427
+
428
+ g = module.graph
429
+ for node in g.nodes:
430
+ if node.op != "call_function" or node.target != torch.cat:
431
+ continue
432
+
433
+ cat_or_view = node
434
+ while True:
435
+ user = one_user(cat_or_view)
436
+ if not user or not is_view(user):
437
+ break
438
+ cat_or_view = user
439
+
440
+ if user and is_pointwise_unary(user):
441
+ with g.inserting_before(node):
442
+
443
+ def cat_args(tensors, dim=0):
444
+ return tensors, dim
445
+
446
+ tensors, dim = cat_args(*node.args, **node.kwargs)
447
+ new_tensors = [
448
+ g.create_node(user.op, user.target, args=(arg,), kwargs=user.kwargs)
449
+ for arg in tensors
450
+ ]
451
+ new_cat = g.create_node(
452
+ "call_function", torch.cat, args=(new_tensors, dim)
453
+ )
454
+ user.replace_all_uses_with(cat_or_view)
455
+ node.replace_all_uses_with(new_cat)
456
+ g.erase_node(user)
457
+ g.erase_node(node)
458
+ g.lint()
459
+ module.recompile()
460
+ return module
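+
+ # In effect this rewrites pointwise(cat([t1, t2, ...])), possibly through a chain of
+ # view calls, into cat([pointwise(t1), pointwise(t2), ...]) so the unary op can fuse
+ # with the producers of each input rather than waiting on the concatenation.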
461
+
462
+
463
+ def linear_permute_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
464
+ for node in module.graph.nodes:
465
+ if (
466
+ node.op == "call_method"
467
+ and node.target == "permute"
468
+ and check_permute(node)
469
+ ):
470
+ if len(node.args) > 0:
471
+ input_node = node.args[0]
472
+ else:
473
+ input_node = node.kwargs["input"]
474
+ if (
475
+ input_node.op == "call_function"
476
+ and input_node.target == torch.nn.functional.linear
477
+ ):
478
+ normalized = NormalizedLinearNode(input_node)
479
+ input = normalized.get_input()
480
+ weight = normalized.get_weight()
481
+ bias = normalized.get_bias()
482
+ with module.graph.inserting_before(node):
483
+ fused_node = module.graph.call_function(
484
+ linear_transpose, args=(input, weight, bias)
485
+ )
486
+ node.replace_all_uses_with(fused_node)
487
+ module.graph.erase_node(node)
488
+ if len(input_node.users) == 0:
489
+ module.graph.erase_node(input_node)
490
+
491
+ module.graph.lint()
492
+ module.recompile()
493
+ return module
494
+
495
+
496
+ # Y1 = X * W^T + bias
497
+ # Y2 = Y1.permute(0, 2, 1)
498
+ # ---->
499
+ # Y2 = (W * X^T + bias.unsqueeze(-1))^T
500
+ def linear_transpose(
501
+ input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
502
+ ) -> torch.Tensor:
503
+ if bias is None:
504
+ return torch.matmul(weight, input.transpose(-1, -2))
505
+ return torch.matmul(weight, input.transpose(-1, -2)) + bias.unsqueeze(-1)
506
+
507
+
508
+ def permute_linear_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
509
+ for node in module.graph.nodes:
510
+ if node.op == "call_function" and node.target == torch.nn.functional.linear:
511
+ if len(node.args) > 0:
512
+ input_node = node.args[0]
513
+ else:
514
+ input_node = node.kwargs["input"]
515
+ if (
516
+ input_node.op == "call_method"
517
+ and input_node.target == "permute"
518
+ and check_permute(input_node)
519
+ ):
520
+ normalized = NormalizedLinearNode(node)
521
+ if len(input_node.args) > 0:
522
+ input = input_node.args[0]
523
+ else:
524
+ input = input_node.kwargs["input"]
525
+ weight = normalized.get_weight()
526
+ bias = normalized.get_bias()
527
+ with module.graph.inserting_before(node):
528
+ fused_node = module.graph.call_function(
529
+ transpose_linear, args=(input, weight, bias)
530
+ )
531
+ node.replace_all_uses_with(fused_node)
532
+ module.graph.erase_node(node)
533
+ if len(input_node.users) == 0:
534
+ module.graph.erase_node(input_node)
535
+
536
+ module.graph.lint()
537
+ module.recompile()
538
+ return module
539
+
540
+
541
+ def permute_matmul_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
542
+ for node in module.graph.nodes:
543
+ if node.op == "call_function" and (
544
+ node.target == torch.bmm or node.target == torch.matmul
545
+ ):
546
+ normalized = NormalizedMatmulNode(node)
547
+ input_A_node = normalized.get_input()
548
+ input_B_node = normalized.get_other()
549
+ input_A = input_A_node
550
+ input_B = input_B_node
551
+ Atrans = Btrans = False
552
+ if (
553
+ input_A_node.op == "call_method"
554
+ and input_A_node.target == "permute"
555
+ and check_permute(input_A_node)
556
+ ):
557
+ Atrans = True
558
+ if len(input_A_node.args) > 0:
559
+ input_A = input_A_node.args[0] # type: ignore[assignment]
560
+ else:
561
+ input_A = input_A_node.kwargs["input"] # type: ignore[assignment]
562
+
563
+ if (
564
+ input_B_node.op == "call_method"
565
+ and input_B_node.target == "permute"
566
+ and check_permute(input_B_node)
567
+ ):
568
+ Btrans = True
569
+ if len(input_B_node.args) > 0:
570
+ input_B = input_B_node.args[0] # type: ignore[assignment]
571
+ else:
572
+ input_B = input_B_node.kwargs["input"] # type: ignore[assignment]
573
+
574
+ if Atrans or Btrans:
575
+ with module.graph.inserting_before(node):
576
+ fused_node = module.graph.call_function(
577
+ transpose_matmul,
578
+ args=(input_A, input_B, Atrans, Btrans),
579
+ )
580
+ node.replace_all_uses_with(fused_node)
581
+ module.graph.erase_node(node)
582
+ if Atrans and len(input_A_node.users) == 0:
583
+ module.graph.erase_node(input_A_node)
584
+ if Btrans and len(input_B_node.users) == 0:
585
+ module.graph.erase_node(input_B_node)
586
+
587
+ module.graph.lint()
588
+ module.recompile()
589
+ return module
590
+
591
+
592
+ # X1 = X.permute(0, 2, 1)
593
+ # Y1 = X1 * W1^T + bias1
594
+ # ---->
595
+ # Y2 = X1.transpose(-1, -2) * W1^T + bias1
596
+ def transpose_linear(
597
+ input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
598
+ ) -> torch.Tensor:
599
+ if bias is None:
600
+ return torch.matmul(input.transpose(-1, -2), weight.t())
601
+ return torch.matmul(input.transpose(-1, -2), weight.t()) + bias
602
+
603
+
604
+ def transpose_matmul(
605
+ A: torch.Tensor, B: torch.Tensor, Atrans: bool, Btrans: bool
606
+ ) -> torch.Tensor:
607
+ if Atrans:
608
+ A = A.transpose(-1, -2)
609
+ if Btrans:
610
+ B = B.transpose(-1, -2)
611
+ return torch.matmul(A, B)
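+
+ # A quick numerical sanity check of the rewrites above (illustrative only; shapes are
+ # arbitrary and this snippet is not part of the upstream file):
+ #
+ #     x = torch.randn(2, 5, 8)   # (batch, seq, in_features)
+ #     w = torch.randn(4, 8)      # (out_features, in_features)
+ #     b = torch.randn(4)
+ #     ref = torch.nn.functional.linear(x, w, b).permute(0, 2, 1)
+ #     assert torch.allclose(ref, linear_transpose(x, w, b), atol=1e-5)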
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py ADDED
@@ -0,0 +1,1980 @@
1
+ import copy
2
+ import functools
3
+ import itertools
4
+ import math
5
+ import operator
6
+ from typing import Any, Tuple
7
+
8
+ import torch
9
+ from torch._dynamo.utils import counters
10
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols
11
+ from ..lowering import lowerings as L, require_channels_last
12
+ from ..pattern_matcher import Arg, CallFunction, filter_nodes, KeywordArg, ListOf, Match
13
+ from ..utils import pad_listlike
14
+ from .freezing_patterns import register_freezing_graph_pattern
15
+ from .post_grad import register_lowering_pattern
16
+
17
+ aten = torch.ops.aten
18
+ prims = torch.ops.prims
19
+ quantized_decomposed = torch.ops.quantized_decomposed
20
+ quantized = torch.ops.quantized
21
+
22
+ """
23
+ The quantization.py file primarily incorporates passes related to quantization fusion
24
+ in inductor, including:
25
+ 1. Dequant Promotion;
26
+ 2. Conv/GEMM weight prepack with oneDNN Library;
27
+ 3. Conv/GEMM quantization fusion with output quant node (if present);
28
+ 4. Other pointwise operators' quantization fusion like: qmaxpool2d, qcat and more;
29
+
30
+ It also involves int8-mixed-fp32 and int8-mixed-bf16 quantization. The main difference
31
+ of patterns for int8-mixed-bf16, compared with int8-mixed-fp32, is:
32
+ 1. There is a to(dtype=torch.bfloat16) node at the activation and weight inputs of Conv/GEMM.
33
+ 2. There is a to(dtype=torch.float32) node at the Conv/GEMM output before it is fed to the next quant node.
34
+ Refer to https://github.com/pytorch/pytorch/issues/111640 for the detailed design of int8-mixed-bf16
35
+ quantization.
36
+ """
37
+
38
+
39
+ def _may_generate_pattern_with_dtype_convert(pattern, dtype=Arg(), dtype_convert=True):
40
+ if dtype_convert:
41
+ return CallFunction(
42
+ prims.convert_element_type.default,
43
+ pattern,
44
+ dtype,
45
+ )
46
+ else:
47
+ return pattern
48
+
49
+
50
+ def _may_generate_pattern_with_reshape(pattern, reshape_size=Arg(), with_reshape=True):
51
+ if with_reshape:
52
+ return CallFunction(
53
+ torch.ops.aten.reshape.default,
54
+ pattern,
55
+ reshape_size,
56
+ )
57
+ else:
58
+ return pattern
59
+
60
+
61
+ def _generate_linear_t_pattern(
62
+ _dequant_per_channel_pattern,
63
+ dtype,
64
+ ):
65
+ assert dtype in [torch.float32, torch.bfloat16]
66
+ t_pattern = CallFunction(
67
+ aten.permute.default,
68
+ _may_generate_pattern_with_dtype_convert(
69
+ _dequant_per_channel_pattern,
70
+ KeywordArg("autocast_wgt_dtype"),
71
+ dtype == torch.bfloat16,
72
+ ),
73
+ KeywordArg("permute_axes"),
74
+ )
75
+ return t_pattern
76
+
77
+
78
+ """
79
+ dequantize activation:
80
+ x = x.to(fp32)
81
+ x = x - zero_point
82
+ x = x * scale
83
+ """
84
+ dequantize_per_tensor_activation_pattern = CallFunction(
85
+ aten.mul.Tensor,
86
+ CallFunction(
87
+ aten.sub.Tensor,
88
+ CallFunction(
89
+ prims.convert_element_type.default,
90
+ KeywordArg("x"),
91
+ KeywordArg("x_dq_dtype"),
92
+ ),
93
+ KeywordArg("x_zp"),
94
+ ),
95
+ KeywordArg("x_scale"),
96
+ )
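+
+ # Worked example (illustrative): with x_scale=0.05 and x_zp=128, a uint8 value of 138
+ # dequantizes to (138 - 128) * 0.05 = 0.5, i.e. exactly the convert -> sub -> mul
+ # chain matched by the pattern above.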
97
+
98
+ dequantize_per_channel_weight_pattern = CallFunction(
99
+ quantized_decomposed.dequantize_per_channel.default,
100
+ KeywordArg("q_weight"),
101
+ KeywordArg("w_scale"),
102
+ KeywordArg("w_zp"),
103
+ KeywordArg("w_axis"),
104
+ KeywordArg("w_quant_min"),
105
+ KeywordArg("w_quant_max"),
106
+ KeywordArg("w_dtype"),
107
+ )
108
+
109
+ dequantize_per_channel_to_bf16_weight_pattern = (
110
+ _may_generate_pattern_with_dtype_convert(
111
+ dequantize_per_channel_weight_pattern,
112
+ KeywordArg("autocast_wgt_dtype"),
113
+ )
114
+ )
115
+
116
+ dequantize_per_channel_clone_weight_pattern = CallFunction(
117
+ aten.clone.default,
118
+ dequantize_per_channel_weight_pattern,
119
+ memory_format=KeywordArg("memory_format"),
120
+ )
121
+
122
+ dequantize_per_channel_to_bf16_clone_weight_pattern = CallFunction(
123
+ aten.clone.default,
124
+ dequantize_per_channel_to_bf16_weight_pattern,
125
+ memory_format=KeywordArg("memory_format"),
126
+ )
127
+
128
+
129
+ def get_dequantize_qconv_pt2e_pattern(users=1):
130
+ return CallFunction(
131
+ torch.ops.onednn.qconv2d_pointwise.default,
132
+ KeywordArg("x"),
133
+ KeywordArg("x_scale"), # x_scale
134
+ KeywordArg("x_zp"), # x_zp
135
+ KeywordArg("packed_weight"), # packed_weight
136
+ KeywordArg("w_scale"), # w_scale
137
+ KeywordArg("w_zp"), # w_zp
138
+ KeywordArg("b"), # bias
139
+ KeywordArg("stride"),
140
+ KeywordArg("padding"),
141
+ KeywordArg("dilation"),
142
+ KeywordArg("groups"),
143
+ KeywordArg("inv_output_scale"), # inv_output_scale = 1.0
144
+ KeywordArg("output_zero_point"), # output_zero_point = 0
145
+ KeywordArg("output_dtype"), # output_dtype = None
146
+ KeywordArg("attr"), # attr = "none"
147
+ Arg(), # scalars
148
+ Arg(), # algorithm
149
+ _users=users,
150
+ )
151
+
152
+
153
+ def get_qlinear_pt2e_pattern(x_scale_zp_are_tensors):
154
+ qlinear_op = (
155
+ torch.ops.onednn.qlinear_pointwise.tensor
156
+ if x_scale_zp_are_tensors
157
+ else torch.ops.onednn.qlinear_pointwise.default
158
+ )
159
+ return CallFunction(
160
+ qlinear_op,
161
+ KeywordArg("x"),
162
+ KeywordArg("x_scale"),
163
+ KeywordArg("x_zp"),
164
+ KeywordArg("packed_weight"),
165
+ KeywordArg("w_scale"),
166
+ KeywordArg("w_zp"),
167
+ KeywordArg("b"),
168
+ KeywordArg("output_scale"),
169
+ KeywordArg("output_zero_point"),
170
+ KeywordArg("output_dtype"),
171
+ KeywordArg("postop_name"),
172
+ KeywordArg("postop_args"),
173
+ KeywordArg("postop_algorithm"),
174
+ )
175
+
176
+
177
+ dequantize_accum_pattern = CallFunction(
178
+ aten.mul.Tensor,
179
+ CallFunction(
180
+ aten.sub.Tensor,
181
+ CallFunction(
182
+ prims.convert_element_type.default,
183
+ KeywordArg("accum"),
184
+ KeywordArg("accum_dq_dtype"),
185
+ ),
186
+ KeywordArg("accum_zp"),
187
+ ),
188
+ KeywordArg("accum_scale"),
189
+ )
190
+
191
+
192
+ def generate_pattern_with_binary(
193
+ binary_post_op,
194
+ computation_call,
195
+ extra_input_pattern,
196
+ int8_mixed_bf16_with_inplace_add=False,
197
+ ):
198
+ binary_pattern = CallFunction(
199
+ binary_post_op,
200
+ computation_call,
201
+ extra_input_pattern,
202
+ )
203
+ return _may_generate_pattern_with_dtype_convert(
204
+ binary_pattern,
205
+ KeywordArg("convert_dtype_after_inplace_add"),
206
+ int8_mixed_bf16_with_inplace_add,
207
+ )
208
+
209
+
210
+ def generate_pattern_with_unary(computation_call, unary_post_op):
211
+ if unary_post_op is not None:
212
+ if unary_post_op == aten.hardtanh.default:
213
+ return CallFunction(
214
+ aten.clamp_max,
215
+ CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")),
216
+ KeywordArg("max_value"),
217
+ )
218
+ if unary_post_op == aten.hardswish.default:
219
+ return CallFunction(
220
+ aten.div,
221
+ CallFunction(
222
+ aten.mul,
223
+ computation_call,
224
+ CallFunction(
225
+ aten.clamp_max,
226
+ CallFunction(
227
+ aten.clamp_min,
228
+ CallFunction(aten.add, computation_call, 3),
229
+ 0,
230
+ ),
231
+ 6,
232
+ ),
233
+ ),
234
+ 6,
235
+ )
236
+ else:
237
+ return CallFunction(
238
+ unary_post_op,
239
+ computation_call,
240
+ )
241
+ return computation_call
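+
+ # The nested clamp/add/mul/div structure above is the decomposed form of
+ # hardswish(x) = x * clamp(x + 3, 0, 6) / 6, and the clamp_min/clamp_max pair is the
+ # decomposed form of hardtanh(x, min_value, max_value).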
242
+
243
+
244
+ def generate_pattern_with_output_quant(computation_call, dtype=torch.float32):
245
+ """
246
+ quantize output:
247
+ output = round(output * o_inv_scale)
248
+ output = output + zero_point
249
+ output = clamp_min(output, 0)
250
+ output = clamp_max(output, 127)
251
+ output = output.to(uint8)
252
+ """
253
+ assert dtype in [torch.float32, torch.bfloat16]
254
+ quantized_op_output_pattern_pt2e = CallFunction(
255
+ prims.convert_element_type.default,
256
+ CallFunction(
257
+ aten.clamp_max.default,
258
+ CallFunction(
259
+ aten.clamp_min.default,
260
+ CallFunction(
261
+ aten.add.Tensor,
262
+ CallFunction(
263
+ aten.round.default,
264
+ CallFunction(
265
+ aten.mul.Tensor,
266
+ _may_generate_pattern_with_dtype_convert(
267
+ computation_call,
268
+ KeywordArg("autocast_output_quant_dtype"),
269
+ dtype == torch.bfloat16,
270
+ ),
271
+ KeywordArg("o_inv_scale"),
272
+ ),
273
+ ),
274
+ KeywordArg("o_zp"),
275
+ ),
276
+ KeywordArg("o_qmin"),
277
+ ),
278
+ KeywordArg("o_qmax"),
279
+ ),
280
+ KeywordArg("o_dtype"),
281
+ )
282
+ return quantized_op_output_pattern_pt2e
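+
+ # Worked example (illustrative): with o_inv_scale=20.0 (a quant scale of 0.05),
+ # o_zp=64 and a [0, 255] qmin/qmax range, an fp32 activation of 0.5 becomes
+ # round(0.5 * 20.0) + 64 = 74, which is then clamped into range and cast to uint8.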
283
+
284
+
285
+ def _check_node_kwarg_arg_value(check_node, kwarg_name, args_index, expected_value):
286
+ if kwarg_name in check_node.kwargs:
287
+ actual_value = check_node.kwargs[kwarg_name]
288
+ return actual_value == expected_value
289
+ else:
290
+ assert len(check_node.args) >= (args_index + 1)
291
+ actual_value = check_node.args[args_index]
292
+ return actual_value == expected_value
293
+
294
+
295
+ def _is_valid_quantized_conv2d_optimization_pattern(output_dtype):
296
+ def fn(match):
297
+ if output_dtype is not None:
298
+ # Only keep matched pattern with same output_dtype
299
+ qconv_node_after_weight_prepack = filter_nodes(
300
+ match.nodes, torch.ops.onednn.qconv2d_pointwise
301
+ )[0]
302
+ return _check_node_kwarg_arg_value(
303
+ qconv_node_after_weight_prepack, "output_dtype", 13, output_dtype
304
+ )
305
+ return True
306
+
307
+ return fn
308
+
309
+
310
+ def _register_quantized_conv_lowering(
311
+ pattern,
312
+ pass_number,
313
+ computation_op,
314
+ output_dtype,
315
+ unary_attr,
316
+ original_pattern_output_dtype=torch.float32,
317
+ ):
318
+ @register_lowering_pattern(
319
+ pattern,
320
+ extra_check=_is_valid_quantized_conv2d_optimization_pattern(output_dtype),
321
+ pass_number=pass_number,
322
+ )
323
+ def qconv(match: Match, *args, **kwargs):
324
+ # Activation QParams
325
+ x, x_scale, x_zp = (
326
+ kwargs["x"],
327
+ kwargs["x_scale"],
328
+ kwargs["x_zp"],
329
+ )
330
+ # Weight QParams
331
+ packed_weight, w_scale, w_zp = (
332
+ kwargs["packed_weight"],
333
+ kwargs["w_scale"],
334
+ kwargs["w_zp"],
335
+ )
336
+ # Conv Params
337
+ b, stride, padding, dilation, groups = (
338
+ kwargs["b"],
339
+ kwargs["stride"],
340
+ kwargs["padding"],
341
+ kwargs["dilation"],
342
+ kwargs["groups"],
343
+ )
344
+ assert output_dtype in [None, torch.float32, torch.bfloat16]
345
+ # Output QParams
346
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
347
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
348
+ assert (
349
+ kwargs["output_dtype"] is original_pattern_output_dtype
350
+ ) # Expected int8-in fp32-out qconv in weight prepack phase
351
+ assert (
352
+ kwargs["attr"] == "none"
353
+ ) # Expected no post op fused in weight prepack phase
354
+ if unary_attr.op_name == "hardtanh":
355
+ min_value = kwargs.get("min_value")
356
+ max_value = kwargs.get("max_value")
357
+ unary_attr.scalars_attr = [min_value, max_value]
358
+
359
+ computation_args = (
360
+ x,
361
+ x_scale,
362
+ x_zp,
363
+ packed_weight,
364
+ w_scale,
365
+ w_zp,
366
+ b,
367
+ stride,
368
+ padding,
369
+ dilation,
370
+ groups,
371
+ o_inv_scale,
372
+ o_zero_point,
373
+ output_dtype,
374
+ unary_attr.op_name,
375
+ unary_attr.scalars_attr,
376
+ unary_attr.algorithm_attr,
377
+ )
378
+ counters["inductor"]["qconv2d_unary_matcher_count"] += 1
379
+ counters["inductor"]["qconv2d_unary_matcher_nodes"] += len(match.nodes)
380
+ return L[computation_op](*computation_args)
381
+
382
+ return qconv
383
+
384
+
385
+ def _is_valid_quantized_linear_optimization_pattern(output_dtype):
386
+ def fn(match):
387
+ if output_dtype is not None:
388
+ # Only keep matched pattern with same output_dtype
389
+ qlinear_node_after_weight_prepack = filter_nodes(
390
+ match.nodes, torch.ops.onednn.qlinear_pointwise
391
+ )[0]
392
+ return _check_node_kwarg_arg_value(
393
+ qlinear_node_after_weight_prepack, "output_dtype", 9, output_dtype
394
+ )
395
+ return True
396
+
397
+ return fn
398
+
399
+
400
+ def _register_quantized_linear_lowering(
401
+ pattern,
402
+ pass_number,
403
+ computation_op,
404
+ output_dtype,
405
+ unary_attr,
406
+ original_pattern_output_dtype=torch.float32,
407
+ ):
408
+ @register_lowering_pattern(
409
+ pattern,
410
+ extra_check=_is_valid_quantized_linear_optimization_pattern(output_dtype),
411
+ pass_number=pass_number,
412
+ )
413
+ def qlinear(match: Match, *args, **kwargs):
414
+ # Activation QParams
415
+ x, x_scale, x_zp = (
416
+ kwargs["x"],
417
+ kwargs["x_scale"],
418
+ kwargs["x_zp"],
419
+ )
420
+ # Weight QParams
421
+ packed_weight, w_scale, w_zp = (
422
+ kwargs["packed_weight"],
423
+ kwargs["w_scale"],
424
+ kwargs["w_zp"],
425
+ )
426
+
427
+ # bias
428
+ b = kwargs["b"] if "b" in kwargs else None
429
+
430
+ # Output QParams
431
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
432
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
433
+ assert (
434
+ kwargs["output_dtype"] is original_pattern_output_dtype
435
+ ) # Expected int8-in fp32/bf16-out qlinear in weight prepack phase
436
+ assert (
437
+ kwargs["postop_name"] == "none"
438
+ ) # Expected no post op fused in weight prepack phase
439
+
440
+ computation_args = (
441
+ x,
442
+ x_scale,
443
+ x_zp,
444
+ packed_weight,
445
+ w_scale,
446
+ w_zp,
447
+ b,
448
+ o_inv_scale,
449
+ o_zero_point,
450
+ output_dtype,
451
+ unary_attr.op_name,
452
+ unary_attr.scalars_attr,
453
+ unary_attr.algorithm_attr,
454
+ )
455
+ counters["inductor"]["qlinear_unary_matcher_count"] += 1
456
+ counters["inductor"]["qlinear_unary_matcher_nodes"] += len(match.nodes)
457
+ return L[computation_op](*computation_args)
458
+
459
+ return qlinear
460
+
461
+
462
+ def _is_valid_quantized_conv_binary_optimization_pattern(output_dtype):
463
+ # Check if it's a valid Conv Binary Pattern:
464
+ # * qconv2d_pointwise should only have one user
465
+ # * Extra input of binary node comes from dequant pattern
466
+ # * the two inputs of binary node should have attribute "meta" and should be tensors
467
+ # * the two inputs of binary node should have the same shape
468
+ # * All users of the extra input in this pattern should be
469
+ # ancestor nodes of the compute node, except for the binary node
470
+ # connected to the compute node.
471
+ def fn(match):
472
+ compute_node = filter_nodes(match.nodes, torch.ops.onednn.qconv2d_pointwise)[0]
473
+ # qconv2d_pointwise should only have one user
474
+ if len(compute_node.users) != 1:
475
+ return False
476
+ binary_node_inputs = next(iter(compute_node.users)).args
477
+ assert len(binary_node_inputs) == 2, "Expects binary node with 2 inputs"
478
+ if output_dtype is not None:
479
+ extra_input_of_binary_node = None
480
+ for arg in binary_node_inputs:
481
+ if arg != compute_node:
482
+ extra_input_of_binary_node = arg
483
+ break
484
+ assert extra_input_of_binary_node is not None
485
+ # Extra input of binary node comes from dequant pattern
486
+ if (not isinstance(extra_input_of_binary_node, torch.fx.Node)) or (
487
+ extra_input_of_binary_node.target != aten.mul.Tensor
488
+ ):
489
+ return False
490
+
491
+ # the two inputs of binary node should have attribute "meta" and should be tensors
492
+ if not (
493
+ hasattr(binary_node_inputs[0], "meta")
494
+ and isinstance(binary_node_inputs[0].meta.get("val", None), torch.Tensor) # type: ignore[union-attr]
495
+ ) or not (
496
+ hasattr(binary_node_inputs[1], "meta")
497
+ and isinstance(binary_node_inputs[1].meta.get("val", None), torch.Tensor) # type: ignore[union-attr]
498
+ ):
499
+ return False
500
+ # the two inputs of binary node should have the same shape
501
+ if (
502
+ binary_node_inputs[0].meta["val"].size() # type: ignore[union-attr]
503
+ != binary_node_inputs[1].meta["val"].size() # type: ignore[union-attr]
504
+ ):
505
+ return False
506
+
507
+ # All users of the extra input in this pattern should be
508
+ # ancestor nodes of the compute node, except for the binary node
509
+ # connected to the compute node.
510
+
511
+ from .mkldnn_fusion import _get_remaining_users
512
+
513
+ extra_input_of_pattern = (
514
+ match.kwargs["accum"]
515
+ if output_dtype is None
516
+ else match.kwargs["accum_after_dequant"]
517
+ )
518
+ if (
519
+ len(
520
+ _get_remaining_users(
521
+ extra_input_of_pattern,
522
+ compute_node,
523
+ )
524
+ )
525
+ > 1
526
+ or extra_input_of_pattern == compute_node.args[0]
527
+ ):
528
+ return False
529
+ return True
530
+
531
+ return fn
532
+
533
+
534
+ def _register_quantized_conv_binary_lowering(
535
+ pattern,
536
+ pass_number,
537
+ computation_op,
538
+ output_dtype,
539
+ binary_unary_attr,
540
+ ):
541
+ @register_lowering_pattern(
542
+ pattern,
543
+ extra_check=_is_valid_quantized_conv_binary_optimization_pattern(output_dtype),
544
+ pass_number=pass_number,
545
+ )
546
+ def qconv_binary(match: Match, *args, **kwargs):
547
+ x, x_scale, x_zp = kwargs["x"], kwargs["x_scale"], kwargs["x_zp"]
548
+ accum = (
549
+ kwargs["accum"] if output_dtype is None else kwargs["accum_after_dequant"]
550
+ )
551
+ accum_scale = kwargs["accum_scale"] if output_dtype is None else 1.0
552
+ accum_zp = kwargs["accum_zp"] if output_dtype is None else 0
553
+ packed_weight, w_scale, w_zp = (
554
+ kwargs["packed_weight"],
555
+ kwargs["w_scale"],
556
+ kwargs["w_zp"],
557
+ )
558
+ b, stride, padding, dilation, groups = (
559
+ kwargs["b"],
560
+ kwargs["stride"],
561
+ kwargs["padding"],
562
+ kwargs["dilation"],
563
+ kwargs["groups"],
564
+ )
565
+ # Output QParams
566
+ o_inv_scale = kwargs["o_inv_scale"] if output_dtype is None else 1.0
567
+ o_zero_point = kwargs["o_zp"] if output_dtype is None else 0
568
+
569
+ accum.realize()
570
+ from .mkldnn_fusion import _can_be_inplace
571
+
572
+ assert _can_be_inplace(
573
+ accum
574
+ ), "QConv Binary Inplace Fusion requires accum is not an alias or mutation."
575
+
576
+ computation_args = (
577
+ x,
578
+ x_scale,
579
+ x_zp,
580
+ accum,
581
+ accum_scale,
582
+ accum_zp,
583
+ packed_weight,
584
+ w_scale,
585
+ w_zp,
586
+ b,
587
+ stride,
588
+ padding,
589
+ dilation,
590
+ groups,
591
+ o_inv_scale,
592
+ o_zero_point,
593
+ output_dtype,
594
+ binary_unary_attr.binary_op_name,
595
+ binary_unary_attr.alpha,
596
+ binary_unary_attr.unary_op_name,
597
+ binary_unary_attr.scalars_attr,
598
+ binary_unary_attr.algorithm_attr,
599
+ )
600
+ counters["inductor"]["qconv2d_binary_matcher_count"] += 1
601
+ counters["inductor"]["qconv2d_binary_matcher_nodes"] += len(match.nodes)
602
+ return L[computation_op](*computation_args)
603
+
604
+ return qconv_binary
605
+
606
+
607
+ def _register_quantization_unary_fusion():
608
+ class UnaryAttr:
609
+ def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
610
+ self.op_name = op_name
611
+ self.scalars_attr = scalars_attr if scalars_attr else []
612
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
613
+
614
+ for original_pattern_output_dtype in [torch.float32, torch.bfloat16]:
615
+ # QConv2d
616
+ # Priority 1 to match: QConv2d Unary pattern with int8 output
617
+ # If pattern1 is a subset of pattern2, we should try to match pattern2 first.
618
+ # For example: pattern1 is qconv_fp32 -> relu, pattern2 is qconv_fp32 -> relu -> quant
619
+ conv_unary_replace_patterns = {
620
+ UnaryAttr("none", [], ""): generate_pattern_with_output_quant(
621
+ get_dequantize_qconv_pt2e_pattern(1),
622
+ dtype=original_pattern_output_dtype,
623
+ ),
624
+ UnaryAttr("relu", [], ""): generate_pattern_with_output_quant(
625
+ generate_pattern_with_unary(
626
+ get_dequantize_qconv_pt2e_pattern(1), aten.relu.default
627
+ ),
628
+ dtype=original_pattern_output_dtype,
629
+ ),
630
+ UnaryAttr("hardtanh", [], ""): generate_pattern_with_output_quant(
631
+ generate_pattern_with_unary(
632
+ get_dequantize_qconv_pt2e_pattern(1), aten.hardtanh.default
633
+ ),
634
+ dtype=original_pattern_output_dtype,
635
+ ),
636
+ UnaryAttr("hardswish", [], ""): generate_pattern_with_output_quant(
637
+ generate_pattern_with_unary(
638
+ get_dequantize_qconv_pt2e_pattern(2), aten.hardswish.default
639
+ ),
640
+ dtype=original_pattern_output_dtype,
641
+ ),
642
+ }
643
+
644
+ for unary_attr, patterns in conv_unary_replace_patterns.items():
645
+ # Register qconv2d pattern for ExternKernel Lowering
646
+ _register_quantized_conv_lowering(
647
+ patterns,
648
+ 1, # pass_number
649
+ torch.ops.onednn.qconv2d_pointwise, # computation_op
650
+ None, # output_dtype, None is the default value for int8 output
651
+ unary_attr, # unary_attr
652
+ original_pattern_output_dtype=original_pattern_output_dtype,
653
+ )
654
+
655
+ # Priority 2 to match: QConv2d Unary pattern with fp32/bfloat16 output
656
+ conv_unary_replace_float_out_patterns = {
657
+ UnaryAttr("relu", [], ""): generate_pattern_with_unary(
658
+ get_dequantize_qconv_pt2e_pattern(1), aten.relu.default
659
+ ),
660
+ UnaryAttr("hardtanh", [], ""): generate_pattern_with_unary(
661
+ get_dequantize_qconv_pt2e_pattern(1), aten.hardtanh.default
662
+ ),
663
+ UnaryAttr("hardswish", [], ""): generate_pattern_with_unary(
664
+ get_dequantize_qconv_pt2e_pattern(2), aten.hardswish.default
665
+ ),
666
+ }
667
+
668
+ for unary_attr, patterns in conv_unary_replace_float_out_patterns.items():
669
+ # Register qconv2d pattern for ExternKernel Lowering
670
+ _register_quantized_conv_lowering(
671
+ patterns,
672
+ 2, # pass_number
673
+ torch.ops.onednn.qconv2d_pointwise, # computation_op
674
+ original_pattern_output_dtype, # output_dtype
675
+ unary_attr, # unary_attr
676
+ original_pattern_output_dtype=original_pattern_output_dtype,
677
+ )
678
+
679
+ # QLinear
680
+ for x_scale_zp_are_tensors in (False, True):
681
+ qlinear_pattern = get_qlinear_pt2e_pattern(x_scale_zp_are_tensors)
682
+ # Priority 1 to match: QLinear Unary pattern with int8 output
683
+ linear_unary_replace_patterns = {
684
+ UnaryAttr("none", [], ""): generate_pattern_with_output_quant(
685
+ qlinear_pattern,
686
+ dtype=original_pattern_output_dtype,
687
+ ),
688
+ UnaryAttr("relu", [], ""): generate_pattern_with_output_quant(
689
+ generate_pattern_with_unary(qlinear_pattern, aten.relu.default),
690
+ dtype=original_pattern_output_dtype,
691
+ ),
692
+ }
693
+
694
+ for unary_attr, patterns in linear_unary_replace_patterns.items():
695
+ _register_quantized_linear_lowering(
696
+ patterns,
697
+ 1, # pass_number
698
+ torch.ops.onednn.qlinear_pointwise, # computation_op
699
+ None, # output_dtype
700
+ unary_attr, # unary_attr
701
+ original_pattern_output_dtype=original_pattern_output_dtype,
702
+ )
703
+
704
+ # Priority 2 to match: QLinear Unary pattern with FP32/BF16 output
705
+ linear_unary_replace_float_out_patterns = {
706
+ UnaryAttr("relu", [], ""): generate_pattern_with_unary(
707
+ qlinear_pattern, aten.relu.default
708
+ ),
709
+ }
710
+
711
+ for unary_attr, patterns in linear_unary_replace_float_out_patterns.items():
712
+ _register_quantized_linear_lowering(
713
+ patterns,
714
+ 2, # pass_number
715
+ torch.ops.onednn.qlinear_pointwise, # computation_op
716
+ original_pattern_output_dtype, # output_dtype
717
+ unary_attr, # unary_attr
718
+ original_pattern_output_dtype=original_pattern_output_dtype,
719
+ )
720
+
721
+
722
+ def _register_quantization_binary_fusion():
723
+ class BinaryUnaryAttr:
724
+ def __init__(
725
+ self,
726
+ binary_op_name: str,
727
+ alpha=None,
728
+ unary_op_name: str = "none",
729
+ scalars_attr=None,
730
+ algorithm_attr=None,
731
+ ):
732
+ self.binary_op_name = binary_op_name
733
+ self.alpha = alpha if alpha else 1.0
734
+ self.unary_op_name = unary_op_name
735
+ self.scalars_attr = scalars_attr if scalars_attr else []
736
+ self.algorithm_attr = algorithm_attr if algorithm_attr else ""
737
+
738
+ for int8_mixed_bf16_with_inplace_add in [False, True]:
739
+ # Priority 1 to match: QConv2d Binary or Binary-Unary pattern with int8 output
740
+ binary_replace_patterns = {
741
+ BinaryUnaryAttr(
742
+ "sum", 1.0, "none", [], ""
743
+ ): generate_pattern_with_output_quant(
744
+ generate_pattern_with_binary(
745
+ aten.add.Tensor,
746
+ get_dequantize_qconv_pt2e_pattern(1),
747
+ dequantize_accum_pattern,
748
+ int8_mixed_bf16_with_inplace_add,
749
+ ),
750
+ dtype=torch.bfloat16
751
+ if int8_mixed_bf16_with_inplace_add
752
+ else torch.float32,
753
+ ),
754
+ BinaryUnaryAttr(
755
+ "sum", 1.0, "relu", [], ""
756
+ ): generate_pattern_with_output_quant(
757
+ generate_pattern_with_unary(
758
+ generate_pattern_with_binary(
759
+ aten.add.Tensor,
760
+ get_dequantize_qconv_pt2e_pattern(1),
761
+ dequantize_accum_pattern,
762
+ int8_mixed_bf16_with_inplace_add,
763
+ ),
764
+ aten.relu.default,
765
+ ),
766
+ dtype=torch.bfloat16
767
+ if int8_mixed_bf16_with_inplace_add
768
+ else torch.float32,
769
+ ),
770
+ }
771
+
772
+ for binary_unary_attr, patterns in binary_replace_patterns.items():
773
+ _register_quantized_conv_binary_lowering(
774
+ patterns,
775
+ 0, # pass_number
776
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
777
+ None, # output_dtype
778
+ binary_unary_attr, # binary_unary_attr
779
+ )
780
+
781
+ # Priority 2 to match: QConv2d Binary-Unary pattern with fp32/bfloat16 output
782
+ binary_replace_float_out_patterns = {
783
+ BinaryUnaryAttr("sum", 1.0, "relu", [], ""): generate_pattern_with_unary(
784
+ generate_pattern_with_binary(
785
+ aten.add.Tensor,
786
+ get_dequantize_qconv_pt2e_pattern(1),
787
+ KeywordArg("accum_after_dequant"),
788
+ int8_mixed_bf16_with_inplace_add,
789
+ ),
790
+ aten.relu.default,
791
+ ),
792
+ }
793
+
794
+ for (
795
+ binary_unary_attr,
796
+ patterns,
797
+ ) in binary_replace_float_out_patterns.items():
798
+ if int8_mixed_bf16_with_inplace_add:
799
+ _register_quantized_conv_binary_lowering(
800
+ patterns,
801
+ 0, # pass_number
802
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
803
+ # Note that for int8-mixed-bf16 and non-inplace add, because we have
804
+ # q-dq inserted at extra input of add, so the non-inplace add has bf16 and fp32 inputs,
805
+ # the output dtype will be float32.
806
+ # For inplace add, there is a extra to_bf16 node at add output, so the fusion pattern has bfloat16 output.
807
+ torch.bfloat16,
808
+ binary_unary_attr, # binary_unary_attr
809
+ )
810
+ else:
811
+ _register_quantized_conv_binary_lowering(
812
+ patterns,
813
+ 1, # pass_number
814
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
815
+ torch.float32,
816
+ binary_unary_attr, # binary_unary_attr
817
+ )
818
+
819
+ # Priority 3: QConv2d Binary pattern with fp32/bfloat16 output
820
+ binary_replace_float_out_patterns = {
821
+ BinaryUnaryAttr("sum", 1.0, "none", [], ""): generate_pattern_with_binary(
822
+ aten.add.Tensor,
823
+ get_dequantize_qconv_pt2e_pattern(1),
824
+ KeywordArg("accum_after_dequant"),
825
+ int8_mixed_bf16_with_inplace_add,
826
+ ),
827
+ }
828
+
829
+ for (
830
+ binary_unary_attr,
831
+ patterns,
832
+ ) in binary_replace_float_out_patterns.items():
833
+ _register_quantized_conv_binary_lowering(
834
+ patterns,
835
+ 1 if int8_mixed_bf16_with_inplace_add else 2, # pass_number
836
+ torch.ops.onednn.qconv2d_pointwise.binary, # computation_op
837
+ # Same output dtype setting as conv-add-relu pattern
838
+ torch.bfloat16 if int8_mixed_bf16_with_inplace_add else torch.float32,
839
+ binary_unary_attr, # binary_unary_attr
840
+ )
841
+
842
+
843
+ def _is_valid_quantized_maxpool2d_optimization_pattern():
844
+ def fn(match):
845
+ # Only match the pattern which max_pool2d_with_indices returns value
846
+ # instead of indices.
847
+ get_item_node = filter_nodes(match.nodes, operator.getitem)[0]
848
+ return get_item_node.args[1] == 0
849
+
850
+ return fn
851
+
852
+
853
+ def _register_quantized_maxpool2d_lowering(
854
+ pattern,
855
+ computation_op,
856
+ ):
857
+ @register_lowering_pattern(
858
+ pattern,
859
+ extra_check=_is_valid_quantized_maxpool2d_optimization_pattern(),
860
+ )
861
+ def qmaxpool2d(match: Match, *args, **kwargs):
862
+ x = kwargs["x"]
863
+ kernel_size = kwargs["kernel_size"]
864
+ stride = kwargs["stride"] if ("stride" in kwargs) else None
865
+ padding = kwargs["padding"] if ("padding" in kwargs) else 0
866
+ dilation = kwargs["dilation"] if ("dilation" in kwargs) else 1
867
+ ceil_mode = kwargs["ceil_mode"] if ("ceil_mode" in kwargs) else False
868
+
869
+ if padding == 0:
870
+ padding = [0, 0]
871
+ if dilation == 1:
872
+ dilation = [1, 1]
873
+ if not stride:
874
+ stride = kernel_size
875
+ kernel_size = pad_listlike(kernel_size, 2)
876
+ stride = pad_listlike(stride, 2)
877
+ padding = pad_listlike(padding, 2)
878
+ dilation = pad_listlike(dilation, 2)
879
+
880
+ assert len(kernel_size) == 2
881
+ assert len(stride) == 2
882
+ assert len(padding) == 2
883
+ assert len(dilation) == 2
884
+
885
+ computation_args = (
886
+ x,
887
+ kernel_size,
888
+ stride,
889
+ padding,
890
+ dilation,
891
+ ceil_mode,
892
+ )
893
+ computation_args, _ = require_channels_last(computation_op, *computation_args)
894
+ return L[computation_op](*computation_args)
895
+
896
+ return qmaxpool2d
897
+
898
+
899
+ def _register_quantization_maxpool2d():
900
+ # Currently, the default parameters are not in the FX graph generated by Dynamo export.
901
+ # So, if the user defines nn.MaxPool2d with a different assignment of default parameters,
902
+ # it will generate a graph with a different number of input nodes and hence
903
+ # a different pattern to be matched.
904
+ # Refer to the issue: https://github.com/pytorch/pytorch/issues/105901
905
+ max_pool2d_args_list = [
906
+ [
907
+ KeywordArg("stride"),
908
+ ],
909
+ [
910
+ KeywordArg("stride"),
911
+ KeywordArg("padding"),
912
+ ],
913
+ [
914
+ KeywordArg("stride"),
915
+ KeywordArg("padding"),
916
+ KeywordArg("dilation"),
917
+ ],
918
+ [
919
+ KeywordArg("stride"),
920
+ KeywordArg("padding"),
921
+ KeywordArg("dilation"),
922
+ KeywordArg("ceil_mode"),
923
+ ],
924
+ ]
925
+
926
+ for max_pool2d_args in max_pool2d_args_list:
927
+ dequantize_maxpool2d_pattern = CallFunction(
928
+ aten.max_pool2d_with_indices.default,
929
+ dequantize_per_tensor_activation_pattern,
930
+ KeywordArg("kernel_size"),
931
+ *max_pool2d_args,
932
+ )
933
+ dequantize_maxpool2d_get_item_pattern = CallFunction(
934
+ operator.getitem,
935
+ dequantize_maxpool2d_pattern,
936
+ Arg(),
937
+ )
938
+ _register_quantized_maxpool2d_lowering(
939
+ generate_pattern_with_output_quant(dequantize_maxpool2d_get_item_pattern),
940
+ quantized.max_pool2d.default,
941
+ )
942
+
943
+
944
+ def _is_input_output_same_scale_zp(check_node):
945
+ def fn(match):
946
+ # Ensure all the inputs and the output have the same scale and zero point
947
+ # Step 1: Check inputs/output zero point
948
+ sub_nodes = filter_nodes(match.nodes, aten.sub.Tensor)
949
+ zero_points = [node.args[1] for node in sub_nodes]
950
+ add_nodes = filter_nodes(match.nodes, aten.add.Tensor)
951
+ assert len(add_nodes) == 1, "expect only 1 add node at output quant pattern"
952
+ zero_points.append(add_nodes[0].args[1])
953
+ if not all(zero_point == zero_points[0] for zero_point in zero_points):
954
+ return False
955
+
956
+ # Step 2: Check inputs/output scale
957
+ mul_nodes = filter_nodes(match.nodes, aten.mul.Tensor)
958
+ # We need to find the mul node at the output since its scale value is the reciprocal of the input scale.
959
+ # The mul node at the output should connect to the cat node directly.
960
+ scales = [
961
+ (
962
+ mul_node.args[1]
963
+ if mul_node.args[0].target is check_node # type: ignore[union-attr]
964
+ else 1.0 / mul_node.args[1] # type: ignore[operator]
965
+ )
966
+ for mul_node in mul_nodes
967
+ ]
968
+ if not all(math.isclose(scale, scales[0], rel_tol=1e-5) for scale in scales): # type: ignore[arg-type]
969
+ return False
970
+
971
+ return True
972
+
973
+ return fn
974
+
975
+
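The check above exists because concatenation can only be performed directly on the quantized uint8 tensors when every input and the output share a single scale and zero point. A minimal standalone sketch of that identity (not from the PyTorch sources; qparams are made up):

import torch

scale, zp = 0.05, 128                      # shared qparams (made-up values)
a = torch.randint(0, 256, (2, 4), dtype=torch.uint8)
b = torch.randint(0, 256, (3, 4), dtype=torch.uint8)

def dequant(q):
    return (q.to(torch.float32) - zp) * scale

# cat in the uint8 domain, then dequantize ...
lhs = dequant(torch.cat([a, b], dim=0))
# ... equals dequantize, then cat in the float domain, because scale/zp are shared
rhs = torch.cat([dequant(a), dequant(b)], dim=0)
torch.testing.assert_close(lhs, rhs)
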
976
+ def _register_quantized_cat_lowering(
977
+ pattern,
978
+ computation_op,
979
+ ):
980
+ @register_lowering_pattern(
981
+ pattern,
982
+ extra_check=_is_input_output_same_scale_zp(aten.cat.default),
983
+ )
984
+ def qcat(match: Match, inputs, dim, **kwargs):
985
+ # inputs has the format: [[x1, x1_dq_dtype, x1_zp, x1_scale], ...]
986
+ uint8_inputs = [input[0] for input in inputs]
987
+ return L[computation_op](uint8_inputs, dim)
988
+
989
+ return qcat
990
+
991
+
992
+ _raw_dequantize_per_tensor_activation_pattern = CallFunction(
993
+ aten.mul.Tensor,
994
+ CallFunction(
995
+ aten.sub.Tensor,
996
+ CallFunction(
997
+ prims.convert_element_type.default,
998
+ Arg(),
999
+ Arg(),
1000
+ ),
1001
+ Arg(),
1002
+ ),
1003
+ Arg(),
1004
+ )
1005
+
1006
+
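The raw pattern above is just the decomposed affine dequantization x_hat = (q.float() - zero_point) * scale, which is why it is a mul of a sub of a convert_element_type. A small self-contained sketch of the round trip (made-up scale/zero_point, not from the PyTorch sources):

import torch

scale, zero_point = 0.1, 128               # made-up qparams
x = torch.linspace(-3.0, 3.0, 8)
q = torch.clamp(torch.round(x / scale) + zero_point, 0, 255).to(torch.uint8)
# decomposed dequantize-per-tensor: convert_element_type -> sub -> mul
x_hat = (q.to(torch.float32) - zero_point) * scale
assert torch.allclose(x, x_hat, atol=scale / 2 + 1e-6)   # error bounded by half a quantization step
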
1007
+ def _register_quantization_cat():
1008
+ dequantize_cat_pattern = CallFunction(
1009
+ aten.cat.default,
1010
+ ListOf(_raw_dequantize_per_tensor_activation_pattern),
1011
+ KeywordArg("dim"),
1012
+ )
1013
+ _register_quantized_cat_lowering(
1014
+ generate_pattern_with_output_quant(dequantize_cat_pattern),
1015
+ aten.cat,
1016
+ )
1017
+
1018
+
1019
+ def _register_quantized_reshape_lowering(
1020
+ pattern,
1021
+ computation_op,
1022
+ ):
1023
+ @register_lowering_pattern(
1024
+ pattern,
1025
+ extra_check=_is_input_output_same_scale_zp(aten.reshape.default),
1026
+ )
1027
+ def qreshape(match: Match, *args, **kwargs):
1028
+ qx = kwargs["x"]
1029
+ shape = kwargs["shape"]
1030
+ counters["inductor"]["qreshape_matcher_count"] += 1
1031
+ counters["inductor"]["qreshape_matcher_nodes"] += len(match.nodes)
1032
+ return L[computation_op](qx, shape)
1033
+
1034
+ return qreshape
1035
+
1036
+
1037
+ def _register_quantization_reshape():
1038
+ dequantize_reshape_pattern = CallFunction(
1039
+ torch.ops.aten.reshape.default,
1040
+ dequantize_per_tensor_activation_pattern,
1041
+ KeywordArg("shape"),
1042
+ )
1043
+ _register_quantized_reshape_lowering(
1044
+ generate_pattern_with_output_quant(dequantize_reshape_pattern),
1045
+ aten.reshape,
1046
+ )
1047
+
1048
+
1049
+ def _register_quantization_lowerings():
1050
+ _register_quantization_unary_fusion()
1051
+ _register_quantization_binary_fusion()
1052
+ _register_quantization_maxpool2d()
1053
+ _register_quantization_cat()
1054
+ _register_quantization_reshape()
1055
+
1056
+
1057
+ def _is_valid_dequant_promotion_pattern(dtype=torch.float32):
1058
+ def _inner(match):
1059
+ assert dtype in [torch.float32, torch.bfloat16]
1060
+ dequant_pattern_end_node = match.output_node()
1061
+ if dequant_pattern_end_node.target not in [
1062
+ aten.mul.Tensor,
1063
+ prims.convert_element_type.default,
1064
+ aten.reshape.default,
1065
+ ]:
1066
+ return False
1067
+
1068
+ if dequant_pattern_end_node.target is aten.reshape.default:
1069
+ mul_node = (
1070
+ dequant_pattern_end_node.args[0] # pattern: linear <- reshape <- mul
1071
+ if dtype == torch.float32
1072
+ else dequant_pattern_end_node.args[0].args[
1073
+ 0
1074
+ ] # pattern: linear <- reshape <- to_bf16 <- mul
1075
+ )
1076
+ else:
1077
+ mul_node = (
1078
+ dequant_pattern_end_node # pattern: linear <- mul
1079
+ if dtype == torch.float32
1080
+ else dequant_pattern_end_node.args[
1081
+ 0
1082
+ ] # pattern: linear <- to_bf16 <- mul
1083
+ )
1084
+
1085
+ sub_node = mul_node.args[0]
1086
+ to_fp32_node = sub_node.args[0]
1087
+ if (
1088
+ mul_node.target is aten.mul.Tensor
1089
+ and sub_node.target is aten.sub.Tensor
1090
+ and to_fp32_node.target is prims.convert_element_type.default
1091
+ and len(list(dequant_pattern_end_node.users)) > 1
1092
+ ):
1093
+ # If the dequant pattern has more than 1 user, then the dequant is promoted
1094
+ return True
1095
+ return False
1096
+
1097
+ return _inner
1098
+
1099
+
1100
+ def _register_dequant_promotion_pass(pattern, pass_number, dtype=torch.float32):
1101
+ @register_freezing_graph_pattern(
1102
+ pattern,
1103
+ extra_check=_is_valid_dequant_promotion_pattern(dtype),
1104
+ pass_number=pass_number,
1105
+ )
1106
+ def dequant_promotion(match: Match, *args, **kwargs):
1107
+ # Dequant_promotion will transform
1108
+ # graph 1:
1109
+ # quant
1110
+ # + - - - | - - - +
1111
+ # | dequant |
1112
+ # | / \ |
1113
+ # | node1 node2 |
1114
+ # + - | - - - | - +
1115
+ # quant quant
1116
+ # into:
1117
+ # graph 2:
1118
+ # quant
1119
+ # + - - / - \ - - +
1120
+ # |dequant dequant|
1121
+ # | | | |
1122
+ # | node1 node2 |
1123
+ # + - | - - - | - +
1124
+ # quant quant
1125
+ # In graph 1, the dequant node is shared by node1 and node2,
1126
+ # so neither node1 nor node2 can form an int8
1127
+ # fusion pattern.
1128
+ # After this transformation, graph 2 can hit the int8
1129
+ # fusion pattern: dequant-node-quant, respectively for
1130
+ # node1 and node2.
1131
+ assert dtype in [torch.float32, torch.bfloat16]
1132
+
1133
+ def clone_to_new_node(graph, source_node, user_node):
1134
+ # Clone the source_node to a new node
1135
+ # Replace user_node's input from source_node to new_node
1136
+ assert (
1137
+ source_node.op == "call_function"
1138
+ ), "clone_to_new_node only supports node.op call_function"
1139
+ with graph.inserting_before(user_node):
1140
+ new_node = graph.call_function(
1141
+ source_node.target,
1142
+ args=source_node.args,
1143
+ kwargs=source_node.kwargs,
1144
+ )
1145
+ new_node.meta = copy.copy(source_node.meta)
1146
+ user_node.replace_input_with(source_node, new_node)
1147
+ return new_node
1148
+
1149
+ # Find the start node and end node of a dequant pattern
1150
+ # * End node should be the match.output_node()
1151
+ # * Start node should be the node that converts the dtype to float32
1152
+ dequant_pattern_end_node = match.output_node()
1153
+ assert dequant_pattern_end_node.target in [
1154
+ aten.mul.Tensor,
1155
+ prims.convert_element_type.default,
1156
+ aten.reshape.default,
1157
+ ]
1158
+
1159
+ # For a dequant pattern, we expect to see the node list as:
1160
+ # * OPT(aten.reshape.default)
1161
+ # * OPT(prims.convert_element_type.default) (to_bf16)
1162
+ # * aten.mul
1163
+ # * aten.sub
1164
+ # * prims.convert_element_type.default (to_fp32)
1165
+ def _find_first_node_in_dequant_pattern(_node):
1166
+ if (
1167
+ _node.target is prims.convert_element_type.default
1168
+ and _node.args[1] == torch.float32
1169
+ ):
1170
+ # For a dequant pattern, we expect the start node to be a to_fp32 node
1171
+ return _node
1172
+ else:
1173
+ assert (
1174
+ len(_node.args) >= 1
1175
+ ), "In a dequant pattern, each node should have at least 1 arg."
1176
+ return _find_first_node_in_dequant_pattern(_node.args[0])
1177
+
1178
+ dequant_pattern_start_node = _find_first_node_in_dequant_pattern(
1179
+ dequant_pattern_end_node
1180
+ )
1181
+
1182
+ # Clone the dequant pattern for each user node
1183
+ graph = match.graph
1184
+ user_node_list = list(dequant_pattern_end_node.users)
1185
+ for user_node in user_node_list[1:]:
1186
+ _source_node = dequant_pattern_end_node
1187
+ _user_node = user_node
1188
+ while _source_node != dequant_pattern_start_node.args[0]:
1189
+ _user_node = clone_to_new_node(graph, _source_node, _user_node)
1190
+ _source_node = _source_node.args[0] # type: ignore[assignment]
1191
+
1192
+ counters["inductor"]["dequant_promotion_matcher_count"] += 1
1193
+ counters["inductor"]["dequant_promotion_matcher_nodes"] += len(match.nodes)
1194
+
1195
+
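The transformation above boils down to giving every extra user of a shared dequant chain its own copy of that chain. A minimal standalone FX sketch of the same idea on a toy graph (the module and helper below are illustrative, not part of Inductor):

import copy
import operator

import torch
from torch import fx

def duplicate_shared_node(graph: fx.Graph, shared: fx.Node) -> None:
    # Give every user after the first its own clone of `shared`, mirroring
    # what dequant promotion does for a shared dequant node.
    for user in list(shared.users)[1:]:
        with graph.inserting_before(user):
            clone = graph.call_function(shared.target, shared.args, shared.kwargs)
            clone.meta = copy.copy(shared.meta)
        user.replace_input_with(shared, clone)

class Toy(torch.nn.Module):
    def forward(self, x):
        d = x * 2.0                 # stands in for the shared dequant node
        return d + 1.0, d - 1.0     # two users of the shared node

gm = fx.symbolic_trace(Toy())
shared = next(n for n in gm.graph.nodes if n.target is operator.mul)
duplicate_shared_node(gm.graph, shared)
gm.graph.lint()
gm.recompile()
print(gm.graph)                     # now contains two mul nodes, one per user
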
1196
+ def _is_valid_dequant_conv2d_pattern(dtype):
1197
+ def _inner(match):
1198
+ # Here we do some further checks to ensure that:
1199
+ # 1. It's a conv2d node with dim 4, since we only support lowering of conv2d now.
1200
+ # 2. The dequant pattern has only 1 user: the conv2d node.
1201
+ # If these conditions aren't met, we will not
1202
+ # insert a weight prepack node into the matched pattern.
1203
+ conv_node = match.output_node()
1204
+ assert conv_node.target is aten.convolution.default
1205
+ input_meta_value = conv_node.args[0].meta.get("val")
1206
+ weight_meta_value = conv_node.args[1].meta.get("val")
1207
+ for meta_value in [input_meta_value, weight_meta_value]:
1208
+ if (
1209
+ meta_value is None
1210
+ or meta_value.device.type != "cpu"
1211
+ or meta_value.dim() != 4
1212
+ ):
1213
+ # Only support conv2d now
1214
+ return False
1215
+
1216
+ assert dtype in [torch.float32, torch.bfloat16]
1217
+ if dtype == torch.float32:
1218
+ mul_node = conv_node.args[0]
1219
+ else:
1220
+ convert_to_bf16 = conv_node.args[0]
1221
+ mul_node = convert_to_bf16.args[0]
1222
+ sub_node = mul_node.args[0]
1223
+ to_fp32_node = sub_node.args[0]
1224
+
1225
+ assert to_fp32_node.target is prims.convert_element_type.default
1226
+ assert sub_node.target is aten.sub.Tensor
1227
+ assert mul_node.target is aten.mul.Tensor
1228
+ if (
1229
+ len(list(to_fp32_node.users)) != 1
1230
+ or len(list(sub_node.users)) != 1
1231
+ or len(list(mul_node.users)) != 1
1232
+ ):
1233
+ # Ensure the dequant pattern only has 1 user
1234
+ # since we will delete the dequant pattern here
1235
+ return False
1236
+ return True
1237
+
1238
+ return _inner
1239
+
1240
+
1241
+ def _register_qconv_weight_prepack_pass(pattern, pass_number, dtype=torch.float32):
1242
+ @register_freezing_graph_pattern(
1243
+ pattern,
1244
+ extra_check=_is_valid_dequant_conv2d_pattern(dtype),
1245
+ pass_number=pass_number,
1246
+ )
1247
+ def qconv_weight_prepack(match: Match, *args, **kwargs):
1248
+ """
1249
+ Match the pattern:
1250
+ int8 activation
1251
+ |
1252
+ dequant_per_tensor
1253
+ |
1254
+ Conv2d <- optional(aten.clone.default) <- dequant_per_channel <- int8_weight
1255
+
1256
+ Insert weight prepack node and change the pattern to:
1257
+ int8 activation
1258
+ |
1259
+ onednn.qconv2d_pointwise <- onednn.qconv_prepack <- int8_weight
1260
+ """
1261
+ assert dtype in [torch.float32, torch.bfloat16]
1262
+ conv_node = match.output_node()
1263
+ assert conv_node.target is aten.convolution.default
1264
+ if dtype == torch.float32:
1265
+ mul_node = conv_node.args[0]
1266
+ else:
1267
+ convert_to_bf16 = conv_node.args[0]
1268
+ mul_node = convert_to_bf16.args[0] # type: ignore[union-attr]
1269
+ sub_node = mul_node.args[0] # type: ignore[union-attr]
1270
+ to_fp32_node = sub_node.args[0] # type: ignore[union-attr]
1271
+ has_clone_to_channel_last_node_in_pattern = (
1272
+ conv_node.args[1].target is aten.clone.default # type: ignore[union-attr]
1273
+ )
1274
+ clone_node = (
1275
+ conv_node.args[1] if has_clone_to_channel_last_node_in_pattern else None
1276
+ )
1277
+
1278
+ if dtype == torch.float32:
1279
+ dequant_per_channel = (
1280
+ clone_node.args[0] # type: ignore[union-attr]
1281
+ if has_clone_to_channel_last_node_in_pattern
1282
+ else conv_node.args[1]
1283
+ )
1284
+ else:
1285
+ weight_to_bf16_node = (
1286
+ clone_node.args[0] # type: ignore[union-attr]
1287
+ if has_clone_to_channel_last_node_in_pattern
1288
+ else conv_node.args[1]
1289
+ )
1290
+ dequant_per_channel = weight_to_bf16_node.args[0] # type: ignore[union-attr]
1291
+
1292
+ assert (
1293
+ dequant_per_channel.target # type: ignore[union-attr]
1294
+ is quantized_decomposed.dequantize_per_channel.default
1295
+ )
1296
+
1297
+ # Activation QParams
1298
+ qx, x_zp, x_scale = (
1299
+ kwargs["x"],
1300
+ kwargs["x_zp"],
1301
+ kwargs["x_scale"],
1302
+ )
1303
+
1304
+ # Weight QParams
1305
+ qw, w_scale, w_zp = (
1306
+ kwargs["q_weight"],
1307
+ kwargs["w_scale"],
1308
+ kwargs["w_zp"],
1309
+ )
1310
+
1311
+ # Conv Params
1312
+ bias, stride, padding, dilation, groups = (
1313
+ kwargs["b"],
1314
+ kwargs["stride"],
1315
+ kwargs["padding"],
1316
+ kwargs["dilation"],
1317
+ kwargs["groups"],
1318
+ )
1319
+
1320
+ x_shape = qx.meta.get("tensor_meta").shape
1321
+ if has_free_symbols(x_shape):
1322
+ # For the dynamic shape case, we can't get the activation shape ahead of runtime.
1323
+ x_shape = None
1324
+ graph = match.graph
1325
+ with graph.inserting_before(conv_node):
1326
+ # Insert weight prepack node and the QConv node
1327
+ packed_weight_inputs = (
1328
+ qw,
1329
+ w_scale,
1330
+ x_scale,
1331
+ x_zp,
1332
+ stride,
1333
+ padding,
1334
+ dilation,
1335
+ groups,
1336
+ x_shape,
1337
+ )
1338
+ packed_weight_op = torch.ops.onednn.qconv_prepack
1339
+ prepack_weight_node = graph.call_function(
1340
+ packed_weight_op, args=packed_weight_inputs
1341
+ )
1342
+
1343
+ new_args: Tuple[Any, ...] = (
1344
+ qx,
1345
+ x_scale,
1346
+ x_zp,
1347
+ prepack_weight_node,
1348
+ w_scale,
1349
+ w_zp,
1350
+ bias,
1351
+ stride,
1352
+ padding,
1353
+ dilation,
1354
+ groups,
1355
+ 1.0, # inv_output_scale
1356
+ 0, # output_zero_point
1357
+ dtype, # output_dtype
1358
+ "none", # attr
1359
+ [], # scalars
1360
+ "", # algorithm
1361
+ )
1362
+ new_conv_node = graph.call_function(
1363
+ torch.ops.onednn.qconv2d_pointwise.default, args=new_args
1364
+ )
1365
+ conv_node.replace_all_uses_with(new_conv_node)
1366
+ new_conv_node.meta.update(conv_node.meta)
1367
+
1368
+ # Erase the original conv node
1369
+ graph.erase_node(conv_node)
1370
+ # Erase the dequant pattern
1371
+ if dtype == torch.bfloat16:
1372
+ graph.erase_node(convert_to_bf16) # type: ignore[possibly-undefined]
1373
+ # Erase the dequant pattern
1374
+ graph.erase_node(mul_node)
1375
+ graph.erase_node(sub_node)
1376
+ graph.erase_node(to_fp32_node)
1377
+ # Erase the dequant per channel pattern
1378
+ if clone_node is not None:
1379
+ graph.erase_node(clone_node)
1380
+ if dtype == torch.bfloat16:
1381
+ graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
1382
+ graph.erase_node(dequant_per_channel)
1383
+ counters["inductor"]["qconv2d_weight_prepack_matcher_count"] += 1
1384
+ counters["inductor"]["qconv2d_weight_prepack_matcher_nodes"] += len(
1385
+ match.nodes
1386
+ )
1387
+
1388
+
1389
+ def _generate_dequant_convolution_node_pattern(
1390
+ _dequant_per_channel_pattern, dtype=torch.float32
1391
+ ):
1392
+ assert dtype in [torch.float32, torch.bfloat16]
1393
+ dequant_convolution_node_pattern = CallFunction(
1394
+ aten.convolution.default,
1395
+ _may_generate_pattern_with_dtype_convert(
1396
+ dequantize_per_tensor_activation_pattern,
1397
+ KeywordArg("autocast_act_dtype"),
1398
+ dtype == torch.bfloat16,
1399
+ ),
1400
+ _dequant_per_channel_pattern,
1401
+ KeywordArg("b"),
1402
+ KeywordArg("stride"),
1403
+ KeywordArg("padding"),
1404
+ KeywordArg("dilation"),
1405
+ KeywordArg("is_transposed"),
1406
+ KeywordArg("out_padding"),
1407
+ KeywordArg("groups"),
1408
+ )
1409
+ return dequant_convolution_node_pattern
1410
+
1411
+
1412
+ def _generate_qconv_weight_prepack_patterns(dtype=torch.float32):
1413
+ assert dtype in [torch.float32, torch.bfloat16]
1414
+ return (
1415
+ _generate_dequant_convolution_node_pattern(
1416
+ dequantize_per_channel_weight_pattern
1417
+ if dtype == torch.float32
1418
+ else dequantize_per_channel_to_bf16_weight_pattern,
1419
+ dtype,
1420
+ ),
1421
+ # There is another pattern due to the pass of convert_conv_weights_to_channels_last
1422
+ # https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/_inductor/freezing.py#L338-L362.
1423
+ # Depending on some heuristics, it may or may not insert a to(channel_last) node
1424
+ # between the convolution and dequant_per_channel nodes
1425
+ _generate_dequant_convolution_node_pattern(
1426
+ dequantize_per_channel_clone_weight_pattern
1427
+ if dtype == torch.float32
1428
+ else dequantize_per_channel_to_bf16_clone_weight_pattern,
1429
+ dtype,
1430
+ ),
1431
+ )
1432
+
1433
+
1434
+ def _get_linear_node(match, input_dim_exceeds_two, input_contiguous):
1435
+ output_reshape_node = None
1436
+ if input_dim_exceeds_two:
1437
+ if input_contiguous:
1438
+ output_reshape_node = match.output_node()
1439
+ assert output_reshape_node.target is aten.reshape.default
1440
+ linear_node = output_reshape_node.args[0]
1441
+ else:
1442
+ linear_nodes = filter_nodes(match.nodes, aten.bmm.default)
1443
+ assert len(linear_nodes) == 1
1444
+ linear_node = linear_nodes[0]
1445
+ else:
1446
+ linear_node = match.output_node()
1447
+
1448
+ assert linear_node.target in (
1449
+ aten.addmm.default,
1450
+ aten.mm.default,
1451
+ aten.bmm.default,
1452
+ )
1453
+ return linear_node, output_reshape_node
1454
+
1455
+
1456
+ def _get_linear_dq_mul_node(
1457
+ linear_node, input_index, dtype, input_dim_exceeds_two, input_contiguous
1458
+ ):
1459
+ act_reshape_node = None
1460
+ activation_to_bf16_node = None
1461
+ act_expand_node = None
1462
+ if input_dim_exceeds_two:
1463
+ if input_contiguous:
1464
+ act_reshape_node = linear_node.args[input_index]
1465
+ assert act_reshape_node.target is aten.reshape.default
1466
+ if dtype == torch.float32:
1467
+ # pattern: linear -> reshape -> mul
1468
+ mul_node = act_reshape_node.args[0]
1469
+ else:
1470
+ # pattern: linear -> reshape -> to_bf16 -> mul
1471
+ activation_to_bf16_node = act_reshape_node.args[0]
1472
+ mul_node = activation_to_bf16_node.args[0]
1473
+ else:
1474
+ # bmm pattern decomposed from linear when input dim exceeds 2 and not contiguous
1475
+ act_expand_node = linear_node.args[input_index]
1476
+ assert act_expand_node.target is aten.expand.default
1477
+ if dtype == torch.float32:
1478
+ mul_node = act_expand_node.args[0]
1479
+ else:
1480
+ activation_to_bf16_node = act_expand_node.args[0]
1481
+ mul_node = activation_to_bf16_node.args[0]
1482
+ else:
1483
+ if dtype == torch.float32:
1484
+ # pattern: linear -> mul
1485
+ mul_node = linear_node.args[input_index]
1486
+ else:
1487
+ # pattern: linear -> to_bf16 -> mul
1488
+ activation_to_bf16_node = linear_node.args[input_index]
1489
+ mul_node = activation_to_bf16_node.args[0]
1490
+ return mul_node, act_reshape_node, activation_to_bf16_node, act_expand_node
1491
+
1492
+
1493
+ def _is_valid_dequant_linear_pattern(dtype, input_dim_exceeds_two, input_contiguous):
1494
+ def _inner(match):
1495
+ # Check that the dequant pattern has only 1 user.
1496
+ (
1497
+ linear_node,
1498
+ _,
1499
+ ) = _get_linear_node(match, input_dim_exceeds_two, input_contiguous)
1500
+
1501
+ input_index = 1 if linear_node.target is aten.addmm.default else 0
1502
+ assert dtype in [torch.float32, torch.bfloat16]
1503
+
1504
+ (
1505
+ mul_node,
1506
+ _,
1507
+ _,
1508
+ _,
1509
+ ) = _get_linear_dq_mul_node(
1510
+ linear_node, input_index, dtype, input_dim_exceeds_two, input_contiguous
1511
+ )
1512
+
1513
+ sub_node = mul_node.args[0]
1514
+ to_fp32_node = sub_node.args[0]
1515
+
1516
+ assert to_fp32_node.target is prims.convert_element_type.default
1517
+ assert sub_node.target is aten.sub.Tensor
1518
+ assert mul_node.target is aten.mul.Tensor
1519
+ if (
1520
+ len(list(to_fp32_node.users)) != 1
1521
+ or len(list(sub_node.users)) != 1
1522
+ or len(list(mul_node.users)) != 1
1523
+ ):
1524
+ # Ensure the dequant pattern only has 1 user
1525
+ # since we will delete the dequant pattern here
1526
+ return False
1527
+
1528
+ # Extra check for bmm pattern
1529
+ if input_dim_exceeds_two and not input_contiguous:
1530
+ # Check for act
1531
+ # The act expand size should be exactly the same as the act size
1532
+ act_expand_size = match.kwargs["act_expand_size"]
1533
+ act_node = match.kwargs["x"]
1534
+ if not (
1535
+ hasattr(act_node, "meta")
1536
+ and isinstance(act_node.meta.get("val", None), torch.Tensor)
1537
+ and (act_node.meta["val"].size() == torch.Size(act_expand_size))
1538
+ ):
1539
+ return False
1540
+
1541
+ # Check for wgt
1542
+ # wgt permute dims should be [1, 0]
1543
+ wgt_permute_dims = match.kwargs["permute_axes"]
1544
+ if wgt_permute_dims != [1, 0]:
1545
+ return False
1546
+
1547
+ # Check the following wgt size items:
1548
+ # wgt before expand should have dim 2
1549
+ # Expand size should have dim 3
1550
+ # Expand size[0] should be the same as act size[0]
1551
+ # Expand size[1] should be the same as wgt size[1]
1552
+ # Expand size[2] should be the same as wgt size[0]
1553
+ qweight_node = match.kwargs["q_weight"]
1554
+ wgt_expand_size = match.kwargs["wgt_expand_size"]
1555
+ if not (
1556
+ hasattr(qweight_node, "meta")
1557
+ and isinstance(qweight_node.meta.get("val", None), torch.Tensor)
1558
+ and len(qweight_node.meta["val"].size()) == 2
1559
+ and len(wgt_expand_size) == 3
1560
+ and wgt_expand_size[0] == act_node.meta["val"].size()[0]
1561
+ and wgt_expand_size[1] == qweight_node.meta["val"].size()[1]
1562
+ and wgt_expand_size[2] == qweight_node.meta["val"].size()[0]
1563
+ ):
1564
+ return False
1565
+
1566
+ return True
1567
+
1568
+ return _inner
1569
+
1570
+
1571
+ def _register_qlinear_weight_prepack_pass(
1572
+ pattern,
1573
+ pass_number,
1574
+ dtype=torch.float32,
1575
+ input_dim_exceeds_two=False,
1576
+ input_contiguous=True,
1577
+ ):
1578
+ @register_freezing_graph_pattern(
1579
+ pattern,
1580
+ extra_check=_is_valid_dequant_linear_pattern(
1581
+ dtype, input_dim_exceeds_two, input_contiguous
1582
+ ),
1583
+ pass_number=pass_number,
1584
+ )
1585
+ def qlinear_weight_prepack(match: Match, *args, **kwargs):
1586
+ """
1587
+ Match the pattern:
1588
+ int8 activation
1589
+ |
1590
+ dequant_per_tensor
1591
+ |
1592
+ mm/addmm <- t <- dequant_per_channel <- int8_weight
1593
+
1594
+ Insert weight prepack node and change the pattern to:
1595
+ int8 activation
1596
+ |
1597
+ onednn.qlinear_pointwise <- onednn.qlinear_prepack <- int8_weight
1598
+ """
1599
+ assert dtype in [torch.float32, torch.bfloat16]
1600
+ (
1601
+ linear_node,
1602
+ output_reshape_node,
1603
+ ) = _get_linear_node(match, input_dim_exceeds_two, input_contiguous)
1604
+ input_index = 1 if linear_node.target is aten.addmm.default else 0
1605
+ weight_index = input_index + 1
1606
+
1607
+ (
1608
+ mul_node,
1609
+ act_reshape_node,
1610
+ activation_to_bf16_node,
1611
+ act_expand_node,
1612
+ ) = _get_linear_dq_mul_node(
1613
+ linear_node, input_index, dtype, input_dim_exceeds_two, input_contiguous
1614
+ )
1615
+
1616
+ sub_node = mul_node.args[0]
1617
+ to_fp32_node = sub_node.args[0]
1618
+
1619
+ if input_dim_exceeds_two and not input_contiguous:
1620
+ wgt_expand_node = linear_node.args[weight_index]
1621
+ assert wgt_expand_node.target is aten.expand.default
1622
+ t_node = wgt_expand_node.args[0]
1623
+ else:
1624
+ t_node = linear_node.args[weight_index]
1625
+
1626
+ if dtype == torch.float32:
1627
+ dequant_per_channel = t_node.args[0]
1628
+ else:
1629
+ weight_to_bf16_node = t_node.args[0]
1630
+ dequant_per_channel = weight_to_bf16_node.args[0]
1631
+ assert (
1632
+ dequant_per_channel.target
1633
+ is quantized_decomposed.dequantize_per_channel.default
1634
+ )
1635
+
1636
+ # Activation QParams
1637
+ qx, x_zp, x_scale = (
1638
+ kwargs["x"],
1639
+ kwargs["x_zp"],
1640
+ kwargs["x_scale"],
1641
+ )
1642
+
1643
+ # Weight QParams
1644
+ qw, w_scale, w_zp = (
1645
+ kwargs["q_weight"],
1646
+ kwargs["w_scale"],
1647
+ kwargs["w_zp"],
1648
+ )
1649
+
1650
+ # Params
1651
+ bias = kwargs["b"] if "b" in kwargs else None
1652
+
1653
+ x_shape = qx.meta.get("tensor_meta").shape
1654
+ if has_free_symbols(x_shape):
1655
+ # For the dynamic shape case, we can't get the activation shape ahead of runtime.
1656
+ x_shape = None
1657
+ graph = match.graph
1658
+ with graph.inserting_before(linear_node):
1659
+ # Insert weight prepack node and the qlinear node
1660
+ packed_weight_inputs = (
1661
+ qw,
1662
+ x_shape,
1663
+ )
1664
+ packed_weight_op = torch.ops.onednn.qlinear_prepack
1665
+ prepack_weight_node = graph.call_function(
1666
+ packed_weight_op, args=packed_weight_inputs
1667
+ )
1668
+
1669
+ new_args: Tuple[Any, ...] = (
1670
+ qx,
1671
+ x_scale,
1672
+ x_zp,
1673
+ prepack_weight_node,
1674
+ w_scale,
1675
+ w_zp,
1676
+ bias,
1677
+ 1.0, # output_scale
1678
+ 0, # output_zero_point
1679
+ dtype, # output_dtype
1680
+ "none", # post op name
1681
+ [], # post op args
1682
+ "", # post op algorithm
1683
+ )
1684
+ Node = torch.fx.node.Node
1685
+ if isinstance(x_scale, Node) and isinstance(x_zp, Node):
1686
+ new_linear_node = graph.call_function(
1687
+ torch.ops.onednn.qlinear_pointwise.tensor, args=new_args
1688
+ )
1689
+ else:
1690
+ new_linear_node = graph.call_function(
1691
+ torch.ops.onednn.qlinear_pointwise.default, args=new_args
1692
+ )
1693
+ if input_dim_exceeds_two:
1694
+ if input_contiguous:
1695
+ output_reshape_node.replace_all_uses_with(new_linear_node)
1696
+ new_linear_node.meta.update(output_reshape_node.meta)
1697
+ else:
1698
+ if bias:
1699
+ output_add_node_for_bias = match.output_node()
1700
+ assert output_add_node_for_bias.target is aten.add.Tensor
1701
+ output_add_node_for_bias.replace_all_uses_with(new_linear_node)
1702
+ new_linear_node.meta.update(output_add_node_for_bias.meta)
1703
+ else:
1704
+ linear_node.replace_all_uses_with(new_linear_node)
1705
+ new_linear_node.meta.update(linear_node.meta)
1706
+ else:
1707
+ linear_node.replace_all_uses_with(new_linear_node)
1708
+ new_linear_node.meta.update(linear_node.meta)
1709
+
1710
+ # Erase the original linear node
1711
+ if input_dim_exceeds_two:
1712
+ if input_contiguous:
1713
+ graph.erase_node(output_reshape_node)
1714
+ elif not input_contiguous and bias:
1715
+ graph.erase_node(output_add_node_for_bias) # type: ignore[possibly-undefined]
1716
+ graph.erase_node(linear_node)
1717
+ if input_dim_exceeds_two:
1718
+ if input_contiguous:
1719
+ graph.erase_node(act_reshape_node)
1720
+ else:
1721
+ graph.erase_node(act_expand_node)
1722
+ graph.erase_node(wgt_expand_node) # type: ignore[possibly-undefined]
1723
+ if dtype == torch.bfloat16:
1724
+ graph.erase_node(activation_to_bf16_node)
1725
+ # Erase the dequant pattern
1726
+ graph.erase_node(mul_node)
1727
+ graph.erase_node(sub_node)
1728
+ graph.erase_node(to_fp32_node)
1729
+ # Erase the dequant per channel pattern
1730
+ graph.erase_node(t_node)
1731
+ if dtype == torch.bfloat16:
1732
+ graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
1733
+ graph.erase_node(dequant_per_channel)
1734
+
1735
+ counters["inductor"]["qlinear_weight_prepack_matcher_count"] += 1
1736
+ counters["inductor"]["qlinear_weight_prepack_matcher_nodes"] += len(
1737
+ match.nodes
1738
+ )
1739
+
1740
+
1741
+ def _generate_dequant_linear_node_pattern(
1742
+ _dequant_per_channel_pattern, dtype=torch.float32, input_dim_exceeds_two=False
1743
+ ):
1744
+ assert dtype in [torch.float32, torch.bfloat16]
1745
+ t_pattern = _generate_linear_t_pattern(_dequant_per_channel_pattern, dtype)
1746
+ dequant_linear_bias_pattern = _may_generate_pattern_with_reshape(
1747
+ CallFunction(
1748
+ aten.addmm.default,
1749
+ KeywordArg("b"),
1750
+ _may_generate_pattern_with_reshape(
1751
+ _may_generate_pattern_with_dtype_convert(
1752
+ dequantize_per_tensor_activation_pattern,
1753
+ KeywordArg("autocast_act_dtype"),
1754
+ dtype == torch.bfloat16,
1755
+ ),
1756
+ KeywordArg("act_reshape_size"),
1757
+ input_dim_exceeds_two,
1758
+ ),
1759
+ t_pattern,
1760
+ ),
1761
+ KeywordArg("output_reshape_size"),
1762
+ input_dim_exceeds_two,
1763
+ )
1764
+ dequant_linear_no_bias_pattern = _may_generate_pattern_with_reshape(
1765
+ CallFunction(
1766
+ aten.mm.default,
1767
+ _may_generate_pattern_with_reshape(
1768
+ _may_generate_pattern_with_dtype_convert(
1769
+ dequantize_per_tensor_activation_pattern,
1770
+ KeywordArg("autocast_act_dtype"),
1771
+ dtype == torch.bfloat16,
1772
+ ),
1773
+ KeywordArg("act_reshape_size"),
1774
+ input_dim_exceeds_two,
1775
+ ),
1776
+ t_pattern,
1777
+ ),
1778
+ KeywordArg("output_reshape_size"),
1779
+ input_dim_exceeds_two,
1780
+ )
1781
+ return dequant_linear_bias_pattern, dequant_linear_no_bias_pattern
1782
+
1783
+
1784
+ def _generate_dequant_bmm_node_pattern(
1785
+ _dequant_per_channel_pattern,
1786
+ dtype=torch.float32,
1787
+ with_bias=False,
1788
+ ):
1789
+ # Used when the linear activation has dim exceeding 2 and is not contiguous
1790
+ t_pattern = _generate_linear_t_pattern(_dequant_per_channel_pattern, dtype)
1791
+
1792
+ assert dtype in [torch.float32, torch.bfloat16]
1793
+ dequant_bmm_pattern = CallFunction(
1794
+ aten.bmm.default,
1795
+ CallFunction(
1796
+ aten.expand.default,
1797
+ _may_generate_pattern_with_dtype_convert(
1798
+ dequantize_per_tensor_activation_pattern,
1799
+ KeywordArg("autocast_act_dtype"),
1800
+ dtype == torch.bfloat16,
1801
+ ),
1802
+ KeywordArg("act_expand_size"),
1803
+ ),
1804
+ CallFunction(
1805
+ aten.expand.default,
1806
+ t_pattern,
1807
+ KeywordArg("wgt_expand_size"),
1808
+ ),
1809
+ )
1810
+
1811
+ def _generate_pattern_with_output_add(_dequant_bmm_pattern, _with_bias):
1812
+ if _with_bias:
1813
+ return CallFunction(
1814
+ aten.add.Tensor,
1815
+ _dequant_bmm_pattern,
1816
+ KeywordArg("b"),
1817
+ )
1818
+ else:
1819
+ return _dequant_bmm_pattern
1820
+
1821
+ return _generate_pattern_with_output_add(dequant_bmm_pattern, with_bias)
1822
+
1823
+
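Numerically, the expand/permute/bmm pattern built above is just a linear with a 3-D activation written as a batched matmul. A small sketch of the equivalence (made-up shapes, not from the PyTorch sources):

import torch

x = torch.randn(2, 3, 4)                  # 3-D activation
w = torch.randn(5, 4)                     # 2-D weight, as in nn.Linear
ref = torch.nn.functional.linear(x, w)

# permute the weight, broadcast it over the batch dim, and use a batched matmul
via_bmm = torch.bmm(x, w.permute(1, 0).expand(2, 4, 5))
torch.testing.assert_close(ref, via_bmm)
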
1824
+ def _generate_qlinear_weight_prepack_patterns(
1825
+ dtype=torch.float32,
1826
+ input_dim_exceeds_two=False,
1827
+ input_contiguous=True,
1828
+ with_bias=False,
1829
+ ):
1830
+ if input_dim_exceeds_two and not input_contiguous:
1831
+ return _generate_dequant_bmm_node_pattern(
1832
+ dequantize_per_channel_weight_pattern,
1833
+ dtype,
1834
+ with_bias,
1835
+ )
1836
+ else:
1837
+ return _generate_dequant_linear_node_pattern(
1838
+ dequantize_per_channel_weight_pattern, dtype, input_dim_exceeds_two
1839
+ )
1840
+
1841
+
1842
+ def _register_dequant_promotion():
1843
+ dequant_pattern_cases = itertools.product(
1844
+ [torch.float32, torch.bfloat16], [True, False]
1845
+ )
1846
+ for dtype, input_dim_exceeds_two in dequant_pattern_cases:
1847
+ # 4 dequantization patterns will be matched based on the dtype and input dimension size.
1848
+ # Case 1: int8-mixed-fp32, input dim size is 2
1849
+ # Case 2: int8-mixed-fp32, input dim size exceeds 2
1850
+ # Case 3: int8-mixed-bf16, input dim size is 2
1851
+ # Case 4: int8-mixed-bf16, input dim size exceeds 2
1852
+ # quant
1853
+ # + - - - - | - - - - +
1854
+ # | dequant |
1855
+ # | | |
1856
+ # | OPT(to_bf16) |
1857
+ # | | |
1858
+ # | OPT(reshape) |
1859
+ # | / \ |
1860
+ # | node1 node2 |
1861
+ # + - - | - - - | - - +
1862
+ # OPT(reshape) OPT(reshape)
1863
+ # + - - | - - - | - - +
1864
+ # OPT(to_fp32) OPT(to_fp32)
1865
+ # + - - | - - - | - - +
1866
+ # quant quant
1867
+ _register_dequant_promotion_pass(
1868
+ _may_generate_pattern_with_reshape(
1869
+ _may_generate_pattern_with_dtype_convert(
1870
+ dequantize_per_tensor_activation_pattern,
1871
+ KeywordArg("autocast_act_dtype"),
1872
+ dtype == torch.bfloat16,
1873
+ ),
1874
+ KeywordArg("act_reshape_size"),
1875
+ with_reshape=input_dim_exceeds_two,
1876
+ ),
1877
+ pass_number=0,
1878
+ dtype=dtype,
1879
+ ) # pass_number=0 to run before weight prepack
1880
+
1881
+
1882
+ def _register_qconv_weight_prepack():
1883
+ for dtype in [torch.float32, torch.bfloat16]:
1884
+ weight_prepack_patterns = _generate_qconv_weight_prepack_patterns(dtype)
1885
+ for weight_prepack_pattern in weight_prepack_patterns:
1886
+ # Register to pass_number 1, so we can do dequant promotion in pass_number 0.
1887
+ _register_qconv_weight_prepack_pass(
1888
+ weight_prepack_pattern, pass_number=1, dtype=dtype
1889
+ )
1890
+
1891
+
1892
+ def _register_qlinear_weight_prepack():
1893
+ # 6 linear-related patterns will be matched based on the dtype, input dimension size and input contiguity.
1894
+ # Then convert the pattern into a QLinear node with int8-mixed-fp32/bf16.
1895
+ # Case 1: int8-mixed-fp32, input dim size is 2
1896
+ # Case 2: int8-mixed-fp32, input dim size exceeds 2 and contiguous
1897
+ # Case 3: int8-mixed-bf16, input dim size is 2
1898
+ # Case 4: int8-mixed-bf16, input dim size exceeds 2 and contiguous
1899
+
1900
+ # + - - - - | - - - - - - | - - - - - +
1901
+ # | dq_per_tensor dq_per_channel |
1902
+ # | | | |
1903
+ # | OPT(to_bf16) OPT(to_bf16) |
1904
+ # | | | |
1905
+ # | OPT(reshape) permute |
1906
+ # | \ / |
1907
+ # | addmm/mm |
1908
+ # | | |
1909
+ # | OPT(reshape) |
1910
+
1911
+ # Case 5: int8-mixed-fp32, input dim size exceeds 2 and not contiguous
1912
+ # Case 6: int8-mixed-bf16, input dim size exceeds 2 and not contiguous
1913
+
1914
+ # + - - - - | - - - - - - | - - - - - +
1915
+ # | dq_per_tensor dq_per_channel |
1916
+ # | | | |
1917
+ # | OPT(to_bf16) OPT(to_bf16) |
1918
+ # | | | |
1919
+ # | expand permute |
1920
+ # | \ | |
1921
+ # | expand |
1922
+ # | / |
1923
+ # | bmm |
1924
+ # | | |
1925
+ # | OPT(add) |
1926
+
1927
+ linear_weight_prepack_cases = itertools.product(
1928
+ [torch.float32, torch.bfloat16], [True, False]
1929
+ )
1930
+
1931
+ # Step 1: register patterns from mm and addmm
1932
+ for dtype, input_dim_exceeds_two in linear_weight_prepack_cases:
1933
+ weight_prepack_patterns = _generate_qlinear_weight_prepack_patterns(
1934
+ dtype, input_dim_exceeds_two
1935
+ )
1936
+ for weight_prepack_pattern in weight_prepack_patterns:
1937
+ # Register to pass_number 1, so we can do dequant promotion in pass_number 0.
1938
+ _register_qlinear_weight_prepack_pass(
1939
+ weight_prepack_pattern,
1940
+ pass_number=1,
1941
+ dtype=dtype,
1942
+ input_dim_exceeds_two=input_dim_exceeds_two,
1943
+ )
1944
+
1945
+ # Step 2: register patterns from bmm
1946
+ # Linear might be decomposed into bmm when the input dim exceeds 2 and is not contiguous
1947
+ # refer to:
1948
+ # https://github.com/pytorch/pytorch/blob/
1949
+ # 80c07df659362a95da7cd4f3ec367abfdace38c4/torch/_decomp/decompositions.py#L3965-L3968
1950
+ # in this case, we can convert it back to qlinear
1951
+ for dtype, with_bias in itertools.product(
1952
+ [torch.float32, torch.bfloat16], [True, False]
1953
+ ):
1954
+ bmm_pattern = _generate_qlinear_weight_prepack_patterns(
1955
+ dtype=dtype,
1956
+ input_dim_exceeds_two=True,
1957
+ input_contiguous=False,
1958
+ with_bias=with_bias,
1959
+ )
1960
+ _register_qlinear_weight_prepack_pass(
1961
+ bmm_pattern,
1962
+ pass_number=1
1963
+ if with_bias
1964
+ else 2, # if with_bias, there is an output add, so we should try to match it first
1965
+ dtype=dtype,
1966
+ input_dim_exceeds_two=True,
1967
+ input_contiguous=False,
1968
+ )
1969
+
1970
+
1971
+ @functools.lru_cache(None)
1972
+ def _register_quantization_weight_pack_pass():
1973
+ # Step 1: Dequant promotion for int8-mixed-fp32/bf16
1974
+ _register_dequant_promotion()
1975
+
1976
+ # Step 2: QConv weight prepack
1977
+ _register_qconv_weight_prepack()
1978
+
1979
+ # Step 3: QLinear weight prepack
1980
+ _register_qlinear_weight_prepack()
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/reinplace.py ADDED
@@ -0,0 +1,537 @@
1
+ import operator
2
+ from collections import defaultdict
3
+ from dataclasses import dataclass
4
+ from typing import Any, Callable, Dict, List, Tuple
5
+
6
+ import torch
7
+ from torch._higher_order_ops.triton_kernel_wrap import triton_kernel_wrapper_functional
8
+ from torch._inductor import inductor_prims
9
+ from torch._inductor.fx_utils import get_node_storage, is_node_realized
10
+ from torch._inductor.lowering import (
11
+ inplaceable_foreach_ops as inplaceable_foreach_ops_lowerings,
12
+ )
13
+ from torch._inductor.virtualized import V
14
+ from torch.fx.immutable_collections import immutable_dict
15
+ from torch.fx.passes.reinplace import _is_view_op
16
+ from torch.utils import _pytree as pytree
17
+
18
+ aten = torch.ops.aten
19
+
20
+
21
+ @dataclass(frozen=True)
22
+ class InplaceableOp:
23
+ inplace_op: Callable[..., Any]
24
+ mutated_arg: int
25
+ extra_check: Callable[[torch.fx.Node], bool] = lambda node: True
26
+
27
+
28
+ _SCATTER_OP_TO_VIEW = {
29
+ torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
30
+ torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
31
+ torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
32
+ torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
33
+ }
34
+ _VIEW_OP_TO_SCATTER = {v: k for k, v in _SCATTER_OP_TO_VIEW.items()}
35
+
36
+
37
+ def graph_call_function(graph: torch.fx.Graph, fn, *args, **kwargs):
38
+ fake_args, fake_kwargs = pytree.tree_map(
39
+ lambda node: node.meta["val"] if isinstance(node, torch.fx.Node) else node,
40
+ (args, kwargs),
41
+ )
42
+ with V.fake_mode:
43
+ fake_result = fn(*fake_args, **fake_kwargs)
44
+
45
+ node = graph.call_function(fn, args, kwargs)
46
+ node.meta["val"] = fake_result
47
+ return node
48
+
49
+
50
+ @dataclass
51
+ class ViewOp:
52
+ target: torch._ops.OpOverload
53
+ args: Tuple[Any, ...]
54
+ kwargs: Dict[str, Any]
55
+
56
+
57
+ def _inplace_generalized_scatter(
58
+ inp: torch.Tensor, src: torch.Tensor, view_ops: List[ViewOp]
59
+ ) -> torch.Tensor:
60
+ tmp = inp
61
+ for view in view_ops:
62
+ fake_args, fake_kwargs = pytree.tree_map(
63
+ lambda node: node.meta["val"] if isinstance(node, torch.fx.Node) else node,
64
+ (view.args, view.kwargs),
65
+ )
66
+ tmp = view.target(tmp, *fake_args, **fake_kwargs)
67
+ tmp.copy_(src)
68
+ return inp
69
+
70
+
71
+ def _generalized_scatter(
72
+ inp: torch.Tensor, src: torch.Tensor, view_ops: List[ViewOp]
73
+ ) -> torch.Tensor:
74
+ out = inp.clone()
75
+ return _inplace_generalized_scatter(out, src, view_ops)
76
+
77
+
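A small standalone sketch of what these two helpers compute, using a single select view (shapes and values made up): the functional select_scatter result matches the clone + view + copy_ formulation.

import torch

inp = torch.zeros(3, 4)
src = torch.arange(4.0)

# functional form: returns a new tensor
ref = torch.select_scatter(inp, src, 0, 1)

# mutating form, as in _inplace_generalized_scatter: clone, walk the view chain, copy_ into it
out = inp.clone()
out.select(0, 1).copy_(src)
torch.testing.assert_close(ref, out)
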
78
+ def _decompose_scatter_functional_helper(
79
+ graph: torch.fx.Graph,
80
+ inp: torch.Tensor,
81
+ src: torch.Tensor,
82
+ view_ops: List[ViewOp],
83
+ ) -> torch.fx.Node:
84
+ view_op, view_ops_tail = view_ops[0], view_ops[1:]
85
+
86
+ if view_ops_tail:
87
+ view = graph_call_function(
88
+ graph, view_op.target, inp, *view_op.args, **view_op.kwargs
89
+ )
90
+ src = _decompose_scatter_functional_helper(graph, view, src, view_ops[1:]) # type: ignore[assignment]
91
+
92
+ return graph_call_function(
93
+ graph,
94
+ _VIEW_OP_TO_SCATTER[view_op.target],
95
+ inp,
96
+ src,
97
+ *view_op.args,
98
+ **view_op.kwargs,
99
+ )
100
+
101
+
102
+ def _decompose_scatter_functional(
103
+ graph: torch.fx.Graph, node: torch.fx.Node
104
+ ) -> torch.fx.Node:
105
+ """Decompose _generalized_scatter to a sequence of view_scatter operations
106
+
107
+ e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)])
108
+
109
+ will become
110
+
111
+ view = aten.slice(inp, 0, 0, 10)
112
+ view_updated = aten.slice_scatter(view, src, 1, 10, -10)
113
+ inp_updated = aten.slice_scatter(inp, view_updated, 0, 0, 10)
114
+ """
115
+ assert node.target is _generalized_scatter
116
+ inp, src, view_ops = node.args
117
+ return _decompose_scatter_functional_helper(graph, *node.args) # type: ignore[arg-type]
118
+
119
+
120
+ def _decompose_scatter_mutating(
121
+ graph: torch.fx.Graph, node: torch.fx.Node
122
+ ) -> torch.fx.Node:
123
+ """Decompose _generalized_scatter using mutations
124
+
125
+ e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)])
126
+
127
+ will become
128
+
129
+ inp_updated = aten.clone(inp)
130
+ slice1 = aten.slice(inp_updated, 0, 0, 10)
131
+ slice2 = aten.slice(slice1, 1, 10, -10)
132
+ slice2.copy_(src)
133
+
134
+ """
135
+ assert node.target in (_generalized_scatter, _inplace_generalized_scatter)
136
+ inp, src, view_ops = node.args
137
+ assert not node.kwargs
138
+
139
+ if node.target is _generalized_scatter:
140
+ inp = graph_call_function(graph, aten.clone, inp)
141
+
142
+ tmp = inp
143
+ for view in view_ops: # type: ignore[union-attr]
144
+ tmp = graph_call_function(graph, view.target, tmp, *view.args, **view.kwargs) # type: ignore[union-attr]
145
+
146
+ graph_call_function(graph, aten.copy_.default, tmp, src)
147
+ return inp # type: ignore[return-value]
148
+
149
+
150
+ # View ops whose view_scatter op is lowered into mutations anyway,
151
+ # so decomposing them is never a pessimisation.
152
+ _ALWAYS_MUTATING_SCATTER_OPS = {
153
+ aten.as_strided.default,
154
+ aten.diagonal.default,
155
+ }
156
+
157
+
158
+ def scatter_always_uses_mutation(node: torch.fx.Node) -> bool:
159
+ _, _, view_ops = node.args
160
+ return any(view.target in _ALWAYS_MUTATING_SCATTER_OPS for view in view_ops) # type: ignore[union-attr]
161
+
162
+
163
+ def should_reinplace_scatter(node: torch.fx.Node) -> bool:
164
+ """Choose between mutating and functional scatter decompositions
165
+
166
+ Reinplacing view scatter ops can be pessimising as it blocks fusion with the
167
+ input or output tensor computations. However, it is still profitable if the
168
+ input and output would have been realized anyway.
169
+
170
+ """
171
+ inp, src, view_ops = node.args
172
+
173
+ # Mutating scatter ops unconditionally realize input and output
174
+ if scatter_always_uses_mutation(node):
175
+ return True
176
+
177
+ if is_node_realized(inp) and is_node_realized(node): # type: ignore[arg-type]
178
+ return True
179
+
180
+ # If the output is copied back into the input, this forces both to be
181
+ # realized as the output is a user of the input
182
+ if inp.op == "placeholder" and any( # type: ignore[union-attr]
183
+ user.target is aten.copy_.default and user.args[0] is inp for user in node.users
184
+ ):
185
+ return True
186
+
187
+ # Otherwise, assume fusions will make functional variants profitable
188
+ return False
189
+
190
+
191
+ def decompose_generalized_scatter(graph: torch.fx.Graph) -> None:
192
+ """Replace _generalized_scatter with normal aten ops"""
193
+ for node in graph.nodes:
194
+ if node.target not in (_generalized_scatter, _inplace_generalized_scatter):
195
+ continue
196
+
197
+ use_mutation = (
198
+ node.target is _inplace_generalized_scatter
199
+ or scatter_always_uses_mutation(node)
200
+ )
201
+
202
+ with graph.inserting_before(node):
203
+ if use_mutation:
204
+ new_node = _decompose_scatter_mutating(graph, node)
205
+ else:
206
+ new_node = _decompose_scatter_functional(graph, node)
207
+
208
+ node.replace_all_uses_with(new_node)
209
+ graph.erase_node(node)
210
+
211
+
212
+ def canonicalize_view_scatter_ops(graph: torch.fx.Graph) -> None:
213
+ """
214
+ This canonicalizes view scatter ops into a generalized form, defined as:
215
+ def scatter(inp, src, views):
216
+ tmp = inp.clone()
217
+ for view in views:
218
+ tmp = view(tmp)
219
+ tmp.copy_(src)
220
+
221
+ We also fuse consecutive view scatter ops of the form
222
+ a = scatter(view2(self), src, [view1])
223
+ b = scatter(self, a, [view2])
224
+ which can be rewritten as
225
+ b = scatter(self, src, [view2, view1])
226
+ a = view2(b)
227
+
228
+ This is both more efficient as we only do a single scatter, and also
229
+ easier to reinplace since there is only one use of `self`
230
+ """
231
+
232
+ node_to_view_base: Dict[torch.fx.Node, torch.fx.Node] = {}
233
+ node_to_view_op: Dict[torch.fx.Node, List[ViewOp]] = defaultdict(list)
234
+
235
+ def handle_views(node: torch.fx.Node):
236
+ inp = node.args[0]
237
+ node_to_view_base[node] = node_to_view_base.get(inp, inp) # type: ignore[arg-type]
238
+ node_to_view_op[node] = [
239
+ *node_to_view_op[inp], # type: ignore[index]
240
+ ViewOp(
241
+ node.target, # type: ignore[arg-type]
242
+ args=node.args[1:],
243
+ kwargs=node.kwargs,
244
+ ),
245
+ ]
246
+
247
+ def handle_view_scatter(node: torch.fx.Node):
248
+ assert len(node.args) >= 2
249
+ inp, src = node.args[:2]
250
+
251
+ scatter_view_op = ViewOp(
252
+ _SCATTER_OP_TO_VIEW[node.target],
253
+ args=node.args[2:],
254
+ kwargs=node.kwargs,
255
+ )
256
+
257
+ def can_fuse():
258
+ if src.target is not _generalized_scatter: # type: ignore[union-attr]
259
+ return False
260
+ src_inp, src_src, src_scatter_view_op = src.args # type: ignore[union-attr]
261
+
262
+ inp_base = node_to_view_base.get(inp, inp) # type: ignore[arg-type]
263
+ src_base = node_to_view_base.get(src_inp, src_inp) # type: ignore[arg-type]
264
+ return inp_base is src_base and node_to_view_op[src_inp] == [ # type: ignore[index]
265
+ *node_to_view_op[inp], # type: ignore[index]
266
+ scatter_view_op,
267
+ ]
268
+
269
+ if not can_fuse():
270
+ with graph.inserting_before(node):
271
+ new_node = graph_call_function(
272
+ graph,
273
+ _generalized_scatter,
274
+ inp,
275
+ src,
276
+ [scatter_view_op],
277
+ )
278
+ node.replace_all_uses_with(new_node)
279
+ graph.erase_node(node)
280
+ return
281
+
282
+ src_inp, src_src, src_scatter_view_op = src.args # type: ignore[union-attr]
283
+ with graph.inserting_before(src):
284
+ new_node = graph_call_function(
285
+ graph,
286
+ _generalized_scatter,
287
+ inp,
288
+ src_src,
289
+ [scatter_view_op, *src_scatter_view_op], # type: ignore[misc]
290
+ )
291
+ node.replace_all_uses_with(new_node)
292
+ graph.erase_node(node)
293
+
294
+ if src.users: # type: ignore[union-attr]
295
+ new_src = graph_call_function(
296
+ graph,
297
+ _SCATTER_OP_TO_VIEW[node.target],
298
+ new_node,
299
+ *node.args[2:],
300
+ **node.kwargs,
301
+ )
302
+
303
+ handle_views(new_src)
304
+ src.replace_all_uses_with(new_src) # type: ignore[union-attr]
305
+
306
+ graph.erase_node(src)
307
+
308
+ for node in graph.nodes:
309
+ if _is_view_op(node.target):
310
+ handle_views(node)
311
+ elif node.target in _SCATTER_OP_TO_VIEW:
312
+ handle_view_scatter(node)
313
+
314
+
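A concrete sketch of the fusion described in the docstring above, with a select view as view2 and a slice view as view1 (tensor shapes made up, not from the PyTorch sources): the nested pair of *_scatter calls and the single clone + chained views + copy_ produce the same result.

import torch

base = torch.zeros(4, 4)
src = torch.ones(2)

# unfused: a = scatter(view2(base), src, [view1]); b = scatter(base, a, [view2])
row = base.select(0, 1)                         # view2
a = torch.slice_scatter(row, src, 0, 0, 2)      # scatter along view1
b = torch.select_scatter(base, a, 0, 1)         # scatter along view2

# fused: b = scatter(base, src, [view2, view1]) as one clone + chained views + copy_
fused = base.clone()
fused.select(0, 1)[0:2].copy_(src)
torch.testing.assert_close(b, fused)
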
315
+ inplaceable_ops = {
316
+ aten.index_put.default: InplaceableOp(aten.index_put_.default, 0),
317
+ aten._unsafe_index_put.default: InplaceableOp(inductor_prims._unsafe_index_put_, 0),
318
+ _generalized_scatter: InplaceableOp(
319
+ _inplace_generalized_scatter,
320
+ 0,
321
+ extra_check=should_reinplace_scatter,
322
+ ),
323
+ }
324
+
325
+ try:
326
+ c10d_functional = torch.ops._c10d_functional
327
+ inplaceable_collective_ops = {
328
+ c10d_functional.all_reduce.default: InplaceableOp(
329
+ c10d_functional.all_reduce_.default, 0
330
+ ),
331
+ c10d_functional.all_reduce_coalesced.default: InplaceableOp(
332
+ c10d_functional.all_reduce_coalesced_.default, 0
333
+ ),
334
+ }
335
+ inplaceable_ops.update(inplaceable_collective_ops)
336
+ except AttributeError:
337
+ # _c10d_functional ops are only available when torch
338
+ # is built with USE_DISTRIBUTED=1.
339
+ pass
340
+
341
+ inplaceable_foreach_ops: Dict[torch._ops.OpOverload, InplaceableOp] = {}
342
+ for outplace_op, inplace_op in inplaceable_foreach_ops_lowerings.items():
343
+ inplaceable_foreach_ops[outplace_op] = InplaceableOp(inplace_op, 0)
344
+
345
+
346
+ inplaceable_triton_ops = {triton_kernel_wrapper_functional}
347
+
348
+
349
+ # Operators that don't depend on the tensor data
350
+ META_ONLY_OPS = {
351
+ aten.sym_size.int,
352
+ aten.sym_stride.int,
353
+ aten.sym_numel.default,
354
+ aten.sym_storage_offset.default,
355
+ }
356
+
357
+
358
+ def reinplace_inplaceable_ops_core(graph: torch.fx.Graph) -> None:
359
+ """
360
+ Reinplaces in-placeable operations.
361
+ If there are no uses of a view of the mutated arg after the current node,
362
+ it is possible to inplace the op.
363
+ This algorithm can be justified by reasoning about side effects. As
364
+ we traverse the graph in the forward direction, only later nodes can observe
365
+ the side effects of the current node. If neither the current node nor any
366
+ view of it is used later in the graph, then it is safe to
367
+ inplace, as there would be no way to observe the side effects.
368
+ This condition is slightly different for graph inputs where they can only
369
+ be inplaced if the above condition is true and there's a copy_ in the
370
+ epilogue that signals that the caller wants to observe the mutation.
371
+ """
372
+
373
+ copy_args_to_copy_nodes = {}
374
+ mutated_inputs = set()
375
+ storage_to_nodes = defaultdict(list)
376
+ node_order: Dict[Any, int] = {}
377
+ for i, node in enumerate(reversed(graph.nodes)):
378
+ node_order[node] = len(graph.nodes) - i - 1
379
+ storage_to_nodes[get_node_storage(node)].append(node)
380
+ if node.target == aten.copy_.default and node.args[0].op == "placeholder":
381
+ dst = node.args[0]
382
+ src = node.args[1]
383
+ # If the target is a getitem and it indexes a possible clone,
384
+ # then skip over it
385
+ if src.target == operator.getitem and (
386
+ (
387
+ src.args[0].target == triton_kernel_wrapper_functional
388
+ and src.args[0].kwargs["kwargs"][src.args[1]] == node.args[0]
389
+ )
390
+ or (src.args[0].target in inplaceable_foreach_ops)
391
+ or (src.args[0].target == torch.ops.higher_order.auto_functionalized)
392
+ ):
393
+ src = src.args[0]
394
+
395
+ copy_args_to_copy_nodes[(dst, src)] = node
396
+
397
+ mutated_inputs.add(node.args[0])
398
+
399
+ def any_use_of_views_after_node(node, shared_view_nodes, *, copy_node):
400
+ node_loc = node_order[node]
401
+ copy_node_loc = node_order[copy_node] if copy_node is not None else None
402
+
403
+ def is_meta_only_user(node):
404
+ if _is_view_op(node.target):
405
+ return all(is_meta_only_user(u) for u in node.users)
406
+ return node.target in META_ONLY_OPS
407
+
408
+ for view in shared_view_nodes:
409
+ for user in view.users:
410
+ user_loc = node_order[user]
411
+ # Skip all users before node
412
+ if user_loc <= node_loc:
413
+ continue
414
+ # Ignore uses after the copy_ epilogue node, where the input
415
+ # has already been mutated anyway
416
+ if copy_node_loc is not None and copy_node_loc <= user_loc:
417
+ continue
418
+ # Reinplacing does not change shape metadata
419
+ if is_meta_only_user(user):
420
+ continue
421
+ return True
422
+ return False
423
+
424
+ def can_inplace(node, mutated_arg):
425
+ if isinstance(mutated_arg, (list, tuple)):
426
+ return all(can_inplace(node, arg) for arg in mutated_arg)
427
+
428
+ if get_node_storage(mutated_arg) is None:
429
+ return False
430
+ shared_view_nodes = storage_to_nodes[get_node_storage(mutated_arg)]
431
+ if mutated_arg.op == "placeholder":
432
+ if not (
433
+ copy_node := copy_args_to_copy_nodes.get((mutated_arg, node), False)
434
+ ):
435
+ return False
436
+
437
+ if any_use_of_views_after_node(
438
+ node, shared_view_nodes, copy_node=copy_node
439
+ ):
440
+ return False
441
+
442
+ return True
443
+ elif any(view.op == "placeholder" for view in shared_view_nodes):
444
+ # If the mutated arg is a view of any of the graph inputs,
445
+ # do not allow inplacing.
446
+ # Handling this would require a more sophisticated algorithm.
447
+ return False
448
+ else:
449
+ return not any_use_of_views_after_node(
450
+ node, shared_view_nodes, copy_node=None
451
+ )
452
+
453
+ replace_dict: Dict[torch.fx.Node, torch.fx.Node] = {}
454
+
455
+ def reinplace_and_refine_tensors_to_clone(old_tensors_to_clone, kwargs):
456
+ tensors_to_clone: List[str] = []
457
+ for arg in old_tensors_to_clone:
458
+ assert arg in kwargs
459
+ mutated_arg = kwargs[arg]
460
+ if can_inplace(node, mutated_arg):
461
+ copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
462
+ if copy_node is not None:
463
+ replace_dict[copy_node] = copy_node.args[0]
464
+ for user in node.users:
465
+ if user.target == operator.getitem and user.args[1] == arg:
466
+ replace_dict[user] = mutated_arg
467
+ else:
468
+ tensors_to_clone.append(arg)
469
+ return tensors_to_clone
470
+
471
+ for node in graph.nodes:
472
+ if (inplaceable_op := inplaceable_ops.get(node.target, None)) is not None:
473
+ mutated_arg = node.args[inplaceable_op.mutated_arg]
474
+ if can_inplace(node, mutated_arg) and inplaceable_op.extra_check(node):
475
+ # TODO(yifu): this doesn't properly remove copy epilogues for
476
+ # ops that mutate multiple inputs. Need to revise the copy
477
+ # node tracking logic to support the case.
478
+ copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
479
+ if copy_node is not None:
480
+ replace_dict[copy_node] = copy_node.args[0]
481
+ node.target = inplaceable_op.inplace_op
482
+ elif node.target == torch.ops.higher_order.auto_functionalized:
483
+ _mutable_op = node.args[0]
484
+ from torch._higher_order_ops.auto_functionalize import get_mutable_arg_names
485
+
486
+ tensors_to_clone = get_mutable_arg_names(_mutable_op)
487
+ # Don't try to reinplace Optional[Tensor] args that are None.
488
+ tensors_to_clone = [
489
+ t for t in tensors_to_clone if node.kwargs[t] is not None
490
+ ]
491
+ tensors_to_clone = reinplace_and_refine_tensors_to_clone(
492
+ tensors_to_clone, node.kwargs
493
+ )
494
+
495
+ # Stash the metadata. There is a pass later on where we decompose
496
+ # auto_functionalized into clones + a mutable op; this metadata
497
+ # tells the decomp to only clone the following inputs
498
+ node.meta["only_clone_these_tensors"] = tensors_to_clone
499
+ elif node.target in inplaceable_triton_ops:
500
+ # inplaceable_triton_ops take an additional argument called
501
+ # tensors_to_clone, which contains a list of tensors to clone.
502
+ # This pass iterates over them and sees which ones are safe
503
+ # to eliminate (i.e. no longer need the clones)
504
+ tensors_to_clone = reinplace_and_refine_tensors_to_clone(
505
+ node.kwargs["tensors_to_clone"], node.kwargs["kwargs"]
506
+ )
507
+
508
+ kwargs = dict(node.kwargs)
509
+ kwargs["tensors_to_clone"] = tensors_to_clone
510
+ node.kwargs = immutable_dict(kwargs)
511
+ elif (
512
+ inplaceable_op := inplaceable_foreach_ops.get(node.target, None)
513
+ ) is not None:
514
+ mutated_args = node.args[inplaceable_op.mutated_arg]
515
+
516
+ if not all((arg, node) in copy_args_to_copy_nodes for arg in mutated_args):
517
+ continue
518
+
519
+ if can_inplace(node, mutated_args):
520
+ for arg in mutated_args:
521
+ copy_node = copy_args_to_copy_nodes[(arg, node)]
522
+ replace_dict[copy_node] = copy_node.args[0]
523
+
524
+ node.target = inplaceable_op.inplace_op
525
+ for node, replacement in replace_dict.items():
526
+ while replacement in replace_dict:
527
+ replacement = replace_dict[replacement]
528
+ replace_dict[node] = replacement
529
+
530
+ node.replace_all_uses_with(replacement)
531
+ graph.erase_node(node)
532
+
533
+
534
+ def reinplace_inplaceable_ops(graph: torch.fx.Graph) -> None:
535
+ canonicalize_view_scatter_ops(graph)
536
+ reinplace_inplaceable_ops_core(graph)
537
+ decompose_generalized_scatter(graph)
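
A hedged aside for reviewers of the hunk above: after functionalization, a graph that mutates one of its inputs typically carries a functional op plus a copy_ epilogue writing the result back into the argument; the pass swaps in the op's in-place variant and queues that copy_ (and, for the auto_functionalized/triton cases, the corresponding getitem users) for removal via replace_dict. The sketch below is illustrative only and runs eagerly rather than through the pass; it assumes aten.index_put / aten.index_put_ is one of the registered inplaceable pairs (registered outside this hunk).

    import torch

    x = torch.zeros(4)
    idx = (torch.tensor([1, 3]),)
    vals = torch.ones(2)

    # Functional form: allocates a fresh tensor and leaves x untouched, so a
    # mutation of a graph input needs a trailing copy_ back into x.
    out = torch.ops.aten.index_put.default(x, idx, vals)
    assert torch.equal(x, torch.zeros(4))

    # In-place variant the pass rewrites to: mutates x directly, making the
    # copy_ epilogue (and the extra allocation) unnecessary.
    torch.ops.aten.index_put_.default(x, idx, vals)
    assert torch.equal(x, out)
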
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py ADDED
@@ -0,0 +1,139 @@
+ import collections
+ import logging
+
+ import torch
+
+ from torch.fx.passes.shape_prop import _extract_tensor_metadata
+ from .. import config, inductor_prims
+ from ..pattern_matcher import (
+     CallFunctionVarArgs,
+     Match,
+     PatternMatcherPass,
+     register_graph_pattern,
+ )
+ from ..virtualized import V
+
+ log = logging.getLogger(__name__)
+ patterns = PatternMatcherPass()
+ aten = torch.ops.aten
+
+
+ def replace_random_passes(gm: torch.fx.GraphModule):
+     """Modify the given FX graph to use backend-native random ops"""
+     if config.fallback_random:
+         return 0
+
+     count = patterns.apply(gm)
+     count += fuse_seed_creation_pass(gm.graph)
+
+     return count
+
+
+ def fuse_seed_creation_pass(graph: torch.fx.Graph):
+     """
+     Horizontally fuse all the seed generation on each device
+
+         a = inductor_seed(dev)
+         b = inductor_seed(dev)
+
+     Becomes:
+         seeds = inductor_seeds(2, dev)
+         a = inductor_lookup_seed(seeds, 0)
+         b = inductor_lookup_seed(seeds, 1)
+
+     We do this because seed creation is entirely launch overhead bound.
+     """
+     device_seeds = collections.defaultdict(list)
+     for node in graph.nodes:
+         if CallFunctionVarArgs(inductor_prims.seed).match(node):
+             device_seeds[node.args[0]].append(node)
+
+     if not device_seeds:
+         return 0
+
+     for device, seeds in device_seeds.items():
+         with graph.inserting_before(seeds[0]):
+             combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))
+             with V.fake_mode:
+                 combined.meta["val"] = torch.empty(
+                     [len(seeds)], device=device, dtype=torch.int64
+                 )
+                 combined.meta["tensor_meta"] = _extract_tensor_metadata(
+                     combined.meta["val"]
+                 )
+
+         for idx, seed in enumerate(seeds):
+             with graph.inserting_before(seed):
+                 new_seed = graph.call_function(
+                     inductor_prims.lookup_seed, (combined, idx)
+                 )
+             seed.replace_all_uses_with(new_seed)
+             new_seed.meta.update(seed.meta)
+             graph.erase_node(seed)
+
+     return len(device_seeds)
+
+
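To make the docstring's before/after concrete, here is a hedged eager-mode sketch (not part of the commit). It assumes the inductor prims referenced above (seed, seeds, lookup_seed, random) are registered with eager implementations, so the fused form can be exercised directly outside of a compiled graph.

    import torch
    from torch._inductor import inductor_prims

    dev = torch.device("cpu")

    # Before the pass: each random op pulls its own freshly created seed.
    s0 = inductor_prims.seed(dev)
    s1 = inductor_prims.seed(dev)

    # After fuse_seed_creation_pass: one fused seed buffer per device, with
    # each original call reading its own slot via lookup_seed.
    seeds = inductor_prims.seeds(2, dev)
    a = inductor_prims.random([4], inductor_prims.lookup_seed(seeds, 0), "rand")
    b = inductor_prims.random([4], inductor_prims.lookup_seed(seeds, 1), "randn")
    assert seeds.shape == (2,) and a.shape == b.shape == (4,)
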
+ def default_kwargs(device):
+     return {}
+
+
+ def get_device(device):
+     if device is not None:
+         return device
+     return torch.empty([]).device  # default device
+
+
+ @register_graph_pattern(CallFunctionVarArgs(aten.rand.default), pass_dict=patterns)
+ @register_graph_pattern(CallFunctionVarArgs(aten.rand.generator), pass_dict=patterns)
+ @register_graph_pattern(CallFunctionVarArgs(aten.randn.default), pass_dict=patterns)
+ @register_graph_pattern(CallFunctionVarArgs(aten.randn.generator), pass_dict=patterns)
+ def replace_random(
+     match: Match,
+     size,
+     *,
+     generator=None,
+     dtype=None,
+     device=None,
+     layout=None,
+     pin_memory=None,
+ ):
+     if generator is not None:
+         return
+
+     def replacement(size):
+         result = inductor_prims.random(
+             size, inductor_prims.seed(device), mode, **default_kwargs(device)
+         )
+         if dtype is not None:
+             result = result.to(dtype)
+         return result
+
+     mode = {
+         aten.rand: "rand",
+         aten.randn: "randn",
+     }[
+         match.output_node().target.overloadpacket  # type: ignore[union-attr]
+     ]  # type: ignore[union-attr]
+     device = get_device(device)
+     match.replace_by_example(replacement, [size])
+
+
+ @register_graph_pattern(CallFunctionVarArgs(aten.randint.low), pass_dict=patterns)
+ def replace_randint(
+     match: Match,
+     low,
+     high,
+     size,
+     *,
+     dtype=torch.int64,
+     device=None,
+     layout=None,
+     pin_memory=None,
+ ):
+     def replacement(size):
+         result = inductor_prims.randint(low, high, size, inductor_prims.seed(device))
+         return result.to(dtype)
+
+     device = get_device(device)
+     match.replace_by_example(replacement, [size])
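
One last hedged note on the patterns above (illustrative, not part of the commit): replace_random and replace_randint rewrite aten.rand, aten.randn and aten.randint.low into the seeded inductor prims via replacement(), and the whole rewrite is skipped when config.fallback_random is set, as checked in replace_random_passes. A minimal eager sketch of what replace_randint's replacement() computes, again assuming the prims have eager implementations:

    import torch
    from torch._inductor import inductor_prims

    dev = torch.device("cpu")
    # Equivalent of replacement() for aten.randint.low(0, 10, [8]) on dev:
    out = inductor_prims.randint(0, 10, [8], inductor_prims.seed(dev)).to(torch.int64)
    assert out.dtype == torch.int64 and out.shape == (8,)
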
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (213 Bytes).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_1.cpython-310.pyc ADDED
Binary file (4.65 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_10.cpython-310.pyc ADDED
Binary file (5.48 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_11.cpython-310.pyc ADDED
Binary file (5.48 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_12.cpython-310.pyc ADDED
Binary file (6.07 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_13.cpython-310.pyc ADDED
Binary file (3.77 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_14.cpython-310.pyc ADDED
Binary file (5.61 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_15.cpython-310.pyc ADDED
Binary file (6.24 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_16.cpython-310.pyc ADDED
Binary file (13.9 kB).
 
venv/lib/python3.10/site-packages/torch/_inductor/fx_passes/serialized_patterns/__pycache__/_sfdp_pattern_17.cpython-310.pyc ADDED
Binary file (6.77 kB).