applied-ai-018 commited on
Commit
b00fcdd
·
verified ·
1 Parent(s): a1827f6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_decomp/decompositions.py +0 -0
  10. venv/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py +302 -0
  11. venv/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py +263 -0
  12. venv/lib/python3.10/site-packages/torch/ao/__init__.py +16 -0
  13. venv/lib/python3.10/site-packages/torch/ao/ns/__init__.py +0 -0
  14. venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py +526 -0
  18. venv/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py +1025 -0
  19. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py +0 -0
  20. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py +460 -0
  31. venv/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py +950 -0
  32. venv/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py +761 -0
  33. venv/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py +1311 -0
  34. venv/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py +64 -0
  35. venv/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py +200 -0
  36. venv/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py +243 -0
  37. venv/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py +533 -0
  38. venv/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py +275 -0
  39. venv/lib/python3.10/site-packages/torch/ao/quantization/__init__.py +189 -0
  40. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/11.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9095c8c9c49ff5bba85f06c924606737fe546aaa60d0f80554cbbe8fb59d5202
3
+ size 50332749
ckpts/universal/global_step120/zero/19.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0abe562356b5ce5c7593db56edcb7f1a2c08eec9d00c88932fb24d3a77ff5158
3
+ size 33555533
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f97477ba6817c50add81a1b0b6d5bca0d63305eff4eac6bc2c89b8b126c7ce80
3
+ size 33555612
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fe744aed1a52656bda6bf6950028ad56d51e0a956088b6be8b72e50b9abcd96
3
+ size 33555533
venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc ADDED
Binary file (110 kB). View file
 
venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc ADDED
Binary file (6.29 kB). View file
 
venv/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
venv/lib/python3.10/site-packages/torch/_decomp/decompositions.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ from typing import Callable, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+ import torch._decomp
6
+ from torch import Tensor
7
+ from torch._prims_common.wrappers import _maybe_remove_out_wrapper
8
+
9
+ decomposition_table = torch._decomp.decomposition_table
10
+ decomposition_table_for_jvp: Dict[torch._ops.OperatorBase, Callable] = {}
11
+ register_decomposition = torch._decomp.register_decomposition
12
+ aten = torch.ops.aten
13
+
14
+ # NOTE: [forward-mode AD decompositions mechanism]
15
+ #
16
+ # The mechanism is in VariableType,
17
+ # IF any inputs have forward grad
18
+ # AND there is no forward AD formula implemented
19
+ # AND the functions is actually differentiable
20
+ # run the decomposition
21
+ # See run_jit_decomposition_with_args_for_jvp
22
+ # We currently use python decompositions that we torchscript.
23
+ #
24
+ # Note that we would be building the backward graph at the decomposed level
25
+ # too, but that is OK, because we would've errored out otherwise anyway.
26
+ #
27
+ # TODO: The mechanism we are using to register decompositions doesn't
28
+ # seem to be exclusively used for jvp. So open question here is whether
29
+ # torch/csrc/jit/runtime/decomposition_registry.cpp is being used for other things.
30
+ # If that is the case, we may go down the decomposition path unexpectedly
31
+ # (and possibly produce an unintelligible error) vs erroring out earlier and
32
+ # printing that the forward AD formula is not implemented.
33
+ #
34
+ # The solution to this may be to have a explicitly white list control when
35
+ # to enable the decomposition.
36
+
37
+
38
+ def maybe_register_decomposition(op):
39
+ def decorator(f):
40
+ try:
41
+ return register_decomposition(op)(f)
42
+ except Exception:
43
+ return f
44
+
45
+ return decorator
46
+
47
+
48
+ # Functions where we need a special decomposition for jvp but there's another version that
49
+ # should be used more generally (ex. for jvp we need to recompute the mean and variance for
50
+ # the backwards of a normalization function. Without jvp, it should use the saved value)
51
+ decomposition_table_for_jvp = {}
52
+
53
+
54
+ def register_decomposition_for_jvp(fn):
55
+ return register_decomposition(fn, registry=decomposition_table_for_jvp)
56
+
57
+
58
+ def _register_jit_decomposition_for_jvp(decomp, use_python=False):
59
+ if decomp in decomposition_table_for_jvp:
60
+ decomposition_table_used = decomposition_table_for_jvp
61
+ elif decomp in decomposition_table:
62
+ decomposition_table_used = decomposition_table
63
+ else:
64
+ raise RuntimeError(f"could not find decomposition for {decomp}")
65
+ decomp_fn = decomposition_table_used[decomp]
66
+
67
+ # `out_wrapper` extends a decompositions signature with
68
+ # an `out` parameter. However jit will use the unwrapped function's
69
+ # signature instead so we need to unwrap here to prevent an error
70
+ decomp_fn = _maybe_remove_out_wrapper(decomp_fn)
71
+
72
+ if use_python:
73
+ decomp_fn = torch.jit.ignore(decomp_fn)
74
+ sig = inspect.signature(decomp_fn)
75
+
76
+ # Create a string wrapping the function from the signature
77
+ # example output:
78
+ # def wrapped_decomp(x: torch.Tensor, y: int, z: int):
79
+ # return decomp_fn(x, y, z)
80
+ # Thanks copilot!
81
+ def get_function_def(sig):
82
+ param_def = [f"{param_str}" for param_str in sig.parameters.values()]
83
+ param_use = [f"{param_str}" for param_str in sig.parameters.keys()]
84
+
85
+ return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n"
86
+
87
+ f_str = get_function_def(sig)
88
+ graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph
89
+ else:
90
+ graph = torch.jit.script(decomp_fn).graph
91
+ torch.jit._register_decomposition(decomp, graph)
92
+
93
+
94
+ # The only decompositions here are temporary or hacks for the purposes of jvp
95
+
96
+
97
+ # TODO: do these also belong here?
98
+ @maybe_register_decomposition(aten.trace.default)
99
+ def trace(self: Tensor) -> Tensor:
100
+ return torch.sum(torch.diag(self))
101
+
102
+
103
+ @maybe_register_decomposition(aten.log_sigmoid_forward.default)
104
+ def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
105
+ min = torch.minimum(self.new_zeros(()), self)
106
+ z = torch.exp(-torch.abs(self))
107
+ if self.is_cuda:
108
+ buffer = self.new_zeros((0,))
109
+ else:
110
+ buffer = z
111
+ return min - torch.log1p(z), buffer
112
+
113
+
114
+ def recompute_mean_var(
115
+ input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
116
+ ):
117
+ # for most norm decompositions, it will be the same as the core version except for here.
118
+ # We recompute the mean and variance so that they track gradients through input
119
+
120
+ mean = torch.mean(input, dim=inner_dim_indices, keepdim=keepdim)
121
+ var = torch.var(input, dim=inner_dim_indices, unbiased=False, keepdim=keepdim)
122
+ eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside
123
+ eps = eps.detach()
124
+ rstd = 1 / torch.sqrt(var + eps)
125
+ return mean, rstd
126
+
127
+
128
+ @register_decomposition_for_jvp(aten.native_layer_norm_backward)
129
+ def native_layer_norm_backward(
130
+ grad_out: Tensor,
131
+ input: Tensor,
132
+ normalized_shape: List[int],
133
+ mean: Tensor,
134
+ rstd: Tensor,
135
+ weight: Optional[Tensor],
136
+ bias: Optional[Tensor],
137
+ output_mask: List[bool],
138
+ ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
139
+ input_shape = input.shape
140
+ input_ndim = input.dim()
141
+
142
+ axis = input_ndim - len(normalized_shape)
143
+ inner_dims = input_shape[axis:]
144
+ outer_dims = input_shape[:axis]
145
+ inner_dim_indices = list(range(axis, input_ndim))
146
+ outer_dim_indices = list(range(0, axis))
147
+
148
+ N = 1
149
+ for i in inner_dims:
150
+ N *= i
151
+ M = 1
152
+ for i in outer_dims:
153
+ M *= i
154
+ if M <= 0 or N <= 0:
155
+ return (
156
+ input.new_zeros(input_shape),
157
+ input.new_zeros(input_shape[axis:]),
158
+ input.new_zeros(input_shape[axis:]),
159
+ )
160
+
161
+ mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)
162
+
163
+ x_hat = (input - mean_) * rstd_
164
+ if weight is not None:
165
+ grad_x_hat = grad_out * weight
166
+ else:
167
+ grad_x_hat = grad_out
168
+ a = grad_x_hat * N
169
+ b = torch.sum(grad_x_hat, inner_dim_indices, True)
170
+ c1 = torch.mul(grad_x_hat, x_hat)
171
+ c2 = torch.sum(c1, inner_dim_indices, True)
172
+ c3 = torch.mul(x_hat, c2)
173
+ inner = a - b - c3
174
+
175
+ if output_mask[0]:
176
+ d_input: Optional[Tensor] = (rstd_ / N) * inner
177
+ else:
178
+ d_input = torch.zeros_like(input) # should be None but doesn't work with vjp
179
+
180
+ if output_mask[1] and weight is not None:
181
+ if len(outer_dim_indices) > 0:
182
+ d_weight: Optional[Tensor] = torch.sum(
183
+ grad_out * x_hat, outer_dim_indices, False
184
+ )
185
+ else:
186
+ d_weight = grad_out * x_hat
187
+ elif weight is not None:
188
+ d_weight = torch.zeros_like(weight) # should be None but doesn't work with vjp
189
+ else:
190
+ d_weight = torch.zeros(()) # should be None but doesn't work with vjp
191
+
192
+ if output_mask[2] and bias is not None:
193
+ if len(outer_dim_indices) > 0:
194
+ d_bias: Optional[Tensor] = torch.sum(grad_out, outer_dim_indices, False)
195
+ else:
196
+ d_bias = grad_out.clone()
197
+ elif bias is not None:
198
+ d_bias = torch.zeros_like(bias) # should be None but doesn't work with vjp
199
+ else:
200
+ d_bias = torch.zeros(()) # should be None but doesn't work with vjp
201
+
202
+ return (d_input, d_weight, d_bias)
203
+
204
+
205
+ def prod(x: List[int]):
206
+ r = 1
207
+ for i in x:
208
+ r *= i
209
+ return r
210
+
211
+
212
+ @register_decomposition_for_jvp(aten.native_batch_norm_backward)
213
+ def native_batch_norm_backward(
214
+ grad_out: Tensor,
215
+ input: Tensor,
216
+ weight: Optional[Tensor],
217
+ running_mean: Optional[Tensor],
218
+ running_var: Optional[Tensor],
219
+ save_mean: Optional[Tensor],
220
+ save_invstd: Optional[Tensor],
221
+ train: bool,
222
+ eps: float,
223
+ output_mask: List[bool],
224
+ ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
225
+ input_shape = input.shape
226
+ input_rank = input.dim()
227
+ assert input_rank >= 2, "rank of the input must be at least 2"
228
+
229
+ axis = 1
230
+ num_features = prod(input_shape) / input_shape[axis] # type: ignore[arg-type]
231
+ mean = save_mean
232
+ invstd = save_invstd
233
+ if train:
234
+ assert (
235
+ save_mean is not None and save_invstd is not None
236
+ ), "when train=True, save_mean and save_invstd are required"
237
+
238
+ reduciton_dims = [0] + list(range(2, input.dim()))
239
+ assert invstd is not None # for typing
240
+ mean, invstd = recompute_mean_var(input, invstd, reduciton_dims, keepdim=False)
241
+ else:
242
+ assert running_mean is not None and running_var is not None
243
+ mean = running_mean
244
+ invstd = torch.rsqrt(running_var + eps)
245
+
246
+ assert invstd is not None and mean is not None
247
+
248
+ broadcast_mask = [1] * input_rank
249
+ broadcast_mask[axis] = input_shape[axis]
250
+
251
+ reduction_axes: List[int] = []
252
+ for i in range(input_rank):
253
+ if i != axis:
254
+ reduction_axes.append(i)
255
+
256
+ mean = torch.reshape(mean, broadcast_mask)
257
+ norm = 1.0 / num_features
258
+ grad_output_sum = torch.sum(grad_out, reduction_axes)
259
+ dot_p = torch.sum(grad_out * (input - mean), reduction_axes)
260
+
261
+ grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask)
262
+ proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask)
263
+
264
+ if weight is None:
265
+ grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0
266
+ else:
267
+ grad_scale = torch.reshape(invstd * weight, broadcast_mask)
268
+
269
+ if train:
270
+ proj = (input - mean) * proj_scale
271
+ grad_input = ((grad_out - proj) - grad_mean) * grad_scale
272
+ else:
273
+ grad_input = grad_out * grad_scale
274
+
275
+ if output_mask[1]:
276
+ grad_weight = dot_p * invstd
277
+ elif weight is not None:
278
+ grad_weight = torch.zeros_like(
279
+ weight
280
+ ) # should be None but doesn't work with vjp
281
+ else:
282
+ grad_weight = torch.zeros(()) # should be None but doesn't work with vjp
283
+
284
+ if output_mask[2]:
285
+ grad_bias = grad_output_sum
286
+ else:
287
+ grad_bias = torch.zeros_like(
288
+ grad_output_sum
289
+ ) # should be None but doesn't work with vjp
290
+
291
+ return (grad_input, grad_weight, grad_bias)
292
+
293
+
294
+ _register_jit_decomposition_for_jvp(torch.ops.aten.trace.default, use_python=True)
295
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss_backward.default)
296
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss2d_backward.default)
297
+ _register_jit_decomposition_for_jvp(torch.ops.aten._log_softmax_backward_data.default)
298
+ _register_jit_decomposition_for_jvp(torch.ops.aten._softmax_backward_data.default)
299
+ _register_jit_decomposition_for_jvp(torch.ops.aten.log_sigmoid_forward.default)
300
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_layer_norm_backward.default)
301
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_batch_norm_backward.default)
302
+ _register_jit_decomposition_for_jvp(torch.ops.aten.cudnn_batch_norm_backward.default)
venv/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ from collections import defaultdict
3
+ from typing import Callable, Dict
4
+
5
+ import torch
6
+ import torch._decomp as decomp
7
+ from torch._decomp import get_decompositions
8
+ from torch._ops import OpOverload
9
+
10
+ aten = torch.ops.aten
11
+
12
+ rng_decompositions: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict)
13
+
14
+
15
+ def register_rng_decomposition(aten_op):
16
+ return decomp.register_decomposition(aten_op, rng_decompositions)
17
+
18
+
19
+ def throw_on_non_cuda(device):
20
+ raise RuntimeError(
21
+ f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not "
22
+ f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is "
23
+ "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU."
24
+ )
25
+
26
+
27
+ # TODO - We have to register many more distributions here, and also higher level
28
+ # ops like dropout which have fused implementation and can hide the rand inside.
29
+ @register_rng_decomposition(aten.rand)
30
+ def rand(shape, dtype=None, layout=torch.strided, device=None, pin_memory=False):
31
+ if device and device.type != "cuda":
32
+ throw_on_non_cuda(device)
33
+ seed, offset = PhiloxStateTracker.get_state_as_tuple()
34
+ dtype = dtype or torch.float32
35
+ out, offset_jump = torch.ops.rngprims.philox_rand(
36
+ shape, seed, offset, None, device, dtype
37
+ )
38
+ PhiloxStateTracker.advance_offset(offset_jump)
39
+ return out
40
+
41
+
42
+ @register_rng_decomposition(aten.rand_like)
43
+ def rand_like(
44
+ x: torch.Tensor,
45
+ dtype=None,
46
+ layout=None,
47
+ device=None,
48
+ pin_memory=False,
49
+ memory_format=torch.preserve_format,
50
+ ):
51
+ device = device or x.device
52
+ if device.type != "cuda":
53
+ throw_on_non_cuda(device)
54
+ dtype = dtype or x.dtype
55
+ seed, offset = PhiloxStateTracker.get_state_as_tuple()
56
+ out, offset_jump = torch.ops.rngprims.philox_rand(
57
+ x.shape, seed, offset, None, device, dtype
58
+ )
59
+ PhiloxStateTracker.advance_offset(offset_jump)
60
+ return out
61
+
62
+
63
+ class PhiloxState:
64
+ """
65
+ Represents a PhiloxRngState - (seed, offset) where offset = base_offset +
66
+ relative_offset. seed and base_offset basically point to the rng state just
67
+ before tracing starts. relative offset tracks the totally consumed offset at
68
+ trace time.
69
+ """
70
+
71
+ def __init__(self):
72
+ self.reset()
73
+
74
+ def reset(self):
75
+ self.seed = torch.tensor(())
76
+ self.base_offset = torch.tensor(())
77
+ self.relative_offset = 0
78
+ self.offset_advanced_alteast_once = False
79
+
80
+ def validate_state(self):
81
+ assert self.seed.numel() != 0 and self.base_offset.numel() != 0
82
+
83
+ def advance_offset(self, consumed_offset):
84
+ self.offset_advanced_alteast_once = True
85
+ self.relative_offset = self.relative_offset + consumed_offset
86
+
87
+ def set_state(self, seed, base_offset, relative_offset=0):
88
+ self.seed = seed
89
+ self.base_offset = base_offset
90
+ self.relative_offset = relative_offset
91
+
92
+ def get_state_as_tuple(self):
93
+ self.validate_state()
94
+ return (self.seed, self.base_offset + self.relative_offset)
95
+
96
+ def get_state_as_tensor(self):
97
+ # Only needed because we override get_rng_state.
98
+ self.validate_state()
99
+ return torch.stack([self.seed, self.base_offset + self.relative_offset])
100
+
101
+ def set_state_from_tensor(self, state):
102
+ # Only needed because we override set_rng_state.
103
+ self.seed, self.base_offset = torch.unbind(state)
104
+ self.relative_offset = 0
105
+
106
+
107
+ class PhiloxStateTracker:
108
+ """
109
+ Singleton class to track the philox rng state during AOT Autograd tracing.
110
+ For each aot tracing instance, AOT Autograd resets this tracker and keeps
111
+ track of both forward and backward offsets. At runtime, we only care about
112
+ the total consumed forward and backward offsets. For dynamic shapes, these
113
+ offsets are a function of input shapes. Therefore, the AOT generated graphs
114
+ have additional outputs that compute total consumed forward and backward
115
+ offsets.
116
+ """
117
+
118
+ running_state: PhiloxState
119
+ fwd_state: PhiloxState
120
+ bwd_state: PhiloxState
121
+
122
+ def __enter__(self):
123
+ PhiloxStateTracker.reset()
124
+ return self
125
+
126
+ def __exit__(self, exc_type, exc_cal, exc_tb):
127
+ PhiloxStateTracker.reset()
128
+
129
+ @classmethod
130
+ def reset(cls):
131
+ cls.running_state = PhiloxState()
132
+ cls.fwd_state = PhiloxState()
133
+ cls.bwd_state = PhiloxState()
134
+
135
+ @classmethod
136
+ def mark_beginning_of_forward(cls):
137
+ # Tells the tracker to use fwd_state as the running state
138
+ cls.running_state = cls.fwd_state
139
+
140
+ @classmethod
141
+ def mark_beginning_of_backward(cls):
142
+ # Tells the tracker to use bwd_state as the running state
143
+ cls.running_state = cls.bwd_state
144
+
145
+ @classmethod
146
+ def record_state(cls, seed, offset, mode):
147
+ # Records the seed and offset tensors. These tensors are used to invoke
148
+ # the philox_rand functional primitives.
149
+ if mode == "forward":
150
+ cls.fwd_state.set_state(seed, offset)
151
+ cls.mark_beginning_of_forward()
152
+ else:
153
+ assert mode == "backward"
154
+ cls.bwd_state.set_state(seed, offset)
155
+
156
+ @classmethod
157
+ def get_state_as_tensor(cls):
158
+ # The only reason this exists is because we override get_rng_state and
159
+ # set_rng_state during tracing. get_rng_state expects a tensor output,
160
+ # so return (seed, offset) tuple upset other parts of the program like
161
+ # ctx.saved_tensors.
162
+
163
+ # A bad consequence is that if user saves and restores rng state, we
164
+ # have little bit of ugliness in the generated code, where we first
165
+ # concat the (seed, offset) to create a tensor for get_rng_state, and
166
+ # then split it back to get (seed, offset) tuple in set_rng_state.
167
+
168
+ # TODO: Investigate if there is be a better way to wrap the tuple in a
169
+ # false Tensor object, and then desugar it later on.
170
+ return cls.running_state.get_state_as_tensor()
171
+
172
+ @classmethod
173
+ def get_state_as_tuple(cls):
174
+ return cls.running_state.get_state_as_tuple()
175
+
176
+ @classmethod
177
+ def set_state_from_tensor(cls, x):
178
+ # This is only needed because we override set_rng_state. Look at the
179
+ # comment in get_state_from_tensor method.
180
+ cls.running_state.set_state_from_tensor(x)
181
+
182
+ @classmethod
183
+ def advance_offset(cls, consumed_offset):
184
+ cls.running_state.advance_offset(consumed_offset)
185
+
186
+ @classmethod
187
+ def get_current_relative_offset(cls):
188
+ return cls.running_state.relative_offset
189
+
190
+ @staticmethod
191
+ def multiple_of_4(offset):
192
+ # torch cuda rng state offset must be a multiple of 4. For inductor, as
193
+ # we sum up all the numel, the result might not be a multiple of 4. This
194
+ # method achieves that.
195
+ return (offset + 3) // 4 * 4
196
+
197
+ @classmethod
198
+ def get_updated_fwd_offset(cls):
199
+ # Short circuit if no rand ops were observed
200
+ if not cls.fwd_state.offset_advanced_alteast_once:
201
+ return cls.fwd_state.base_offset
202
+ return cls.multiple_of_4(
203
+ cls.fwd_state.base_offset + cls.fwd_state.relative_offset
204
+ )
205
+
206
+ @classmethod
207
+ def get_updated_bwd_offset(cls):
208
+ # Short circuit if no rand ops were observed
209
+ if not cls.bwd_state.offset_advanced_alteast_once:
210
+ return cls.bwd_state.base_offset
211
+ return cls.multiple_of_4(
212
+ cls.bwd_state.base_offset + cls.bwd_state.relative_offset
213
+ )
214
+
215
+
216
+ # Adding more decompositions which eventually use rand_like inside decomps.
217
+ # Adding these in rng_decompositions ensures the functionalization of rand_like
218
+ # ops used in these decomps. The list is copied from inductor codebase, which
219
+ # uses it for similar purpose.
220
+ #
221
+ # Caution - These decomps do not have same accuracy as that of eager. However,
222
+ # we can't just disable them with a config flag like fallback_random, because
223
+ # for functionalization of rng ops, we have to decompose these ops.
224
+ extra_random_decomps = get_decompositions(
225
+ [
226
+ aten.cauchy,
227
+ aten.cauchy_,
228
+ aten.exponential,
229
+ aten.exponential_,
230
+ aten.geometric,
231
+ aten.geometric_,
232
+ aten.native_dropout,
233
+ aten.normal,
234
+ aten.normal_,
235
+ aten.normal_functional,
236
+ aten.log_normal,
237
+ aten.log_normal_,
238
+ aten.rrelu_with_noise,
239
+ aten.rrelu_with_noise_,
240
+ aten.uniform_,
241
+ ]
242
+ )
243
+ register_extra_random_decomp = functools.partial(
244
+ decomp.register_decomposition, registry=extra_random_decomps
245
+ )
246
+
247
+
248
+ @register_extra_random_decomp([aten.bernoulli_])
249
+ def bernoulli_(self, p=0.5):
250
+ if self.device == torch.device("cpu"):
251
+ return NotImplemented
252
+ return self.copy_(torch.rand_like(self, dtype=torch.float32) < p)
253
+
254
+
255
+ @register_extra_random_decomp([aten.bernoulli.p])
256
+ def bernoulli_p(self, p=0.5, *, generator=None):
257
+ if self.device == torch.device("cpu"):
258
+ return NotImplemented
259
+ assert generator is None
260
+ return torch.rand_like(self, dtype=torch.float32) < p
261
+
262
+
263
+ rng_decompositions.update(extra_random_decomps) # type: ignore[arg-type]
venv/lib/python3.10/site-packages/torch/ao/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # torch.ao is a package with a lot of interdependencies.
2
+ # We will use lazy import to avoid cyclic dependencies here.
3
+
4
+
5
+ __all__ = [
6
+ "nn",
7
+ "ns",
8
+ "quantization",
9
+ "pruning",
10
+ ]
11
+
12
+ def __getattr__(name):
13
+ if name in __all__:
14
+ import importlib
15
+ return importlib.import_module("." + name, __name__)
16
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
venv/lib/python3.10/site-packages/torch/ao/ns/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
venv/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (26.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py ADDED
@@ -0,0 +1,526 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.ao.nn.quantized as nnq
4
+ import torch.ao.nn.quantized.dynamic as nnqd
5
+ from torch.ao.quantization import prepare
6
+ from typing import Dict, List, Optional, Any, Union, Callable, Set
7
+
8
+ from torch.ao.quantization.quantization_mappings import (
9
+ get_default_compare_output_module_list,
10
+ )
11
+
12
+ NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST = {
13
+ nnqd.Linear,
14
+ nnq.Linear,
15
+ nnqd.LSTM,
16
+ nn.LSTM,
17
+ }
18
+
19
+
20
+ def _find_match(
21
+ str_list: Union[Dict[str, Any], List[str]], key_str: str,
22
+ postfix: str,
23
+ ) -> Optional[str]:
24
+ split_str = key_str.split(".")
25
+ if split_str[-1] == postfix:
26
+ match_string = "".join(key_str.split(".")[0:-1])
27
+ for s2 in str_list:
28
+ pattern1 = "".join(s2.split(".")[0:-1])
29
+ pattern2 = "".join(s2.split(".")[0:-2])
30
+ if match_string == pattern1:
31
+ return s2
32
+ if match_string == pattern2:
33
+ return s2
34
+
35
+ # For matching "fc.weight" and "fc._packed_params._packed_params"
36
+ if postfix == "_packed_params":
37
+ match_string = "".join(key_str.split(".")[0:-2])
38
+ if len(match_string) == 0:
39
+ return None
40
+ for s2 in str_list:
41
+ pattern1 = "".join(s2.split(".")[0:-1])
42
+ pattern2 = "".join(s2.split(".")[0:-2])
43
+ if match_string == pattern1:
44
+ return s2
45
+ if match_string == pattern2:
46
+ return s2
47
+ return None
48
+ else:
49
+ return None
50
+
51
+
52
+ def compare_weights(
53
+ float_dict: Dict[str, Any], quantized_dict: Dict[str, Any]
54
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
55
+ r"""Compare the weights of the float module with its corresponding quantized
56
+ module. Return a dict with key corresponding to module names and each entry being
57
+ a dictionary with two keys 'float' and 'quantized', containing the float and
58
+ quantized weights. This dict can be used to compare and compute the quantization
59
+ error of the weights of float and quantized models.
60
+
61
+ Example usage::
62
+
63
+ wt_compare_dict = compare_weights(
64
+ float_model.state_dict(), qmodel.state_dict())
65
+ for key in wt_compare_dict:
66
+ print(
67
+ key,
68
+ compute_error(
69
+ wt_compare_dict[key]['float'],
70
+ wt_compare_dict[key]['quantized'].dequantize()
71
+ )
72
+ )
73
+
74
+ Args:
75
+ float_dict: state dict of the float model
76
+ quantized_dict: state dict of the quantized model
77
+
78
+ Return:
79
+ weight_dict: dict with key corresponding to module names and each entry being
80
+ a dictionary with two keys 'float' and 'quantized', containing the float and
81
+ quantized weights
82
+ """
83
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_weights")
84
+ weight_dict: Dict[str, Dict] = {}
85
+ for key in quantized_dict:
86
+ match_key = _find_match(float_dict, key, "weight")
87
+ if match_key is not None:
88
+ weight_dict[key] = {}
89
+ weight_dict[key]["float"] = float_dict[match_key]
90
+ weight_dict[key]["quantized"] = quantized_dict[key]
91
+ continue
92
+
93
+ # For matching "fc.weight" and "fc._packed_params._packed_params"
94
+ match_key = _find_match(float_dict, key, "_packed_params")
95
+ if match_key is not None:
96
+ weight_dict[key] = {}
97
+ weight_dict[key]["float"] = float_dict[match_key]
98
+ weight_dict[key]["quantized"] = quantized_dict[key][0]
99
+
100
+ # For LSTM
101
+ split_str = key.split(".")
102
+ if split_str[-1] == "param" and split_str[-3] == "_all_weight_values":
103
+ layer = split_str[-2]
104
+ module_name = ".".join(split_str[:-3])
105
+ float_weight_ih_key = module_name + ".weight_ih_l" + layer
106
+ float_weight_hh_key = module_name + ".weight_hh_l" + layer
107
+ if float_weight_ih_key in float_dict and float_weight_hh_key in float_dict:
108
+ weight_dict[key] = {}
109
+ weight_dict[key]["float"] = float_dict[float_weight_ih_key]
110
+ weight_dict[key]["quantized"] = (
111
+ quantized_dict[key].__getstate__()[0][4][0].__getstate__()[0][0]
112
+ )
113
+ weight_dict[key]["float"] = float_dict[float_weight_hh_key]
114
+ weight_dict[key]["quantized"] = (
115
+ quantized_dict[key].__getstate__()[0][4][1].__getstate__()[0][0]
116
+ )
117
+
118
+ return weight_dict
119
+
120
+
121
+ def _get_logger_dict_helper(
122
+ mod: nn.Module, target_dict: Dict[str, Any],
123
+ prefix: str = "",
124
+ ) -> None:
125
+ r"""This is the helper function for get_logger_dict
126
+
127
+ Args:
128
+ mod: module we want to save all logger stats
129
+ prefix: prefix for the current module
130
+ target_dict: the dictionary used to save all logger stats
131
+ """
132
+
133
+ def get_prefix(prefix):
134
+ return prefix if prefix == "" else prefix + "."
135
+
136
+ for name, child in mod.named_children():
137
+ if isinstance(child, Logger):
138
+ target_dict[get_prefix(prefix) + "stats"] = child.stats
139
+ break
140
+
141
+ for name, child in mod.named_children():
142
+ module_prefix = get_prefix(prefix) + name if prefix else name
143
+ _get_logger_dict_helper(child, target_dict, module_prefix)
144
+
145
+
146
+ def get_logger_dict(mod: nn.Module, prefix: str = "") -> Dict[str, Dict]:
147
+ r"""Traverse the modules and save all logger stats into target dict.
148
+ This is mainly used for quantization accuracy debug.
149
+
150
+ Type of loggers supported:
151
+ ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module,
152
+ OutputLogger: used to log the outputs of the modules
153
+
154
+ Args:
155
+ mod: module we want to save all logger stats
156
+ prefix: prefix for the current module
157
+
158
+ Return:
159
+ target_dict: the dictionary used to save all logger stats
160
+
161
+ """
162
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.get_logger_dict")
163
+
164
+ target_dict: Dict[str, Dict] = {}
165
+ _get_logger_dict_helper(mod, target_dict, prefix)
166
+ return target_dict
167
+
168
+
169
+ class Logger(nn.Module):
170
+ r"""Base class for stats logging
171
+ """
172
+
173
+ def __init__(self):
174
+ super().__init__()
175
+ self.stats = {}
176
+ # We only insert observer if the op is quantized with static quantization,
177
+ # which is identified by activation_observer.dtype == quint8. This is needed
178
+ # when attaching Logger as observer for FX mode
179
+ self.dtype = torch.quint8
180
+
181
+ def forward(self, x):
182
+ """
183
+ """ # blank docblock to make autodoc happy
184
+ pass
185
+
186
+
187
+ class ShadowLogger(Logger):
188
+ r"""Class used in Shadow module to record the outputs of the original and
189
+ shadow modules.
190
+ """
191
+
192
+ def __init__(self):
193
+ super().__init__()
194
+ self.stats["float"] = []
195
+ self.stats["quantized"] = []
196
+
197
+ def forward(self, x, y):
198
+ """
199
+ """ # blank docblock to make autodoc happy
200
+ if len(x) > 1:
201
+ x = x[0]
202
+ if len(y) > 1:
203
+ y = y[0]
204
+ self.stats["quantized"].append(x.detach())
205
+ self.stats["float"].append(y.detach())
206
+
207
+
208
+ class OutputLogger(Logger):
209
+ r"""Class used to log the outputs of the module
210
+ """
211
+
212
+ def __init__(self):
213
+ super().__init__()
214
+ self.stats["tensor_val"] = []
215
+
216
+
217
+ def forward(self, x):
218
+ """
219
+ """ # blank docblock to make autodoc happy
220
+ self.stats["tensor_val"].append(x)
221
+ return x
222
+
223
+
224
+ def _convert_tuple_to_list(t: Any) -> Any:
225
+ return [_convert_tuple_to_list(x) for x in t] if type(t) is tuple else t
226
+
227
+
228
+ def _dequantize_tensor_list(t: Any) -> Any:
229
+ return (
230
+ [_dequantize_tensor_list(x) for x in t]
231
+ if type(t) is list
232
+ else t.dequantize()
233
+ if t.is_quantized
234
+ else t
235
+ )
236
+
237
+
238
+ class Shadow(nn.Module):
239
+ r"""Shadow module attaches the float module to its matching quantized module
240
+ as the shadow. Then it uses Logger module to process the outputs of both
241
+ modules.
242
+
243
+ Args:
244
+ q_module: module quantized from float_module that we want to shadow
245
+ float_module: float module used to shadow q_module
246
+ logger_cls: type of logger used to process the outputs of q_module and
247
+ float_module. ShadowLogger or custom loggers can be used.
248
+ """
249
+
250
+ def __init__(self, q_module, float_module, logger_cls):
251
+ super().__init__()
252
+ self.orig_module = q_module
253
+ self.shadow_module = float_module
254
+ self.dequant = nnq.DeQuantize()
255
+ self.logger = logger_cls()
256
+
257
+ def forward(self, *x) -> torch.Tensor:
258
+ """
259
+ """ # blank docblock to make autodoc happy
260
+ xl = _convert_tuple_to_list(x)
261
+ output = self.orig_module(*xl)
262
+ xl_float = _dequantize_tensor_list(xl)
263
+ shadow_output = self.shadow_module(*xl_float)
264
+ self.logger(output, shadow_output)
265
+ return output
266
+
267
+ def add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
268
+ """
269
+ """ # blank docblock to make autodoc happy
270
+ output = self.orig_module.add(x, y)
271
+ x = x.dequantize()
272
+ y = y.dequantize()
273
+ shadow_output = self.shadow_module.add(x, y)
274
+ self.logger(output, shadow_output)
275
+ return output
276
+
277
+ def add_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
278
+ """
279
+ """ # blank docblock to make autodoc happy
280
+ output = self.orig_module.add_scalar(x, y)
281
+ x = x.dequantize()
282
+ shadow_output = self.shadow_module.add_scalar(x, y)
283
+ self.logger(output, shadow_output)
284
+ return output
285
+
286
+ def mul(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
287
+ """
288
+ """ # blank docblock to make autodoc happy
289
+ output = self.orig_module.mul(x, y)
290
+ x = x.dequantize()
291
+ y = y.dequantize()
292
+ shadow_output = self.shadow_module.mul(x, y)
293
+ self.logger(output, shadow_output)
294
+ return output
295
+
296
+ def mul_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
297
+ """
298
+ """ # blank docblock to make autodoc happy
299
+ output = self.orig_module.mul_scalar(x, y)
300
+ x = x.dequantize()
301
+ shadow_output = self.shadow_module.mul_scalar(x, y)
302
+ self.logger(output, shadow_output)
303
+ return output
304
+
305
+ def cat(self, x: List[torch.Tensor], dim: int = 0) -> torch.Tensor:
306
+ """
307
+ """ # blank docblock to make autodoc happy
308
+ output = self.orig_module.cat(x, dim)
309
+ x = [y.dequantize() for y in x]
310
+ shadow_output = self.shadow_module.cat(x, dim)
311
+ self.logger(output, shadow_output)
312
+ return output
313
+
314
+ def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
315
+ """
316
+ """ # blank docblock to make autodoc happy
317
+ output = self.orig_module.add_relu(x, y)
318
+ x = x.dequantize()
319
+ y = y.dequantize()
320
+ shadow_output = self.shadow_module.add_relu(x, y)
321
+ self.logger(output, shadow_output)
322
+ return output
323
+
324
+
325
+ def prepare_model_with_stubs(
326
+ float_module: nn.Module, q_module: nn.Module,
327
+ module_swap_list: Set[type], logger_cls: Callable,
328
+ ) -> None:
329
+ r"""Prepare the model by attaching the float module to its matching quantized
330
+ module as the shadow if the float module type is in module_swap_list.
331
+
332
+ Example usage::
333
+
334
+ prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger)
335
+ q_model(data)
336
+ ob_dict = get_logger_dict(q_model)
337
+
338
+ Args:
339
+ float_module: float module used to generate the q_module
340
+ q_module: module quantized from float_module
341
+ module_swap_list: list of float module types to attach the shadow
342
+ logger_cls: type of logger to be used in shadow module to process the outputs of
343
+ quantized module and its float shadow module
344
+ """
345
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_with_stubs")
346
+
347
+ float_module_children = {}
348
+ for name, mod in float_module.named_children():
349
+ float_module_children[name] = mod
350
+
351
+ reassign = {}
352
+ for name, mod in q_module.named_children():
353
+
354
+ if name not in float_module_children:
355
+ continue
356
+
357
+ float_mod = float_module_children[name]
358
+
359
+ if type(float_mod) not in module_swap_list:
360
+ prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls)
361
+
362
+ # Insert shadow module only if the module is not of the same type as
363
+ # the floating point module
364
+ if type(float_mod) in module_swap_list and not _is_identical_module_type(mod, float_mod):
365
+ reassign[name] = Shadow(mod, float_mod, logger_cls)
366
+
367
+ for key, value in reassign.items():
368
+ q_module._modules[key] = value
369
+
370
+ def _is_identical_module_type(mod1, mod2):
371
+ # Compare if two modules have the same dtype
372
+ mod1_module_types = [type(mod) for mod in mod1.modules()]
373
+ mod2_module_types = [type(mod) for mod in mod2.modules()]
374
+ return mod1_module_types == mod2_module_types
375
+
376
+
377
+
378
+ def compare_model_stub(
379
+ float_model: nn.Module, q_model: nn.Module, module_swap_list: Set[type],
380
+ *data, logger_cls=ShadowLogger
381
+ ) -> Dict[str, Dict]:
382
+ r"""Compare quantized module in a model with its floating point counterpart,
383
+ feeding both of them the same input. Return a dict with key corresponding to
384
+ module names and each entry being a dictionary with two keys 'float' and
385
+ 'quantized', containing the output tensors of quantized and its matching
386
+ float shadow module. This dict can be used to compare and compute the module
387
+ level quantization error.
388
+
389
+ This function first call prepare_model_with_stubs() to swap the quantized
390
+ module that we want to compare with the Shadow module, which takes quantized
391
+ module, corresponding float module and logger as input, and creates a forward
392
+ path inside to make the float module to shadow quantized module sharing the
393
+ same input. The logger can be customizable, default logger is ShadowLogger
394
+ and it will save the outputs of the quantized module and float module that
395
+ can be used to compute the module level quantization error.
396
+
397
+ Example usage::
398
+
399
+ module_swap_list = [torchvision.models.quantization.resnet.QuantizableBasicBlock]
400
+ ob_dict = compare_model_stub(float_model,qmodel,module_swap_list, data)
401
+ for key in ob_dict:
402
+ print(key, compute_error(ob_dict[key]['float'], ob_dict[key]['quantized'].dequantize()))
403
+
404
+ Args:
405
+ float_model: float model used to generate the q_model
406
+ q_model: model quantized from float_model
407
+ module_swap_list: list of float module types at which shadow modules will
408
+ be attached.
409
+ data: input data used to run the prepared q_model
410
+ logger_cls: type of logger to be used in shadow module to process the outputs of
411
+ quantized module and its float shadow module
412
+ """
413
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_stub")
414
+ prepare_model_with_stubs(float_model, q_model, module_swap_list, logger_cls)
415
+ q_model(*data)
416
+ ob_dict = get_logger_dict(q_model)
417
+ return ob_dict
418
+
419
+
420
+ def get_matching_activations(
421
+ float_module: nn.Module, q_module: nn.Module,
422
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
423
+ r"""Find the matching activation between float and quantized modules.
424
+
425
+ Args:
426
+ float_module: float module used to generate the q_module
427
+ q_module: module quantized from float_module
428
+
429
+ Return:
430
+ act_dict: dict with key corresponding to quantized module names and each
431
+ entry being a dictionary with two keys 'float' and 'quantized', containing
432
+ the matching float and quantized activations
433
+ """
434
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.get_matching_activations")
435
+ float_dict = get_logger_dict(float_module)
436
+ quantized_dict = get_logger_dict(q_module)
437
+ act_dict: Dict[str, Dict] = {}
438
+ for key in quantized_dict:
439
+ if len(quantized_dict[key]["tensor_val"]) == 0:
440
+ continue
441
+ match_key = _find_match(sorted(float_dict, reverse=True), key, "stats")
442
+ if match_key is not None:
443
+ act_dict[key] = {}
444
+ act_dict[key]["float"] = float_dict[match_key]["tensor_val"]
445
+ act_dict[key]["quantized"] = quantized_dict[key]["tensor_val"]
446
+ return act_dict
447
+
448
+
449
+ def prepare_model_outputs(
450
+ float_module: nn.Module,
451
+ q_module: nn.Module,
452
+ logger_cls=OutputLogger,
453
+ allow_list=None
454
+ ) -> None:
455
+ r"""Prepare the model by attaching the logger to both float module
456
+ and quantized module if they are in the allow_list.
457
+
458
+ Args:
459
+ float_module: float module used to generate the q_module
460
+ q_module: module quantized from float_module
461
+ logger_cls: type of logger to be attached to float_module and q_module
462
+ allow_list: list of module types to attach logger
463
+ """
464
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_outputs")
465
+ if allow_list is None:
466
+ allow_list = get_default_compare_output_module_list()
467
+
468
+ qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None)
469
+ float_module.qconfig = qconfig_debug # type: ignore[assignment]
470
+ prepare(float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={})
471
+ q_module.qconfig = qconfig_debug # type: ignore[assignment]
472
+ prepare(
473
+ q_module,
474
+ inplace=True,
475
+ allow_list=allow_list,
476
+ observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
477
+ prepare_custom_config_dict={}
478
+ )
479
+
480
+
481
+ def compare_model_outputs(
482
+ float_model: nn.Module,
483
+ q_model: nn.Module,
484
+ *data,
485
+ logger_cls=OutputLogger,
486
+ allow_list=None
487
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
488
+ r"""Compare output activations between float and quantized models at
489
+ corresponding locations for the same input. Return a dict with key corresponding
490
+ to quantized module names and each entry being a dictionary with two keys
491
+ 'float' and 'quantized', containing the activations of quantized model and
492
+ float model at matching locations. This dict can be used to compare and
493
+ compute the propagation quantization error.
494
+
495
+ Example usage::
496
+
497
+ act_compare_dict = compare_model_outputs(float_model, qmodel, data)
498
+ for key in act_compare_dict:
499
+ print(
500
+ key,
501
+ compute_error(
502
+ act_compare_dict[key]['float'],
503
+ act_compare_dict[key]['quantized'].dequantize()
504
+ )
505
+ )
506
+
507
+ Args:
508
+ float_model: float model used to generate the q_model
509
+ q_model: model quantized from float_model
510
+ data: input data used to run the prepared float_model and q_model
511
+ logger_cls: type of logger to be attached to float_module and q_module
512
+ allow_list: list of module types to attach logger
513
+
514
+ Return:
515
+ act_compare_dict: dict with key corresponding to quantized module names
516
+ and each entry being a dictionary with two keys 'float' and 'quantized',
517
+ containing the matching float and quantized activations
518
+ """
519
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_outputs")
520
+ if allow_list is None:
521
+ allow_list = get_default_compare_output_module_list()
522
+ prepare_model_outputs(float_model, q_model, logger_cls, allow_list)
523
+ float_model(*data)
524
+ q_model(*data)
525
+ act_compare_dict = get_matching_activations(float_model, q_model)
526
+ return act_compare_dict
venv/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py ADDED
@@ -0,0 +1,1025 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains tooling to compare weights and activations
3
+ across models. Example usage::
4
+
5
+ import copy
6
+ import torch
7
+ import torch.ao.quantization.quantize_fx as quantize_fx
8
+ import torch.ao.ns._numeric_suite_fx as ns
9
+
10
+ m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)).eval()
11
+ mp = quantize_fx.prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
12
+ # We convert a copy because we need the original prepared model
13
+ # to be available for comparisons, and `quantize_fx.convert_fx` is inplace.
14
+ mq = quantize_fx.convert_fx(copy.deepcopy(mp))
15
+
16
+ #
17
+ # Comparing weights
18
+ #
19
+
20
+ # extract weight pairs
21
+ weight_comparison = ns.extract_weights('a', mp, 'b', mq)
22
+
23
+ # add SQNR for each comparison, inplace
24
+ ns.extend_logger_results_with_comparison(
25
+ weight_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
26
+ 'sqnr')
27
+
28
+ # weight_comparison contains the weights from `mp` and `mq` stored
29
+ # in pairs, and can be used for further analysis.
30
+
31
+
32
+ #
33
+ # Comparing activations, with error propagation
34
+ #
35
+
36
+ # add loggers
37
+ mp_ns, mq_ns = ns.add_loggers(
38
+ 'a', copy.deepcopy(mp),
39
+ 'b', copy.deepcopy(mq),
40
+ ns.OutputLogger)
41
+
42
+ # send an example datum to capture intermediate activations
43
+ datum = torch.randn(1, 1, 1, 1)
44
+ mp_ns(datum)
45
+ mq_ns(datum)
46
+
47
+ # extract intermediate activations
48
+ act_comparison = ns.extract_logger_info(
49
+ mp_ns, mq_ns, ns.OutputLogger, 'b')
50
+
51
+ # add SQNR for each comparison, inplace
52
+ ns.extend_logger_results_with_comparison(
53
+ act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
54
+ 'sqnr')
55
+
56
+ # act_comparison contains the activations from `mp_ns` and `mq_ns` stored
57
+ # in pairs, and can be used for further analysis.
58
+
59
+ #
60
+ # Comparing activations, without error propagation
61
+ #
62
+
63
+ # create shadow model
64
+ mp_shadows_mq = ns.add_shadow_loggers(
65
+ 'a', copy.deepcopy(mp),
66
+ 'b', copy.deepcopy(mq),
67
+ ns.OutputLogger)
68
+
69
+ # send an example datum to capture intermediate activations
70
+ datum = torch.randn(1, 1, 1, 1)
71
+ mp_shadows_mq(datum)
72
+
73
+ # extract intermediate activations
74
+ shadow_act_comparison = ns.extract_shadow_logger_info(
75
+ mp_shadows_mq, ns.OutputLogger, 'b')
76
+
77
+ # add SQNR for each comparison, inplace
78
+ ns.extend_logger_results_with_comparison(
79
+ shadow_act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
80
+ 'sqnr')
81
+
82
+ # shadow_act_comparison contains the activations from `mp_ns` and `mq_ns` stored
83
+ # in pairs, and can be used for further analysis.
84
+
85
+ """
86
+
87
+ import collections
88
+
89
+ import torch
90
+ import torch.nn as nn
91
+ import torch.ao.quantization.quantize_fx as quantize_fx
92
+ from torch.fx import GraphModule
93
+ from torch.fx.graph import Node
94
+ from torch.ao.ns.fx.mappings import (
95
+ get_base_name_to_sets_of_related_ops,
96
+ )
97
+ from torch.ao.ns.fx.graph_matcher import (
98
+ get_matching_subgraph_pairs,
99
+ get_type_a_related_to_b,
100
+ )
101
+
102
+ from .fx.weight_utils import (
103
+ extract_weight_from_node,
104
+ )
105
+
106
+ from .fx.graph_passes import (
107
+ add_loggers_to_model,
108
+ create_a_shadows_b,
109
+ )
110
+
111
+ from .fx.utils import (
112
+ rekey_logger_info_on_node_name_of_model,
113
+ maybe_add_missing_fqns,
114
+ get_target_type_str,
115
+ )
116
+
117
+ from .fx.ns_types import (
118
+ NSSingleResultValuesType,
119
+ NSResultsType,
120
+ NSNodeTargetType,
121
+ )
122
+ from torch.ao.quantization.backend_config.utils import get_fusion_pattern_to_root_node_getter
123
+ from torch.ao.quantization.backend_config import BackendConfig
124
+ from torch.ao.quantization.fx.match_utils import _find_matches
125
+ from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr
126
+ from torch.ao.quantization.fx.qconfig_mapping_utils import _generate_node_name_to_qconfig
127
+ from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers
128
+ from torch.ao.quantization.qconfig import QConfigAny
129
+ from torch.ao.quantization import QConfigMapping
130
+ from torch.ao.ns.fx.n_shadows_utils import (
131
+ OutputProp,
132
+ _get_dedup_subgraphs,
133
+ SHADOW_WRAPPER_NODE_NAME_PREFIX,
134
+ group_results_by_subgraph,
135
+ create_results_comparison,
136
+ print_n_shadows_summary,
137
+ create_n_transformed_and_logged_copies_of_subgraph,
138
+ create_add_loggers_graph,
139
+ extract_weight_comparison,
140
+ )
141
+ from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping
142
+
143
+ from typing import Dict, Tuple, Callable, List, Optional, Set, Any, Type
144
+
145
+ RNNReturnType = Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
146
+
147
+ class OutputLogger(nn.Module):
148
+ """
149
+ Base class for capturing intermediate values.
150
+ """
151
+ stats: List[torch.Tensor]
152
+ stats_rnn: List[RNNReturnType]
153
+
154
+ # Mark as impure so that calls to it will not be removed during DCE.
155
+ _is_impure = True
156
+
157
+ def __init__(
158
+ self,
159
+ ref_node_name: str,
160
+ prev_node_name: str,
161
+ model_name: str,
162
+ ref_name: str,
163
+ prev_node_target_type: str,
164
+ ref_node_target_type: str,
165
+ results_type: str,
166
+ index_within_arg: int,
167
+ index_of_arg: int,
168
+ fqn: Optional[str],
169
+ qconfig_str: Optional[str] = '',
170
+ ):
171
+ super().__init__()
172
+ self.stats: List[torch.Tensor] = []
173
+ self.stats_rnn: List[RNNReturnType] = []
174
+
175
+ # name of the node which was responsible for adding this logger
176
+ # Note:
177
+ # - if we are logging node outputs, this is the same as prev_node_name
178
+ # - if we are logging node inputs, this is the name of the node
179
+ # whose input this logger is logging.
180
+ #
181
+ # example, where logger1 is logging input of op1 and logger2 is logging
182
+ # the output of op1:
183
+ #
184
+ # x1 -> logger1 -> op1 -> logger2 -> x2
185
+ #
186
+ # in this example,
187
+ # - logger1's prev_node_name is x1 and ref_node_name is op1
188
+ # - logger2's prev_node_name is op1 and ref_node_name is op1
189
+ self.ref_node_name = ref_node_name
190
+ # name of the node whose output this Logger is capturing
191
+ self.prev_node_name = prev_node_name
192
+
193
+ # name of the model from which the node originated from
194
+ self.model_name = model_name
195
+ # reference name, used to match loggers from separate models
196
+ # to each other
197
+ self.ref_name = ref_name
198
+ # type of the target of the node whose output this logger is logging
199
+ self.prev_node_target_type = prev_node_target_type
200
+ # type of the target of the node which was responsible for adding this
201
+ # logger
202
+ self.ref_node_target_type = ref_node_target_type
203
+ # what kind of values are inside of stats
204
+ self.results_type = results_type
205
+ # index of this node within the arg of the input/output node
206
+ # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
207
+ self.index_within_arg = index_within_arg
208
+ # index of this node within the args of the input/output node
209
+ # for example, in add(x1, x2), x2 would have index_of_arg == 1
210
+ self.index_of_arg = index_of_arg
211
+ # fully qualified name
212
+ self.fqn = fqn
213
+ # if loggers are added before prepare_fx, but we do not want to
214
+ # collect results of calibration, only results after convert_fx
215
+ # so, we add a flag to control whether this logger collects data
216
+ self.enabled = True
217
+ # string representation of qconfig
218
+ self.qconfig_str = qconfig_str
219
+ # this can be turned off to reduce memory usage during calibration
220
+ self.save_activations = True
221
+
222
+ # Note: cannot annotate the type of x because TorchScript does not support
223
+ # the Union type.
224
+ def forward(self, x):
225
+ """
226
+ """ # blank docblock to make autodoc happy
227
+ # TODO(future PR): consider designing this better, as the difference
228
+ # between these two flags is subtle and not obvious.
229
+ if not self.enabled:
230
+ return x
231
+ if not self.save_activations:
232
+ return x
233
+ # TODO(future PR): consider refactoring this to better reuse the parent
234
+ # class
235
+ if isinstance(x, torch.Tensor):
236
+ self.stats.append(x.detach())
237
+ elif isinstance(x, tuple) and len(x) == 2 and len(x[1]) == 2:
238
+ new_res = (x[0].detach(), (x[1][0].detach(), x[1][1].detach()))
239
+ self.stats_rnn.append(new_res)
240
+ return x
241
+
242
+ def __repr__(self):
243
+ clean_dict = {
244
+ k: v
245
+ for k, v in self.__dict__.items()
246
+ # skip nn.Module keys
247
+ if (k != 'training') and not k.startswith('_')
248
+ }
249
+ return f"OutputLogger({clean_dict})"
250
+
251
+
252
+ class OutputComparisonLogger(OutputLogger):
253
+ """
254
+ Same as OutputLogger, but also requires the original activation
255
+ in order to calculate the comparison at calibration time
256
+ """
257
+
258
+ def __init__(self, *args, **kwargs):
259
+ super().__init__(*args, **kwargs)
260
+ # TODO(future PR): make the comparison function configurable
261
+ self.comparison_fn = torch.ao.ns.fx.utils.compute_sqnr
262
+ self.comparison_fn_name = 'sqnr'
263
+ # precalculated comparisons of logger output versus reference
264
+ self.comparisons = []
265
+ # precalculated comparisons function
266
+
267
+ def forward(self, x, x_ref):
268
+ """
269
+ """ # blank docblock to make autodoc happy
270
+ if not self.enabled:
271
+ return x
272
+ assert isinstance(x, torch.Tensor), 'non-tensor inputs not yet supported'
273
+ if self.save_activations:
274
+ # save the activation, for debugging
275
+ self.stats.append(x.detach())
276
+ # save the comparison
277
+ self.comparisons.append(self.comparison_fn(x, x_ref))
278
+ return x
279
+
280
+ def __repr__(self):
281
+ clean_dict = {
282
+ k: v
283
+ for k, v in self.__dict__.items()
284
+ # skip nn.Module keys
285
+ if (k != 'training') and not k.startswith('_')
286
+ }
287
+ return f"OutputComparisonLogger({clean_dict})"
288
+
289
+
290
+ class NSTracer(quantize_fx.QuantizationTracer):
291
+ """
292
+ Just like a regular FX quantization tracer, but treats observers and fake_quantize
293
+ modules as leaf modules.
294
+ """
295
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
296
+ """
297
+ """ # blank docblock to make autodoc happy
298
+ if isinstance(m, torch.ao.quantization.ObserverBase):
299
+ return True
300
+ elif isinstance(m, torch.ao.quantization.FakeQuantizeBase):
301
+ return True
302
+ return super().is_leaf_module(m, module_qualified_name)
303
+
304
+
305
+ def _extract_weights_one_model(
306
+ model_name: str,
307
+ model: GraphModule,
308
+ nodes_and_names_to_instrument: List[Tuple[Node, str]],
309
+ results: NSResultsType,
310
+ op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
311
+ ) -> None:
312
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
313
+ for node, ref_name in nodes_and_names_to_instrument:
314
+ res_type = NSSingleResultValuesType.WEIGHT.value
315
+ extracted_weight = extract_weight_from_node(
316
+ node, model, op_to_type_to_weight_extraction_fn)
317
+ if extracted_weight:
318
+ if ref_name not in results:
319
+ results[ref_name] = {res_type: {}}
320
+ results[ref_name][res_type][model_name] = [extracted_weight]
321
+
322
+
323
+ def _extract_weights_impl(
324
+ model_name_a: str,
325
+ gm_a: GraphModule,
326
+ model_name_b: str,
327
+ gm_b: GraphModule,
328
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
329
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
330
+ op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
331
+ ) -> NSResultsType:
332
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl")
333
+ matched_subgraph_pairs = get_matching_subgraph_pairs(
334
+ gm_a, gm_b, base_name_to_sets_of_related_ops,
335
+ unmatchable_types_map)
336
+
337
+ # split the subgraph pairs into one data structure for each model
338
+ nodes_and_names_to_instrument_a: List[Tuple[Node, str]] = []
339
+ nodes_and_names_to_instrument_b: List[Tuple[Node, str]] = []
340
+ for match_name, match in matched_subgraph_pairs.items():
341
+ subgraph_a, subgraph_b = match
342
+ nodes_and_names_to_instrument_a.append((subgraph_a.base_op_node, match_name))
343
+ nodes_and_names_to_instrument_b.append((subgraph_b.base_op_node, match_name))
344
+
345
+ # populate the results, one model at a time
346
+ results: NSResultsType = {}
347
+ _extract_weights_one_model(
348
+ model_name_a, gm_a, nodes_and_names_to_instrument_a, results,
349
+ op_to_type_to_weight_extraction_fn)
350
+ _extract_weights_one_model(
351
+ model_name_b, gm_b, nodes_and_names_to_instrument_b, results,
352
+ op_to_type_to_weight_extraction_fn)
353
+
354
+ # fill in missing fqn entries
355
+ maybe_add_missing_fqns(results)
356
+
357
+ # rekey on names of nodes in gm_b
358
+ results = rekey_logger_info_on_node_name_of_model(results, model_name_b)
359
+
360
+ return results
361
+
362
+
363
+ def extract_weights(
364
+ model_name_a: str,
365
+ model_a: nn.Module,
366
+ model_name_b: str,
367
+ model_b: nn.Module,
368
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
369
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
370
+ op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
371
+ ) -> NSResultsType:
372
+ """
373
+ Extract weights from model A and model B, and return a comparison.
374
+
375
+ Args:
376
+ model_name_a: string name of model A to use in results
377
+ model_a: model A
378
+ model_name_b: string name of model B to use in results
379
+ model_b: model B
380
+ base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
381
+ unmatchable_types_map: optional override of unmatchable types, subject to change
382
+ op_to_type_to_weight_extraction_fn: optional override of function which extracts weight
383
+ from a type, subject to change
384
+
385
+ Return:
386
+ NSResultsType, containing the weight comparisons
387
+ """
388
+
389
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights")
390
+ if base_name_to_sets_of_related_ops is None:
391
+ base_name_to_sets_of_related_ops = \
392
+ get_base_name_to_sets_of_related_ops()
393
+ type_a_related_to_b = \
394
+ get_type_a_related_to_b(base_name_to_sets_of_related_ops)
395
+
396
+ # TODO(future PR): expose these
397
+ skipped_module_names: List[str] = []
398
+ skipped_module_classes: List[Callable] = []
399
+ tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
400
+ tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
401
+ gm_a = GraphModule(model_a, tracer_a.trace(model_a))
402
+ maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
403
+ if maybe_model_a_node_name_to_scope is not None:
404
+ gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
405
+ gm_b = GraphModule(model_b, tracer_b.trace(model_b))
406
+ maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
407
+ if maybe_model_b_node_name_to_scope is not None:
408
+ gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
409
+ return _extract_weights_impl(
410
+ model_name_a, gm_a, model_name_b, gm_b, base_name_to_sets_of_related_ops,
411
+ unmatchable_types_map, op_to_type_to_weight_extraction_fn)
412
+
413
+
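A minimal end-to-end sketch of the API above; the toy model and the qconfig choice below are illustrative assumptions, mirroring the module docstring:

    import copy
    import torch
    import torch.ao.ns._numeric_suite_fx as ns
    from torch.ao.ns.fx.utils import compute_sqnr
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

    m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 1, 4, 4),)
    mp = prepare_fx(m, get_default_qconfig_mapping(), example_inputs)
    mp(*example_inputs)  # calibrate the observers
    mq = convert_fx(copy.deepcopy(mp))

    # pair up the weights of the two models and attach an SQNR comparison
    weight_comparison = ns.extract_weights('a', mp, 'b', mq)
    ns.extend_logger_results_with_comparison(
        weight_comparison, 'a', 'b', compute_sqnr, 'sqnr')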
414
+ def _add_loggers_one_model(
415
+ model_name: str,
416
+ model: GraphModule,
417
+ nodes_and_names_to_instrument_inputs: List[Tuple[Node, str, str]],
418
+ nodes_and_names_to_instrument_outputs: List[Tuple[Node, str, str]],
419
+ logger_cls: Callable,
420
+ ) -> nn.Module:
421
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_one_model")
422
+
423
+ # TODO(future PR): do not observe nodes we do not care
424
+ # about (both fp32, denylist, etc)
425
+ node_to_instrument_inputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
426
+ node_to_instrument_outputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
427
+ for node, ref_name, ref_node_type in nodes_and_names_to_instrument_inputs:
428
+ node_to_instrument_inputs_to_ref_name[node] = (ref_name, ref_node_type)
429
+ for node, ref_name, ref_node_type in nodes_and_names_to_instrument_outputs:
430
+ node_to_instrument_outputs_to_ref_name[node] = (ref_name, ref_node_type)
431
+
432
+ model = add_loggers_to_model(
433
+ model, node_to_instrument_inputs_to_ref_name,
434
+ node_to_instrument_outputs_to_ref_name, logger_cls, model_name)
435
+ return model
436
+
437
+
438
+ def _add_loggers_impl(
439
+ name_a: str,
440
+ gm_a: GraphModule,
441
+ name_b: str,
442
+ gm_b: GraphModule,
443
+ logger_cls: Callable,
444
+ should_log_inputs: bool,
445
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
446
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
447
+ ) -> Tuple[nn.Module, nn.Module]:
448
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_impl")
449
+ matched_subgraph_pairs = get_matching_subgraph_pairs(
450
+ gm_a, gm_b,
451
+ base_name_to_sets_of_related_ops, unmatchable_types_map)
452
+ nodes_and_names_to_instrument_inputs_a = []
453
+ nodes_and_names_to_instrument_inputs_b = []
454
+ nodes_and_names_to_instrument_outputs_a = []
455
+ nodes_and_names_to_instrument_outputs_b = []
456
+ for match_name, (subgraph_a, subgraph_b) in matched_subgraph_pairs.items():
457
+ ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
458
+ ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
459
+ # Note: for matching inputs we use start_node, such as observing
460
+ # the input of linear in linear-relu
461
+ if should_log_inputs:
462
+ nodes_and_names_to_instrument_inputs_a.append(
463
+ (subgraph_a.start_node, match_name, ref_node_type_a))
464
+ nodes_and_names_to_instrument_inputs_b.append(
465
+ (subgraph_b.start_node, match_name, ref_node_type_b))
466
+ # Note: for matching activations we always use end_node,
467
+ # such as observing the output of relu in linear-relu
468
+ nodes_and_names_to_instrument_outputs_a.append(
469
+ (subgraph_a.end_node, match_name, ref_node_type_a))
470
+ nodes_and_names_to_instrument_outputs_b.append(
471
+ (subgraph_b.end_node, match_name, ref_node_type_b))
472
+
473
+ new_model_a = _add_loggers_one_model(
474
+ name_a, gm_a, nodes_and_names_to_instrument_inputs_a,
475
+ nodes_and_names_to_instrument_outputs_a, logger_cls)
476
+ new_model_b = _add_loggers_one_model(
477
+ name_b, gm_b, nodes_and_names_to_instrument_inputs_b,
478
+ nodes_and_names_to_instrument_outputs_b, logger_cls)
479
+ return (new_model_a, new_model_b)
480
+
481
+
482
+ def add_loggers(
483
+ name_a: str,
484
+ model_a: nn.Module,
485
+ name_b: str,
486
+ model_b: nn.Module,
487
+ logger_cls: Callable,
488
+ should_log_inputs : bool = False,
489
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
490
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
491
+ ) -> Tuple[nn.Module, nn.Module]:
492
+ """
493
+ Instrument model A and model B with loggers.
494
+
495
+ Args:
496
+ name_a: string name of model A to use in results
497
+ model_a: model A
498
+ name_b: string name of model B to use in results
499
+ model_b: model B
500
+ logger_cls: class of Logger to use
501
+ base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
502
+ unmatchable_types_map: optional override of unmatchable types, subject to change
503
+
504
+ Return:
505
+ Returns a tuple of (model_a_with_loggers, model_b_with_loggers). Modifies both models inplace.
506
+ """
507
+
508
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_loggers")
509
+ # TODO(future PR): expose these
510
+ skipped_module_names: List[str] = []
511
+ skipped_module_classes: List[Callable] = []
512
+ tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
513
+ tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
514
+ gm_a = GraphModule(model_a, tracer_a.trace(model_a))
515
+ maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
516
+ if maybe_model_a_node_name_to_scope is not None:
517
+ gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
518
+ gm_b = GraphModule(model_b, tracer_b.trace(model_b))
519
+ maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
520
+ if maybe_model_b_node_name_to_scope is not None:
521
+ gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
522
+ return _add_loggers_impl(
523
+ name_a, gm_a, name_b, gm_b, logger_cls,
524
+ should_log_inputs=should_log_inputs,
525
+ base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
526
+ unmatchable_types_map=unmatchable_types_map)
527
+
528
+
529
+ def _extract_logger_info_one_model(
530
+ model: nn.Module,
531
+ results: NSResultsType,
532
+ logger_cls: Callable,
533
+ ) -> None:
534
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_logger_info_one_model")
535
+ for gm_name, mod in model.named_modules():
536
+ # TODO(future PR): better check when scripted
537
+ is_logger = (
538
+ isinstance(mod, logger_cls) # type: ignore[arg-type]
539
+ or (
540
+ isinstance(mod, torch.jit.RecursiveScriptModule)
541
+ and mod.original_name == 'OutputLogger'
542
+ )
543
+ )
544
+ if is_logger:
545
+ key = mod.ref_name
546
+ if key not in results:
547
+ results[key] = {}
548
+ assert mod.model_name not in results[key], \
549
+ f"{mod.model_name} is already present in results"
550
+ if mod.results_type not in results[key]:
551
+ results[key][mod.results_type] = {}
552
+ if mod.model_name not in results[key][mod.results_type]:
553
+ results[key][mod.results_type][mod.model_name] = []
554
+ stats_to_use = mod.stats
555
+ if len(mod.stats_rnn) > 0:
556
+ stats_to_use = mod.stats_rnn
557
+ data = {
558
+ 'type': mod.results_type,
559
+ 'values': stats_to_use,
560
+ 'ref_node_name': mod.ref_node_name,
561
+ 'ref_node_target_type': mod.ref_node_target_type,
562
+ 'prev_node_name': mod.prev_node_name,
563
+ 'prev_node_target_type': mod.prev_node_target_type,
564
+ 'index_within_arg': mod.index_within_arg,
565
+ 'index_of_arg': mod.index_of_arg,
566
+ 'fqn': mod.fqn,
567
+ 'qconfig_str': mod.qconfig_str,
568
+ }
569
+ if hasattr(mod, 'comparisons'):
570
+ data['comparisons'] = mod.comparisons
571
+ data['comparison_fn_name'] = mod.comparison_fn_name
572
+ else:
573
+ data['comparisons'] = []
574
+ data['comparison_fn_name'] = ''
575
+ results[key][mod.results_type][mod.model_name].append(data)
576
+ # ensure the list stays sorted
577
+ results[key][mod.results_type][mod.model_name].sort(
578
+ key=lambda res:
579
+ f"{res['index_of_arg']}:{res['index_within_arg']}"
580
+ )
581
+
582
+
583
+ # TODO(future PR): align on naming
584
+ # this is equivalent of just the comparison extraction part of `ns.compare_model_outputs`
585
+ def extract_logger_info(
586
+ model_a: nn.Module,
587
+ model_b: nn.Module,
588
+ logger_cls: Callable,
589
+ model_name_to_use_for_layer_names: str,
590
+ ) -> NSResultsType:
591
+ """
592
+ Traverse all loggers in `model_a` and `model_b`, and extract the logged
593
+ information.
594
+
595
+ Args:
596
+ model_a: model A
597
+ model_b: model B
598
+ logger_cls: class of Logger to use
599
+ model_name_to_use_for_layer_names: string name of model to use for
600
+ layer names in the output
601
+
602
+ Return:
603
+ NSResultsType, containing the logged comparisons
604
+ """
605
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_logger_info")
606
+ results: NSResultsType = {}
607
+ for model in (model_a, model_b):
608
+ _extract_logger_info_one_model(model, results, logger_cls)
609
+ # fill in missing fqn entries
610
+ maybe_add_missing_fqns(results)
611
+ # rekey on the name of model b
612
+ results = rekey_logger_info_on_node_name_of_model(
613
+ results, model_name_to_use_for_layer_names)
614
+ return results
615
+
616
+
617
+ def _add_shadow_loggers_impl(
618
+ name_a: str,
619
+ gm_a: GraphModule,
620
+ name_b: str,
621
+ gm_b: GraphModule,
622
+ logger_cls: Callable,
623
+ should_log_inputs: bool,
624
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
625
+ node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
626
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
627
+ ) -> nn.Module:
628
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_shadow_loggers_impl")
629
+ matched_subgraph_pairs = get_matching_subgraph_pairs(
630
+ gm_a, gm_b, base_name_to_sets_of_related_ops,
631
+ unmatchable_types_map)
632
+ gm_a_shadows_b = create_a_shadows_b(
633
+ name_a, gm_a, name_b, gm_b, matched_subgraph_pairs, logger_cls,
634
+ should_log_inputs=should_log_inputs,
635
+ node_type_to_io_type_map=node_type_to_io_type_map)
636
+ return gm_a_shadows_b
637
+
638
+
639
+ def add_shadow_loggers(
640
+ name_a: str,
641
+ model_a: nn.Module,
642
+ name_b: str,
643
+ model_b: nn.Module,
644
+ logger_cls: Callable,
645
+ should_log_inputs: bool = False,
646
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
647
+ node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
648
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
649
+ ) -> nn.Module:
650
+ """
651
+ Instrument model A and model B with shadow loggers.
652
+
653
+ Args:
654
+ name_a: string name of model A to use in results
655
+ model_a: model A
656
+ name_b: string name of model B to use in results
657
+ model_b: model B
658
+ logger_cls: class of Logger to use
659
+ should_log_inputs: whether to log inputs
660
+ base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
661
+ unmatchable_types_map: optional override of unmatchable types, subject to change
662
+ """
663
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_shadow_loggers")
664
+ # TODO(future PR): expose these
665
+ skipped_module_names: List[str] = []
666
+ skipped_module_classes: List[Callable] = []
667
+ tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
668
+ tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
669
+ gm_a = GraphModule(model_a, tracer_a.trace(model_a))
670
+ maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
671
+ if maybe_model_a_node_name_to_scope is not None:
672
+ gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
673
+ gm_b = GraphModule(model_b, tracer_b.trace(model_b))
674
+ maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
675
+ if maybe_model_b_node_name_to_scope is not None:
676
+ gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
677
+ return _add_shadow_loggers_impl(
678
+ name_a, gm_a, name_b, gm_b, logger_cls,
679
+ should_log_inputs=should_log_inputs,
680
+ base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
681
+ node_type_to_io_type_map=node_type_to_io_type_map,
682
+ unmatchable_types_map=unmatchable_types_map)
683
+
684
+
685
+ def extract_shadow_logger_info(
686
+ model_a_shadows_b: nn.Module,
687
+ logger_cls: Callable,
688
+ model_name_to_use_for_layer_names: str,
689
+ ) -> NSResultsType:
690
+ """
691
+ Traverse all loggers in a shadow model, and extract the logged
692
+ information.
693
+
694
+ Args:
695
+ model_a_shadows_b: shadow model
696
+ logger_cls: class of Logger to use
697
+ model_name_to_use_for_layer_names: string name of model to use for
698
+ layer names in the output
699
+
700
+ Return:
701
+ NSResultsType, containing the logged comparisons
702
+ """
703
+ torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_shadow_logger_info")
704
+ results: NSResultsType = collections.defaultdict(dict)
705
+ _extract_logger_info_one_model(model_a_shadows_b, results, logger_cls)
706
+ # fill in missing fqn entries
707
+ maybe_add_missing_fqns(results)
708
+ # rekey on the name of model b
709
+ results = rekey_logger_info_on_node_name_of_model(
710
+ results, model_name_to_use_for_layer_names)
711
+ return dict(results)
712
+
713
+
714
+ def extend_logger_results_with_comparison(
715
+ results: NSResultsType,
716
+ model_name_1: str,
717
+ model_name_2: str,
718
+ comparison_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
719
+ comparison_name: str,
720
+ ) -> None:
721
+ """
722
+ Compares the logged values from `model_name_2` against the corresponding
723
+ values in `model_name_1`, using `comparison_fn`. Records the result
724
+ in `model_name_2`'s results under `comparison_name`. Modifies `results` inplace.
725
+
726
+ Args:
727
+ results: the result data structure from `extract_logger_info` or
728
+ `extract_shadow_logger_info`.
729
+ model_name_1: string name of model 1
730
+ model_name_2: string name of model 2
731
+ comparison_fn: function to compare two Tensors
732
+ comparison_name: string name to use for the comparison results
733
+ in the output
734
+ """
735
+ for results_type_to_results in results.values():
736
+ for model_name_to_results in results_type_to_results.values():
737
+ assert model_name_1 in model_name_to_results, \
738
+ f"{model_name_1} not found in results"
739
+ assert model_name_2 in model_name_to_results, \
740
+ f"{model_name_2} not found in results"
741
+
742
+ results_1 = model_name_to_results[model_name_1]
743
+ results_2 = model_name_to_results[model_name_2]
744
+
745
+ for result_2 in results_2:
746
+ index_within_arg_2 = result_2['index_within_arg']
747
+ index_of_arg_2 = result_2['index_of_arg']
748
+ # find corresponding result_1
749
+ result_1 = None
750
+ for cur_result_1 in results_1:
751
+ index_within_arg_1 = cur_result_1['index_within_arg']
752
+ index_of_arg_1 = cur_result_1['index_of_arg']
753
+ if (
754
+ (index_within_arg_1 == index_within_arg_2) and
755
+ (index_of_arg_1 == index_of_arg_2)
756
+ ):
757
+ result_1 = cur_result_1
758
+ break
759
+ assert result_1 is not None
760
+
761
+ values_1 = result_1['values']
762
+ values_2 = result_2['values']
763
+ result_2[comparison_name] = []
764
+ for value_1, value_2 in zip(values_1, values_2):
765
+ comparison_result = comparison_fn(value_1, value_2)
766
+ result_2[comparison_name].append(comparison_result)
767
+
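Since `comparison_fn` is any callable taking two tensors and returning a tensor, metrics other than SQNR can also be recorded. A small sketch, assuming `act_comparison` was produced by `extract_logger_info` as in the module docstring:

    import torch
    import torch.ao.ns._numeric_suite_fx as ns

    def cosine_similarity_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # flatten both tensors so activations of any shape can be compared
        return torch.nn.functional.cosine_similarity(
            x.reshape(-1), y.reshape(-1), dim=0)

    ns.extend_logger_results_with_comparison(
        act_comparison, 'a', 'b', cosine_similarity_fn, 'cosine_similarity')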
768
+ def prepare_n_shadows_model(
769
+ model: torch.nn.Module,
770
+ example_inputs: Any,
771
+ qconfig_multi_mapping: QConfigMultiMapping,
772
+ backend_config: BackendConfig,
773
+ custom_prepare_fn: Optional[Callable] = None,
774
+ custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
775
+ custom_tracer: Any = None,
776
+ ) -> GraphModule:
777
+ """
778
+ Given a model with a graph with M ops such as
779
+
780
+
781
+ args_kwargs_m -> op_m -> output_m
782
+
783
+
784
+ And a set of N qconfigs for each op, creates a new model, with
785
+ the subgraph of each `op_m` transformed into
786
+
787
+ .. code::
788
+
789
+ |---------> op_m_n -> log_m_n
790
+ | /
791
+ args_kwargs_m ---------> op_m -> log_m_0
792
+
793
+ Where op_m_n is op_m wrapped in a submodule and transformed with
794
+ qconfig_n, and its inner graph looks like
795
+
796
+ .. code::
797
+
798
+ args_m -------- op_m_prepared_with_qconfig_n -> out_m_n
799
+ /
800
+ kwargs_m ---
801
+
802
+ This is useful for testing different quantization of multiple layers in
803
+ a single pass through the model.
804
+
805
+ High level TODOs for future PRs:
806
+ * figure out a better way to name the output structure
807
+ * return a results data structure instead of printing it out
808
+ * add examples to docblocks
809
+ """
810
+
811
+ if custom_tracer is None:
812
+ tracer = quantize_fx.QuantizationTracer([], [])
813
+ else:
814
+ tracer = custom_tracer
815
+ mt = torch.fx.GraphModule(model, tracer.trace(model))
816
+ # this is necessary to ensure logger FQNs get populated
817
+ mt._node_name_to_scope = tracer.node_name_to_scope
818
+
819
+ # run example input propagation, we need this to call prepare_fx on
820
+ # individual subgraphs
821
+ output_prop = OutputProp(mt)
822
+ output_prop.propagate(*example_inputs)
823
+
824
+ # Find the set of subgraphs in the original graph which we need to
825
+ # consider.
826
+ modules = dict(mt.named_modules(remove_duplicate=False))
827
+ patterns = _get_pattern_to_quantize_handlers(backend_config)
828
+ root_node_getter_mapping = \
829
+ get_fusion_pattern_to_root_node_getter(backend_config)
830
+ standalone_module_names: List[str] = []
831
+ standalone_module_classes: List[Type] = []
832
+ custom_module_classes: List[Type] = []
833
+ matches = _find_matches(
834
+ mt.graph, modules, patterns, root_node_getter_mapping,
835
+ standalone_module_names, standalone_module_classes, custom_module_classes)
836
+ subgraphs_dedup: Dict[str, List[Node]] = \
837
+ _get_dedup_subgraphs(matches)
838
+
839
+ # generate node to qconfig for each subgraph
840
+ # TODO(future PR): deduplicate repeating entries
841
+ list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]] = []
842
+ for qconfig_mapping in qconfig_multi_mapping.qconfig_mappings_list:
843
+ node_name_to_qconfig = _generate_node_name_to_qconfig(
844
+ mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope)
845
+ list_of_node_name_to_qconfig.append(node_name_to_qconfig)
846
+
847
+ # For each region in the model, do the following:
848
+ # For each qconfig for that region, do the following:
849
+ # 1. create a copy of the region wrapped in a module
850
+ # 2. pass original args, original kwargs, and expected output to module
851
+ # 3. add an output comparison logger and hook it up to compare
852
+ # actual output to expected output
853
+ # 4. run `prepare_fx` on the module
854
+ for (subgraph_idx, (match_name, nodes_in_this_subgraph)) in \
855
+ enumerate(subgraphs_dedup.items()):
856
+ create_n_transformed_and_logged_copies_of_subgraph(
857
+ mt, subgraph_idx, match_name, nodes_in_this_subgraph,
858
+ qconfig_multi_mapping.qconfig_mappings_list, list_of_node_name_to_qconfig,
859
+ custom_prepare_fn, custom_prepare_kwargs # type: ignore[arg-type]
860
+ )
861
+
862
+ return mt
863
+
864
+ # TODO(future PR): we should rethink the names of all the PNP APIs
865
+ def _prepare_n_shadows_add_loggers_model(
866
+ model: torch.nn.Module,
867
+ example_inputs: Any,
868
+ qconfig_mapping: QConfigMapping,
869
+ backend_config: BackendConfig,
870
+ ) -> torch.nn.Module:
871
+ r"""
872
+ Note: this API is not recommended for wide usage; it is only
873
+ provided for customers who need to migrate from the `add_loggers`
874
+ API.
875
+
876
+ This creates a model which provides logging for the following
877
+ problem: if we quantize `model` with `qconfig_mapping` and feed
878
+ the same input through both models, log the comparisons of
879
+ corresponding intermediate layers.
880
+
881
+ The problem is solved with a single model. Specifically, we
882
+ partition `model` into N subgraphs, create a copy of each relevant
883
+ subgraph, wrap it in a module, apply the quantization API to that
884
+ module, and hook up loggers to measure the comparisons.
885
+
886
+ Example starting graph:
887
+
888
+ x0 -> op0 -> x1 -> op1 -> x2
889
+
890
+ Example config: quantize op0 to int8, do nothing to op1.
891
+ The following graph will be created:
892
+
893
+ .. code::
894
+
895
+ x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log
896
+ \ \ \ # noqa: W605
897
+ ---> op0_1 -> x1_1 ----> clog -> op1_0 -> x2_1 ----> clog
898
+
899
+ Where op0_0 is op0, op0_1 is op0 wrapped in a submodule and quantized
900
+ to int8, op1_0 is op1 (appearing in the graph twice), log is a logger,
901
+ and clog is a comparison logger.
902
+ """
903
+
904
+ tracer = quantize_fx.QuantizationTracer([], [])
905
+ mt = torch.fx.GraphModule(model, tracer.trace(model))
906
+ # this is necessary to ensure logger FQNs get populated
907
+ mt._node_name_to_scope = tracer.node_name_to_scope
908
+
909
+ # run example input propagation, we need this to call prepare_fx on
910
+ # individual subgraphs
911
+ output_prop = OutputProp(mt)
912
+ output_prop.propagate(*example_inputs)
913
+
914
+ # Find the set of subgraphs in the original graph which we need to
915
+ # consider.
916
+ modules = dict(mt.named_modules(remove_duplicate=False))
917
+ patterns = _get_pattern_to_quantize_handlers(backend_config)
918
+ root_node_getter_mapping = \
919
+ get_fusion_pattern_to_root_node_getter(backend_config)
920
+ standalone_module_names: List[str] = []
921
+ standalone_module_classes: List[Type] = []
922
+ custom_module_classes: List[Type] = []
923
+ matches = _find_matches(
924
+ mt.graph, modules, patterns, root_node_getter_mapping,
925
+ standalone_module_names, standalone_module_classes, custom_module_classes)
926
+ subgraphs_dedup: Dict[str, List[Node]] = \
927
+ _get_dedup_subgraphs(matches)
928
+
929
+ # generate node to qconfig for each subgraph
930
+ node_name_to_qconfig = _generate_node_name_to_qconfig(
931
+ mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope)
932
+
933
+ # Now, mutate the graph to be the add_loggers graph with propagation
934
+ # error.
935
+ create_add_loggers_graph(
936
+ mt, subgraphs_dedup, qconfig_mapping, node_name_to_qconfig)
937
+
938
+ return mt
939
+
940
+ # TODO(future PR): we should rethink the names of all the PNP APIs
941
+ def _n_shadows_compare_weights(
942
+ model: torch.nn.Module,
943
+ example_inputs: Any,
944
+ qconfig_mapping: QConfigMapping,
945
+ backend_config: BackendConfig,
946
+ ) -> NSResultsType:
947
+ """
948
+ Note: this API is not recommended for wide usage; it is only
949
+ provided for customers who need to migrate from the `add_loggers`
950
+ API.
951
+ """
952
+ qconfig_multi_mapping = \
953
+ QConfigMultiMapping.from_list_qconfig_mapping([qconfig_mapping])
954
+ mp = prepare_n_shadows_model(
955
+ model, example_inputs, qconfig_multi_mapping, backend_config)
956
+ # passing inputs through the model is necessary to populate
957
+ # observers which observe weights with real values
958
+ mp(*example_inputs)
959
+ mq = convert_n_shadows_model(mp)
960
+ weight_comparison = extract_weight_comparison(mq)
961
+ return weight_comparison
962
+
963
+ # TODO(future PR): consider aligning API signature with other similar quantization
964
+ # functions (enable_fake_quant, etc)
965
+ def loggers_set_enabled(model: torch.nn.Module, enabled: bool) -> None:
966
+ """
967
+ Sets the `enabled` setting on a `model`'s loggers
968
+ """
969
+ for name, child in model.named_modules():
970
+ if isinstance(child, OutputLogger):
971
+ child.enabled = enabled
972
+
973
+ # TODO(future PR): consider aligning API signature with other similar quantization
974
+ # functions (enable_fake_quant, etc)
975
+ def loggers_set_save_activations(
976
+ model: torch.nn.Module,
977
+ save_activations: bool,
978
+ ) -> None:
979
+ """
980
+ Sets the `save_activations` setting on a `model`'s loggers
981
+ """
982
+ for name, child in model.named_modules():
983
+ if isinstance(child, OutputLogger):
984
+ child.save_activations = save_activations
985
+
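A small sketch of how these toggles might be combined to keep memory usage down; `mp_ns`, `calibration_data` and `datum` are illustrative assumptions (a model instrumented with `add_loggers`, an iterable of example inputs, and a single example input):

    ns.loggers_set_save_activations(mp_ns, False)
    for x in calibration_data:
        mp_ns(x)  # loggers do not store activations on these runs
    ns.loggers_set_save_activations(mp_ns, True)
    mp_ns(datum)  # activations from this run are kept for later comparison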
986
+ def convert_n_shadows_model(
987
+ model: GraphModule,
988
+ custom_convert_fn: Optional[Callable] = None,
989
+ custom_convert_kwargs: Optional[Dict[str, Any]] = None
990
+ ) -> GraphModule:
991
+ """
992
+ Given a model from `prepare_n_shadows_model`, runs `convert_fx`
993
+ on each shadow submodule.
994
+ """
995
+ for node in model.graph.nodes:
996
+ # TODO(future PR): consider matching in a safer way than
997
+ # node name string match
998
+ if node.name.startswith(SHADOW_WRAPPER_NODE_NAME_PREFIX):
999
+ orig_mod = getattr(model, node.name)
1000
+ if custom_convert_fn is None:
1001
+ converted_mod = torch.ao.quantization.quantize_fx.convert_fx(
1002
+ orig_mod)
1003
+ else:
1004
+ if custom_convert_kwargs is None:
1005
+ custom_convert_kwargs = {}
1006
+ converted_mod = custom_convert_fn(orig_mod, **custom_convert_kwargs)
1007
+ setattr(model, node.name, converted_mod)
1008
+
1009
+ return model
1010
+
1011
+ def extract_results_n_shadows_model(model: torch.nn.Module) -> NSResultsType:
1012
+ """
1013
+ Extracts logger results from `model`.
1014
+ """
1015
+ results: NSResultsType = {}
1016
+ _extract_logger_info_one_model(model, results, OutputLogger)
1017
+ return results
1018
+
1019
+ def print_comparisons_n_shadows_model(results: NSResultsType) -> None:
1020
+ """
1021
+ Prints a summary of extracted `results`.
1022
+ """
1023
+ results_grouped = group_results_by_subgraph(results)
1024
+ results_comparison = create_results_comparison(results_grouped)
1025
+ print_n_shadows_summary(results_comparison)
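Taken together, the n-shadows APIs above compose into roughly the following workflow; the toy model and qconfig choices are illustrative assumptions:

    import torch
    import torch.ao.ns._numeric_suite_fx as ns
    from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.backend_config import get_native_backend_config

    m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 1, 4, 4),)

    # try two candidate qconfig mappings for every matched subgraph
    qconfig_multi_mapping = QConfigMultiMapping.from_list_qconfig_mapping([
        get_default_qconfig_mapping(),
        get_default_qconfig_mapping('qnnpack'),
    ])

    mp = ns.prepare_n_shadows_model(
        m, example_inputs, qconfig_multi_mapping, get_native_backend_config())
    mp(*example_inputs)   # calibration populates the observers
    mq = ns.convert_n_shadows_model(mp)
    mq(*example_inputs)   # comparisons are recorded by the loggers
    results = ns.extract_results_n_shadows_model(mq)
    ns.print_comparisons_n_shadows_model(results)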
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc ADDED
Binary file (12.1 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc ADDED
Binary file (17.7 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc ADDED
Binary file (9.98 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc ADDED
Binary file (24.1 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc ADDED
Binary file (988 Bytes).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc ADDED
Binary file (4.05 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc ADDED
Binary file (7.25 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.6 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc ADDED
Binary file (6.85 kB).
venv/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py ADDED
@@ -0,0 +1,460 @@
1
+ import collections
2
+ import enum
3
+
4
+ import torch
5
+ toq = torch.ops.quantized
6
+
7
+ from torch.fx import GraphModule
8
+ from torch.fx.graph import Graph, Node
9
+
10
+ from torch.ao.quantization.utils import getattr_from_fqn
11
+ from .ns_types import NSSubgraph, NSNodeTargetType
12
+ from .mappings import (
13
+ get_base_name_to_sets_of_related_ops,
14
+ get_unmatchable_types_map,
15
+ )
16
+ from .pattern_utils import (
17
+ get_type_a_related_to_b,
18
+ get_reversed_fusions,
19
+ end_node_matches_reversed_fusion,
20
+ )
21
+ from torch.ao.quantization import (
22
+ ObserverBase,
23
+ FakeQuantizeBase,
24
+ )
25
+
26
+ from typing import Dict, Tuple, List, Optional, Set, Any
27
+
28
+ def _get_output_nodes(g: Graph) -> List[Node]:
29
+ return [n for n in g.nodes if n.op == 'output']
30
+
31
+ class _NSGraphMatchableSubgraphsIterator:
32
+ """
33
+ Iterates through the graph of gm, starting with the output nodes
34
+ and continuing backwards.
35
+ 1. Returns matchable subgraphs, in order. A subgraph is defined by
36
+ (start_node, end_node).
37
+ 2. Skips over non-matchable subgraphs
38
+ """
39
+ def __init__(
40
+ self,
41
+ gm: GraphModule,
42
+ non_matchable_functions: Set[NSNodeTargetType],
43
+ non_matchable_modules: Set[NSNodeTargetType],
44
+ non_matchable_methods: Set[NSNodeTargetType],
45
+ ):
46
+ self.gm: GraphModule = gm
47
+ self.non_matchable_functions: Set[NSNodeTargetType] = non_matchable_functions
48
+ self.non_matchable_modules: Set[NSNodeTargetType] = non_matchable_modules
49
+ self.non_matchable_methods: Set[NSNodeTargetType] = non_matchable_methods
50
+ self.seen_nodes: Set[Node] = set()
51
+ self.stack: List[Node] = []
52
+ for start_node in _get_output_nodes(self.gm.graph):
53
+ self.stack.append(start_node)
54
+
55
+ def __iter__(self):
56
+ return self
57
+
58
+ def __next__(self) -> NSSubgraph:
59
+ """
60
+ Returns the next matchable subgraph.
61
+ """
62
+ while len(self.stack) > 0:
63
+ cur_end_node = self.stack.pop()
64
+ if cur_end_node in self.seen_nodes:
65
+ continue
66
+
67
+ # for subgraphs which are single nodes, start_node == end_node
68
+ # for subgraphs with more than one node, start node != end_node
69
+ cur_start_node = cur_end_node
70
+ # Subgraphs like linear-relu have the base node as the start node.
71
+ # Subgraphs like dequantize-linear-relu-to(torch.float16) have the
72
+ # base node as the second node.
73
+ # The cur_base_op_node var will move to the actual node during
74
+ # the fusion matching later in this code block.
75
+ cur_base_op_node = cur_end_node
76
+
77
+ # Check for potential fusions. For now, we are greedy
78
+ # and always skip all non-base nodes of a fusion. For example,
79
+ # if we match linear-relu backwards, we will always skip the
80
+ # relu node and attempt to match the linear node. This can
81
+ # be made configurable later if needed.
82
+ for _reverse_fusion_ops, base_op_idx in get_reversed_fusions():
83
+ is_match = end_node_matches_reversed_fusion(
84
+ cur_end_node, _reverse_fusion_ops, self.gm, self.seen_nodes)
85
+ if is_match:
86
+ # navigate to the base node
87
+ for rev_fusion_idx in range(len(_reverse_fusion_ops) - 1):
88
+ self.seen_nodes.add(cur_start_node)
89
+ # for now, assume that there are no other nodes
90
+ # which need to be added to the stack
91
+ cur_start_node = cur_start_node.args[0] # type: ignore[assignment]
92
+ # if the base op index matches the current node, set it
93
+ rev_base_op_idx = \
94
+ len(_reverse_fusion_ops) - 2 - base_op_idx
95
+ if rev_fusion_idx == rev_base_op_idx:
96
+ cur_base_op_node = cur_start_node
97
+ break
98
+
99
+ self.seen_nodes.add(cur_start_node)
100
+ # add args of previous nodes to stack
101
+ for arg in cur_start_node.all_input_nodes:
102
+ self._recursively_add_node_arg_to_stack(arg)
103
+
104
+ # skip unmatchable nodes
105
+ # note: this check is done on the start_node, i.e.
106
+ # if we are matching linear-relu in reverse, this would do the matchable
107
+ # check on the linear
108
+ if not self._is_matchable(cur_base_op_node):
109
+ continue
110
+
111
+ # If an observer or a fake_quant was not matched as a part of
112
+ # a pattern of multiple nodes, ignore it. One case where this is
113
+ # relevant is an observer on a graph input, which was added because
114
+ # it is necessary for the next node.
115
+ if cur_end_node.op == 'call_module' and cur_start_node is cur_end_node:
116
+ maybe_obs = getattr_from_fqn(self.gm, cur_end_node.target) # type: ignore[arg-type]
117
+ if isinstance(maybe_obs, (ObserverBase, FakeQuantizeBase)):
118
+ continue
119
+
120
+ return NSSubgraph(
121
+ start_node=cur_start_node, end_node=cur_end_node,
122
+ base_op_node=cur_base_op_node)
123
+
124
+ raise StopIteration
125
+
126
+ def _recursively_add_node_arg_to_stack(self, arg: Any) -> None:
127
+ """
128
+ Adds all of the nodes in this arg to the stack, properly navigating
129
+ through list, dicts and tuples.
130
+ """
131
+ if isinstance(arg, Node):
132
+ self.stack.append(arg)
133
+ elif isinstance(arg, torch.fx.immutable_collections.immutable_list) or type(arg) is tuple:
134
+ for inner_arg in arg:
135
+ self._recursively_add_node_arg_to_stack(inner_arg)
136
+ elif isinstance(arg, torch.fx.immutable_collections.immutable_dict):
137
+ for value in arg.values():
138
+ self._recursively_add_node_arg_to_stack(value)
139
+
140
+ def _is_matchable(self, node: Node) -> bool:
141
+ if node.op == 'call_function':
142
+ return node.target not in self.non_matchable_functions
143
+ elif node.op == 'call_module':
144
+ assert isinstance(node.target, str)
145
+ target_mod = getattr_from_fqn(self.gm, node.target)
146
+ return not \
147
+ any(isinstance(target_mod, t) # type: ignore[arg-type]
148
+ for t in self.non_matchable_modules)
149
+ elif node.op == 'call_method':
150
+ return node.target not in self.non_matchable_methods
151
+ else:
152
+ return False
153
+
154
+ class GraphMatchingException(Exception):
155
+ """
156
+ Exception raised when two graphs cannot be matched.
157
+ """
158
+ pass
159
+
160
+ class SubgraphTypeRelationship(enum.Enum):
161
+ # same type, known
162
+ # example: F.linear and F.linear, or nn.Conv2d and nn.Conv2d
163
+ EQUAL = enum.auto()
164
+ # same type, but the type is not known to Numeric Suite
165
+ # (user defined type, etc).
166
+ EQUAL_BUT_UKNOWN = enum.auto()
167
+ # known, same subgraph_relationship set, but not the same type
168
+ # example: F.linear and toq.linear
169
+ RELATED_BUT_NOT_EQUAL = enum.auto()
170
+ # not related
171
+ NOT_RELATED = enum.auto()
172
+
173
+ def _get_subgraph_relationship_type(
174
+ subgraph_a: NSSubgraph,
175
+ subgraph_b: NSSubgraph,
176
+ gm_a: GraphModule,
177
+ gm_b: GraphModule,
178
+ type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]],
179
+ ) -> SubgraphTypeRelationship:
180
+ node_a = subgraph_a.base_op_node
181
+ node_b = subgraph_b.base_op_node
182
+
183
+ # TODO(next): make this code handle matching by what is before the base op
184
+ if node_a.op != node_b.op:
185
+ if not (
186
+ node_a.op in ('call_function', 'call_method') and
187
+ node_b.op in ('call_function', 'call_method')
188
+ ):
189
+ return SubgraphTypeRelationship.NOT_RELATED
190
+
191
+ if node_a.op in ('call_function', 'call_method'):
192
+ key = (node_a.target, node_b.target)
193
+
194
+ if key not in type_a_related_to_b:
195
+ if node_a.target == node_b.target:
196
+ return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN
197
+ else:
198
+ return SubgraphTypeRelationship.NOT_RELATED
199
+ # after this point, we are dealing with known types
200
+
201
+ if node_a.target == node_b.target:
202
+ node_a_has_prev = subgraph_a.base_op_node == subgraph_a.start_node
203
+ node_b_has_prev = subgraph_b.base_op_node == subgraph_b.start_node
204
+ if node_a_has_prev and (not node_b_has_prev):
205
+ return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
206
+ elif (not node_a_has_prev) and node_b_has_prev:
207
+ return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
208
+ elif (not node_a_has_prev) and (not node_b_has_prev):
209
+ return SubgraphTypeRelationship.EQUAL
210
+ else:
211
+ # TODO(future PR): check for matches start_op_node and base_op_node
212
+ return SubgraphTypeRelationship.EQUAL
213
+
214
+ if key in type_a_related_to_b:
215
+ return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
216
+ else:
217
+ return SubgraphTypeRelationship.NOT_RELATED
218
+ elif node_a.op == 'call_module':
219
+ assert (subgraph_a.base_op_node == subgraph_a.start_node and
220
+ subgraph_b.base_op_node == subgraph_b.start_node), \
221
+ "Matching call_module patterns where base_op_node != start_node is not supported yet"
222
+ # for call_module, we need to look up the modules to do the type check
223
+ assert isinstance(node_a.target, str)
224
+ mod_a = getattr_from_fqn(gm_a, node_a.target)
225
+ assert isinstance(node_b.target, str)
226
+ mod_b = getattr_from_fqn(gm_b, node_b.target)
227
+
228
+ key = (type(mod_a), type(mod_b))
229
+
230
+ if key not in type_a_related_to_b:
231
+ if type(mod_a) == type(mod_b):
232
+ return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN
233
+ else:
234
+ return SubgraphTypeRelationship.NOT_RELATED
235
+ elif type(mod_a) == type(mod_b):
236
+ return SubgraphTypeRelationship.EQUAL
237
+ else:
238
+ return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
239
+
240
+ return SubgraphTypeRelationship.NOT_RELATED
241
+
242
+ def _get_name_for_subgraph(
243
+ subgraph_a: NSSubgraph,
244
+ gm_a: GraphModule,
245
+ base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
246
+ existing_names: Set[str],
247
+ ) -> str:
248
+ """
249
+ Returns a unique name for a subgraph. This name is based on two things:
250
+ 1. the name of the set containing the underlying type of the base op in the
251
+ subgraph (i.e. 'torch.nn.functional.linear' if this is related to a linear op)
252
+ 2. the number of previous subgraphs with related underlying type of the base op
253
+
254
+ For example, in the graph
255
+
256
+ linear0 -> relu0 -> linear1 -> relu1
257
+
258
+ The subgraphs are (linear0, relu0) and (linear1, relu1). If we iterate
259
+ from the output node backwards, the name given to (linear1, relu1) will be
260
+ `base_op_torch.nn.functional.linear_0`, and the name given to (linear0, relu0)
261
+ will be `base_op_torch.nn.functional.linear_1`.
262
+
263
+ Why are we not just using the node name? Answer: because of two requirements:
264
+ A. fusions must be supported
265
+ B. some Numeric Suite APIs can be called without having all of the models in memory
266
+
267
+ For example, let's say we need to match nodes of
268
+
269
+ (1) ... -> linear0 -> relu0 -> ...
270
+
271
+ And
272
+
273
+ (2) ... -> linear_relu0 -> ...
274
+
275
+ Without being able to inspect them together. With the current naming scheme, if
276
+ we iterate through both of these graphs in the same order, and assuming the rest
277
+ of the graphs match, both of these subgraphs will get the same name without
278
+ (1) and (2) knowing anything about each other.
279
+ """
280
+ target_type = _get_node_target_type(subgraph_a.base_op_node, gm_a)
281
+ target_base_type = None
282
+ for base_name, sets_of_related_ops in base_name_to_sets_of_related_ops.items():
283
+ if target_type in sets_of_related_ops:
284
+ target_base_type = base_name
285
+ target_base_name = 'base_op_' + str(target_base_type)
286
+ counter = 0
287
+ proposed_name = target_base_name + '_' + str(counter)
288
+ while proposed_name in existing_names:
289
+ counter += 1
290
+ proposed_name = target_base_name + '_' + str(counter)
291
+ existing_names.add(proposed_name)
292
+ return proposed_name
293
+
294
+ def _get_node_target_type(node: Node, gm: GraphModule) -> Optional[NSNodeTargetType]:
295
+ if node.op in ('call_function', 'call_method'):
296
+ return node.target
297
+ elif node.op == 'call_module':
298
+ assert isinstance(node.target, str)
299
+ mod = getattr_from_fqn(gm, node.target)
300
+ return type(mod)
301
+ return None
302
+
303
+ def get_matching_subgraph_pairs(
304
+ gm_a: GraphModule,
305
+ gm_b: GraphModule,
306
+ base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
307
+ unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
308
+ ) -> Dict[str, Tuple[NSSubgraph, NSSubgraph]]:
309
+ """
310
+ Matches matchable subgraphs of graph_a to graph_b.
311
+
312
+ For a node, "matchable" is defined as a node which is not an observer,
313
+ fake_quant, quant, or dequant.
314
+
315
+ A subgraph can contain one or more nodes. A subgraph is matchable if
316
+ at least one node inside of it is matchable. Currently, all nodes in
317
+ a subgraph must be matchable (because we assume no observers will be
318
+ inserted in the middle of a fusion).
319
+
320
+ A subgraph is defined by (start_node, end_node). We assume that only
321
+ start_node and end_node are linked with the surrounding graph, all other
322
+ nodes in a subgraph are self-contained.
323
+
324
+ A pair of nodes is "related" if both nodes represent the same mathematical
325
+ operation across different quantization flavors. For example,
326
+ `F.linear` and `torch.ops.quantized.linear` are related, and
327
+ `F.linear` and `torch.nn.Conv` are not related.
328
+
329
+ For each matchable pair of nodes node_a and node_b, they will match
330
+ if node_a and node_b are related.
331
+
332
+ For graphs A and B, they will match iff:
333
+ 1. the number of matchable subgraphs in A and B is equivalent
334
+ 2. when iterating through the matchable subgraphs of A and B in the same order, each
335
+ corresponding pair of base nodes is related.
336
+
337
+ This enables us to find the corresponding subgraphs between
338
+ graphs of related models. For example, if we had two graphs such as:
339
+
340
+ graph_a: x0 -> conv_0 (type: nn.Conv2d) -> obs_0 -> x1
341
+ w -/
342
+ b -/
343
+
344
+ graph_b: x0 -> quant_0 -> qconv_0 (type: nnq.Conv2d) -> dequant_0 -> x1
345
+ packed_params_0 -/
346
+
347
+ This function will return the following result:
348
+ {
349
+ 'conv_0': ( # the name of the node in graph_b
350
+ (conv_0, conv_0), # (start_node_a, end_node_a)
351
+ (qconv_0, qconv_0), # (start_node_b, end_node_b)
352
+ ),
353
+ }
354
+
355
+ Or, if we have a fusion pattern,
356
+
357
+ graph_a: x0 -> linear_0 -> relu_0 -> obs_0 -> x1
358
+ w -/
359
+ b -/
360
+
361
+ graph_b: x0 -> quant_0 -> linear_relu_0 -> dequant_0 -> x1
362
+ packed_params_0 -/
363
+
364
+ This function will return the following result:
365
+ {
366
+ 'linear_relu_0': ( # the name of the node in graph_b
367
+ (linear_0, relu_0), # (start_node_a, end_node_a)
368
+ (linear_relu_0, linear_relu_0), # (start_node_b, end_node_b)
369
+ ),
370
+ }
371
+ """
372
+ if unmatchable_types_map is None:
373
+ unmatchable_types_map = get_unmatchable_types_map()
374
+ non_matchable_functions = unmatchable_types_map['funs_unmatchable']
375
+ non_matchable_modules = unmatchable_types_map['mods_unmatchable']
376
+ non_matchable_methods = unmatchable_types_map['meths_unmatchable']
377
+
378
+ graph_a_iterator = _NSGraphMatchableSubgraphsIterator(
379
+ gm_a, non_matchable_functions, non_matchable_modules,
380
+ non_matchable_methods)
381
+ graph_b_iterator = _NSGraphMatchableSubgraphsIterator(
382
+ gm_b, non_matchable_functions, non_matchable_modules,
383
+ non_matchable_methods)
384
+ results = collections.OrderedDict()
385
+ if base_name_to_sets_of_related_ops is None:
386
+ base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
387
+ type_a_related_to_b = \
388
+ get_type_a_related_to_b(base_name_to_sets_of_related_ops)
389
+
390
+ existing_names_a: Set[str] = set()
391
+ existing_names_b: Set[str] = set()
392
+
393
+ while True:
394
+ # fetch the next subgraphs from a and b
395
+ cur_subgraph_a, cur_subgraph_b = None, None
396
+ try:
397
+ cur_subgraph_a = next(graph_a_iterator)
398
+ except StopIteration:
399
+ pass
400
+ try:
401
+ cur_subgraph_b = next(graph_b_iterator)
402
+ except StopIteration:
403
+ pass
404
+
405
+ # look up types of a and b for useful error messages
406
+ type_start_a, type_start_b = None, None
407
+ if cur_subgraph_a is not None:
408
+ type_start_a = _get_node_target_type(cur_subgraph_a.start_node, gm_a)
409
+ if cur_subgraph_b is not None:
410
+ type_start_b = _get_node_target_type(cur_subgraph_b.start_node, gm_b)
411
+
412
+ # check for results and determine what to do next
413
+ if cur_subgraph_a is not None and cur_subgraph_b is not None:
414
+ # both nodes were fetched, check for subgraph_relationship
415
+ # note: subgraph_relationship is checked on the start node, i.e.
416
+ # if a linear-relu pattern is checked, we would check for subgraph_relationship
417
+ # of the linear
418
+ subgraph_relationship = _get_subgraph_relationship_type(
419
+ cur_subgraph_a, cur_subgraph_b,
420
+ gm_a, gm_b, type_a_related_to_b)
421
+ if subgraph_relationship == SubgraphTypeRelationship.NOT_RELATED:
422
+ msg = f"""
423
+ The subgraphs
424
+ ({cur_subgraph_a}, {type_start_a}) and
425
+ ({cur_subgraph_b}, {type_start_b})
426
+ are not related. Please ensure that the two models you pass in have the same number
427
+ of subgraphs, and each pair of subgraphs is related to each other."""
428
+ raise GraphMatchingException(msg)
429
+ elif subgraph_relationship == SubgraphTypeRelationship.EQUAL_BUT_UKNOWN:
430
+ # skip matching but unknown types
431
+ continue
432
+ key_name_a = _get_name_for_subgraph(
433
+ cur_subgraph_a, gm_a, base_name_to_sets_of_related_ops,
434
+ existing_names_a)
435
+ key_name_b = _get_name_for_subgraph(
436
+ cur_subgraph_b, gm_b, base_name_to_sets_of_related_ops,
437
+ existing_names_b)
438
+ assert key_name_a == key_name_b, \
439
+ f"Subgraph names {key_name_a} and {key_name_b} do not match"
440
+ results[key_name_a] = (cur_subgraph_a, cur_subgraph_b)
441
+ continue
442
+ elif cur_subgraph_a is None and cur_subgraph_b is None:
443
+ # we reached the end of both graphs
444
+ break
445
+ else:
446
+ # only one node was fetched, no match possible, throw error
447
+ msg = f"""
448
+ Attempting to match
449
+ ({cur_subgraph_a}, {type_start_a}) and
450
+ ({cur_subgraph_b}, {type_start_b}),
451
+ one of which is empty. Please ensure that the two models you pass in have the same number
452
+ of subgraphs."""
453
+ raise GraphMatchingException(msg)
454
+
455
+ # The subgraph pairs are originally created by traversing the two graphs
456
+ # from the outputs to the inputs. Reverse the results to return the
457
+ # subgraphs in their order of execution.
458
+ results = collections.OrderedDict(reversed(list(results.items())))
459
+
460
+ return results
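A minimal usage sketch for get_matching_subgraph_pairs as defined above: pair the matchable subgraphs of a prepared fp32 model against its converted int8 counterpart. The toy model, qconfig choice, and example inputs are illustrative assumptions, not part of this commit.

    import copy
    import torch
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
    from torch.ao.ns.fx.graph_matcher import get_matching_subgraph_pairs

    m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 1, 4, 4),)
    mp = prepare_fx(m, get_default_qconfig_mapping(), example_inputs)
    mp_copy = copy.deepcopy(mp)   # keep an fp32 GraphModule around
    mq = convert_fx(mp)           # int8 GraphModule

    # each entry maps a subgraph name to (NSSubgraph of A, NSSubgraph of B)
    for name, (subgraph_a, subgraph_b) in get_matching_subgraph_pairs(mp_copy, mq).items():
        print(name, subgraph_a.start_node.name, '<->', subgraph_b.start_node.name)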
venv/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py ADDED
@@ -0,0 +1,950 @@
1
+ import torch
2
+ from torch.fx import GraphModule, map_arg
3
+ from torch.fx.graph import Graph, Node
4
+ from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix
5
+
6
+ from .utils import (
7
+ get_node_first_input_and_output_type,
8
+ getattr_from_fqn,
9
+ NodeInputOrOutputType,
10
+ return_first_non_observer_node,
11
+ get_number_of_non_param_args,
12
+ get_target_type_str,
13
+ get_arg_indices_of_inputs_to_log,
14
+ get_node_input_qparams,
15
+ op_type_supports_shadowing,
16
+ get_normalized_nth_input,
17
+ )
18
+
19
+ from .ns_types import (
20
+ NSSingleResultValuesType,
21
+ NSSubgraph,
22
+ NSNodeTargetType,
23
+ )
24
+ from torch.ao.ns.fx.mappings import (
25
+ get_node_type_to_io_type_map,
26
+ )
27
+ from torch.ao.quantization.observer import _is_activation_post_process
28
+
29
+ from typing import Dict, Tuple, Callable, List, Any, Union, Optional, Set
30
+
31
+ def _maybe_get_fqn(node: Node, gm: GraphModule) -> Optional[str]:
32
+ fqn = None
33
+ if hasattr(gm, '_node_name_to_scope'):
34
+ # fqn on observers is not present, because they do not
35
+ # exist when the fqns are created during tracing. If this is
36
+ # an observer, get the fqn of the node being observed.
37
+ node_to_use_for_fqn = node
38
+ if node.op == 'call_module':
39
+ assert isinstance(node.target, str)
40
+ module = getattr_from_fqn(gm, node.target)
41
+ if _is_activation_post_process(module):
42
+ node_to_use_for_fqn = get_normalized_nth_input(node, gm, 0)
43
+ fqn = gm._node_name_to_scope[node_to_use_for_fqn.name][0] # type: ignore[index]
44
+ return fqn # type: ignore[return-value]
45
+
46
+ def _insert_logger_after_node(
47
+ node: Node,
48
+ gm: GraphModule,
49
+ logger_cls: Callable,
50
+ logger_node_name_suffix: str,
51
+ ref_node_name: str,
52
+ model_name: str,
53
+ ref_name: str,
54
+ ref_node_target_type: str,
55
+ results_type: str,
56
+ index_within_arg: int,
57
+ index_of_arg: int,
58
+ fqn: Optional[str],
59
+ ) -> Node:
60
+ """
61
+ Given a starting graph of
62
+
63
+ prev_node -> node -> next_node
64
+
65
+ This function creates a new logger_cls obj and adds it
66
+ after node, resulting in
67
+
68
+ prev_node -> node -> logger_obj -> next_node
69
+ """
70
+ # create new name
71
+ logger_node_name = \
72
+ get_new_attr_name_with_prefix(node.name + logger_node_name_suffix)(gm)
73
+ target_type = get_target_type_str(node, gm)
74
+ # create the logger object
75
+ logger_obj = logger_cls(
76
+ ref_node_name, node.name, model_name, ref_name, target_type,
77
+ ref_node_target_type,
78
+ results_type, index_within_arg, index_of_arg, fqn)
79
+ # attach the logger object to the parent module
80
+ setattr(gm, logger_node_name, logger_obj)
81
+ logger_node = node.graph.create_node(
82
+ 'call_module', logger_node_name, (node,), {})
83
+ return logger_node
84
+
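_insert_logger_after_node above instantiates logger_cls with ten positional fields and splices the result into the graph as a call_module node, so the logger has to behave as an identity in forward. A minimal sketch of a compatible class, assuming only what the call site requires; ToyLogger is a hypothetical name (the Numeric Suite's own OutputLogger plays this role in practice).

    import torch

    class ToyLogger(torch.nn.Module):
        def __init__(self, ref_node_name, prev_node_name, model_name, ref_name,
                     prev_node_target_type, ref_node_target_type, results_type,
                     index_within_arg, index_of_arg, fqn):
            super().__init__()
            # metadata describing which node this logger observes
            self.ref_node_name = ref_node_name
            self.prev_node_name = prev_node_name
            self.model_name = model_name
            self.ref_name = ref_name
            self.results_type = results_type
            self.index_within_arg = index_within_arg
            self.index_of_arg = index_of_arg
            self.fqn = fqn
            self.stats = []

        def forward(self, x):
            # record a cheap summary and pass the value through unchanged
            if isinstance(x, torch.Tensor):
                self.stats.append(x.detach().float().mean().item())
            return x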
85
+ def add_loggers_to_model(
86
+ gm: GraphModule,
87
+ node_to_instrument_inputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
88
+ node_to_instrument_outputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
89
+ logger_cls: Callable,
90
+ model_name: str,
91
+ ) -> GraphModule:
92
+ """
93
+ Takes the graph of gm, adds loggers to the output
94
+ of each node in nodes_to_instrument. Returns a GraphModule with the new
95
+ graph.
96
+ """
97
+
98
+ new_graph = Graph()
99
+ env: Dict[str, Any] = {}
100
+ modules = dict(gm.named_modules())
101
+
102
+ def load_arg(a):
103
+ return map_arg(a, lambda node: env[node.name])
104
+
105
+ for node in gm.graph.nodes:
106
+ if node.op == 'output':
107
+ new_graph.output(map_arg(get_normalized_nth_input(node, gm, 0), load_arg))
108
+ continue
109
+
110
+ if (
111
+ (node in node_to_instrument_inputs_to_ref_node_name) or
112
+ (node in node_to_instrument_outputs_to_ref_node_name)
113
+ ):
114
+ fqn = _maybe_get_fqn(node, gm)
115
+
116
+ if node in node_to_instrument_inputs_to_ref_node_name:
117
+ ref_name, ref_node_type = node_to_instrument_inputs_to_ref_node_name[node]
118
+ # Ops such as add and mul are special because either
119
+ # one or two of the first two arguments can be tensors,
120
+ # and if one argument is a tensor it can be first or
121
+ # second (x + 1 versus 1 + x).
122
+ arg_indices_to_log = get_arg_indices_of_inputs_to_log(node)
123
+ for node_arg_idx in arg_indices_to_log:
124
+ node_arg = get_normalized_nth_input(node, gm, node_arg_idx)
125
+ if type(node_arg) == Node:
126
+ # create a single input logger
127
+ prev_node = env[node_arg.name]
128
+ env[node_arg.name] = _insert_logger_after_node(
129
+ prev_node, gm, logger_cls, '_ns_logger_', node.name,
130
+ model_name, ref_name, ref_node_type,
131
+ NSSingleResultValuesType.NODE_INPUT.value,
132
+ index_within_arg=0, index_of_arg=node_arg_idx,
133
+ fqn=fqn)
134
+ elif type(node_arg) == torch.fx.immutable_collections.immutable_list:
135
+ # create N input loggers, one for each node
136
+ for arg_idx, arg in enumerate(node_arg): # type: ignore[var-annotated, arg-type]
137
+ prev_node = env[arg.name]
138
+ env[prev_node.name] = _insert_logger_after_node(
139
+ prev_node, gm, logger_cls, '_ns_logger_', node.name,
140
+ model_name, ref_name, ref_node_type,
141
+ NSSingleResultValuesType.NODE_INPUT.value,
142
+ index_within_arg=arg_idx, index_of_arg=node_arg_idx,
143
+ fqn=fqn)
144
+ else:
145
+ pass
146
+
147
+ # ensure env is populated with base node
148
+ # Note: runs for both inputs and outputs
149
+ env[node.name] = new_graph.node_copy(node, load_arg)
150
+
151
+ if node in node_to_instrument_outputs_to_ref_node_name:
152
+ ref_name, ref_node_type = node_to_instrument_outputs_to_ref_node_name[node]
153
+ # add the logger after the base node
154
+ env[node.name] = _insert_logger_after_node(
155
+ env[node.name], gm, logger_cls, '_ns_logger_', node.name,
156
+ model_name, ref_name, ref_node_type,
157
+ NSSingleResultValuesType.NODE_OUTPUT.value,
158
+ index_within_arg=0, index_of_arg=0, fqn=fqn)
159
+
160
+ else:
161
+ env[node.name] = new_graph.node_copy(node, load_arg)
162
+
163
+ new_gm = GraphModule(gm, new_graph)
164
+ return new_gm
165
+
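add_loggers_to_model above is normally not called directly; it is driven by the wrappers in torch.ao.ns._numeric_suite_fx (also part of this commit). A sketch of that flow, reusing mp_copy, mq, and example_inputs from the earlier sketch and assuming the wrapper signatures shipped alongside this file.

    import torch.ao.ns._numeric_suite_fx as ns

    # instrument both models with output loggers, then push data through them
    m_a, m_b = ns.add_loggers('fp32', mp_copy, 'int8', mq, ns.OutputLogger)
    m_a(*example_inputs)
    m_b(*example_inputs)

    # collect the recorded activations, keyed by the int8 model's layer names
    results = ns.extract_logger_info(m_a, m_b, ns.OutputLogger, 'int8')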
166
+ def _insert_quantize_per_tensor_node(
167
+ prev_node_c: Node,
168
+ node_a: Node,
169
+ gm_b: GraphModule,
170
+ graph_c: Graph,
171
+ scale: Union[torch.Tensor, float],
172
+ zero_point: Union[torch.Tensor, int],
173
+ dtype_cast_name: str,
174
+ ) -> Node:
175
+ # copy scale
176
+ scale_node_name = \
177
+ get_new_attr_name_with_prefix(
178
+ node_a.name + '_input_scale_')(gm_b)
179
+ setattr(gm_b, scale_node_name, scale)
180
+ scale_node = graph_c.create_node(
181
+ 'get_attr', scale_node_name, (), {}, scale_node_name)
182
+ # copy zero_point
183
+ zero_point_node_name = \
184
+ get_new_attr_name_with_prefix(
185
+ node_a.name + '_input_zero_point_')(gm_b)
186
+ setattr(gm_b, zero_point_node_name, zero_point)
187
+ zero_point_node = graph_c.create_node(
188
+ 'get_attr', zero_point_node_name, (), {}, zero_point_node_name)
189
+ # create the quantize_per_tensor call
190
+ return graph_c.create_node(
191
+ 'call_function', torch.quantize_per_tensor,
192
+ (prev_node_c, scale_node, zero_point_node, torch.quint8), {},
193
+ dtype_cast_name)
194
+
195
+ def _insert_dtype_cast_after_node(
196
+ node_a: Node,
197
+ node_c: Node,
198
+ prev_node_c: Union[Node, List[Node]],
199
+ gm_a: GraphModule,
200
+ gm_b: GraphModule,
201
+ graph_c: Graph,
202
+ node_name_prefix: str,
203
+ logger_cls: Callable,
204
+ node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
205
+ ) -> Union[Node, List[Node]]:
206
+ """
207
+ Given a starting graph C (derived from graph B) of
208
+
209
+ ... -> prev_node_c -> node_c -> ...
210
+
211
+ And a corresponding related node_a, inserts the correct dtype
212
+ cast node after prev_node_c to cast into the dtype expected
213
+ by node_a, resulting in:
214
+
215
+ dtype_cast
216
+ /
217
+ ... -> prev_node_c -> node_c -> ...
218
+
219
+ For example, if node_c is an int8 op and node_a is an fp32 op, this function
220
+ will insert a dequant.
221
+ """
222
+ dtype_cast_op = None
223
+ dtype_cast_mod_cls = None
224
+ dtype_cast_method = None
225
+ dtype_cast_method_dtype = None
226
+ dtype_cast_scale = None
227
+ dtype_cast_zero_point = None
228
+ node_input_type_a, _node_output_type_a = \
229
+ get_node_first_input_and_output_type(
230
+ node_a, gm_a, logger_cls, node_type_to_io_type_map)
231
+ node_input_type_c, _node_output_type_c = \
232
+ get_node_first_input_and_output_type(
233
+ node_c, gm_b, logger_cls, node_type_to_io_type_map)
234
+
235
+ if (
236
+ (node_input_type_a == NodeInputOrOutputType.FP32 and
237
+ node_input_type_c == NodeInputOrOutputType.INT8) or
238
+ (node_input_type_a == NodeInputOrOutputType.FP32 and
239
+ node_input_type_c == NodeInputOrOutputType.FP16) or
240
+ # TODO(future PR): determine the actual dtype of node_c,
241
+ # the current code only works because dequantize works with
242
+ # multiple input dtypes.
243
+ (node_input_type_a == NodeInputOrOutputType.FP32 and
244
+ node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8)
245
+ ):
246
+ dtype_cast_op = torch.dequantize
247
+ elif (
248
+ node_input_type_a == node_input_type_c and
249
+ node_input_type_a != NodeInputOrOutputType.UNKNOWN
250
+ ):
251
+ dtype_cast_mod_cls = torch.nn.Identity
252
+ elif (
253
+ node_input_type_a == NodeInputOrOutputType.INT8 and
254
+ node_input_type_c == NodeInputOrOutputType.FP32
255
+ ):
256
+ # int8 shadows fp32, the dtype cast needs to quantize to int8
257
+ # with the right qparams.
258
+ node_a_input_qparams = get_node_input_qparams(
259
+ node_a, gm_a, node_type_to_io_type_map)
260
+ if node_a_input_qparams is not None:
261
+ dtype_cast_op = torch.quantize_per_tensor # type: ignore[assignment]
262
+ dtype_cast_scale, dtype_cast_zero_point = node_a_input_qparams
263
+ elif (
264
+ node_input_type_a == NodeInputOrOutputType.FP16 and
265
+ node_input_type_c == NodeInputOrOutputType.FP32
266
+ ):
267
+ dtype_cast_method = 'to'
268
+ dtype_cast_method_dtype = torch.float16
269
+ else:
270
+ raise AssertionError(
271
+ f"dtype cast from {node_input_type_c} {node_c.format_node()} to " +
272
+ f"{node_input_type_a} {node_a.format_node()} needs to be implemented")
273
+
274
+ if isinstance(prev_node_c, Node):
275
+ new_dtype_cast_name = \
276
+ get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
277
+ if dtype_cast_op:
278
+ if dtype_cast_scale is not None and dtype_cast_zero_point is not None:
279
+ return _insert_quantize_per_tensor_node(
280
+ prev_node_c, node_a, gm_b, graph_c, dtype_cast_scale,
281
+ dtype_cast_zero_point, new_dtype_cast_name)
282
+ else:
283
+ return graph_c.create_node(
284
+ 'call_function', dtype_cast_op, (prev_node_c,), {},
285
+ new_dtype_cast_name)
286
+ elif dtype_cast_method:
287
+ return graph_c.create_node(
288
+ 'call_method', dtype_cast_method,
289
+ (prev_node_c, dtype_cast_method_dtype), {}, new_dtype_cast_name)
290
+ else:
291
+ assert dtype_cast_mod_cls
292
+ dtype_cast_mod = dtype_cast_mod_cls()
293
+ setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
294
+ return graph_c.create_node(
295
+ 'call_module', new_dtype_cast_name, (prev_node_c,), {},
296
+ new_dtype_cast_name)
297
+ elif isinstance(prev_node_c, list):
298
+ results = []
299
+ for prev_node_c_inner in prev_node_c:
300
+ new_dtype_cast_name = \
301
+ get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
302
+ if dtype_cast_op:
303
+ # TODO(future PR): add handling for quantize_per_tensor
304
+ new_dtype_cast_node = graph_c.create_node(
305
+ 'call_function', dtype_cast_op, (prev_node_c_inner,), {},
306
+ new_dtype_cast_name)
307
+ results.append(new_dtype_cast_node)
308
+ else:
309
+ assert dtype_cast_mod_cls
310
+ dtype_cast_mod = dtype_cast_mod_cls()
311
+ setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
312
+ new_dtype_cast_node = graph_c.create_node(
313
+ 'call_module', new_dtype_cast_name, (prev_node_c_inner,), {},
314
+ new_dtype_cast_name)
315
+ results.append(new_dtype_cast_node)
316
+ return results
317
+ else:
318
+ raise AssertionError(f"type {type(prev_node_c)} is not handled")
319
+
320
+ # TODO(future PR): look into using copy_node API instead
321
+ def _copy_node_from_a_to_c(
322
+ node_a: Node,
323
+ gm_a: GraphModule,
324
+ gm_b: GraphModule,
325
+ graph_c: Graph,
326
+ ) -> Node:
327
+ """
328
+ Simple copy of node_a to graph_c.
329
+ """
330
+ if node_a.op == 'get_attr':
331
+ node_a_copy_name = \
332
+ get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
333
+ node_a_obj = getattr_from_fqn(gm_a, node_a.target) # type: ignore[arg-type]
334
+ if torch.is_tensor(node_a_obj):
335
+ node_a_obj = node_a_obj.detach()
336
+ setattr(gm_b, node_a_copy_name, node_a_obj)
337
+ node_a_copy = graph_c.create_node(
338
+ node_a.op, node_a_copy_name, (), {}, node_a_copy_name)
339
+ return node_a_copy
340
+ elif node_a.op == 'call_method':
341
+ assert node_a.target in ('dequantize', 'to'), \
342
+ f"target {node_a.target} is not implemented"
343
+ if node_a.target == 'dequantize':
344
+ arg_copy = _copy_node_from_a_to_c(
345
+ get_normalized_nth_input(node_a, gm_a, 0),
346
+ gm_a, gm_b, graph_c) # type: ignore[arg-type]
347
+ node_a_copy_name = \
348
+ get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
349
+ node_a_copy = graph_c.create_node(
350
+ node_a.op, node_a.target, (arg_copy,), {}, node_a_copy_name)
351
+ return node_a_copy
352
+ else: # to
353
+ arg_copy = _copy_node_from_a_to_c(
354
+ get_normalized_nth_input(node_a, gm_a, 0), gm_a, gm_b, graph_c) # type: ignore[arg-type]
355
+ node_a_copy_name = \
356
+ get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
357
+ node_a_copy = graph_c.create_node(
358
+ node_a.op, node_a.target,
359
+ (arg_copy, get_normalized_nth_input(node_a, gm_a, 1)),
360
+ {}, node_a_copy_name)
361
+ return node_a_copy
362
+
363
+ else:
364
+ raise AssertionError(
365
+ f"handling of node {node_a.format_node()} with op {node_a.op} is not implemented")
366
+
367
+ def _can_insert_copy_of_subgraph_a(
368
+ subgraph_a: NSSubgraph,
369
+ gm_a: GraphModule,
370
+ num_non_param_args_node_a: int,
371
+ ) -> bool:
372
+ """
373
+ This function returns `False` if the input subgraph cannot be copied by
374
+ `_insert_copy_of_subgraph_a_after_input_node_c`. This usually means
375
+ that there is a corner case logic for which copy is not yet implemented.
376
+ """
377
+ # populate the list of nodes we need to check
378
+ nodes = []
379
+ cur_node = subgraph_a.end_node
380
+ while cur_node != subgraph_a.start_node:
381
+ nodes.append(cur_node)
382
+ cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment]
383
+ nodes.append(cur_node)
384
+ nodes.reverse()
385
+
386
+ def _can_insert(node_a_arg, gm_a):
387
+ if isinstance(node_a_arg, Node):
388
+ arg_a = return_first_non_observer_node(node_a_arg, gm_a)
389
+ if arg_a.op == 'call_method':
390
+ return arg_a.target in ('dequantize', 'to')
391
+ elif arg_a.op == 'get_attr':
392
+ return True
393
+ else:
394
+ return False
395
+ elif isinstance(node_a_arg, (list, tuple)):
396
+ for el in node_a_arg:
397
+ if not isinstance(el, Node):
398
+ return False
399
+ return True
400
+
401
+ # For each node, check if we handle the copy behavior. This follows the
402
+ # logic in `_insert_copy_of_subgraph_a_after_input_node_c`.
403
+ for node_a in nodes:
404
+
405
+ local_num_non_param_args_node_a = num_non_param_args_node_a \
406
+ if node_a is nodes[0] else 1
407
+
408
+ norm_args_kwargs = node_a.normalized_arguments(
409
+ gm_a, normalize_to_only_use_kwargs=True)
410
+ if norm_args_kwargs is not None:
411
+ norm_args, norm_kwargs = norm_args_kwargs
412
+ else:
413
+ norm_args, norm_kwargs = node_a.args, node_a.kwargs
414
+
415
+ cur_idx = 0
416
+
417
+ while cur_idx < len(norm_args):
418
+ if cur_idx == 0:
419
+ pass
420
+ elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
421
+ pass
422
+ else:
423
+ if not _can_insert(norm_args[cur_idx], gm_a):
424
+ return False
425
+ cur_idx += 1
426
+
427
+ for kwarg_val in norm_kwargs.values():
428
+ # stitch the inputs from base graph
429
+ if cur_idx == 0:
430
+ pass
431
+ elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
432
+ pass
433
+ else:
434
+ if not _can_insert(kwarg_val, gm_a):
435
+ return False
436
+ cur_idx += 1
437
+
438
+ return True
439
+
440
+ def _insert_copy_of_subgraph_a_after_input_node_c(
441
+ input_node_c: Union[Node, List[Node]],
442
+ input_node_c_2: Optional[Union[Node, List[Node]]],
443
+ subgraph_a: NSSubgraph,
444
+ gm_a: GraphModule,
445
+ gm_b: GraphModule,
446
+ node_name_prefix: str,
447
+ ) -> Node:
448
+ """
449
+ TODO(before land): real docblock
450
+ """
451
+ if isinstance(input_node_c, Node):
452
+ graph_c = input_node_c.graph
453
+ else:
454
+ assert isinstance(input_node_c, list)
455
+ graph_c = input_node_c[0].graph
456
+
457
+ # create a sequential list of the subgraphs' nodes from start to end,
458
+ # because we need to add the nodes to graph C in non-reverse order
459
+ nodes_of_a = [subgraph_a.end_node]
460
+ cur_node = subgraph_a.end_node
461
+ while cur_node != subgraph_a.start_node:
462
+ cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment]
463
+ nodes_of_a.insert(0, cur_node)
464
+
465
+ # go through nodes of a in order, and insert them into the graph of c
466
+ # sequentially
467
+ cur_node_a = nodes_of_a[0]
468
+ cur_node_c = _insert_copy_of_node_a_after_input_node_c(
469
+ input_node_c,
470
+ input_node_c_2,
471
+ cur_node_a,
472
+ gm_a,
473
+ gm_b,
474
+ node_name_prefix)
475
+ for cur_idx_a in range(1, len(nodes_of_a)):
476
+ cur_node_a = nodes_of_a[cur_idx_a]
477
+ prev_node_c = cur_node_c # previous added node is the input to next node
478
+ cur_node_c = _insert_copy_of_node_a_after_input_node_c(
479
+ prev_node_c,
480
+ # TODO(future PR): enable multiple inputs for nodes which are not at start of subgraph
481
+ None,
482
+ cur_node_a,
483
+ gm_a,
484
+ gm_b,
485
+ node_name_prefix)
486
+ # return the last inserted node
487
+ return cur_node_c
488
+
489
+
490
+ def _insert_copy_of_node_a_after_input_node_c(
491
+ input_node_c: Union[Node, List[Node]],
492
+ input_node_c_2: Optional[Union[Node, List[Node]]],
493
+ node_a: Node,
494
+ gm_a: GraphModule,
495
+ gm_b: GraphModule,
496
+ node_name_prefix: str,
497
+ ) -> Node:
498
+ """
499
+ Assume that node_a from graph_a has
500
+ args (input, (input2)?, arg1, ...), and
501
+ kwargs {kw0: kwarg0, ...}
502
+
503
+ Note: input2 is optional. If it is None, we assume that the op
504
+ has a single non-param input. If it is specified, we assume that the op
505
+ has two non-param inputs.
506
+
507
+ Copies the underlying values of arg1..argn and kwarg0..kwargn into gm_b,
508
+ and creates the corresponding nodes in graph_c. Note: observers are ignored,
509
+ so if an arg is an observer we navigate up until we find a non-observer parent.
510
+
511
+ If node_a is a call_module, points the module pointed to by node_a to gm_b.
512
+
513
+ Creates the copy of node_a in graph_c, with input as the first arg,
514
+ and all other args and kwargs pointing to the copies of the objects
515
+ in gm_b created above.
516
+
517
+ An example in pictures:
518
+
519
+ graph A:
520
+ ========
521
+
522
+ input -------------> node_a
523
+ / / /
524
+ (input_2)?----------/ / /
525
+ / /
526
+ weight -> weight_obs /
527
+ /
528
+ bias ----------------
529
+
530
+ graph C (derived from B):
531
+ =========================
532
+
533
+ input_node_c --> node_a_copy
534
+ / / /
535
+ (input_node_c_2)? / /
536
+ / /
537
+ weight_copy ----/ /
538
+ /
539
+ bias_copy ------/
540
+ """
541
+ if isinstance(input_node_c, Node):
542
+ graph_c = input_node_c.graph
543
+ else:
544
+ assert isinstance(input_node_c, list)
545
+ graph_c = input_node_c[0].graph
546
+
547
+ norm_args_kwargs = node_a.normalized_arguments(
548
+ gm_a, normalize_to_only_use_kwargs=True)
549
+ if norm_args_kwargs is not None:
550
+ norm_args, norm_kwargs = norm_args_kwargs
551
+ else:
552
+ norm_args, norm_kwargs = node_a.args, node_a.kwargs
553
+
554
+ new_args = []
555
+ new_kwargs = {}
556
+
557
+ def _copy_arg(arg):
558
+ # copy the other inputs from the other graph
559
+ if isinstance(arg, Node):
560
+ arg = return_first_non_observer_node(arg, gm_a)
561
+ arg = _copy_node_from_a_to_c(arg, gm_a, gm_b, graph_c)
562
+ return arg
563
+ elif isinstance(arg, (int, float, torch.dtype)):
564
+ return arg
565
+ elif isinstance(arg, (list, tuple)):
566
+ for el in arg:
567
+ assert not isinstance(el, Node), \
568
+ "handling of Node inside list is not implemented"
569
+ return arg
570
+ else:
571
+ raise AssertionError(
572
+ f"handling for arg of type {type(arg)} is not implemented")
573
+
574
+ cur_idx = 0
575
+
576
+ while cur_idx < len(norm_args):
577
+ if cur_idx == 0:
578
+ new_arg = input_node_c
579
+ elif cur_idx == 1 and input_node_c_2 is not None:
580
+ new_arg = input_node_c_2
581
+ else:
582
+ new_arg = _copy_arg(norm_args[cur_idx])
583
+ new_args.append(new_arg)
584
+ cur_idx += 1
585
+
586
+ for kwarg_name, kwarg_val in norm_kwargs.items():
587
+ # stitch the inputs from base graph
588
+ if cur_idx == 0:
589
+ new_kwargs[kwarg_name] = input_node_c
590
+ elif cur_idx == 1 and input_node_c_2 is not None:
591
+ new_kwargs[kwarg_name] = input_node_c_2
592
+ else:
593
+ new_kwargs[kwarg_name] = _copy_arg(kwarg_val)
594
+ cur_idx += 1
595
+
596
+ new_args = tuple(new_args) # type: ignore[assignment]
597
+
598
+ node_a_shadows_c_name = \
599
+ get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
600
+
601
+ if node_a.op == 'call_module':
602
+ # if target is a module, we point to the module from gm_b
603
+ new_mod_copy_name = \
604
+ get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
605
+ # fetch the corresponding module from gm_a
606
+ assert isinstance(node_a.target, str)
607
+ mod_a = getattr_from_fqn(gm_a, node_a.target)
608
+ setattr(gm_b, new_mod_copy_name, mod_a)
609
+ node_a_shadows_c = graph_c.create_node(
610
+ node_a.op, new_mod_copy_name, new_args,
611
+ new_kwargs, node_a_shadows_c_name)
612
+ return node_a_shadows_c
613
+ else:
614
+ assert node_a.op in ('call_function', 'call_method')
615
+ node_a_shadows_c = graph_c.create_node(
616
+ node_a.op, node_a.target, new_args,
617
+ new_kwargs, node_a_shadows_c_name)
618
+ return node_a_shadows_c
619
+
620
+ def create_a_shadows_b(
621
+ name_a: str,
622
+ gm_a: GraphModule,
623
+ name_b: str,
624
+ gm_b: GraphModule,
625
+ matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
626
+ logger_cls: Callable,
627
+ should_log_inputs: bool,
628
+ node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
629
+ ) -> GraphModule:
630
+ """
631
+ Creates a new GraphModule consisting of the graph of C, with the meaningful
632
+ nodes of A shadowing the corresponding nodes of B. For example,
633
+
634
+ Graph A:
635
+ a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2
636
+
637
+ Graph B:
638
+ b0 -> op0_int8 -> b1 -> op1_int8 -> b2
639
+
640
+ matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)}
641
+
642
+ Graph C (A shadows B):
643
+
644
+ / dequant0 -> op0_fp32 -> logger_a_0 / dequant_1 -> op1_fp32 -> logger_a_1
645
+ / /
646
+ b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1
647
+
648
+ In a nutshell, this function does the following for each node pair:
649
+ * copies the necessary attributes and modules from gm_a to gm_b,
650
+ keeping names unique
651
+ * adds a dtype cast op (dequant, quant, etc)
652
+ * adds a copy of node_a in gm_b's graph
653
+ * adds loggers to the outputs of node_a and node_b
654
+ """
655
+
656
+ if node_type_to_io_type_map is None:
657
+ node_type_to_io_type_map = get_node_type_to_io_type_map()
658
+
659
+ # graph_c is the graph created from copying the nodes of graph_b and inserting
660
+ # the shadows with the nodes copied from graph_a
661
+ graph_c = Graph()
662
+ env_c: Dict[str, Any] = {}
663
+ modules = dict(gm_b.named_modules())
664
+
665
+ def load_arg(a):
666
+ return map_arg(a, lambda node: env_c[node.name])
667
+
668
+ start_node_b_to_matched_subgraph_a_and_name = {}
669
+ end_node_b_to_matched_subgraph_a_and_name = {}
670
+ for match_name, match in matched_subgraph_pairs.items():
671
+ subgraph_a, subgraph_b = match
672
+ ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
673
+ ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
674
+ start_node_b_to_matched_subgraph_a_and_name[subgraph_b.start_node] = \
675
+ (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
676
+ end_node_b_to_matched_subgraph_a_and_name[subgraph_b.end_node] = \
677
+ (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
678
+
679
+ for node_b in gm_b.graph.nodes:
680
+ if node_b.op == 'output':
681
+ graph_c.output(map_arg(node_b.args[0], load_arg))
682
+ continue
683
+
684
+ # calculate the flags to determine what to do with this node
685
+ node_b_is_start_node = node_b in start_node_b_to_matched_subgraph_a_and_name
686
+ node_b_is_end_node = node_b in end_node_b_to_matched_subgraph_a_and_name
687
+
688
+ if (node_b_is_start_node or node_b_is_end_node):
689
+
690
+ if node_b_is_start_node:
691
+ subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
692
+ start_node_b_to_matched_subgraph_a_and_name[node_b]
693
+ else:
694
+ assert node_b_is_end_node
695
+ subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
696
+ end_node_b_to_matched_subgraph_a_and_name[node_b]
697
+
698
+ all_op_types_support_shadowing = (
699
+ op_type_supports_shadowing(subgraph_a.start_node) and
700
+ op_type_supports_shadowing(node_b)
701
+ )
702
+ if not all_op_types_support_shadowing:
703
+ print(
704
+ f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
705
+ f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
706
+ ', unsupported')
707
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
708
+ continue
709
+
710
+ # For both start_node and end_node verify that we know how to do
711
+ # the dtype cast. If we do not, skip.
712
+ node_input_type_a, node_output_type_a = \
713
+ get_node_first_input_and_output_type(
714
+ subgraph_a.start_node, gm_a, logger_cls,
715
+ node_type_to_io_type_map)
716
+ node_input_type_b, node_output_type_b = \
717
+ get_node_first_input_and_output_type(
718
+ node_b, gm_b, logger_cls,
719
+ node_type_to_io_type_map)
720
+ node_io_types_known_a_and_b = (
721
+ node_input_type_a != NodeInputOrOutputType.UNKNOWN and
722
+ node_output_type_a != NodeInputOrOutputType.UNKNOWN and
723
+ node_input_type_b != NodeInputOrOutputType.UNKNOWN and
724
+ node_output_type_b != NodeInputOrOutputType.UNKNOWN
725
+ )
726
+ if not node_io_types_known_a_and_b:
727
+ print(
728
+ f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
729
+ f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
730
+ ', unknown dtype cast')
731
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
732
+ continue
733
+
734
+ # If we are shadowing from fp32 to int8, we need to insert
735
+ # quantize_per_tensor call with qparams from the previous node.
736
+ # Only do this if we are able to infer these qparams from the graph.
737
+ if (
738
+ node_input_type_a == NodeInputOrOutputType.INT8 and
739
+ node_input_type_b == NodeInputOrOutputType.FP32
740
+ ):
741
+ node_a_input_qparams = get_node_input_qparams(
742
+ subgraph_a.start_node, gm_a, node_type_to_io_type_map)
743
+ if not node_a_input_qparams:
744
+ print(
745
+ f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
746
+ f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
747
+ ', unknown input qparams')
748
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
749
+ continue
750
+
751
+ num_non_param_args_node_a = \
752
+ get_number_of_non_param_args(subgraph_a.start_node, gm_a)
753
+ if not _can_insert_copy_of_subgraph_a(subgraph_a, gm_a, num_non_param_args_node_a):
754
+ print(
755
+ f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
756
+ f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
757
+ ', unhandled logic in subgraph copy')
758
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
759
+ continue
760
+
761
+ fqn_base_a = _maybe_get_fqn(subgraph_a.base_op_node, gm_a)
762
+ fqn_base_b = _maybe_get_fqn(subgraph_b.base_op_node, gm_b) # type: ignore[possibly-undefined]
763
+
764
+ if node_b_is_start_node:
765
+
766
+ # if necessary, log the input of node_c
767
+ if should_log_inputs:
768
+ prev_node_b = get_normalized_nth_input(node_b, gm_b, 0)
769
+ if isinstance(prev_node_b, Node):
770
+ prev_node_c = env_c[prev_node_b.name]
771
+ env_c[prev_node_c.name] = _insert_logger_after_node(
772
+ prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
773
+ node_b.name, name_b, ref_name, ref_node_type_b,
774
+ NSSingleResultValuesType.NODE_INPUT.value,
775
+ index_within_arg=0, index_of_arg=0,
776
+ fqn=fqn_base_b)
777
+ elif isinstance(prev_node_b, list):
778
+ # first, save the prev_node instances, because they
779
+ # will be overwritten in the env after the first logger
780
+ # is added
781
+ prev_node_c_list = [env_c[arg.name] for arg in prev_node_b]
782
+
783
+ for arg_idx, arg in enumerate(prev_node_b):
784
+ prev_node_c = prev_node_c_list[arg_idx]
785
+ env_c[prev_node_c.name] = _insert_logger_after_node(
786
+ prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
787
+ node_b.name, name_b, ref_name, ref_node_type_b,
788
+ NSSingleResultValuesType.NODE_INPUT.value,
789
+ index_within_arg=arg_idx, index_of_arg=0,
790
+ fqn=fqn_base_b)
791
+ else:
792
+ # logging of inputs which are not lists is not supported yet
793
+ raise AssertionError(f"type {type(prev_node_b)} is not handled yet")
794
+ # subgraph so far:
795
+ #
796
+ # (prev_node_c)+ -> (logger_c_input)?
797
+
798
+ # Note: this if statement is always True, spelling it out to clarify code
799
+ # intent.
800
+ if node_b_is_start_node or node_b_is_end_node:
801
+ # ensure env_c is populated with base node
802
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
803
+ node_c = env_c[node_b.name]
804
+
805
+ # after this point,
806
+ #
807
+ # node_a is the original node from graph_a, with parent module gm_a
808
+ # node_b is the original node from graph_b, with parent module gm_b
809
+ # node_c is the copy of node_b in graph_c
810
+ #
811
+ # subgraph so far:
812
+ #
813
+ # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
814
+
815
+ if node_b_is_start_node:
816
+
817
+ # cast dtype from the dtype of node_c's input to the dtype of
818
+ # node_a's input (dequant, etc)
819
+ # prev_node_c = node_c.args[0]
820
+ prev_node_c = get_normalized_nth_input(node_c, gm_b, 0) # type: ignore[possibly-undefined]
821
+ if should_log_inputs:
822
+ # skip the input logger when inserting a dtype cast
823
+ if isinstance(prev_node_c, Node):
824
+ prev_node_c = get_normalized_nth_input(node_c, gm_b, 0)
825
+ elif isinstance(prev_node_c, list):
826
+ prev_node_c = [get_normalized_nth_input(arg, gm_b, 0) for arg in prev_node_c]
827
+ dtype_cast_node = _insert_dtype_cast_after_node(
828
+ subgraph_a.start_node, node_c, prev_node_c, gm_a, gm_b, graph_c,
829
+ node_b.name + '_dtype_cast_', logger_cls,
830
+ node_type_to_io_type_map)
831
+ # note: not inserting to env_c because all nodes which use the dtype
832
+ # casts are copied from graph_a
833
+ #
834
+ # subgraph so far:
835
+ #
836
+ # (dtype_cast_node)+
837
+ # /
838
+ # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
839
+
840
+ # if input logging is enabled, log the input to the subgraph
841
+ if should_log_inputs:
842
+ # TODO: explain this
843
+ ref_node_name = ''
844
+ if isinstance(dtype_cast_node, Node):
845
+ dtype_cast_node = _insert_logger_after_node(
846
+ dtype_cast_node, gm_b, logger_cls, '_ns_logger_a_inp_',
847
+ ref_node_name, name_a, ref_name, ref_node_type_a,
848
+ NSSingleResultValuesType.NODE_INPUT.value,
849
+ index_within_arg=0, index_of_arg=0,
850
+ fqn=fqn_base_a)
851
+ input_logger: Union[Node, List[Node]] = dtype_cast_node
852
+ else:
853
+ assert isinstance(dtype_cast_node, list)
854
+ new_loggers = []
855
+ for dtype_cast_idx, dtype_cast_node_inner in enumerate(dtype_cast_node):
856
+ dtype_cast_logger = _insert_logger_after_node(
857
+ dtype_cast_node_inner, gm_b, logger_cls, '_ns_logger_a_inp_',
858
+ ref_node_name, name_a, ref_name, ref_node_type_a,
859
+ NSSingleResultValuesType.NODE_INPUT.value,
860
+ index_within_arg=dtype_cast_idx,
861
+ index_of_arg=0,
862
+ fqn=fqn_base_a)
863
+ new_loggers.append(dtype_cast_logger)
864
+ dtype_cast_node = new_loggers
865
+ input_logger = dtype_cast_node
866
+ # subgraph so far:
867
+ #
868
+ # (dtype_cast_node)+ -> (logger_a_input)?
869
+ # /
870
+ # prev_node_c -> (logger_c_input)? -> node_start_c
871
+
872
+ # hook up the new mod_a copy to be in the graph, receiving the
873
+ # same inputs as mod_b does, with dtype cast to match a
874
+ # Some ops, such as LSTMs, have two non-param inputs. If we have
875
+ # such an op, pass the second param as well. Note: dtype casting
876
+ # for the second param is not implemented yet, it can be added
877
+ # later if there is a use case.
878
+ node_c_second_non_param_arg = None
879
+ num_non_param_args_node_a = get_number_of_non_param_args(subgraph_a.start_node, gm_a)
880
+ if num_non_param_args_node_a == 2:
881
+ # node_c_second_non_param_arg = node_c.args[1]
882
+ node_c_second_non_param_arg = get_normalized_nth_input(node_c, gm_b, 1)
883
+ node_a_shadows_c = _insert_copy_of_subgraph_a_after_input_node_c(
884
+ dtype_cast_node, node_c_second_non_param_arg,
885
+ subgraph_a, gm_a, gm_b, node_c.name + '_shadow_copy_')
886
+ env_c[node_a_shadows_c.name] = node_a_shadows_c
887
+ # subgraph so far:
888
+ #
889
+ # dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy(args/kwargs not shown)
890
+ # /
891
+ # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
892
+
893
+ if should_log_inputs:
894
+ # When we created the input logger, we left the ref_node_name
895
+ # as an empty string, because the subgraph copy did not exist
896
+ # yet. Now that the subgraph copy exists, we modify this name
897
+ # to its true value.
898
+ # Note: the alternative to this is to create the input logger
899
+ # after creating the subgraph, which is slightly more
900
+ # complicated. This is the lesser of two evils.
901
+ # input_logger = env_c[dtype_cast_node.name]
902
+ # Find the first node in the subgraph
903
+ cur_node = node_a_shadows_c
904
+ while get_normalized_nth_input(cur_node, gm_b, 0) != input_logger: # type: ignore[possibly-undefined]
905
+ cur_node = get_normalized_nth_input(cur_node, gm_b, 0) # type: ignore[assignment]
906
+ if isinstance(input_logger, Node):
907
+ input_logger_mod = getattr(gm_b, input_logger.name)
908
+ input_logger_mod.ref_node_name = cur_node.name
909
+ else:
910
+ assert isinstance(input_logger, list)
911
+ for input_logger_inner in input_logger:
912
+ input_logger_mod = getattr(gm_b, input_logger_inner.name)
913
+ input_logger_mod.ref_node_name = cur_node.name
914
+
915
+ # hook up a logger to the mod_a copy
916
+ env_c[node_a_shadows_c.name] = _insert_logger_after_node(
917
+ env_c[node_a_shadows_c.name], gm_b, logger_cls, '_ns_logger_a_',
918
+ node_a_shadows_c.name, name_a, ref_name, ref_node_type_a,
919
+ NSSingleResultValuesType.NODE_OUTPUT.value,
920
+ index_within_arg=0, index_of_arg=0,
921
+ fqn=fqn_base_a)
922
+ # subgraph so far:
923
+ #
924
+ # dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
925
+ # /
926
+ # (prev_node_c)+ -> (logger_c_input)? -> node_start_c
927
+
928
+ if node_b_is_end_node:
929
+
930
+ # hook up a logger to the mod_b copy
931
+ env_c[node_b.name] = _insert_logger_after_node(
932
+ env_c[node_b.name], gm_b, logger_cls, '_ns_logger_b_',
933
+ node_b.name, name_b, ref_name, ref_node_type_b,
934
+ NSSingleResultValuesType.NODE_OUTPUT.value,
935
+ index_within_arg=0, index_of_arg=0,
936
+ fqn=fqn_base_b)
937
+ # subgraph so far:
938
+ #
939
+ # dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
940
+ # /
941
+ # (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c
942
+ #
943
+ # Note: node_start_c may be the same node as node_end_c, or they
944
+ # may have nodes in between.
945
+
946
+ else:
947
+ env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
948
+
949
+ gm_c = GraphModule(gm_b, graph_c)
950
+ return gm_c
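create_a_shadows_b is likewise reached through the shadow-logger wrappers in torch.ao.ns._numeric_suite_fx. A sketch, again reusing mp_copy, mq, and example_inputs from the earlier sketches and assuming the wrapper signatures shipped with this commit.

    import torch.ao.ns._numeric_suite_fx as ns

    # build a single model in which fp32 subgraphs shadow their int8 counterparts
    m_shadow = ns.add_shadow_loggers('fp32', mp_copy, 'int8', mq, ns.OutputLogger)
    m_shadow(*example_inputs)

    # per-layer results hold both the int8 outputs and the shadowing fp32 outputs
    shadow_results = ns.extract_shadow_logger_info(m_shadow, ns.OutputLogger, 'int8')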
venv/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py ADDED
@@ -0,0 +1,761 @@
1
+ import operator
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ toq = torch.ops.quantized
7
+
8
+ import torch.ao.nn.quantized as nnq
9
+ import torch.ao.nn.quantized.dynamic as nnqd
10
+ import torch.ao.nn.intrinsic.quantized as nniq
11
+ import torch.ao.nn.intrinsic.quantized.dynamic as nniqd
12
+ import torch.ao.nn.intrinsic.qat as nniqat
13
+ import torch.ao.nn.intrinsic as nni
14
+ import torch.ao.nn.qat as nnqat
15
+ import torch.ao.nn.qat.dynamic as nnqatd
16
+ from torch.ao.quantization.backend_config import get_native_backend_config
17
+ import torch.ao.quantization.fx._lower_to_native_backend as \
18
+ _lower_to_native_backend
19
+ import torch.ao.quantization.quantization_mappings as quantization_mappings
20
+
21
+ from .ns_types import NSNodeTargetType
22
+
23
+ from typing import Callable, Dict, List, Optional, Set, Tuple
24
+
25
+
26
+ def get_base_name_to_sets_of_related_ops() -> Dict[str, Set[NSNodeTargetType]]:
27
+ # note: this set is modified below by items from backend_config
28
+ sets_of_related_ops: List[Set[NSNodeTargetType]] = [
29
+ # conv modules
30
+ {
31
+ nn.Conv1d,
32
+ },
33
+ {
34
+ nn.Conv2d,
35
+ },
36
+ {
37
+ nn.Conv3d,
38
+ },
39
+ # conv functionals
40
+ {
41
+ F.conv1d,
42
+ },
43
+ {
44
+ F.conv2d,
45
+ },
46
+ {
47
+ F.conv3d,
48
+ },
49
+ # linear modules
50
+ {
51
+ nn.Linear,
52
+ },
53
+ # linear functionals
54
+ {
55
+ F.linear,
56
+ },
57
+ # average pool
58
+ {
59
+ nn.AvgPool1d,
60
+ torch.avg_pool1d,
61
+ },
62
+ {
63
+ nn.AvgPool2d,
64
+ torch._C._nn.avg_pool2d,
65
+ },
66
+ {
67
+ nn.AvgPool3d,
68
+ torch._C._nn.avg_pool3d,
69
+ },
70
+ # adaptive average pool
71
+ {
72
+ nn.AdaptiveAvgPool1d,
73
+ F.adaptive_avg_pool1d,
74
+ },
75
+ {
76
+ nn.AdaptiveAvgPool2d,
77
+ F.adaptive_avg_pool2d,
78
+ },
79
+ {
80
+ nn.AdaptiveAvgPool3d,
81
+ F.adaptive_avg_pool3d,
82
+ },
83
+ # LSTM
84
+ {
85
+ nn.LSTM,
86
+ },
87
+ # add
88
+ {
89
+ torch.add,
90
+ operator.add, # x + y
91
+ },
92
+ # cat
93
+ {
94
+ torch.cat,
95
+ },
96
+ # mul
97
+ {
98
+ torch.mul,
99
+ operator.mul,
100
+ },
101
+ # relu
102
+ {
103
+ F.relu,
104
+ nn.ReLU,
105
+ 'relu',
106
+ 'relu_',
107
+ torch.relu,
108
+ },
109
+ # maxpool
110
+ {
111
+ nn.MaxPool1d,
112
+ F.max_pool1d,
113
+ },
114
+ {
115
+ nn.MaxPool2d,
116
+ F.max_pool2d,
117
+ },
118
+ {
119
+ nn.MaxPool3d,
120
+ F.max_pool3d,
121
+ },
122
+ # sigmoid
123
+ {
124
+ torch.sigmoid,
125
+ 'sigmoid',
126
+ 'sigmoid_',
127
+ nn.Sigmoid,
128
+ F.sigmoid,
129
+ },
130
+ # BatchNorm
131
+ {
132
+ nn.BatchNorm2d,
133
+ },
134
+ {
135
+ nn.BatchNorm3d,
136
+ },
137
+ # ConvTranspose
138
+ {
139
+ nn.ConvTranspose1d,
140
+ },
141
+ {
142
+ nn.ConvTranspose2d,
143
+ },
144
+ {
145
+ nn.ConvTranspose3d,
146
+ },
147
+ # functional transposed conv
148
+ {
149
+ F.conv_transpose1d,
150
+ },
151
+ {
152
+ F.conv_transpose2d,
153
+ },
154
+ {
155
+ F.conv_transpose3d,
156
+ },
157
+ # ELU
158
+ {
159
+ nn.ELU,
160
+ },
161
+ # Embedding
162
+ {
163
+ nn.Embedding,
164
+ },
165
+ # EmbeddingBag
166
+ {
167
+ nn.EmbeddingBag,
168
+ },
169
+ # GroupNorm
170
+ {
171
+ nn.GroupNorm,
172
+ },
173
+ # Hardswish
174
+ {
175
+ nn.Hardswish,
176
+ },
177
+ # InstanceNorm
178
+ {
179
+ nn.InstanceNorm1d,
180
+ },
181
+ {
182
+ nn.InstanceNorm2d,
183
+ },
184
+ {
185
+ nn.InstanceNorm3d,
186
+ },
187
+ # LayerNorm
188
+ {
189
+ nn.LayerNorm,
190
+ },
191
+ # LeakyReLU
192
+ {
193
+ nn.LeakyReLU,
194
+ },
195
+ # ReLU6
196
+ {
197
+ nn.ReLU6,
198
+ F.relu6,
199
+ },
200
+ # F.elu
201
+ {
202
+ F.elu,
203
+ },
204
+ # F.hardswish
205
+ {
206
+ F.hardswish,
207
+ },
208
+ # F.group_norm
209
+ {
210
+ F.group_norm,
211
+ },
212
+ # F.instance_norm
213
+ {
214
+ F.instance_norm,
215
+ },
216
+ # F.layer_norm
217
+ {
218
+ F.layer_norm,
219
+ },
220
+ # F.leaky_relu
221
+ {
222
+ F.leaky_relu,
223
+ },
224
+ # F.silu
225
+ {
226
+ nn.SiLU,
227
+ F.silu,
228
+ },
229
+ # F.mish
230
+ {
231
+ nn.Mish,
232
+ F.mish,
233
+ },
234
+ # F.tanh
235
+ {
236
+ nn.Tanh,
237
+ F.tanh,
238
+ torch.tanh,
239
+ 'tanh_',
240
+ 'tanh',
241
+ },
242
+ # F.hardsigmoid
243
+ {
244
+ 'hardsigmoid_',
245
+ 'hardsigmoid',
246
+ F.hardsigmoid,
247
+ nn.Hardsigmoid,
248
+ },
249
+ # F.hardtanh
250
+ {
251
+ nn.Hardtanh,
252
+ F.hardtanh,
253
+ F.hardtanh_,
254
+ },
255
+ # floordiv
256
+ {
257
+ operator.floordiv,
258
+ },
259
+ # unsqueeze
260
+ {
261
+ torch.unsqueeze,
262
+ },
263
+ # stack
264
+ {
265
+ torch.stack,
266
+ },
267
+ # squeeze
268
+ {
269
+ torch.squeeze,
270
+ },
271
+ # sort
272
+ {
273
+ torch.sort,
274
+ },
275
+ # repeat_interleave
276
+ {
277
+ torch.repeat_interleave,
278
+ },
279
+ # min
280
+ {
281
+ torch.min,
282
+ },
283
+ # mean
284
+ {
285
+ torch.mean,
286
+ },
287
+ # max
288
+ {
289
+ torch.max,
290
+ },
291
+ # transpose
292
+ {
293
+ torch.transpose,
294
+ },
295
+ # flatten
296
+ {
297
+ torch.flatten,
298
+ },
299
+ # clamp
300
+ {
301
+ torch.clamp,
302
+ },
303
+ # chunk
304
+ {
305
+ torch.chunk,
306
+ },
307
+ # interpolate
308
+ {
309
+ torch.nn.functional.interpolate,
310
+ },
311
+ # dropout
312
+ {
313
+ nn.Dropout,
314
+ },
315
+ # F.dropout
316
+ {
317
+ F.dropout,
318
+ },
319
+ # matmul
320
+ {
321
+ torch.matmul,
322
+ },
323
+ # Softmax
324
+ {
325
+ nn.Softmax,
326
+ },
327
+ # PReLU
328
+ {
329
+ nn.PReLU,
330
+ nnq.PReLU,
331
+ },
332
+ # F.prelu
333
+ {
334
+ F.prelu,
335
+ toq.prelu,
336
+ },
337
+ # pixel shuffle
338
+ {
339
+ nn.PixelShuffle,
340
+ },
341
+ {
342
+ F.pixel_shuffle,
343
+ },
344
+ # pixel unshuffle
345
+ {
346
+ nn.PixelUnshuffle,
347
+ },
348
+ {
349
+ F.pixel_unshuffle,
350
+ },
351
+ # narrow
352
+ {
353
+ torch.narrow,
354
+ },
355
+ ]
356
+
357
+ # for each floating point op, add versions of the op added by
358
+ # backend_config
359
+ backend_config = get_native_backend_config()
360
+
361
+ new_connections: List[Tuple[Callable, Callable]] = [
362
+ # technical debt edge case
363
+ (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear),
364
+ ]
365
+
366
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
367
+
368
+ # pattern format: (c, (b, a))
369
+ first_element = pattern
370
+ # look from the end, because pattern is in reverse order
371
+ while isinstance(first_element, (list, tuple)):
372
+ first_element = first_element[-1]
373
+
374
+ if config.fused_module is not None:
375
+ # case 1: pattern fuses a pattern of ops into an op
376
+ # example: nn.Conv1d, nn.ReLU fused into nni.ConvReLU1d
377
+ new_connections.append((first_element, config.fused_module))
378
+
379
+ if config.qat_module is not None:
380
+ # case 2: pattern swaps a module into a QAT module
381
+ # example: nni.ConvReLU1d swapped into nniqat.ConvReLU1d
382
+ new_connections.append((first_element, config.qat_module))
383
+
384
+ if config.reference_quantized_module is not None:
385
+ # case 3: reference version of floating point module, such as
386
+ # nn.Conv2d and nnqr.Conv2d
387
+ new_connections.append((first_element, config.reference_quantized_module))
388
+
389
+ #
390
+ # Add reference module swaps from default lowering path
391
+ #
392
+
393
+ for source_to_target in (
394
+ _lower_to_native_backend.STATIC_LOWER_MODULE_MAP,
395
+ _lower_to_native_backend.DYNAMIC_LOWER_MODULE_MAP,
396
+ _lower_to_native_backend.WEIGHT_ONLY_LOWER_MODULE_MAP,
397
+ _lower_to_native_backend.SPECIAL_PATTERN_LOWER_MODULE_MAP,
398
+ ):
399
+ for source, target in source_to_target.items(): # type: ignore[attr-defined]
400
+ new_connections.append((source, target))
401
+
402
+ for source_to_double_target in (
403
+ _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_MAP,
404
+ _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP,
405
+ _lower_to_native_backend.DYNAMIC_LOWER_FUSED_MODULE_MAP,
406
+ ):
407
+ for source, (target1, target2) in source_to_double_target.items(): # type: ignore[attr-defined]
408
+ new_connections.append((source, target1))
409
+ new_connections.append((source, target2))
410
+
411
+ #
412
+ # Add function swaps from default lowering path
413
+ #
414
+
415
+ for source, (target1, target2) in \
416
+ _lower_to_native_backend.STATIC_LOWER_FUNCTIONAL_MAP.items():
417
+ new_connections.append((source, target1))
418
+ new_connections.append((source, target2))
419
+
420
+ for source_to_target in (
421
+ _lower_to_native_backend.QBIN_OP_MAPPING,
422
+ _lower_to_native_backend.QBIN_RELU_OP_MAPPING,
423
+ quantization_mappings.DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
424
+ ):
425
+ for source, target in source_to_target.items():
426
+ new_connections.append((source, target))
427
+
428
+ #
429
+ # Add other swaps, ideally in the future this could be removed
430
+ # after the lowering code stops using these.
431
+ #
432
+ for source_to_target in (
433
+ quantization_mappings.DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
434
+ ):
435
+ for source, target in source_to_target.items():
436
+ new_connections.append((source, target))
437
+
438
+
439
+ # add the new connections from backend_config
440
+ for item1, item2 in new_connections:
441
+ for set_of_related_ops in sets_of_related_ops:
442
+ if item1 in set_of_related_ops or item2 in set_of_related_ops:
443
+ set_of_related_ops.add(item1)
444
+ set_of_related_ops.add(item2)
445
+ break
446
+
447
+ base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]] = {}
448
+
449
+ counter = 0
450
+ for set_of_related_ops in sets_of_related_ops:
451
+ base_name = str(counter)
452
+ counter += 1
453
+ base_name_to_sets_of_related_ops[base_name] = set_of_related_ops
454
+
455
+ return base_name_to_sets_of_related_ops
456
+
457
+
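A quick sketch of inspecting the structure returned by get_base_name_to_sets_of_related_ops above: the base names are just string counters, and the contents of each set depend on the installed backend_config, so the exact printout will vary.

    import torch.nn as nn
    from torch.ao.ns.fx.mappings import get_base_name_to_sets_of_related_ops

    mapping = get_base_name_to_sets_of_related_ops()
    # find the related-ops set that the fp32 Conv2d module belongs to
    for base_name, related_ops in mapping.items():
        if nn.Conv2d in related_ops:
            print(base_name, sorted(str(op) for op in related_ops))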
458
+ def get_base_name_for_op(
459
+ base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
460
+ op: NSNodeTargetType,
461
+ ) -> Optional[str]:
462
+ for base_name, set_of_related_ops in base_name_to_sets_of_related_ops.items():
463
+ if op in set_of_related_ops:
464
+ return base_name
465
+ return None
466
+
467
+
468
+ def add_op_to_sets_of_related_ops(
469
+ base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
470
+ op: NSNodeTargetType,
471
+ related_op: Optional[NSNodeTargetType],
472
+ ) -> None:
473
+ if related_op is not None:
474
+ for set_of_related_ops in base_name_to_sets_of_related_ops.values():
475
+ if related_op in set_of_related_ops:
476
+ set_of_related_ops.add(op)
477
+ return
478
+ # if we got here, related_op was not found
479
+ raise AssertionError(f"{related_op} was not found")
480
+ else:
481
+ counter = 0
482
+ while str(counter) in base_name_to_sets_of_related_ops:
483
+ counter += 1
484
+ base_name_to_sets_of_related_ops[str(counter)] = {op}
485
+
486
+
487
+ # TODO(future PR): clean this up
488
+ def get_node_type_to_io_type_map() -> Dict[str, Set[NSNodeTargetType]]:
489
+ FUNS_IO_TYPE_FP32: Set[NSNodeTargetType] = {
490
+ F.linear,
491
+ F.conv1d,
492
+ F.conv2d,
493
+ F.conv3d,
494
+ torch.cat,
495
+ F.elu,
496
+ F.hardswish,
497
+ F.instance_norm,
498
+ F.layer_norm,
499
+ F.leaky_relu,
500
+ F.dropout,
501
+ F.silu,
502
+ F.mish,
503
+ operator.add,
504
+ torch.add,
505
+ operator.mul,
506
+ torch.mul,
507
+ torch.sum,
508
+ F.prelu,
509
+ }
510
+
511
+ FUNS_IO_TYPE_FP16: Set[NSNodeTargetType] = set()
512
+
513
+ FUNS_IO_TYPE_INT8: Set[NSNodeTargetType] = {
514
+ toq.linear,
515
+ toq.linear_relu,
516
+ toq.conv1d,
517
+ toq.conv1d_relu,
518
+ toq.conv2d,
519
+ toq.conv2d_relu,
520
+ toq.conv3d,
521
+ toq.conv3d_relu,
522
+ toq.cat,
523
+ toq.elu,
524
+ toq.hardswish,
525
+ toq.instance_norm,
526
+ toq.layer_norm,
527
+ toq.leaky_relu,
528
+ toq.dropout,
529
+ toq.prelu,
530
+ # TODO(future PR): implement shadowing for binary ops and
531
+ # uncomment below
532
+ # toq.add,
533
+ # toq.mul,
534
+ }
535
+
536
+ FUNS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
537
+ F.relu,
538
+ F.tanh,
539
+ torch.tanh,
540
+ F.sigmoid,
541
+ torch.sigmoid,
542
+ F.hardsigmoid,
543
+ operator.floordiv,
544
+ torch.adaptive_avg_pool1d,
545
+ F.adaptive_avg_pool2d,
546
+ F.adaptive_avg_pool3d,
547
+ F.dropout,
548
+ F.hardtanh,
549
+ F.hardtanh_,
550
+ F.interpolate,
551
+ F.max_pool1d,
552
+ F.max_pool2d,
553
+ F.max_pool3d,
554
+ F.relu6,
555
+ F.pixel_shuffle,
556
+ F.pixel_unshuffle,
557
+ torch.avg_pool1d,
558
+ torch._C._nn.avg_pool2d,
559
+ torch._C._nn.avg_pool3d,
560
+ torch.cat,
561
+ torch.chunk,
562
+ torch.clamp,
563
+ torch.flatten,
564
+ torch.transpose,
565
+ torch.max,
566
+ torch.mean,
567
+ torch.min,
568
+ torch.narrow,
569
+ torch.repeat_interleave,
570
+ torch.sort,
571
+ torch.squeeze,
572
+ torch.stack,
573
+ torch.unsqueeze,
574
+ operator.add,
575
+ }
576
+
577
+ MODS_IO_TYPE_FP32: Set[NSNodeTargetType] = {
578
+ nn.Linear,
579
+ nnqat.Linear,
580
+ nnqatd.Linear,
581
+ nnqd.Linear,
582
+ torch.nn.modules.linear.NonDynamicallyQuantizableLinear,
583
+ nn.Conv1d,
584
+ nn.Conv2d,
585
+ nn.Conv3d,
586
+ nnqat.Conv1d,
587
+ nnqat.Conv2d,
588
+ nnqat.Conv3d,
589
+ nnqat.Embedding,
590
+ nnqat.EmbeddingBag,
591
+ nn.LSTM,
592
+ # note: nnqd.Linear is an instance of nnq.Linear, so this
593
+ # check has to happen before the int8 module check
594
+ nnqd.LSTM,
595
+ nn.BatchNorm2d,
596
+ nn.BatchNorm3d,
597
+ nn.Dropout,
598
+ nn.ConvTranspose1d,
599
+ nn.ConvTranspose2d,
600
+ nn.ConvTranspose3d,
601
+ nn.ELU,
602
+ nn.GroupNorm,
603
+ nn.InstanceNorm1d,
604
+ nn.InstanceNorm2d,
605
+ nn.InstanceNorm3d,
606
+ nn.LayerNorm,
607
+ nn.Hardswish,
608
+ nn.LeakyReLU,
609
+ nn.ReLU6,
610
+ nn.SiLU,
611
+ nn.Mish,
612
+ nn.Softmax,
613
+ nn.PReLU,
614
+ nni.BNReLU2d,
615
+ nni.BNReLU3d,
616
+ nni.ConvReLU1d,
617
+ nni.ConvReLU2d,
618
+ nni.ConvReLU3d,
619
+ nni.LinearReLU,
620
+ nni.LinearBn1d,
621
+ nni.ConvBn1d,
622
+ nni.ConvBn2d,
623
+ nni.ConvBn3d,
624
+ nniqat.ConvBn1d,
625
+ nniqat.ConvBn2d,
626
+ nniqat.ConvBn3d,
627
+ nniqat.ConvBnReLU1d,
628
+ nniqat.ConvBnReLU2d,
629
+ nniqat.ConvBnReLU3d,
630
+ nniqat.ConvReLU1d,
631
+ nniqat.ConvReLU2d,
632
+ nniqat.ConvReLU3d,
633
+ nniqat.LinearReLU,
634
+ nniqat.LinearBn1d,
635
+ nniqd.LinearReLU,
636
+ nni.LinearLeakyReLU,
637
+ nni.LinearTanh,
638
+ nni.ConvAdd2d,
639
+ nni.ConvAddReLU2d,
640
+ }
641
+
642
+ MODS_IO_TYPE_INT8: Set[NSNodeTargetType] = {
643
+ nnq.Linear,
644
+ nnq.Conv1d,
645
+ nnq.Conv2d,
646
+ nnq.Conv3d,
647
+ nnq.BatchNorm2d,
648
+ nnq.BatchNorm3d,
649
+ nnq.Dropout,
650
+ nnq.ConvTranspose1d,
651
+ nnq.ConvTranspose2d,
652
+ nnq.ELU,
653
+ nnq.InstanceNorm1d,
654
+ nnq.InstanceNorm2d,
655
+ nnq.InstanceNorm3d,
656
+ nnq.LayerNorm,
657
+ nnq.Hardswish,
658
+ nnq.LeakyReLU,
659
+ nnq.Embedding,
660
+ nnq.EmbeddingBag,
661
+ nnq.Dropout,
662
+ nnq.Softmax,
663
+ nnq.PReLU,
664
+ nniq.BNReLU2d,
665
+ nniq.BNReLU3d,
666
+ nniq.ConvReLU1d,
667
+ nniq.ConvReLU2d,
668
+ nniq.ConvReLU3d,
669
+ nniq.LinearReLU,
670
+ nniq.LinearLeakyReLU,
671
+ nniq.LinearTanh,
672
+ nniq.ConvAdd2d,
673
+ nniq.ConvAddReLU2d,
674
+ }
675
+
676
+ MODS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
677
+ nn.ReLU,
678
+ nn.Tanh,
679
+ nn.Sigmoid,
680
+ nn.Hardsigmoid,
681
+ nn.AdaptiveAvgPool1d,
682
+ nn.AdaptiveAvgPool2d,
683
+ nn.AdaptiveAvgPool3d,
684
+ nn.AvgPool1d,
685
+ nn.AvgPool2d,
686
+ nn.AvgPool3d,
687
+ nn.Dropout,
688
+ nn.Hardtanh,
689
+ nn.Identity,
690
+ nn.MaxPool1d,
691
+ nn.MaxPool2d,
692
+ nn.MaxPool3d,
693
+ nn.PixelShuffle,
694
+ nn.PixelUnshuffle,
695
+ nn.ReLU6,
696
+ }
697
+
698
+ METHS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
699
+ 'sigmoid_',
700
+ 'sigmoid',
701
+ 'tanh_',
702
+ 'tanh',
703
+ 'hardsigmoid_',
704
+ 'hardsigmoid',
705
+ 'relu_',
706
+ 'relu',
707
+ }
708
+
709
+ return {
710
+ 'funs_io_type_fp32': FUNS_IO_TYPE_FP32,
711
+ 'funs_io_type_fp16': FUNS_IO_TYPE_FP16,
712
+ 'funs_io_type_int8': FUNS_IO_TYPE_INT8,
713
+ 'funs_io_type_fp32_or_int8': FUNS_IO_TYPE_FP32_OR_INT8,
714
+ 'mods_io_type_fp32': MODS_IO_TYPE_FP32,
715
+ 'mods_io_type_int8': MODS_IO_TYPE_INT8,
716
+ 'mods_io_type_fp32_or_int8': MODS_IO_TYPE_FP32_OR_INT8,
717
+ 'meths_io_type_fp32_or_int8': METHS_IO_TYPE_FP32_OR_INT8,
718
+ }
719
+
720
+
721
+ def get_unmatchable_types_map() -> Dict[str, Set[NSNodeTargetType]]:
722
+
723
+ FUNS_UNMATCHABLE: Set[NSNodeTargetType] = {
724
+ torch.quantize_per_tensor,
725
+ operator.getitem,
726
+ }
727
+
728
+ MODS_UNMATCHABLE: Set[NSNodeTargetType] = {
729
+ nn.Identity,
730
+ }
731
+
732
+ METHS_UNMATCHABLE: Set[NSNodeTargetType] = {
733
+ 'to',
734
+ 'dequantize',
735
+ 'reshape',
736
+ 'view',
737
+ 'unsqueeze_',
738
+ 'unsqueeze',
739
+ 'transpose',
740
+ 'squeeze_',
741
+ 'squeeze',
742
+ 'size',
743
+ 'shape',
744
+ 'resize_',
745
+ 'repeat_interleave',
746
+ 'repeat',
747
+ 'permute',
748
+ 'numel',
749
+ 'mean',
750
+ 'detach_',
751
+ 'detach',
752
+ 'contiguous',
753
+ 'clamp',
754
+ 'chunk',
755
+ }
756
+
757
+ return {
758
+ 'funs_unmatchable': FUNS_UNMATCHABLE,
759
+ 'mods_unmatchable': MODS_UNMATCHABLE,
760
+ 'meths_unmatchable': METHS_UNMATCHABLE,
761
+ }
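# A minimal sketch (not part of the upstream diff) showing one way the unmatchable
# sets returned above could be used to filter FX nodes before graph matching; the
# helper name is hypothetical.
import torch.fx

def _iter_matchable_nodes(gm: torch.fx.GraphModule):
    unmatchable = get_unmatchable_types_map()
    for node in gm.graph.nodes:
        if node.op == 'call_function' and node.target in unmatchable['funs_unmatchable']:
            continue  # e.g. torch.quantize_per_tensor, operator.getitem
        if node.op == 'call_method' and node.target in unmatchable['meths_unmatchable']:
            continue  # e.g. 'dequantize', 'reshape', 'view'
        # call_module targets would additionally need to be resolved to module
        # types before checking mods_unmatchable
        if node.op in ('call_function', 'call_module', 'call_method'):
            yield node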
venv/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py ADDED
@@ -0,0 +1,1311 @@
1
+ import torch
2
+ import torch.fx
3
+ from torch.fx import (
4
+ Node,
5
+ GraphModule,
6
+ Graph,
7
+ )
8
+
9
+ from torch.ao.ns.fx.utils import (
10
+ # TODO(future PR): make this work correctly for methods
11
+ get_target_type_str,
12
+ get_normalized_nth_input,
13
+ )
14
+ from torch.ao.ns.fx.ns_types import (
15
+ NSSingleResultValuesType,
16
+ NSResultsType,
17
+ )
18
+ from torch.ao.ns.fx.graph_passes import _maybe_get_fqn
19
+ from torch.ao.quantization import QConfigMapping
20
+ from torch.ao.quantization.qconfig import QConfigAny
21
+ from torch.ao.quantization.utils import getattr_from_fqn
22
+ from torch.ao.quantization.fx.match_utils import _MatchResult
23
+ from torch.utils._pytree import tree_map
24
+
25
+ import collections
26
+ import copy
27
+ from typing import List, Dict, Set, Tuple, Callable, Any, Optional
28
+ import operator
29
+
30
+ SHADOW_NODE_NAME_PREFIX = 'shadow'
31
+ SHADOW_WRAPPER_NODE_NAME_PREFIX = 'shadow_wrapper'
32
+
33
+ # TODO(future PR): reuse existing mapping instead of creating a new one
34
+ BINARY_FUNCTIONS = {
35
+ torch.add,
36
+ torch.Tensor.add,
37
+ operator.add,
38
+ torch.mul,
39
+ torch.Tensor.mul,
40
+ operator.mul,
41
+ }
42
+
43
+ def _get_attr_name(subgraph_idx, subgraph_candidate_idx):
44
+ return f"{SHADOW_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}"
45
+
46
+ def _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx):
47
+ return f"{SHADOW_WRAPPER_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}"
48
+
49
+
50
+ class OutputProp:
51
+ """
52
+ Output propagation (modeled from shape propagation).
53
+
54
+ Given a GraphModule and an example input, saves the output flowing
55
+ through each node on `node.traced_result`.
56
+
57
+ Code based on the example from
58
+ https://pytorch.org/docs/stable/fx.html#the-interpreter-pattern
59
+ """
60
+ def __init__(self, mod):
61
+ self.mod = mod
62
+ self.graph = mod.graph
63
+ self.modules = dict(self.mod.named_modules())
64
+
65
+ def propagate(self, *args):
66
+ args_iter = iter(args)
67
+ env : Dict[str, Node] = {}
68
+
69
+ def load_arg(a):
70
+ return torch.fx.graph.map_arg(a, lambda n: env[n.name])
71
+
72
+ def fetch_attr(target : str):
73
+ target_atoms = target.split('.')
74
+ attr_itr = self.mod
75
+ for i, atom in enumerate(target_atoms):
76
+ if not hasattr(attr_itr, atom):
77
+ raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
78
+ attr_itr = getattr(attr_itr, atom)
79
+ return attr_itr
80
+
81
+ for node in self.graph.nodes:
82
+ if node.op == 'placeholder':
83
+ result = next(args_iter)
84
+ elif node.op == 'get_attr':
85
+ result = fetch_attr(node.target)
86
+ elif node.op == 'call_function':
87
+ result = node.target(*load_arg(node.args), **load_arg(node.kwargs))
88
+ elif node.op == 'call_method':
89
+ self_obj, *args = load_arg(node.args)
90
+ kwargs = load_arg(node.kwargs)
91
+ result = getattr(self_obj, node.target)(*args, **kwargs)
92
+ elif node.op == 'call_module':
93
+ result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs))
94
+
95
+ if isinstance(result, torch.Tensor): # type: ignore[possibly-undefined]
96
+ node.traced_result = result
97
+
98
+ env[node.name] = result
99
+
100
+ return None
101
+
102
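# A minimal sketch (not part of the upstream diff): running OutputProp on a tiny
# traced model attaches the observed tensor to each node as `traced_result`.
# The example model and input below are hypothetical.
import torch
import torch.fx

_example = torch.fx.symbolic_trace(torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU()))
OutputProp(_example).propagate(torch.randn(2, 4))
for _n in _example.graph.nodes:
    if hasattr(_n, 'traced_result'):
        print(_n.name, tuple(_n.traced_result.shape))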
+ def _get_dedup_subgraphs(
103
+ matches: Dict[str, _MatchResult]
104
+ ) -> Dict[str, List[Node]]:
105
+ # the original matches variable is unique by node, make it unique by subgraph
106
+ # instead
107
+ seen_nodes = set()
108
+ subgraphs_dedup = {}
109
+
110
+ # Dict items are not reversible until Python 3.8, so we hack it
111
+ # to be compatible with previous Python versions
112
+ # TODO(future PR): try reversed(list(matches.items()))
113
+ matches_items_reversed: List[Tuple[str, _MatchResult]] = []
114
+ for name, cur_match in matches.items():
115
+ matches_items_reversed.insert(0, (name, cur_match))
116
+
117
+ # Note: the order is important. `matches` currently provides the matches
118
+ # in reverse order. We would like to process the matches in non-reverse
119
+ # order, so that we can create an intuitive naming scheme, such as
120
+ # naming the first op's submodules `shadow_0_0` through `shadow_0_(n-1)`
121
+ for name, cur_match in matches_items_reversed: # type: ignore[call-overload]
122
+ was_seen = False
123
+ for node_or_tuple in cur_match[1]:
124
+
125
+ # Cur_match[1] has an unusual type. It says that it's a `List[Node]`,
126
+ # but it is really not. Furthermore, the contents of this field
127
+ # can change from match results of multiple nodes of the same pattern
128
+ #
129
+ # For example, for conv -> bn -> relu, we see
130
+ # match_results = {
131
+ # 'conv': (relu, [(bn, conv), relu], ...),
132
+ # 'bn': (relu, [(bn, conv), relu], ...),
133
+ # 'relu': (relu, [(bn, conv), relu], ...),
134
+ # }
135
+ #
136
+ # Ideally we should clean up the `find_matches` function to make
137
+ # this more intuitive. For the purposes of this prototype, we hack
138
+ # around it.
139
+
140
+ if isinstance(node_or_tuple, Node):
141
+ if node_or_tuple in seen_nodes:
142
+ was_seen = True
143
+ seen_nodes.add(node_or_tuple)
144
+
145
+ else:
146
+ assert isinstance(node_or_tuple, tuple)
147
+ for node in node_or_tuple:
148
+ assert isinstance(node, Node)
149
+ if node in seen_nodes:
150
+ was_seen = True
151
+ seen_nodes.add(node)
152
+
153
+ if was_seen:
154
+ continue
155
+
156
+ # Start with the unusual type, convert it to [op_0, ..., op_n]
157
+ list_of_nodes = []
158
+
159
+ if len(cur_match[1]) == 1:
160
+ list_of_nodes = cur_match[1]
161
+ else:
162
+ assert len(cur_match[1]) == 2
163
+ # either (a, b), or ((a, b), c) or (c, (a, b))
164
+ # cannot make any assumptions on order, not clear what the
165
+ # _find_matches function is doing to populate this
166
+ # TODO(future PR): make this code less confusing, see discussion
167
+ # in https://github.com/pytorch/pytorch/pull/80521/files#r975918836
168
+
169
+ def _order_nodes(node_a, node_b, node_c) -> List[Node]:
170
+ nodes = [node_a, node_b, node_c]
171
+ first_node = None
172
+ mid_node = None
173
+ last_node = None
174
+ for n in nodes:
175
+ prev_n = n.args[0]
176
+ next_n = next(iter(n.users))
177
+ if prev_n not in nodes:
178
+ first_node = n
179
+ elif next_n not in nodes:
180
+ last_node = n
181
+ else:
182
+ mid_node = n
183
+ assert first_node is not None and mid_node is not None and \
184
+ last_node is not None
185
+ assert mid_node.args[0] is first_node
186
+ assert last_node.args[0] is mid_node
187
+ return [last_node, mid_node, first_node]
188
+
189
+ if isinstance(cur_match[1][0], Node) and isinstance(cur_match[1][1], Node):
190
+ # (a, b)
191
+ list_of_nodes = cur_match[1]
192
+ elif isinstance(cur_match[1][0], tuple):
193
+ # ((a, b), c)
194
+ node_a, node_b = cur_match[1][0]
195
+ node_c = cur_match[1][1]
196
+ list_of_nodes = _order_nodes(node_a, node_b, node_c)
197
+ elif isinstance(cur_match[1][1], tuple):
198
+ # (a, (b, c))
199
+ node_a, node_b = cur_match[1][1]
200
+ node_c = cur_match[1][0]
201
+ list_of_nodes = _order_nodes(node_a, node_b, node_c)
202
+
203
+ # [node_n, ..., node_0], note that the order is reversed
204
+ # to make it chronological for simple subgraphs
205
+ list_of_nodes.reverse()
206
+ subgraphs_dedup[name] = list_of_nodes
207
+
208
+ return subgraphs_dedup
209
+
210
+ def _get_logger_for_subgraph(
211
+ model: GraphModule,
212
+ first_node: Node,
213
+ last_node: Node,
214
+ subgraph_idx: int,
215
+ subgraph_candidate_idx: int,
216
+ qconfig_str: str,
217
+ logger_cls: Callable,
218
+ fqn: Optional[str],
219
+ ) -> torch.nn.Module:
220
+ """
221
+ Given a model and a linear subgraph starting from `first_node` and
222
+ ending with `last_node`, creates a logger for the end of this
223
+ subgraph.
224
+ """
225
+ if fqn is None:
226
+ fqn = ''
227
+ logger_mod_orig = logger_cls(
228
+ first_node.name, # ref_node_name
229
+ last_node.name, # prev_node_name
230
+ f'subgraph_{subgraph_idx}_{subgraph_candidate_idx}', # model_name
231
+ 'model', # ref_name
232
+ get_target_type_str(last_node, model), # prev_node_target_type
233
+ get_target_type_str(first_node, model), # ref_node_target_type
234
+ NSSingleResultValuesType.NODE_OUTPUT.value, # results_type
235
+ 0, # index_within_arg
236
+ 0, # index_of_arg
237
+ fqn, # fqn
238
+ qconfig_str,
239
+ )
240
+ # Usually we expect the user to add loggers, then calibrate, then convert,
241
+ # and then populate loggers. This is why the loggers start disabled.
242
+ # TODO(future PR): reconsider the design to make this more intuitive.
243
+ logger_mod_orig.enabled = False
244
+ return logger_mod_orig
245
+
246
+ def create_submodule_from_subgraph(
247
+ model: torch.nn.Module,
248
+ first_node: Node,
249
+ last_node: Node,
250
+ ) -> GraphModule:
251
+ """
252
+ Input: a model, and a linear subgraph within the model from first_node to
253
+ last_node.
254
+
255
+ Output: a new submodule containing a copy of the subgraph, with the inputs
256
+ to the first node becoming the inputs to the submodule, and all other
257
+ nodes in the subgraph being copied.
258
+
259
+ Example inputs:
260
+
261
+ `model`: a module with graph
262
+
263
+ x0 -> op1 -> x1 -> op2 -> x2
264
+ |
265
+ arg1
266
+
267
+ `first_node`: op1
268
+ `last_node`: op2
269
+
270
+ Example output: a new module with graph
271
+
272
+ input1 -> op1_copy -> x1 -> op2_copy -> output1
273
+ |
274
+ arg1
275
+ """
276
+
277
+ #
278
+ # create a blank GraphModule with an empty graph
279
+ #
280
+
281
+ class M(torch.nn.Module):
282
+ def forward(self, x):
283
+ pass
284
+
285
+ m = M()
286
+ gm = torch.fx.symbolic_trace(m)
287
+ g = gm.graph
288
+ for node in reversed(gm.graph.nodes):
289
+ g.erase_node(node)
290
+
291
+ #
292
+ # modify the graph to have a copy of our subgraph
293
+ #
294
+
295
+ cur_node_orig = first_node
296
+ cur_args_orig = cur_node_orig.args
297
+ cur_kwargs_orig = cur_node_orig.kwargs
298
+
299
+ cur_name_idx = 0
300
+
301
+ iteration_limit = 100
302
+ cur_iteration = 0
303
+
304
+ while True:
305
+ if cur_node_orig is first_node:
306
+ # we are at the first node, we need to set up graph inputs
307
+ # TODO(future): some graphs could have placeholders which are unrelated
308
+ # to the first node, need to handle this
309
+ cur_args_copy = []
310
+ cur_kwargs_copy = {}
311
+ seen_names: Set[str] = set()
312
+ old_name_to_new_node: Dict[str, Node] = {}
313
+
314
+ def _add_placeholder(
315
+ g: Graph, node: Node, seen_names, old_name_to_new_node
316
+ ):
317
+ # note: for graphs starting with patterns such as `y = x + x`, we
318
+ # need to ensure we do not add multiple placeholders with the
319
+ # same name
320
+ counter = 0
321
+ while node.name + '_' + str(counter) in seen_names:
322
+ counter += 1
323
+ cur_name = node.name + '_' + str(counter)
324
+ seen_names.add(cur_name)
325
+ placeholder = g.placeholder(cur_name)
326
+ old_name_to_new_node[node.name] = placeholder
327
+ return placeholder
328
+
329
+ for arg in cur_node_orig.args:
330
+ if isinstance(arg, Node):
331
+ p = _add_placeholder(
332
+ g, arg, seen_names, old_name_to_new_node)
333
+ cur_args_copy.append(p)
334
+ elif isinstance(arg, (list, tuple)):
335
+ new_arg = []
336
+ for inner_arg in arg:
337
+ if isinstance(inner_arg, Node):
338
+ new_arg.append(_add_placeholder(
339
+ g, inner_arg, seen_names, old_name_to_new_node))
340
+ else:
341
+ new_arg.append(inner_arg)
342
+ cur_args_copy.append(new_arg)
343
+ else:
344
+ cur_args_copy.append(arg)
345
+
346
+ # TODO(future PR): handle non-normalized kwargs
347
+ for kwarg_name, kwarg in cur_node_orig.kwargs.items():
348
+ if isinstance(kwarg, Node):
349
+ cur_kwargs_copy[kwarg_name] = _add_placeholder(
350
+ g, kwarg, seen_names, old_name_to_new_node)
351
+ elif isinstance(kwarg, (list, tuple)):
352
+ new_kwarg = []
353
+ for inner_kwarg in kwarg:
354
+ p = _add_placeholder(
355
+ g, inner_kwarg, seen_names, old_name_to_new_node)
356
+ new_kwarg.append(p)
357
+ cur_kwargs_copy[kwarg_name] = new_kwarg
358
+ else:
359
+ cur_kwargs_copy[kwarg_name] = kwarg
360
+
361
+ cur_args_copy = tuple(cur_args_copy) # type: ignore[assignment]
362
+ else:
363
+ # we are not at first node, first arg is from the previous node,
364
+ # and all other args are copied
365
+
366
+ # the current implementation is simplistic and cannot handle
367
+ # ops with two or more arguments which need to be passed from
368
+ # the previous op, so we assert them out
369
+ assert cur_node_orig.target not in BINARY_FUNCTIONS
370
+
371
+ # at this point in the code, cur_node_copy is pointing to the copy
372
+ # of the previous node
373
+ # TODO(future PR): this is not handling complicated graphs correctly, need to
374
+ # look at actual relationships instead of assuming sequential graph
375
+ # TODO(future PR): this is ignoring kwargs, will need to support kwargs
376
+ # for any fusion pattern which has them for a node that is not the
377
+ # first node.
378
+ cur_args_copy = [cur_node_copy] # type: ignore[has-type, possibly-undefined] # noqa: F821
379
+
380
+ if len(cur_node_orig.args) > 1:
381
+ for arg in cur_node_orig.args[1:]:
382
+ if isinstance(arg, torch.nn.Parameter):
383
+ new_arg = arg.clone().detach() # type: ignore[assignment]
384
+ mod_name = f"mod_{cur_name_idx}"
385
+ cur_name_idx += 1
386
+ setattr(gm, mod_name, new_arg)
387
+ new_arg_placeholder = gm.placeholder(mod_name)
388
+ cur_args_copy.append(new_arg_placeholder)
389
+ elif isinstance(arg, (float, int, torch.dtype)):
390
+ cur_args_copy.append(arg)
391
+ else:
392
+ raise AssertionError(f'arg of type {type(arg)} not handled yet')
393
+ cur_args_copy = tuple(cur_args_copy) # type: ignore[assignment]
394
+
395
+ # copy the node
396
+ if cur_node_orig.op == 'call_module':
397
+ orig_mod = getattr_from_fqn(model, cur_node_orig.target) # type: ignore[arg-type]
398
+ orig_mod_copy = copy.deepcopy(orig_mod)
399
+ mod_name = f"mod_{cur_name_idx}"
400
+ setattr(gm, mod_name, orig_mod_copy)
401
+ cur_name_idx += 1
402
+ cur_node_copy = g.call_module(mod_name, cur_args_copy, cur_kwargs_copy) # type: ignore[possibly-undefined]
403
+
404
+ elif cur_node_orig.op == 'call_function':
405
+ cur_node_copy = g.call_function(
406
+ cur_node_orig.target, cur_args_copy, cur_kwargs_copy) # type: ignore[possibly-undefined]
407
+
408
+ elif cur_node_orig.op == 'call_method':
409
+ cur_node_copy = g.call_method(
410
+ cur_node_orig.target, cur_args_copy, cur_kwargs_copy) # type: ignore[possibly-undefined]
411
+
412
+ else:
413
+ raise AssertionError(f'{cur_node_orig.op} not supported yet')
414
+
415
+ if cur_node_orig is last_node:
416
+ break
417
+
418
+ # go to next node
419
+ assert len(cur_node_orig.users.keys()) == 1, \
420
+ f'{cur_node_orig} has more than 1 user, not supported yet'
421
+ cur_node_orig = next(iter(cur_node_orig.users.keys()))
422
+ cur_args_orig = cur_node_orig.args
423
+ cur_kwargs_orig = cur_node_orig.kwargs
424
+
425
+ cur_iteration += 1
426
+ if cur_iteration > iteration_limit:
427
+ raise AssertionError('iteration limit exceeded')
428
+
429
+ # set up outputs
430
+ g.output(cur_node_copy)
431
+
432
+ gm.recompile()
433
+ return gm
434
+
435
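# A minimal sketch (not part of the upstream diff): extracting a two-op linear chain
# into a standalone GraphModule. The toy model below is hypothetical.
import torch
import torch.fx

class _SigmoidRelu(torch.nn.Module):
    def forward(self, x):
        return torch.relu(torch.sigmoid(x))

_gm = torch.fx.symbolic_trace(_SigmoidRelu())
_fn_nodes = [n for n in _gm.graph.nodes if n.op == 'call_function']
_sub = create_submodule_from_subgraph(_gm, _fn_nodes[0], _fn_nodes[-1])
# _sub now has a placeholder feeding a copy of sigmoid -> relu, so it can be
# prepared/converted with its own qconfig independently of the parent graph
print(_sub.graph)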
+ def create_one_transformed_and_logged_copy_of_subgraph(
436
+ mt: GraphModule,
437
+ subgraph_idx: int,
438
+ subgraph_candidate_idx: int,
439
+ first_node: Node,
440
+ last_node: Node,
441
+ fqn: Optional[str],
442
+ list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]],
443
+ example_inputs: Any,
444
+ last_added_shadow_node_list: List[Optional[Node]],
445
+ custom_prepare_fn: Optional[Callable] = None,
446
+ custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
447
+ ) -> None:
448
+ """
449
+ Given a subgraph in `mt` and a subgraph candidate idx, inserts the
450
+ subgraph candidate copy and instruments it with loggers.
451
+
452
+ If subgraph_candidate_idx is 0, this is the baseline fp32 subgraph and we just
453
+ add a logger to the end.
454
+
455
+ If subgraph_candidate_idx is not 0, we create a copy of the subgraph and
456
+ prepare it with `prepare_fx`.
457
+ """
458
+
459
+ # TODO(future PR): move logger classes to utils to remove circular dependency
460
+ from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger
461
+
462
+ if subgraph_candidate_idx == 0:
463
+ # idx = 0 is the floating point (original) version of the subgraph
464
+ # We keep the subgraph as is, and add a logger at the end
465
+
466
+ qconfig_str = ''
467
+ logger_mod_orig = _get_logger_for_subgraph(
468
+ mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx,
469
+ qconfig_str, OutputLogger, fqn)
470
+
471
+ attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx)
472
+ assert not hasattr(mt, attr_name)
473
+ setattr(mt, attr_name, logger_mod_orig)
474
+ with mt.graph.inserting_after(last_node):
475
+ new_node = mt.graph.call_module(attr_name, args=(last_node,), kwargs={})
476
+ last_added_shadow_node_list[0] = new_node
477
+
478
+ else:
479
+ # idx > 0 means we have a candidate qconfig to try, so we need
480
+ # to make a copy of the subgraph, feed it with the right inputs,
481
+ # and add a logger at the end
482
+
483
+ # get the qconfig
484
+ # subtract one because the first candidate is the floating point
485
+ # version of the subgraph
486
+ node_name_to_qconfig = \
487
+ list_of_node_name_to_qconfig[subgraph_candidate_idx - 1]
488
+ qconfig = node_name_to_qconfig[first_node.name]
489
+
490
+ # if no quantization is requested, skip
491
+ # TODO(future PR): deduplicate equivalent qconfigs that come from
492
+ # different qconfig mapping objects
493
+ if qconfig is None:
494
+ return
495
+
496
+ qconfig_mapping = QConfigMapping().set_global(qconfig)
497
+
498
+ # create a copy of the submodule, wrapped in a separate module
499
+ orig_mod_copy_wrapped = create_submodule_from_subgraph(
500
+ mt, first_node, last_node)
501
+
502
+ # add a call to prepare_fx on the wrapper module
503
+ if custom_prepare_fn is None:
504
+ orig_mod_copy_wrapped = torch.ao.quantization.quantize_fx.prepare_fx(
505
+ orig_mod_copy_wrapped, qconfig_mapping, example_inputs=example_inputs)
506
+ else:
507
+ if custom_prepare_kwargs is None:
508
+ custom_prepare_kwargs = {}
509
+ for kwarg_name in ["example_inputs", "prepare_custom_config", "qconfig_mapping"]:
510
+ assert kwarg_name not in custom_prepare_kwargs, f"cannot specify {kwarg_name} in custom_prepare_kwargs"
511
+ prepare_kwargs: Dict[str, Any] = {
512
+ "example_inputs": example_inputs,
513
+ "qconfig_mapping": qconfig_mapping
514
+ }
515
+ prepare_kwargs.update(custom_prepare_kwargs)
516
+ orig_mod_copy_wrapped = custom_prepare_fn(
517
+ orig_mod_copy_wrapped,
518
+ **prepare_kwargs)
519
+
520
+ # attach the wrapper to the model
521
+ attr_name = _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx)
522
+ assert not hasattr(mt, attr_name)
523
+ setattr(mt, attr_name, orig_mod_copy_wrapped)
524
+
525
+ # add a call to the wrapper module from the parent graph
526
+ insert_after_node = last_added_shadow_node_list[0]
527
+ with mt.graph.inserting_after(insert_after_node):
528
+ # TODO(future PR): handle fusion patterns where non-first nodes
529
+ # need inputs
530
+
531
+ # pass in all node args and kwargs
532
+
533
+ new_args = []
534
+ for arg in first_node.args:
535
+ if isinstance(arg, Node):
536
+ new_args.append(arg)
537
+ elif isinstance(arg, (list, tuple)) and len(arg) and isinstance(arg[0], Node):
538
+ for inner_arg in arg:
539
+ if isinstance(inner_arg, Node):
540
+ new_args.append(inner_arg)
541
+
542
+ new_kwargs = {}
543
+ for name, old_kwarg in first_node.kwargs.items():
544
+ if isinstance(old_kwarg, Node):
545
+ new_kwargs[name] = old_kwarg
546
+ elif isinstance(old_kwarg, (list, tuple)) and len(old_kwarg):
547
+ # TODO(future PR): clarify why we are adding kwargs to args
548
+ new_args.extend(old_kwarg)
549
+
550
+ new_args = tuple(new_args) # type: ignore[assignment]
551
+
552
+ new_node = mt.graph.call_module(
553
+ attr_name, args=new_args, kwargs=new_kwargs)
554
+
555
+ # add a logger to parent graph to observe the shadow wrapper
556
+ logger_mod_orig = _get_logger_for_subgraph(
557
+ mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx,
558
+ str(qconfig), OutputComparisonLogger, fqn)
559
+
560
+ attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx)
561
+ assert not hasattr(mt, attr_name)
562
+ setattr(mt, attr_name, logger_mod_orig)
563
+ with mt.graph.inserting_after(new_node):
564
+ logger = mt.graph.call_module(attr_name, args=(new_node, last_node), kwargs={})
565
+ last_added_shadow_node_list[0] = logger
566
+
567
+ mt.recompile()
568
+
569
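# A minimal sketch (not part of the upstream diff) of the attribute-naming scheme the
# functions above rely on: candidate 0 is the unquantized baseline (logger only), and
# candidates 1..N correspond to qconfig_mappings[0..N-1] (shadow wrapper + comparison logger).
for _candidate_idx in range(3):
    print(_get_attr_name(7, _candidate_idx))                # shadow_7_0, shadow_7_1, shadow_7_2
    if _candidate_idx > 0:
        print(_get_attr_wrapper_name(7, _candidate_idx))    # shadow_wrapper_7_1, shadow_wrapper_7_2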
+ def create_n_transformed_and_logged_copies_of_subgraph(
570
+ mt: GraphModule,
571
+ subgraph_idx: int,
572
+ match_name: str,
573
+ nodes_in_this_subgraph: List[Any],
574
+ qconfig_mappings: List[QConfigMapping],
575
+ list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]],
576
+ custom_prepare_fn: Optional[Callable] = None,
577
+ custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
578
+ ) -> None:
579
+ """
580
+ Given a model `mt` and a subgraph_idx, creates the needed copies
581
+ of the subgraph for all qconfigs, and instruments them with loggers.
582
+ """
583
+ # for now, assume that
584
+ # 1. the first node has one input
585
+ # 2. the last node has one output
586
+
587
+ # for now, ignore all subgraphs that contain non-nodes (tuples, etc)
588
+ # TODO(future PR): implement this
589
+ if any(
590
+ not isinstance(node, Node)
591
+ for node in nodes_in_this_subgraph
592
+ ):
593
+ return
594
+
595
+ first_node = nodes_in_this_subgraph[0]
596
+ last_node = nodes_in_this_subgraph[-1]
597
+ # We used output propagation to populate example values on each
598
+ # node. Use the example values from the previous node as the input
599
+ # to the current node.
600
+ prev_node = get_normalized_nth_input(first_node, mt, 0)
601
+ if isinstance(prev_node, list):
602
+ example_inputs = [x.traced_result for x in prev_node]
603
+ elif isinstance(prev_node, tuple):
604
+ example_inputs = (x.traced_result for x in prev_node) # type: ignore[assignment]
605
+ else:
606
+ # currently some customer models do not have a traced_result in
607
+ # every node, so we have to guard for this case since we cannot
608
+ # quantize without an example input
609
+ # TODO(future PR): add a test case for this once we have an easy
610
+ # repro, see https://github.com/pytorch/pytorch/pull/80521/files#r975940489
611
+ # for additional context
612
+ if hasattr(prev_node, 'traced_result'):
613
+ example_inputs = (prev_node.traced_result,) # type: ignore[attr-defined, assignment]
614
+ else:
615
+ print(
616
+ 'unable to get example input for node ' +
617
+ f'{first_node.format_node()}, skipping')
618
+ return
619
+
620
+ # If there are no quantization configs for this subgraph, skip adding
621
+ # loggers. This reduces memory usage for models where not all layers are
622
+ # quantized.
623
+ # TODO(future): consider making this configurable
624
+ found_at_least_one_qconfig = False
625
+ for subgraph_candidate_idx in range(len(qconfig_mappings) + 1):
626
+
627
+ if subgraph_candidate_idx == 0:
628
+ # fp32 baseline does not need a qconfig
629
+ continue
630
+
631
+ # a. we have N shadows, so len(qconfig_mappings) is N
632
+ # b. we will have the fp32 layer + N shadows, so overall number of
633
+ # (original_op) + (*shadows) will be N+1
634
+ # c. since `subgraph_candidate_idx` represents (b), we need
635
+ # to subtract 1 to query from (a)
636
+ node_name_to_qconfig = \
637
+ list_of_node_name_to_qconfig[subgraph_candidate_idx - 1]
638
+ qconfig = node_name_to_qconfig[first_node.name]
639
+ if qconfig is not None:
640
+ found_at_least_one_qconfig = True
641
+ break
642
+ if not found_at_least_one_qconfig:
643
+ print('unable to find at least one qconfig for node ' +
644
+ f'{first_node.format_node()}, skipping')
645
+ return
646
+
647
+ fqn = _maybe_get_fqn(first_node, mt)
648
+
649
+ # We want the results to contain the subgraphs in natural order,
650
+ # and the graph to also contain shadow wrappers and shadow loggers
651
+ # in natural order.
652
+ # If we just iterate in reverse, the graph will be in natural
653
+ # order but the eventual results will be in reverse order.
654
+ # So, we keep track of the last shadow logger we added and
655
+ # always insert after it.
656
+ last_added_shadow_node_list: List[Optional[Node]] = [None]
657
+ for subgraph_candidate_idx in range(len(qconfig_mappings) + 1):
658
+
659
+ create_one_transformed_and_logged_copy_of_subgraph(
660
+ mt, subgraph_idx, subgraph_candidate_idx, first_node,
661
+ last_node, fqn, list_of_node_name_to_qconfig,
662
+ example_inputs, last_added_shadow_node_list, custom_prepare_fn,
663
+ custom_prepare_kwargs)
664
+
665
+ def create_add_loggers_graph(
666
+ model: GraphModule,
667
+ subgraphs_dedup: Dict[str, List[Node]],
668
+ qconfig_mapping: QConfigMapping,
669
+ node_name_to_qconfig: Dict[str, QConfigAny],
670
+ ) -> None:
671
+ r"""
672
+ Given a model, a model graph partition (currently a set of matched
673
+ subgraphs) and instructions how to transform each subgraph
674
+ (currently quantizing it according to qconfig_mapping), modifies
675
+ the model graph to create an alternate path through the original graph,
676
+ with each of the subgraphs quantized. This is useful to compare
677
+ propagation error of a transformation such as quantization.
678
+
679
+ For example, given layer op0 and op1, there are four cases when handling op1:
680
+ 1. op0 and op1 quantized
681
+ 2. op0 and op1 unquantized
682
+ 3. op0 quantized, op1 unquantized
683
+ 4. op0 unquantized, op1 quantized
684
+
685
+ Example input, case 1:
686
+
687
+ .. code::
688
+
689
+ x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log
690
+ \ \ \ \ # noqa: W605
691
+ ---> op0_1 -> x1_1 ----> clog op1_1 -> x2_1 ----> clog
692
+
693
+ Example output, case 1:
694
+
695
+ .. code::
696
+
697
+ x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log
698
+ \ \ \ # noqa: W605
699
+ ---> op0_1 -> x1_1 ----> clog -> op1_1 -> x2_1 ----> clog
700
+
701
+ """
702
+ # TODO(future PR): move logger classes to utils to remove circular dependency
703
+ from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger
704
+
705
+ def _get_subgraph_containing_node(node, subgraphs_dedup):
706
+ for subgraph in subgraphs_dedup.values():
707
+ if node in subgraph:
708
+ return subgraph
709
+ return None
710
+
711
+ # First, we need to create shadow branches, going from
712
+ #
713
+ # x0 -> op0 -> x1 -> ...
714
+ #
715
+ #
716
+ # to
717
+ #
718
+ # x0 -> op0_0 -> x1_0 -> log -> ...
719
+ # \ \
720
+ # -> op0_1 -> x1_1 -> clog
721
+ #
722
+ # Later, the outputs of each shadow will be rerouted to calculate
723
+ # propagation error.
724
+
725
+ # Note: we cannot iterate over matched subgraphs because some nodes
726
+ # may not be matched. So, we iterate over nodes in the graph, and
727
+ # associate them to matched subgraphs if possible.
728
+
729
+ nodes_to_skip = set()
730
+ # for each subgraph, save a mapping from first node of subgraph
731
+ # to first and last node of the shadow of this subgraph
732
+ orig_first_node_to_shadow_in_node = {}
733
+ orig_first_node_to_shadow_out_node = {}
734
+ # need to record original list because we will mutate the graph as we go
735
+ orig_nodes = list(model.graph.nodes) # type: ignore[union-attr, arg-type]
736
+ cur_subgraph_idx = 0
737
+ for n in orig_nodes:
738
+ if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip:
739
+ continue
740
+
741
+ maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup)
742
+ insert_submodule_copy = False
743
+ if maybe_subgraph is not None:
744
+ first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1]
745
+ for node_to_skip in maybe_subgraph:
746
+ nodes_to_skip.add(node_to_skip)
747
+ qconfig = node_name_to_qconfig[first_node.name]
748
+ if qconfig is not None:
749
+ insert_submodule_copy = True
750
+ else:
751
+ first_node, last_node = n, n
752
+
753
+ if insert_submodule_copy:
754
+ match_name = first_node.name
755
+ create_n_transformed_and_logged_copies_of_subgraph(
756
+ model, cur_subgraph_idx, match_name, maybe_subgraph,
757
+ [qconfig_mapping], [node_name_to_qconfig],
758
+ None, None # type: ignore[arg-type]
759
+ )
760
+ # find the created shadow module and record it so we
761
+ # can find it easily in step 2
762
+ expected_shadow_target = f"shadow_wrapper_{cur_subgraph_idx}_1"
763
+ new_shadow_mod = None
764
+ for maybe_shadow_mod in model.graph.nodes:
765
+ if maybe_shadow_mod.op == 'call_module' and \
766
+ maybe_shadow_mod.target == expected_shadow_target:
767
+ new_shadow_mod = maybe_shadow_mod
768
+ break
769
+ assert new_shadow_mod is not None
770
+ orig_first_node_to_shadow_in_node[first_node] = new_shadow_mod
771
+ orig_first_node_to_shadow_out_node[first_node] = new_shadow_mod
772
+
773
+ else:
774
+ # create a copy of the subgraph by only copying FX nodes
775
+ # but not copying any parameters, to minimize memory usage
776
+ subgraph_to_use = maybe_subgraph if maybe_subgraph is not None \
777
+ else [first_node]
778
+
779
+ # add a regular logger after last_node
780
+ qconfig_str = ''
781
+ subgraph_candidate_idx = 0
782
+ fqn = _maybe_get_fqn(first_node, model)
783
+ logger_mod_orig = _get_logger_for_subgraph(
784
+ model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx,
785
+ qconfig_str, OutputLogger, fqn)
786
+ attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx)
787
+ assert not hasattr(model, attr_name)
788
+ setattr(model, attr_name, logger_mod_orig)
789
+ insertion_point = last_node
790
+ with model.graph.inserting_after(insertion_point):
791
+ logger = model.graph.call_module(
792
+ attr_name, args=(last_node,), kwargs={})
793
+ insertion_point = logger
794
+
795
+ # create a copy of the subgraph
796
+ cur_node_orig = first_node
797
+ cur_node_copy = None
798
+ first_node_copy = None
799
+ while cur_node_orig in subgraph_to_use:
800
+ # TODO(future PR): make this support all possible args/kwargs
801
+ if cur_node_orig is first_node:
802
+ new_args = cur_node_orig.args
803
+ new_kwargs = cur_node_orig.kwargs
804
+ else:
805
+ first_arg_for_copy = cur_node_copy
806
+ new_args = tuple([first_arg_for_copy, *cur_node_orig.args[1:]]) # noqa: C409
807
+ new_kwargs = cur_node_orig.kwargs
808
+ # make a copy of cur_node_orig
809
+ with model.graph.inserting_after(insertion_point):
810
+ cur_node_copy = model.graph.create_node(
811
+ cur_node_orig.op,
812
+ cur_node_orig.target,
813
+ new_args,
814
+ new_kwargs,
815
+ # cur_node_orig.name, # TODO(future PR): set name explicitly
816
+ )
817
+ if first_node_copy is None:
818
+ first_node_copy = cur_node_copy
819
+ # since now only linear subgraphs are supported, all nodes
820
+ # except the last one must have only one user
821
+ if cur_node_orig != last_node:
822
+ assert len(cur_node_orig.users.keys()) == 1
823
+ cur_node_orig = next(iter(cur_node_orig.users.keys()))
824
+ assert not cur_node_orig.name.startswith(SHADOW_NODE_NAME_PREFIX)
825
+ insertion_point = cur_node_copy
826
+
827
+ # add a comparison logger after last_node's copy
828
+ subgraph_candidate_idx = 1
829
+ logger_mod_orig = _get_logger_for_subgraph(
830
+ model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx,
831
+ qconfig_str, OutputComparisonLogger, fqn)
832
+ attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx)
833
+ assert not hasattr(model, attr_name)
834
+ setattr(model, attr_name, logger_mod_orig)
835
+ with model.graph.inserting_after(insertion_point):
836
+ logger = model.graph.call_module(
837
+ attr_name, args=(cur_node_copy, last_node), kwargs={})
838
+
839
+ # save the final node so we can use it in step 2
840
+ orig_first_node_to_shadow_in_node[first_node] = first_node_copy
841
+ orig_first_node_to_shadow_out_node[first_node] = cur_node_copy
842
+
843
+ cur_subgraph_idx += 1
844
+
845
+ model.recompile()
846
+
847
+ # Now, we go from
848
+ #
849
+ # x0 -> op0_0 -> x1_0 -> log -> x1 -> op1_0 -> ...
850
+ # \ \ \
851
+ # -> op0_1 -> x1_1 -> clog -> op1_1 -> ...
852
+ #
853
+ # to
854
+ #
855
+ # x0 -> op0_0 -> x1_0 -> log --> x1_0 -> op1_0 -> ...
856
+ # \ \
857
+ # -> op0_1 -> x1_1 -> clog -> x1_1 -> op1_1 -> ...
858
+ #
859
+ # sample values of key internal variables for the example above:
860
+ #
861
+ # orig_first_node_to_shadow_in_node = {op0_0: op0_1, op1_0: op1_1}
862
+ # orig_first_node_to_shadow_out_node = {op0_0: op0_1, op1_0: op1_1}
863
+ #
864
+ # note: for subgraphs with more than one node, in_node will be different
865
+ # compared to out_node
866
+
867
+
868
+ nodes_to_skip = set()
869
+ for n in orig_nodes:
870
+ if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip:
871
+ continue
872
+
873
+ maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup)
874
+ if maybe_subgraph is not None:
875
+ first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1]
876
+ for node_to_skip in maybe_subgraph:
877
+ nodes_to_skip.add(node_to_skip)
878
+ else:
879
+ first_node, last_node = n, n
880
+
881
+ def maybe_remap_node_to_shadow(node):
882
+ """
883
+ If unshadowed `node` has a shadow version, return that. If not,
884
+ return `node`.
885
+ """
886
+ if not isinstance(node, Node):
887
+ # handle scalars
888
+ return node
889
+
890
+ if node.op in ('placeholder', 'get_attr'):
891
+ return node
892
+
893
+ # Find the shadowed version of this arg from the previous
894
+ # subgraph. For this, we need to:
895
+ # 1. navigate to the first node of the previous subgraph
896
+ # 2. get the output of the shadow wrapper which has (1) as an input
897
+
898
+ # For now, assume the arg is in matched subgraphs. In the
899
+ # future we may have to handle the case where this is not true.
900
+ prev_subgraph = _get_subgraph_containing_node(
901
+ node, subgraphs_dedup)
902
+ if prev_subgraph is None:
903
+ prev_subgraph = [node]
904
+ prev_first_node = prev_subgraph[0]
905
+ prev_shadow_output = \
906
+ orig_first_node_to_shadow_out_node[prev_first_node]
907
+ return prev_shadow_output
908
+
909
+ cur_shadow_input = \
910
+ orig_first_node_to_shadow_in_node[first_node]
911
+ assert cur_shadow_input is not None
912
+ cur_shadow_input.args = tree_map(
913
+ maybe_remap_node_to_shadow, cur_shadow_input.args)
914
+ cur_shadow_input.kwargs = tree_map(
915
+ maybe_remap_node_to_shadow, cur_shadow_input.kwargs)
916
+
917
+ model.recompile()
918
+
919
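# A minimal sketch (not part of the upstream diff) of wiring this pass together; the
# `matches`, `qconfig_mapping` and `node_name_to_qconfig` inputs are assumed to come
# from the FX quantization prepare flow and are hypothetical here.
def _sketch_add_loggers_flow(model, matches, qconfig_mapping, node_name_to_qconfig, example_inputs):
    # populate node.traced_result so shadow copies can be prepared with real example inputs
    OutputProp(model).propagate(*example_inputs)
    subgraphs_dedup = _get_dedup_subgraphs(matches)
    create_add_loggers_graph(model, subgraphs_dedup, qconfig_mapping, node_name_to_qconfig)
    # note: loggers are created disabled; the calling API is expected to enable them
    # before running calibration data through the model
    return model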
+ def _get_weight_info_from_shadow_wrapper(shadow_wrapper: torch.nn.Module):
920
+ # input: shadow wrapper module
921
+ # output if shadow wrapper module has a weighted op:
922
+ # (quantize_fn, (quantize_fn_args))
923
+ # output if shadow wrapper module doesn't have a weighted op:
924
+ # None
925
+
926
+ # For now, assume that the weight is the second input
927
+ # to the shadow module. If that changes, we can fix it later.
928
+ placeholders_seen = 0
929
+ for shadow_n in shadow_wrapper.graph.nodes: # type: ignore[union-attr]
930
+ if shadow_n.op != 'placeholder':
931
+ continue
932
+
933
+ placeholders_seen += 1
934
+ if placeholders_seen != 2:
935
+ continue
936
+
937
+ # the subgraph looks like
938
+ #
939
+ # _input_scale_1 = self._input_scale_1
940
+ # _input_zero_point_1 = self._input_zero_point_1
941
+ # quantize_per_channel = torch.quantize_per_channel(
942
+ # w2_0, _input_scale_1, _input_zero_point_1,
943
+ # 0, torch.qint8)
944
+ #
945
+ # we have `w2_0`, and are navigating this subgraph
946
+ # to get `_input_scale_1` and `_input_zero_point_1`
947
+
948
+ assert len(shadow_n.users) == 1
949
+ quant_node = next(iter(shadow_n.users.keys()))
950
+ new_args: Any = None
951
+ if quant_node.target == torch.quantize_per_channel:
952
+ _weight, scale_node, zp_node, axis, dtype = quant_node.args
953
+ scale_val = getattr_from_fqn(
954
+ shadow_wrapper, scale_node.target)
955
+ zp_val = getattr_from_fqn(
956
+ shadow_wrapper, zp_node.target)
957
+ new_args = (scale_val, zp_val, axis, dtype)
958
+ else:
959
+ assert quant_node.target == torch.quantize_per_tensor
960
+ _weight, scale_node, zp_node, dtype = quant_node.args
961
+ scale_val = getattr_from_fqn(
962
+ shadow_wrapper, scale_node.target)
963
+ zp_val = getattr_from_fqn(
964
+ shadow_wrapper, zp_node.target)
965
+ new_args = (scale_val, zp_val, dtype)
966
+ return (quant_node.target, new_args)
967
+
968
+ return None
969
+
970
+
971
+ def extract_weight_comparison(m: GraphModule) -> NSResultsType:
972
+
973
+ # example graph:
974
+ #
975
+ # w1 = self.w1
976
+ # b1 = self.b1
977
+ # linear = torch._C._nn.linear(x, w1, b1)
978
+ # shadow_0_0 = self.shadow_0_0(linear)
979
+ # shadow_wrapper_0_1 = self.shadow_wrapper_0_1(x, w1, b1)
980
+ # shadow_0_1 = self.shadow_0_1(shadow_wrapper_0_1, linear)
981
+ #
982
+ # algorithm:
983
+ # 1. for each call_function node matching our allowlist:
984
+ # 2. if corresponding shadow wrapper exists, extract the weight pair
985
+ #
986
+ # Note: this is not super robust, but that's ok because this is
987
+ # just for legacy customers who depend on the previous two-model version
988
+ # of this API. TBD if we need to make this robust.
989
+ # Note: modules are not supported, since existing customers only
990
+ # use functions.
991
+
992
+ # TODO(future PR): move this to config
993
+ weighted_ops = {
994
+ torch.nn.functional.linear,
995
+ }
996
+
997
+ results: NSResultsType = {
998
+ 'model': {NSSingleResultValuesType.WEIGHT.value: {}}
999
+ }
1000
+
1001
+ for n in m.graph.nodes: # type: ignore[union-attr]
1002
+ if not (n.op == 'call_function' and n.target in weighted_ops):
1003
+ continue
1004
+
1005
+ # Check if we have a corresponding shadow wrapper
1006
+ # TODO(future PR, if needed): support kwargs
1007
+ # TODO(future PR, if needed): support multiple shadow users
1008
+ first_arg = n.args[0]
1009
+ shadow_wrapper_node = None
1010
+ for user in first_arg.users:
1011
+ # TODO(before land): fix string match
1012
+ if user.op == 'call_module' and \
1013
+ user.target.startswith('shadow_wrapper'):
1014
+ shadow_wrapper_node = user
1015
+ break
1016
+
1017
+ if shadow_wrapper_node is None:
1018
+ continue
1019
+
1020
+ shadow_wrapper = getattr_from_fqn(
1021
+ m, shadow_wrapper_node.target) # type: ignore[arg-type]
1022
+ weight_info = _get_weight_info_from_shadow_wrapper(
1023
+ shadow_wrapper)
1024
+ if weight_info is None:
1025
+ continue
1026
+
1027
+ # get weight
1028
+ w_node = n.args[1]
1029
+ w_obj = getattr_from_fqn(m, w_node.target).detach()
1030
+
1031
+ # get a quantized version of weight
1032
+ quant_fn, quant_fn_args_except_first = weight_info
1033
+ new_args = (w_obj, *quant_fn_args_except_first)
1034
+ w_obj_q = quant_fn(*new_args)
1035
+
1036
+ # add a comparison
1037
+ ref_node_name = n.name
1038
+ prev_node_name = n.name
1039
+ ref_node_type = get_target_type_str(n, m)
1040
+ prev_node_type = ref_node_type
1041
+ fqn = None
1042
+ if hasattr(m, '_node_name_to_scope'):
1043
+ fqn = m._node_name_to_scope[n.name][0] # type: ignore[index]
1044
+ comparison = torch.ao.ns.fx.utils.compute_sqnr(w_obj, w_obj_q)
1045
+ result_fp32 = {
1046
+ 'res_type': NSSingleResultValuesType.WEIGHT.value,
1047
+ 'values': [w_obj],
1048
+ 'prev_node_name': prev_node_name,
1049
+ 'prev_node_target_type': prev_node_type,
1050
+ 'ref_node_name': ref_node_name,
1051
+ 'ref_node_target_type': ref_node_type,
1052
+ 'index_within_arg': 0,
1053
+ 'index_of_arg': 0,
1054
+ 'fqn': fqn,
1055
+ 'qconfig_str': '',
1056
+ 'comparisons': [comparison],
1057
+ 'comparison_fn_name': 'sqnr',
1058
+ }
1059
+ result_q = {
1060
+ 'res_type': NSSingleResultValuesType.WEIGHT.value,
1061
+ 'values': [w_obj_q],
1062
+ 'prev_node_name': prev_node_name,
1063
+ 'prev_node_target_type': prev_node_type,
1064
+ 'ref_node_name': ref_node_name,
1065
+ 'ref_node_target_type': ref_node_type,
1066
+ 'index_within_arg': 0,
1067
+ 'index_of_arg': 0,
1068
+ 'fqn': fqn,
1069
+ 'qconfig_str': '',
1070
+ 'comparisons': [comparison],
1071
+ 'comparison_fn_name': 'sqnr',
1072
+ }
1073
+
1074
+ # go from subgraph_n_1 to subgraph_n_0
1075
+ _1, _2, node_idx, _3 = shadow_wrapper_node.target.split('_')
1076
+ name_fp32 = f"subgraph_{node_idx}_0"
1077
+ name_q = f"subgraph_{node_idx}_1"
1078
+
1079
+ results['model'][NSSingleResultValuesType.WEIGHT.value][name_fp32] = \
1080
+ [result_fp32]
1081
+ results['model'][NSSingleResultValuesType.WEIGHT.value][name_q] = \
1082
+ [result_q]
1083
+
1084
+ return results
1085
+
1086
+ # TODO(future PR): redesign this to make it easier to consume outputs
1087
+ def group_results_by_subgraph(results: NSResultsType) -> Any:
1088
+ """
1089
+ Creates a comparison of results
1090
+
1091
+ Input:
1092
+
1093
+ {
1094
+ 'model': {
1095
+ 'node_output': {
1096
+ 'subgraph_0_0': [
1097
+ 'values': [torch.tensor(...), ...], ...
1098
+ 'ref_node_name': ...,
1099
+ 'ref_node_target_type': ...,
1100
+ 'qconfig_str': ...,
1101
+ 'comparisons': [], ...
1102
+ 'comparison_fn_name': '',
1103
+ 'fqn': '...',
1104
+ ],
1105
+ 'subgraph_0_1': [
1106
+ 'values': [torch.tensor(...), ...], ...
1107
+ 'ref_node_name': ...,
1108
+ 'ref_node_target_type': ...,
1109
+ 'qconfig_str': ...,
1110
+ 'comparisons': [torch.tensor(...), ...], ...
1111
+ 'comparison_fn_name': '...',
1112
+ 'fqn': '...',
1113
+ ],
1114
+ ...
1115
+ },
1116
+ },
1117
+ }
1118
+
1119
+ Output:
1120
+ {
1121
+ 'subgraph_0': {
1122
+ '0': {
1123
+ 'ref_node_name': '...',
1124
+ 'ref_node_target_type': ...,
1125
+ 'values': [torch.tensor(...), ...],
1126
+ 'qconfig_str': None,
1127
+ 'comparisons': [torch.tensor(...), ...], ...
1128
+ 'comparison_fn_name': '...',
1129
+ 'fqn': '...',
1130
+ },
1131
+ '1': {
1132
+ 'ref_node_name': '...',
1133
+ 'ref_node_target_type': ...,
1134
+ 'values': [torch.tensor(...), ...],
1135
+ 'qconfig_str': '...',
1136
+ 'comparisons': [torch.tensor(...), ...], ...
1137
+ 'comparison_fn_name': '...',
1138
+ 'fqn': '...',
1139
+ },
1140
+ },
1141
+ }
1142
+
1143
+ """
1144
+ subgraph_name_to_subgraph_results: Any = collections.defaultdict(dict)
1145
+
1146
+ # node_output or weight
1147
+ key_to_use = next(iter(results['model'].keys()))
1148
+
1149
+ for subgraph_name_with_idx, subgraph_candidate_results in \
1150
+ results['model'][key_to_use].items():
1151
+
1152
+ # convert from `subgraph_m_n` to `subgraph_m` and `n`
1153
+ subgraph_str, subgraph_idx, subgraph_candidate_idx = \
1154
+ subgraph_name_with_idx.split('_')
1155
+ subgraph_name = f'{subgraph_str}_{subgraph_idx}'
1156
+
1157
+ subgraph_results = {
1158
+ 'ref_node_name': subgraph_candidate_results[0]['ref_node_name'],
1159
+ 'ref_node_target_type': subgraph_candidate_results[0]['ref_node_target_type'],
1160
+ 'fqn': subgraph_candidate_results[0]['fqn'],
1161
+ 'values': subgraph_candidate_results[0]['values'],
1162
+ 'qconfig_str': subgraph_candidate_results[0]['qconfig_str'],
1163
+ 'comparisons': subgraph_candidate_results[0]['comparisons'],
1164
+ 'comparison_fn_name': subgraph_candidate_results[0]['comparison_fn_name'],
1165
+ }
1166
+
1167
+ subgraph_name_to_subgraph_results[subgraph_name][subgraph_candidate_idx] = \
1168
+ subgraph_results
1169
+
1170
+ return dict(subgraph_name_to_subgraph_results)
1171
+
1172
+ # TODO(future PR): redesign this to make it easier to consume outputs
1173
+ def create_results_comparison(
1174
+ results_grouped,
1175
+ ) -> Any:
1176
+ """
1177
+ Input:
1178
+
1179
+ {
1180
+ 'subgraph_0': {
1181
+ '0': {
1182
+ 'ref_node_name': '...',
1183
+ 'ref_node_target_type': ...,
1184
+ 'values': [torch.tensor(...), ...],
1185
+ 'qconfig_str': '',
1186
+ 'comparisons': [],
1187
+ 'comparison_fn_name': '',
1188
+ 'fqn': '...',
1189
+ },
1190
+ '1': {
1191
+ 'ref_node_name': '...',
1192
+ 'ref_node_target_type': ...,
1193
+ 'values': [torch.tensor(...), ...],
1194
+ 'qconfig_str': '...',
1195
+ 'comparisons': [torch.tensor(...), ...],
1196
+ 'comparison_fn_name': 'sqnr',
1197
+ 'fqn': '...',
1198
+ },
1199
+ },
1200
+ }
1201
+
1202
+ Output:
1203
+ {
1204
+ 'subgraph_0': {
1205
+ 'ref_node_name': '...',
1206
+ 'ref_node_target_type': '...',
1207
+ 'fqn': '...',
1208
+ 'candidates': {
1209
+ '1': {
1210
+ 'qconfig_str': ...,
1211
+ 'comparison_fn_name': 'sqnr',
1212
+ 'cmp_raw': [..., ...],
1213
+ 'cmp_mean': ...,
1214
+ },
1215
+ ...,
1216
+ },
1217
+ },
1218
+ }
1219
+ """
1220
+
1221
+ results_comparison = {}
1222
+
1223
+ for subgraph_name, subgraph_results in results_grouped.items():
1224
+
1225
+ candidates = {}
1226
+ for subgraph_inner_name, subgraph_inner_result in subgraph_results.items():
1227
+ # skip comparing baseline to baseline
1228
+ if subgraph_inner_name == '0':
1229
+ continue
1230
+
1231
+ # we expect the comparisons to be precalculated from
1232
+ # calibration, so we just fetch them here
1233
+ cmp_raw = subgraph_inner_result['comparisons']
1234
+ cmp_raw_tensor = torch.stack(cmp_raw)
1235
+
1236
+ candidates[subgraph_inner_name] = {
1237
+ 'qconfig_str': subgraph_inner_result['qconfig_str'],
1238
+ 'comparison_fn_name': subgraph_inner_result['comparison_fn_name'],
1239
+ 'cmp_raw': cmp_raw_tensor,
1240
+ 'cmp_mean': torch.mean(cmp_raw_tensor),
1241
+ }
1242
+
1243
+ results_comparison[subgraph_name] = {
1244
+ 'ref_node_name': subgraph_results['0']['ref_node_name'],
1245
+ 'ref_node_target_type': subgraph_results['0']['ref_node_target_type'],
1246
+ 'fqn': subgraph_results['0']['fqn'],
1247
+ 'candidates': candidates,
1248
+ }
1249
+
1250
+ return results_comparison
1251
+
1252
+ # TODO(future PR): redesign this to make it easier to consume outputs
1253
+ def print_n_shadows_summary(
1254
+ results_comparison,
1255
+ ) -> None:
1256
+ """
1257
+ Input:
1258
+
1259
+ {
1260
+ 'subgraph_0': {
1261
+ 'ref_node_name': 'linear1',
1262
+ 'ref_node_target_type': '...',
1263
+ 'fqn': '...',
1264
+ 'candidates': {
1265
+ '1': {
1266
+ 'qconfig_str': ...,
1267
+ 'comparison_fn_name': ...,
1268
+ 'cmp_raw': [45.0, 55.0],
1269
+ 'cmp_mean': 50.0,
1270
+ },
1271
+ ...,
1272
+ },
1273
+ },
1274
+ }
1275
+
1276
+ Prints:
1277
+
1278
+ node_name | node_type | fqn | 0 | 1 | ...
1279
+ linear1 | ... | ... | 45.0 | 50.0 | ...
1280
+ """
1281
+
1282
+ try:
1283
+ from tabulate import tabulate
1284
+ except ImportError:
1285
+ print("`print_tabular` relies on the library `tabulate`, "
1286
+ "which could not be found on this machine. Run `pip "
1287
+ "install tabulate` to install the library.")
1288
+ return
1289
+
1290
+ results = []
1291
+ for subgraph_data in results_comparison.values():
1292
+ mean_all_candidates = [
1293
+ candidate['cmp_mean']
1294
+ for candidate_name, candidate in subgraph_data['candidates'].items()
1295
+ ]
1296
+
1297
+ data_row = [
1298
+ subgraph_data['ref_node_name'],
1299
+ subgraph_data['ref_node_target_type'],
1300
+ subgraph_data['fqn'],
1301
+ *mean_all_candidates,
1302
+ ]
1303
+ results.append(data_row)
1304
+
1305
+ max_candidate_idx_len = -1
1306
+ for data_row in results:
1307
+ max_candidate_idx_len = max(max_candidate_idx_len, len(data_row[1]))
1308
+ candidate_idx_headers = [str(x) for x in range(max_candidate_idx_len)]
1309
+
1310
+ headers = ['node_name', 'node_type', 'fqn', *candidate_idx_headers]
1311
+ print(tabulate(results, headers=headers))
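# A minimal sketch (not part of the upstream diff) chaining the reporting helpers
# defined above; `results` is assumed to be an NSResultsType collected from the
# shadow loggers after calibration.
def _sketch_report(results):
    grouped = group_results_by_subgraph(results)       # subgraph_N -> candidate idx -> data
    comparison = create_results_comparison(grouped)    # per-candidate cmp_raw / cmp_mean
    print_n_shadows_summary(comparison)                # node_name | node_type | fqn | 0 | 1 | ...
    return comparison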
venv/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py ADDED
@@ -0,0 +1,64 @@
1
+ import enum
2
+ from typing import NamedTuple
3
+
4
+ from torch.fx.graph import Node
5
+
6
+ from typing import Dict, Any, List, Union, Callable
7
+
8
+ class NSSingleResultValuesType(str, enum.Enum):
9
+ WEIGHT = 'weight'
10
+ NODE_OUTPUT = 'node_output'
11
+ NODE_INPUT = 'node_input'
12
+
13
+ class NSSubgraph(NamedTuple):
14
+ start_node: Node
15
+ end_node: Node
16
+ base_op_node: Node
17
+
18
+ # TODO(future PR): see if we can use typing_extensions's TypedDict instead
19
+ # to properly type the various keys
20
+ # {
21
+ # # one of NSSingleResultValuesType
22
+ # 'type': 'weight',
23
+ # # the values of type specified above
24
+ # 'values': [torch.tensor(...), ...],
25
+ # # name of the node directly before the logger
26
+ # 'prev_node_name': 'linear1',
27
+ # # type of the underlying function or module
28
+ # 'prev_node_target_type': torch.nn.functional.linear # or torch.nn.Linear, etc
29
+ # # name of the node responsible for adding this logger
30
+ # # Note: this may differ from prev_node_name if we are logging inputs
31
+ # 'ref_node_name': 'linear1',
32
+ # # index of this node within the arg of the input/output node
33
+ # # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
34
+ # 'index_within_arg': 0,
35
+ # # index of this node within the args of the input/output node
36
+ # # for example, in add(x1, x2), x2 would have index_of_arg == 1
37
+ # 'index_of_arg': 0,
38
+ # # precomputed comparisons of logger values to reference values
39
+ # 'comparisons': [torch.tensor(...), ...]
40
+ # # name of function used for precomputed comparisons
41
+ # 'comparison_fn_name': 'sqnr',
42
+ # # string representation of qconfig responsible for creating this logger
43
+ # 'qconfig_str': 'QConfig(...)',
44
+ # }
45
+ NSSingleResultType = Dict[str, Any]
46
+
47
+ # {
48
+ # 'layer_name_1': { # subgraph name
49
+ # 'node_output': { # results type (node_output, node_input, weight)
50
+ # 'model_name_a': # model name
51
+ # [NSSingleResultType, ...], # results, ordered by index_within_arg
52
+ # 'model_name_b':
53
+ # [NSSingleResultType, ...],
54
+ # },
55
+ # },
56
+ # }
57
+ #
58
+ NSResultsType = Dict[str, Dict[str, Dict[str, List[NSSingleResultType]]]]
59
+
60
+ # Defines the underlying target type of a node, for example:
61
+ # `F.conv1d` for a `call_function` conv node
62
+ # `nn.Conv1d` for a `call_module` node calling the forward of a `nn.Conv1d` module
63
+ # `'sigmoid'` for a `call_method` node calling `x.sigmoid()`
64
+ NSNodeTargetType = Union[Callable, str]
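# A minimal sketch (not part of the upstream diff) of a value satisfying NSResultsType,
# following the commented schema above; all names are hypothetical placeholders.
_example_results: NSResultsType = {
    'layer_name_1': {               # subgraph name
        'node_output': {            # results type
            'model_name_a': [{
                'type': NSSingleResultValuesType.NODE_OUTPUT.value,
                'values': [],
                'prev_node_name': 'linear1',
                'prev_node_target_type': 'torch.nn.Linear',
                'ref_node_name': 'linear1',
                'index_within_arg': 0,
                'index_of_arg': 0,
                'comparisons': [],
                'comparison_fn_name': 'sqnr',
                'qconfig_str': '',
            }],
        },
    },
}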
venv/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py ADDED
@@ -0,0 +1,200 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ toq = torch.ops.quantized
5
+
6
+ from torch.fx import GraphModule
7
+ from torch.fx.graph import Node
8
+
9
+ from torch.ao.quantization.backend_config import get_native_backend_config
10
+ from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers
11
+ from torch.ao.quantization.utils import getattr_from_fqn
12
+ from .ns_types import NSNodeTargetType
13
+ from torch.ao.quantization import (
14
+ ObserverBase,
15
+ FakeQuantizeBase,
16
+ )
17
+
18
+ from typing import Dict, Tuple, Set, Callable, Any, Union, List
19
+
20
+
21
+ def get_type_a_related_to_b(
22
+ base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
23
+ ) -> Set[Tuple[NSNodeTargetType, NSNodeTargetType]]:
24
+ # TODO(future PR): allow customizations
25
+ # TODO(future PR): reuse existing quantization mappings
26
+ # TODO(future PR): add the rest of modules and ops here
27
+ type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]] = set()
28
+
29
+ for s in base_name_to_sets_of_related_ops.values():
30
+ s_list = list(s)
31
+ # add every bidirectional pair
32
+ for idx_0 in range(0, len(s_list)):
33
+ for idx_1 in range(idx_0, len(s_list)):
34
+ type_a_related_to_b.add((s_list[idx_0], s_list[idx_1]))
35
+ type_a_related_to_b.add((s_list[idx_1], s_list[idx_0]))
36
+
37
+ return type_a_related_to_b
38
+
39
+
40
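# A minimal sketch (not part of the upstream diff): for one related-ops set, the result
# contains every ordered pair, including self-pairs, so relatedness checks work in
# either direction between two models.
import torch.nn as nn
import torch.ao.nn.quantized as nnq

_related = get_type_a_related_to_b({'linear': {nn.Linear, nnq.Linear}})
assert (nn.Linear, nnq.Linear) in _related
assert (nnq.Linear, nn.Linear) in _related
assert (nn.Linear, nn.Linear) in _related  # idx_1 starts at idx_0, so self-pairs are included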
+ NSFusionElType = Union[
41
+ Callable, # call_function or call_module type, example: F.linear or nn.Conv2d
42
+ str, # call_method name, example: "dequantize"
43
+ Tuple[str, Any], # call_method name and first argument, example: ("to", torch.float16)
44
+ ]
45
+ NSFusionType = Union[
46
+ Tuple[NSFusionElType, NSFusionElType],
47
+ Tuple[NSFusionElType, NSFusionElType, NSFusionElType, NSFusionElType],
48
+ ]
49
+
50
+ def get_reversed_fusions() -> List[Tuple[NSFusionType, int]]:
51
+ """
52
+ Set of potential fusions, in reverse order. The order is reversed
53
+ to match how fusion patterns are defined in quantization code.
54
+
55
+ Fusion format:
56
+ ((fusion_op_0, fusion_op_1), base_op_idx)
57
+
58
+ Where base_op_idx is the idx of the op we should use to match other related
59
+ ops. Note: base_op_idx is specified in non-reverse order, i.e. a base_op_idx
60
+ of 0 represents the first op in regular (non-reverse) order, 1 represents the
61
+ second op, etc.
62
+ """
63
+ results: List[Tuple[NSFusionType, int]] = []
64
+
65
+ # Possible syntaxes:
66
+ # * single op: torch.nn.Conv2d
67
+ # * multiple ops: (torch.nn.ReLU, torch.nn.Conv2d)
68
+ # For fusions, we only care about patterns composed of multiple ops.
69
+ # TODO(future PR): allow customizations from default patterns.
70
+ all_quant_patterns = _get_pattern_to_quantize_handlers(get_native_backend_config())
71
+
72
+ default_base_op_idx = 0
73
+ for quant_pattern in all_quant_patterns.keys():
74
+ # TODO: this is a temporary hack to flatten the patterns from quantization so
75
+ # that it works with the ns matcher function, maybe we should use `_is_match`
76
+ # in torch.ao.quantization.fx.match_utils to match the patterns
77
+ if isinstance(quant_pattern, tuple) and len(quant_pattern) == 2 and \
78
+ isinstance(quant_pattern[1], tuple) and len(quant_pattern[1]) == 2:
79
+ # flatten the pattern with form (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
80
+ quant_pattern = (quant_pattern[0], quant_pattern[1][0], quant_pattern[1][1])
81
+
82
+ # Only patterns of multiple ops are fusions, ignore
83
+ # patterns which contain a single op (they get matched
84
+ # without caring about fusions).
85
+ if isinstance(quant_pattern, tuple):
86
+ results.append((quant_pattern, default_base_op_idx)) # type: ignore[arg-type]
87
+
88
+ # For each pattern, add additional patterns with observers and
89
+ # fake quants at the end.
90
+ # TODO(future PR): if needed, implement matching for a node
91
+ # having multiple output observers.
92
+ for cls in (ObserverBase, FakeQuantizeBase):
93
+ if isinstance(quant_pattern, tuple):
94
+ new_pattern = (cls, *quant_pattern)
95
+ else:
96
+ new_pattern = (cls, quant_pattern)
97
+ results.append((new_pattern, default_base_op_idx)) # type: ignore[arg-type]
98
+
99
+
100
+ # After this point, results contains values such as
101
+ # [..., ((torch.nn.ReLU, torch.nn.Conv2d), 0), ...]
102
+
103
+ # Patterns for matching fp16 emulation are not specified in the quantization
104
+ # fusion mappings. For now, define them here.
105
+ fp16_em_base_op_idx = 1
106
+ patterns_to_add = [
107
+ # linear-relu fp16 emulation:
108
+ # fp16_to_fp32 -> linear -> relu -> fp32_to_fp16
109
+ ((("to", torch.float16), F.relu, F.linear, "dequantize"), fp16_em_base_op_idx,),
110
+ # Conv-BN fusion (this happens outside of quantization patterns,
111
+ # which is why it is defined separately here).
112
+ ((nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
113
+ ((nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
114
+ ((nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
115
+ ((nn.ReLU, nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
116
+ ((nn.ReLU, nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
117
+ ((nn.ReLU, nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
118
+ ]
119
+ for p in patterns_to_add:
120
+ results.append(p) # type: ignore[arg-type]
121
+ results.append(((ObserverBase, *p[0]), p[1])) # type: ignore[arg-type]
122
+ results.append(((FakeQuantizeBase, *p[0]), p[1])) # type: ignore[arg-type]
123
+
124
+ return results
125
+
126
+
127
+ def end_node_matches_reversed_fusion(
128
+ end_node: Node,
129
+ reversed_fusion: NSFusionType,
130
+ gm: GraphModule,
131
+ seen_nodes: Set[Node],
132
+ ) -> bool:
133
+ """
134
+ Returns true if a pattern ending with `end_node` matches
135
+ the fusion pattern.
136
+ """
137
+ cur_node = end_node
138
+ for fusion_idx in range(len(reversed_fusion)):
139
+ # each node can only belong to one matched pattern
140
+ if cur_node in seen_nodes:
141
+ return False
142
+
143
+ cur_fusion_el = reversed_fusion[fusion_idx]
144
+
145
+ if cur_node.op == 'call_function':
146
+ fusion_el_is_fun = (not isinstance(cur_fusion_el, str)) and \
147
+ (not isinstance(cur_fusion_el, type))
148
+ if fusion_el_is_fun:
149
+ if cur_node.target != cur_fusion_el:
150
+ return False
151
+ if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
152
+ cur_node = cur_node.args[0]
153
+ else:
154
+ return False
155
+ else:
156
+ return False
157
+
158
+ elif cur_node.op == 'call_module':
159
+ fusion_el_is_mod = isinstance(cur_fusion_el, type)
160
+ if fusion_el_is_mod:
161
+ assert isinstance(cur_node.target, str)
162
+ target_mod = getattr_from_fqn(gm, cur_node.target)
163
+ if not isinstance(cur_fusion_el, type):
164
+ return False
165
+ if not isinstance(target_mod, cur_fusion_el):
166
+ return False
167
+ if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
168
+ cur_node = cur_node.args[0]
169
+ else:
170
+ return False
171
+ else:
172
+ return False
173
+
174
+ elif cur_node.op == 'call_method':
175
+ fusion_el_is_meth_with_second_arg = \
176
+ isinstance(cur_fusion_el, tuple) and len(cur_fusion_el) == 2
177
+ fusion_el_is_meth_without_args = isinstance(cur_fusion_el, str)
178
+ if fusion_el_is_meth_without_args or fusion_el_is_meth_with_second_arg:
179
+ if fusion_el_is_meth_without_args:
180
+ if cur_node.target != cur_fusion_el:
181
+ return False
182
+ else:
183
+ assert isinstance(cur_fusion_el, tuple)
184
+ if cur_node.target != cur_fusion_el[0]:
185
+ return False
186
+ elif len(cur_node.args) < 2:
187
+ return False
188
+ elif cur_node.args[1] != cur_fusion_el[1]:
189
+ return False
190
+
191
+ if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
192
+ cur_node = cur_node.args[0]
193
+ else:
194
+ return False
195
+ else:
196
+ return False
197
+ else:
198
+ return False
199
+
200
+ return True
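A minimal usage sketch of the helpers above (illustrative only, assuming the torch.ao.ns.fx.pattern_utils and torch.ao.ns.fx.mappings import paths shown in this commit):

    import torch
    from torch.ao.ns.fx.mappings import get_base_name_to_sets_of_related_ops
    from torch.ao.ns.fx.pattern_utils import (
        get_reversed_fusions,
        get_type_a_related_to_b,
    )

    # Each entry is (reversed_fusion_pattern, base_op_idx); patterns are stored
    # end-node-first, e.g. ((nn.ReLU, nn.Conv2d), 0) matches conv2d -> relu.
    for pattern, base_op_idx in get_reversed_fusions()[:5]:
        print(base_op_idx, pattern)

    # Build the symmetric "type A is related to type B" set from the default
    # related-ops mapping and query one pair.
    related = get_type_a_related_to_b(get_base_name_to_sets_of_related_ops())
    print((torch.nn.Conv2d, torch.nn.functional.conv2d) in related)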
venv/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py ADDED
@@ -0,0 +1,243 @@
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ from typing import Any, Callable, Dict, List, Union
5
+
6
+ import torch
7
+ from torch.ao.quantization import QConfigMapping
8
+ from torch.ao.quantization.qconfig_mapping import _QCONFIG_STYLE_ORDER
9
+ from torch.ao.quantization.qconfig import QConfigAny
10
+
11
+ __all__ = ["QConfigMultiMapping"]
12
+
13
+ _QCONFIG_STYLE_TO_METHOD: Dict[str, str] = {
14
+ "global_qconfig": "set_global",
15
+ "object_type_qconfigs": "set_object_type",
16
+ "module_name_regex_qconfigs": "set_module_name_regex",
17
+ "module_name_qconfigs": "set_module_name",
18
+ "module_name_object_type_order_qconfigs": "set_module_name_object_type_order",
19
+ }
20
+
21
+ def _remove_duplicates_and_none(qconfig_list: List[QConfigAny]) -> None:
22
+ to_remove = []
23
+ for index, cur_qconfig in enumerate(qconfig_list):
24
+ if cur_qconfig is None:
25
+ to_remove.append(index)
26
+ break
27
+ for checked_qconfig in qconfig_list[:index]:
28
+ if torch.ao.quantization.qconfig_equals(cur_qconfig, checked_qconfig):
29
+ to_remove.append(index)
30
+ break
31
+ for index in to_remove[::-1]:
32
+ qconfig_list.pop(index)
33
+
34
+ class QConfigMultiMapping:
35
+ """
36
+ This class, used with the prepare_n_shadows_model API, stores a list of :class:`torch.ao.quantization.QConfigMapping`s
37
+ so that multiple QConfigs can be specified for each QConfig matching style.
38
+
39
+ The user can specify QConfigs using the following methods (in increasing match priority):
40
+
41
+ ``set_global`` : sets the global (default) QConfigs
42
+
43
+ ``set_object_type`` : sets the QConfigs for a given module type, function, or method name
44
+
45
+ ``set_module_name_regex`` : sets the QConfigs for modules matching the given regex string
46
+
47
+ ``set_module_name`` : sets the QConfigs for modules matching the given module name
48
+
49
+ ``set_module_name_object_type_order`` : sets the QConfigs for modules matching a combination
50
+ of the given module name, object type, and the index at which the module appears
51
+
52
+ Note: Usage of set methods is the same as in QConfigMapping except with a passed in list of QConfigs rather than a
53
+ single QConfig.
54
+
55
+ Example usage::
56
+
57
+ qconfig_mapping = QConfigMultiMapping()
58
+ .set_global([qconfig1, qconfig2])
59
+ .set_object_type(torch.nn.Linear, [qconfig2, qconfig3])
60
+ .set_object_type(torch.nn.ReLU, [qconfig1])
61
+ .set_module_name_regex("foo.*bar.*conv[0-9]+", [qconfig2])
62
+ .set_module_name_regex("foo.*", [qconfig1, qconfig2, qconfig3])
63
+ .set_module_name("module1", [None])
64
+ .set_module_name("module2", [qconfig2])
65
+ .set_module_name_object_type_order("foo.bar", torch.nn.functional.linear, 0, [qconfig3])
66
+
67
+ """
68
+
69
+ def __init__(self):
70
+ # initialize this with 1 QConfigMapping to avoid corner cases
71
+ self.qconfig_mappings_list: List[QConfigMapping] = [QConfigMapping()]
72
+
73
+ def _handle_list_size_mismatch(
74
+ self, qconfig_list: List[QConfigAny], style: str
75
+ ) -> None:
76
+ # this method handles cases where the size of qconfig_list does not match
77
+ # the size of qconfig_mappings_list.
78
+ # Issue: Consider a user inserting global_qconfig A and B first, then inserting
79
+ # qconfig C as an object_type_qconfig for conv ops. If we internally store
80
+ # 1 QConfigMapping with A and C and another with just B, then the
81
+ # second QConfigMapping will match B to conv ops (which is not wanted), since B is global.
82
+
83
+ # we avoid this by maintaining the invariant that if any QConfigMapping
84
+ # has a qconfig style+key with a qconfig in it, all QConfigMappings must
85
+ # have either a qconfig or None for that same style+key. In the above
86
+ # example, a None qconfig would prevent the unwanted match in the
87
+ # second QConfigMapping
88
+
89
+ if len(qconfig_list) > len(self.qconfig_mappings_list):
90
+ # Case: we have more qconfigs (in qconfig_list) than QConfigMappings
91
+
92
+ # Add new QConfigMappings (initialized so we maintain the `invariant`)
93
+
94
+ new_qconfig_mapping = QConfigMapping()
95
+ # searches other QConfigMappings for qconfig style+keys
96
+ # that need to be inserted as `None` into the new QConfigMapping
97
+ for qconfig_mapping in self.qconfig_mappings_list:
98
+
99
+ # global_qconfig has None by default
100
+ for check_style in _QCONFIG_STYLE_ORDER[1:]:
101
+ qconfigs_dict = getattr(qconfig_mapping, check_style)
102
+ target_qconfigs_dict = getattr(new_qconfig_mapping, check_style)
103
+ for key in qconfigs_dict:
104
+ target_qconfigs_dict[key] = None
105
+ break
106
+
107
+ # insert copies of this new QConfigMapping until all entries
108
+ # in qconfig_list can fit among the QConfigMappings
109
+ while len(qconfig_list) > len(self.qconfig_mappings_list):
110
+ self.qconfig_mappings_list.append(copy.deepcopy(new_qconfig_mapping))
111
+ else:
112
+ # Case: we have fewer qconfigs in qconfig_list than QConfigMappings
113
+
114
+ # pad qconfig_list with `None` until length is same
115
+ while len(qconfig_list) < len(self.qconfig_mappings_list):
116
+ qconfig_list.append(None)
117
+
118
+ # this function applies the insertion method across each QConfigMapping
119
+ def _insert_qconfig_list(
120
+ self,
121
+ style: str,
122
+ args: List[Union[str, int, Callable]],
123
+ qconfig_list: List[QConfigAny],
124
+ ) -> None:
125
+
126
+ # we remove duplicates and None to make the ordering of qconfigs
127
+ # deterministic upon insertion.
128
+ _remove_duplicates_and_none(qconfig_list)
129
+
130
+ self._handle_list_size_mismatch(qconfig_list, style)
131
+ method_name = _QCONFIG_STYLE_TO_METHOD[style]
132
+ for qconfig_mapping, qconfig in zip(self.qconfig_mappings_list, qconfig_list):
133
+ # uses QConfigMapping set method to insert qconfig
134
+ set_method = getattr(qconfig_mapping, method_name)
135
+ set_method(*args, qconfig)
136
+
137
+ def set_global(self, global_qconfig_list: List[QConfigAny]) -> QConfigMultiMapping:
138
+ """
139
+ Set global QConfigs
140
+ see :func:`~torch.ao.quantization.QConfigMapping.set_global()` for more info
141
+ """
142
+ self._insert_qconfig_list("global_qconfig", [], global_qconfig_list)
143
+ return self
144
+
145
+ def set_object_type(
146
+ self, object_type: Union[Callable, str], qconfig_list: List[QConfigAny]
147
+ ) -> QConfigMultiMapping:
148
+ """
149
+ Set object type QConfigs
150
+ see :func:`~torch.ao.quantization.QConfigMapping.set_object_type()` for more info
151
+ """
152
+ self._insert_qconfig_list("object_type_qconfigs", [object_type], qconfig_list)
153
+ return self
154
+
155
+ def set_module_name_regex(
156
+ self, module_name_regex: str, qconfig_list: List[QConfigAny]
157
+ ) -> QConfigMultiMapping:
158
+ """
159
+ Set module_name_regex QConfigs
160
+ see :func:`~torch.ao.quantization.QConfigMapping.set_module_name_regex()` for more info
161
+ """
162
+ self._insert_qconfig_list(
163
+ "module_name_regex_qconfigs", [module_name_regex], qconfig_list
164
+ )
165
+ return self
166
+
167
+ def set_module_name(
168
+ self, module_name: str, qconfig_list: List[QConfigAny]
169
+ ) -> QConfigMultiMapping:
170
+ """
171
+ Set module_name QConfigs
172
+ see :func:`~torch.ao.quantization.QConfigMapping.set_module_name()` for more info
173
+ """
174
+ self._insert_qconfig_list("module_name_qconfigs", [module_name], qconfig_list)
175
+ return self
176
+
177
+ def set_module_name_object_type_order(
178
+ self,
179
+ module_name: str,
180
+ object_type: Callable,
181
+ index: int,
182
+ qconfig_list: List[QConfigAny],
183
+ ) -> QConfigMultiMapping:
184
+ """
185
+ Set module_name_object_type_order QConfigs
186
+ see :func:`~torch.ao.quantization.QConfigMapping.set_module_name_object_type_order()` for more info
187
+ """
188
+ self._insert_qconfig_list(
189
+ "module_name_object_type_order_qconfigs",
190
+ [module_name, object_type, index],
191
+ qconfig_list,
192
+ )
193
+ return self
194
+
195
+ def __repr__(self):
196
+ return (
197
+ self.__class__.__name__ +
198
+ " [" +
199
+ "".join(f"\n{qconfig_mapping.__repr__()}," for qconfig_mapping in self.qconfig_mappings_list) +
200
+ "\n]"
201
+ )
202
+
203
+ @classmethod
204
+ def from_list_qconfig_mapping(
205
+ cls, qconfig_mapping_list: List[QConfigMapping]
206
+ ) -> QConfigMultiMapping:
207
+ """
208
+ Creates a QConfigMultiMapping from a list of QConfigMappings
209
+ """
210
+ new_qconfig_multi_mapping = cls()
211
+
212
+ new_qconfig_multi_mapping.qconfig_mappings_list = copy.deepcopy(
213
+ qconfig_mapping_list
214
+ )
215
+
216
+ # we need to avoid the issue described in _handle_list_size_mismatch,
217
+ # so we reinsert all the qconfigs using the QConfigMultiMapping
218
+ # set methods
219
+
220
+ # go through all qconfig styles
221
+ # note: global can be ignored since it is None by default
222
+ for style in _QCONFIG_STYLE_ORDER[1:]:
223
+
224
+ # gather all key+qconfigs for current style
225
+ # into qconfig_dict_list
226
+ qconfig_dict_list: Dict[Any, List[QConfigAny]] = {}
227
+ for qconfig_mapping in qconfig_mapping_list:
228
+ qconfig_dict = getattr(qconfig_mapping, style)
229
+ for key, qconfig in qconfig_dict.items():
230
+ if key not in qconfig_dict_list:
231
+ qconfig_dict_list[key] = []
232
+ qconfig_dict_list[key].append(qconfig)
233
+
234
+ # reinsert all gathered key+qconfigs
235
+ set_method_name = _QCONFIG_STYLE_TO_METHOD[style]
236
+ set_method = getattr(new_qconfig_multi_mapping, set_method_name)
237
+ for key, qconfig_list in qconfig_dict_list.items():
238
+ if isinstance(key, tuple):
239
+ set_method(*key, qconfig_list)
240
+ else:
241
+ set_method(key, qconfig_list)
242
+
243
+ return new_qconfig_multi_mapping
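A minimal usage sketch (illustrative only; assumes the fbgemm and qnnpack default qconfigs as two candidate configurations):

    import torch
    from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping
    from torch.ao.quantization import get_default_qconfig

    qconfig_a = get_default_qconfig("fbgemm")
    qconfig_b = get_default_qconfig("qnnpack")

    # Two global candidates, but only one candidate for Linear layers; the
    # shorter list is padded with None internally to keep the invariant
    # described in _handle_list_size_mismatch.
    multi_mapping = (
        QConfigMultiMapping()
        .set_global([qconfig_a, qconfig_b])
        .set_object_type(torch.nn.Linear, [qconfig_a])
    )
    print(multi_mapping)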
venv/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py ADDED
@@ -0,0 +1,533 @@
1
+ import enum
2
+ import operator
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.ao.nn.intrinsic.quantized as nniq
7
+ import torch.ao.nn.quantized as nnq
8
+
9
+ toq = torch.ops.quantized
10
+ from typing import Tuple, Callable, Dict, Set, List, Optional, Union
11
+
12
+ from torch.fx import GraphModule
13
+ from torch.fx.graph import Node
14
+ from torch.ao.quantization import (
15
+ ObserverBase,
16
+ FakeQuantizeBase,
17
+ )
18
+ from torch.ao.quantization.utils import getattr_from_fqn
19
+ from torch.ao.quantization.observer import _is_activation_post_process
20
+
21
+ from .ns_types import NSNodeTargetType, NSResultsType
22
+
23
+ # TODO(future PR): consider deleting this enum and using the torch types
24
+ # directly. This might be tricky because it is not a one to one mapping.
25
+ class NodeInputOrOutputType(enum.Enum):
26
+ FP32 = enum.auto() # torch.float
27
+ INT8 = enum.auto() # torch.qint8 or torch.quint8
28
+ FP16 = enum.auto() # torch.float16
29
+ UNKNOWN = enum.auto() # we cannot determine input/output dtype
30
+ # TODO(future PR): while these functions can support multiple dtypes,
31
+ # for the purposes of numerical debugging we want to get the actual
32
+ # dtype used in the model. We will likely need some kind of dtype
33
+ # propagation to estimate this.
34
+ FP32_OR_INT8 = enum.auto() # either torch.float or torch.quint8 or torch.qint8
35
+ # TODO(future PRs): dynamic quant, fake quant, etc
36
+
37
+
38
+ def get_node_first_input_and_output_type(
39
+ node: Node,
40
+ gm: GraphModule,
41
+ logger_cls: Callable,
42
+ node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
43
+ ) -> Tuple[NodeInputOrOutputType, NodeInputOrOutputType]:
44
+
45
+ # TODO(future PR): clean this up
46
+ FUNS_IO_TYPE_FP32 = node_type_to_io_type_map["funs_io_type_fp32"]
47
+ FUNS_IO_TYPE_FP16 = node_type_to_io_type_map["funs_io_type_fp16"]
48
+ FUNS_IO_TYPE_INT8 = node_type_to_io_type_map["funs_io_type_int8"]
49
+ FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["funs_io_type_fp32_or_int8"]
50
+ MODS_IO_TYPE_FP32 = node_type_to_io_type_map["mods_io_type_fp32"]
51
+ MODS_IO_TYPE_INT8 = node_type_to_io_type_map["mods_io_type_int8"]
52
+ MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
53
+ METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["meths_io_type_fp32_or_int8"]
54
+
55
+ if node.op == "call_function":
56
+ if node.target in FUNS_IO_TYPE_FP32:
57
+ return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
58
+ if node.target in FUNS_IO_TYPE_FP16:
59
+ return (NodeInputOrOutputType.FP16, NodeInputOrOutputType.FP16)
60
+ elif node.target in FUNS_IO_TYPE_INT8:
61
+ return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
62
+ elif node.target in FUNS_IO_TYPE_FP32_OR_INT8:
63
+ first_arg = get_normalized_nth_input(node, gm, 0)
64
+ assert isinstance(first_arg, Node)
65
+ (
66
+ _prev_node_input_type,
67
+ prev_node_output_type,
68
+ ) = get_node_first_input_and_output_type(
69
+ first_arg, gm, logger_cls, node_type_to_io_type_map
70
+ )
71
+ return (prev_node_output_type, prev_node_output_type)
72
+ else:
73
+ return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
74
+
75
+ elif node.op == "call_module":
76
+ assert node.op == "call_module"
77
+ assert isinstance(node.target, str)
78
+ mod = getattr_from_fqn(gm, node.target)
79
+ is_known_fp32_or_int8_input_module = any(
80
+ isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type]
81
+ )
82
+ if (
83
+ isinstance(mod, (logger_cls, ObserverBase, FakeQuantizeBase)) # type: ignore[arg-type]
84
+ or is_known_fp32_or_int8_input_module
85
+ ):
86
+ # A logger or observer's input and output type is the output
87
+ # type of the preceding node.
88
+ first_arg = get_normalized_nth_input(node, gm, 0)
89
+ assert isinstance(first_arg, Node)
90
+ (
91
+ _prev_node_input_type,
92
+ prev_node_output_type,
93
+ ) = get_node_first_input_and_output_type(
94
+ first_arg, gm, logger_cls, node_type_to_io_type_map
95
+ )
96
+ return (prev_node_output_type, prev_node_output_type)
97
+ is_known_fp32_input_module = any(
98
+ isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32 # type: ignore[arg-type]
99
+ )
100
+ is_known_int8_input_module = any(
101
+ isinstance(mod, target_type) for target_type in MODS_IO_TYPE_INT8 # type: ignore[arg-type]
102
+ )
103
+ if is_known_fp32_input_module:
104
+ return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
105
+ elif is_known_int8_input_module:
106
+ return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
107
+ else:
108
+ return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
109
+
110
+ elif node.op == "call_method":
111
+ if node.target == "dequantize":
112
+ # Dequantize is a special node because it allows multiple input types.
113
+ # So, we look up the output type of the previous node and return that
114
+ # as the input type of this node instance.
115
+ prev_node = get_normalized_nth_input(node, gm, 0)
116
+ assert isinstance(prev_node, Node)
117
+ (
118
+ _prev_node_input_type,
119
+ prev_node_output_type,
120
+ ) = get_node_first_input_and_output_type(
121
+ prev_node, gm, logger_cls, node_type_to_io_type_map
122
+ )
123
+ return (prev_node_output_type, NodeInputOrOutputType.FP32)
124
+
125
+ elif node.target == "to":
126
+ # to is a special node because it allows multiple input types.
127
+ # So, we look up the output type of the previous node and return that
128
+ # as the input type of this node instance. We also look up the target
129
+ # of to and return the correct output type.
130
+ prev_node = get_normalized_nth_input(node, gm, 0)
131
+ assert isinstance(prev_node, Node)
132
+ (
133
+ _prev_node_input_type,
134
+ prev_node_output_type,
135
+ ) = get_node_first_input_and_output_type(
136
+ prev_node, gm, logger_cls, node_type_to_io_type_map
137
+ )
138
+
139
+ cur_node_dtype_target = get_normalized_nth_input(node, gm, 1)
140
+ assert (
141
+ cur_node_dtype_target is torch.float16
142
+ ), f"{cur_node_dtype_target} handling needs to be added"
143
+
144
+ return (prev_node_output_type, NodeInputOrOutputType.FP16)
145
+
146
+ elif node.target in METHS_IO_TYPE_FP32_OR_INT8:
147
+ first_arg = get_normalized_nth_input(node, gm, 0)
148
+ assert isinstance(first_arg, Node)
149
+ (
150
+ _prev_node_input_type,
151
+ prev_node_output_type,
152
+ ) = get_node_first_input_and_output_type(
153
+ first_arg, gm, logger_cls, node_type_to_io_type_map
154
+ )
155
+ return (prev_node_output_type, prev_node_output_type)
156
+
157
+ return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
158
+ else:
159
+ return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
160
+
161
+
162
+ def get_node_input_qparams(
163
+ node: Node,
164
+ gm: GraphModule,
165
+ node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
166
+ ) -> Optional[Tuple[Union[torch.Tensor, float], Union[torch.Tensor, int]]]:
167
+ """
168
+ Returns the qparams (scale, zero_point) of the first input to `node`,
169
+ if they can be inferred from the graph.
170
+ """
171
+ prev_node = get_normalized_nth_input(node, gm, 0)
172
+
173
+ if not isinstance(prev_node, Node):
174
+ return None
175
+
176
+ MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
177
+
178
+ def _get_scale_zp_from_function_args(node, gm, scale_arg_idx, zp_arg_idx):
179
+ scale_node = get_normalized_nth_input(node, gm, scale_arg_idx)
180
+ zp_node = get_normalized_nth_input(node, gm, zp_arg_idx)
181
+ assert isinstance(scale_node, Node) and isinstance(scale_node.target, str)
182
+ assert isinstance(zp_node, Node) and isinstance(zp_node.target, str)
183
+ scale_obj = getattr_from_fqn(gm, scale_node.target)
184
+ zp_obj = getattr_from_fqn(gm, zp_node.target)
185
+ return (scale_obj, zp_obj)
186
+
187
+ if prev_node.op == "call_function":
188
+
189
+ # quantize - read the args directly
190
+ if prev_node.target == torch.quantize_per_tensor:
191
+ return _get_scale_zp_from_function_args(prev_node, gm, 1, 2)
192
+ elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu):
193
+ return _get_scale_zp_from_function_args(prev_node, gm, 2, 3)
194
+
195
+ return None
196
+ # TODO(future PR): handle more functionals
197
+ # TODO(future PR): handle functional ops which inherit qparams from input
198
+
199
+ elif prev_node.op == "call_module":
200
+
201
+ # get type of the module
202
+ assert isinstance(prev_node.target, str)
203
+ module_obj = getattr_from_fqn(gm, prev_node.target)
204
+ if isinstance(
205
+ module_obj,
206
+ (
207
+ nnq.Linear,
208
+ nnq.Conv1d,
209
+ nnq.Conv2d,
210
+ nniq.ConvReLU2d,
211
+ nnq.Conv3d,
212
+ nnq.BatchNorm2d,
213
+ nnq.BatchNorm3d,
214
+ nnq.ConvTranspose1d,
215
+ nnq.ConvTranspose2d,
216
+ nnq.ELU,
217
+ nnq.GroupNorm,
218
+ nnq.InstanceNorm1d,
219
+ nnq.InstanceNorm2d,
220
+ nnq.InstanceNorm3d,
221
+ nnq.LayerNorm,
222
+ nnq.Hardswish,
223
+ nnq.LeakyReLU,
224
+ nnq.ReLU6,
225
+ nniq.BNReLU2d,
226
+ nniq.BNReLU3d,
227
+ nniq.ConvReLU1d,
228
+ nniq.ConvReLU2d,
229
+ nniq.ConvReLU3d,
230
+ nniq.LinearReLU,
231
+ ),
232
+ ):
233
+ return (module_obj.scale, module_obj.zero_point) # type: ignore[return-value]
234
+
235
+ is_known_fp32_or_int8_input_module = any(
236
+ isinstance(module_obj, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type]
237
+ )
238
+ if is_known_fp32_or_int8_input_module:
239
+ return get_node_input_qparams(prev_node, gm, node_type_to_io_type_map)
240
+
241
+ return None
242
+
243
+
244
+ def return_first_non_observer_node(
245
+ node: Node,
246
+ gm: GraphModule,
247
+ ) -> Node:
248
+ """
249
+ If node is not an observer, returns it. If node is an observer,
250
+ navigates up the graph and returns the first parent which is not an
251
+ observer. For example,
252
+
253
+ graph: (node_non_obs), node = node_non_obs : returns node_non_obs
254
+ graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs
255
+ graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs
256
+ """
257
+ if node.op == "call_module":
258
+ node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type]
259
+ if _is_activation_post_process(node_obj):
260
+ assert len(node.args) == 1
261
+ assert isinstance(node.args[0], Node)
262
+ node = node.args[0]
263
+ # code duplication intended, not worth refactoring
264
+ assert isinstance(node.target, str)
265
+ node_obj = getattr_from_fqn(gm, node.target)
266
+ if _is_activation_post_process(node_obj):
267
+ assert len(node.args) == 1
268
+ assert isinstance(node.args[0], Node)
269
+ node = node.args[0]
270
+ return node
271
+
272
+
273
+ def get_number_of_non_param_args(
274
+ node: Node,
275
+ gm: GraphModule,
276
+ ) -> int:
277
+ """
278
+ Assumes that all non-param args occur first. Returns the number of
279
+ non-param args expected for a node. For example, for
280
+
281
+ F.linear(x, weight, bias)
282
+
283
+ Returns 1, because x is a non-param arg and weight and bias are params.
284
+ For
285
+
286
+ lstm_mod(x, hid)
287
+
288
+ Returns 2, because both x and hid are non-param args.
289
+ """
290
+ if node.op == "call_module":
291
+ node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type]
292
+ if isinstance(node_obj, nn.LSTM):
293
+ return 2
294
+
295
+ # default is 1
296
+ return 1
297
+
298
+
299
+ def get_arg_indices_of_inputs_to_log(node: Node) -> List[int]:
300
+ """
301
+ Returns the indices of args of the node which we should attach
302
+ loggers to, if input logging is enabled.
303
+
304
+ For example,
305
+ * for (x + y), returns [0, 1]
306
+ * for (1 + y), returns [1]
307
+ * for (x + 1), returns [0]
308
+ * for (linear(x, w, b)) returns [0]
309
+ * by default, returns [0]
310
+ """
311
+ if len(node.args) == 0:
312
+ return []
313
+ if node.op == "call_function" and (
314
+ # TODO(future PR): use relationship map instead of hardcoding
315
+ node.target in (torch.add, torch.ops.quantized.add, operator.add)
316
+ or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
317
+ ):
318
+ result = []
319
+ for i in range(2):
320
+ if type(node.args[i]) == Node:
321
+ result.append(i)
322
+ return result
323
+ return [0]
324
+
325
+
326
+ def get_target_type_str(node: Node, gm: GraphModule) -> str:
327
+ """
328
+ Returns a string representation of the type of the function or module
329
+ pointed to by this node, or '' for other node types.
330
+ """
331
+ target_type = ""
332
+ if node.op in ("call_function", "call_method"):
333
+ target_type = torch.typename(node.target)
334
+ elif node.op == "call_module":
335
+ assert isinstance(node.target, str)
336
+ target_mod = getattr_from_fqn(gm, node.target)
337
+ target_type = torch.typename(target_mod)
338
+ return target_type
339
+
340
+
341
+ def rekey_logger_info_on_node_name_of_model(
342
+ results: NSResultsType,
343
+ model_name: str,
344
+ ) -> NSResultsType:
345
+ """
346
+ Rekeys the layer name of a results dictionary to use node names
347
+ from `model_name`.
348
+
349
+ For example, transforms
350
+
351
+ {'base_op_1_0': {'node_output': {'model_a':
352
+ [{'ref_node_name': 'linear1', ...}]}}}
353
+
354
+ into
355
+
356
+ {'linear1': {'node_output': {'model_a':
357
+ [{'ref_node_name': 'linear1', ...}]}}}
358
+
359
+ Note: we cannot use these node names directly because they are not
360
+ guaranteed to be consistent across models. This is why we extract
361
+ the results first and rekey afterwards.
362
+ """
363
+ new_results = {}
364
+ for old_layer_name, result_type_to_results in results.items():
365
+ new_layer_name = None
366
+ for model_name_to_results in result_type_to_results.values():
367
+ for cur_model_name, list_of_results in model_name_to_results.items():
368
+ if cur_model_name == model_name:
369
+ assert len(list_of_results)
370
+ new_layer_name = list_of_results[0]["ref_node_name"]
371
+ else:
372
+ continue
373
+ if new_layer_name is not None:
374
+ new_results[new_layer_name] = result_type_to_results
375
+ else:
376
+ new_results[old_layer_name] = result_type_to_results
377
+ return new_results
378
+
379
+
380
+ def maybe_add_missing_fqns(results: NSResultsType) -> None:
381
+ """
382
+ If `fqn` entries are filled in for one of the models in `results`, copies
383
+ them over to any models which do not have them filled out.
384
+
385
+ A common use case benefitting from this is comparing a model prepared by
386
+ quantization to a quantized model. In this case, the model prepared by
387
+ quantization would have `fqn` entries, and the quantized model would not.
388
+ """
389
+
390
+ # Check in the first result to find any model with fqn entries defined.
391
+ model_name_with_fqns = None
392
+ for result_type_to_results in results.values():
393
+ for model_name_to_results in result_type_to_results.values():
394
+ for model_name, model_results in model_name_to_results.items():
395
+ if len(model_results) > 0:
396
+ if model_results[0]["fqn"] is not None:
397
+ model_name_with_fqns = model_name
398
+ break
399
+ break
400
+ break
401
+
402
+ if model_name_with_fqns:
403
+ for result_type_to_results in results.values():
404
+ for model_name_to_results in result_type_to_results.values():
405
+ ref_model_results = model_name_to_results[model_name_with_fqns]
406
+ for model_name, model_results in model_name_to_results.items():
407
+ if model_name == model_name_with_fqns:
408
+ continue
409
+ for i in range(len(model_results)):
410
+ fqn = ref_model_results[i]["fqn"]
411
+ model_results[i]["fqn"] = fqn
412
+
413
+
414
+ def maybe_dequantize_first_two_tensor_args_and_handle_tuples(f):
415
+ def inner(*args, **kwargs):
416
+ a0, a1, *a_other = args
417
+
418
+ if (isinstance(a0, tuple) and isinstance(a1, tuple)) or (
419
+ isinstance(a0, list) and isinstance(a1, list)
420
+ ):
421
+ results = []
422
+ for el0, el1 in zip(a0, a1):
423
+ new_args = (el0, el1, *a_other)
424
+ results.append(inner(*new_args, **kwargs))
425
+ return results
426
+
427
+ elif isinstance(a0, torch.Tensor) and isinstance(a1, torch.Tensor):
428
+ if a0.is_quantized:
429
+ a0 = a0.dequantize()
430
+ if a1.is_quantized:
431
+ a1 = a1.dequantize()
432
+
433
+ # for the purposes of this util, only handle floats
434
+ if a0.dtype != torch.float or a1.dtype != torch.float:
435
+ return None
436
+
437
+ new_args = (a0, a1, *a_other)
438
+ return f(*new_args, **kwargs)
439
+
440
+ return inner
441
+
442
+
443
+ @maybe_dequantize_first_two_tensor_args_and_handle_tuples
444
+ def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
445
+ """
446
+ Computes the SQNR between `x` and `y`.
447
+
448
+ Args:
449
+ x: Tensor or tuple of tensors
450
+ y: Tensor or tuple of tensors
451
+
452
+ Return:
453
+ float or tuple of floats
454
+ """
455
+ Ps = torch.norm(x)
456
+ Pn = torch.norm(x - y)
457
+ return 20 * torch.log10(Ps / Pn)
458
+
459
+
460
+ @maybe_dequantize_first_two_tensor_args_and_handle_tuples
461
+ def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
462
+ """
463
+ Computes the normalized L2 error between `x` and `y`.
464
+
465
+ Args:
466
+ x: Tensor or tuple of tensors
467
+ y: Tensor or tuple of tensors
468
+
469
+ Return:
470
+ float or tuple of floats
471
+ """
472
+ return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())
473
+
474
+
475
+ @maybe_dequantize_first_two_tensor_args_and_handle_tuples
476
+ def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
477
+ """
478
+ Computes the cosine similarity between `x` and `y`.
479
+
480
+ Args:
481
+ x: Tensor or tuple of tensors
482
+ y: Tensor or tuple of tensors
483
+
484
+ Return:
485
+ float or tuple of floats
486
+ """
487
+ # For convolutions, the shape of the quantized weight has one additional
488
+ # dimension compared to the shape of the fp32 weight. Match the shapes
489
+ # to enable cosine similarity comparison.
490
+ x = x.reshape(1, -1)
491
+ y = y.reshape(1, -1)
492
+ return torch.nn.functional.cosine_similarity(x, y)
493
+
494
+ def op_type_supports_shadowing(node: Node) -> bool:
495
+ if node.op == 'call_function':
496
+ if node.target in (torch.add, torch.mul, operator.add, operator.mul, torch.cat, torch.stack):
497
+ # shadowing for ops with multiple tensor inputs is not implemented yet
498
+ return False
499
+ return True
500
+
501
+ def get_normalized_nth_input(node: Node, gm: GraphModule, idx: int) -> Node:
502
+ """
503
+ Given a node, gets the n'th input to that node, normalizing
504
+ args and kwargs to the best of its ability.
505
+ """
506
+ try:
507
+ norm_args_and_kwargs = node.normalized_arguments(
508
+ gm, normalize_to_only_use_kwargs=True)
509
+ if norm_args_and_kwargs is not None:
510
+ norm_args, norm_kwargs = norm_args_and_kwargs
511
+ assert len(norm_args) + len(norm_kwargs) > idx
512
+ if idx < len(norm_args):
513
+ return norm_args[idx]
514
+ else:
515
+ # note: in Python 3.7+ dicts are ordered
516
+ return list(norm_kwargs.values())[idx]
517
+ else:
518
+ assert len(node.args) + len(node.kwargs) > idx
519
+ if idx < len(node.args):
520
+ return node.args[idx] # type: ignore[return-value]
521
+ else:
522
+ kwargs_idx = idx - len(node.args)
523
+ return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value]
524
+ except RuntimeError:
525
+ # this RuntimeError happens when node argument normalization
526
+ # requires typehints to proceed, such as for torch.add where
527
+ # either the first, second or both arguments could be tensors
528
+ assert len(node.args) + len(node.kwargs) > idx
529
+ if idx < len(node.args):
530
+ return node.args[idx] # type: ignore[return-value]
531
+ else:
532
+ kwargs_idx = idx - len(node.args)
533
+ return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value]
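A minimal sketch of the comparison utilities defined above (illustrative only):

    import torch
    from torch.ao.ns.fx.utils import (
        compute_cosine_similarity,
        compute_normalized_l2_error,
        compute_sqnr,
    )

    x = torch.randn(8, 16)
    y = x + 0.01 * torch.randn(8, 16)  # slightly perturbed copy of x

    # The decorator dequantizes quantized inputs and recurses into tuples/lists,
    # so these also accept quantized tensors or (tensor, tensor) tuples.
    print(compute_sqnr(x, y))                 # large value -> y is close to x
    print(compute_normalized_l2_error(x, y))  # close to 0
    print(compute_cosine_similarity(x, y))    # close to 1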
venv/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py ADDED
@@ -0,0 +1,275 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import torch.ao.nn.quantized.dynamic as nnqd
5
+ import torch.ao.nn.quantized as nnq
6
+ import torch.ao.nn.intrinsic.qat as nniqat
7
+ import torch.ao.nn.qat as nnqat
8
+ import torch.ao.nn.intrinsic as nni
9
+ import torch.ao.nn.intrinsic.quantized as nniq
10
+ toq = torch.ops.quantized
11
+ from torch.fx import GraphModule
12
+ from torch.fx.graph import Node
13
+
14
+ from .utils import (
15
+ get_target_type_str,
16
+ getattr_from_fqn,
17
+ return_first_non_observer_node,
18
+ )
19
+
20
+ from .ns_types import (
21
+ NSSingleResultValuesType,
22
+ NSSingleResultType,
23
+ )
24
+
25
+ from typing import List, Optional, Dict, Callable
26
+
27
+ def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
28
+ return mod.weight.detach() # type: ignore[operator]
29
+
30
+ def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
31
+ return mod[0].weight.detach() # type: ignore[index]
32
+
33
+ def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
34
+ return mod._weight_bias()[0] # type: ignore[operator]
35
+
36
+ def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
37
+ res = []
38
+ for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type]
39
+ if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
40
+ param_value = mod._flat_weights[idx].detach() # type: ignore[index]
41
+ res.append(param_value)
42
+ return res
43
+
44
+ def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
45
+ res = []
46
+ for weight_value in mod._all_weight_values: # type: ignore[union-attr]
47
+ res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
48
+ res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
49
+ return res
50
+
51
+ def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
52
+ if (
53
+ isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d))
54
+ ):
55
+ return mod.weight.detach()
56
+ elif (
57
+ isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d))
58
+ ):
59
+ return mod[0].weight.detach()
60
+ else:
61
+ return mod._weight_bias()[0] # type: ignore[operator]
62
+
63
+ def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
64
+ if isinstance(mod, nn.Linear):
65
+ return mod.weight.detach()
66
+ elif isinstance(mod, nni.LinearReLU):
67
+ return mod[0].weight.detach()
68
+ else:
69
+ return mod._weight_bias()[0] # type: ignore[operator]
70
+
71
+ def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
72
+ # TODO(future PR): make more generic, handle everything
73
+ if isinstance(mod, nn.LSTM):
74
+ res = []
75
+ for idx, param_name in enumerate(mod._flat_weights_names):
76
+ if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
77
+ param_value = mod._flat_weights[idx].detach()
78
+ res.append(param_value)
79
+ return res
80
+ else:
81
+ assert isinstance(mod, nnqd.LSTM), f"type {type(mod)} not handled yet"
82
+ res = []
83
+ for weight_value in mod._all_weight_values:
84
+ res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
85
+ res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
86
+ return res
87
+
88
+ def get_conv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
89
+ # traverse backwards from the weight arg, accounting for any observers
90
+ weight_arg_node = node.args[1]
91
+ assert isinstance(weight_arg_node, Node)
92
+ weight_node = return_first_non_observer_node(weight_arg_node, gm)
93
+ assert isinstance(weight_node, Node)
94
+ assert weight_node.op == 'get_attr'
95
+ weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
96
+ return weight.detach()
97
+
98
+ def get_qconv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
99
+ # qconv state is arg 1
100
+ qconv_state_node = node.args[1]
101
+ assert isinstance(qconv_state_node, Node)
102
+ assert qconv_state_node.op == 'get_attr'
103
+ qconv_state_obj = getattr_from_fqn(gm, qconv_state_node.target) # type: ignore[arg-type]
104
+ return qconv_state_obj.weight()
105
+
106
+ def get_linear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
107
+ # traverse backwards from the weight arg, accounting for any observers
108
+ # supported patterns:
109
+ # weight -> obs -> linear
110
+ # weight -> to(torch.float16) -> dequantize -> linear
111
+ linear_second_arg = node.args[1]
112
+ assert isinstance(linear_second_arg, Node)
113
+
114
+ if linear_second_arg.op == 'call_module':
115
+ # weight -> obs -> linear
116
+ weight_arg_node = node.args[1]
117
+ assert isinstance(weight_arg_node, Node)
118
+ weight_node = weight_arg_node.args[0]
119
+ assert isinstance(weight_node, Node)
120
+ assert weight_node.op == 'get_attr'
121
+ weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
122
+ return weight.detach()
123
+ elif linear_second_arg.op == 'call_method':
124
+ # weight -> to(torch.float16) -> dequantize -> linear
125
+ assert linear_second_arg.op == 'call_method'
126
+ dequant_node = node.args[1]
127
+ assert isinstance(dequant_node, Node)
128
+ to_fp16_node = dequant_node.args[0]
129
+ assert isinstance(to_fp16_node, Node)
130
+ # extract the dtype, so we can cast to it before returning
131
+ target_dtype = to_fp16_node.args[1]
132
+ weight_node = to_fp16_node.args[0]
133
+ assert isinstance(weight_node, Node)
134
+ assert weight_node.op == 'get_attr'
135
+ weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
136
+ # return the weight with fp16 cast
137
+ return weight.detach().to(target_dtype)
138
+ else:
139
+ assert linear_second_arg.op == 'get_attr'
140
+ weight = getattr_from_fqn(gm, linear_second_arg.target) # type: ignore[arg-type]
141
+ return weight.detach()
142
+
143
+ def get_qlinear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
144
+ # packed weight is arg 1
145
+ packed_weight_node = node.args[1]
146
+ assert isinstance(packed_weight_node, Node)
147
+ assert packed_weight_node.op == 'get_attr'
148
+ packed_weight = getattr_from_fqn(gm, packed_weight_node.target) # type: ignore[arg-type]
149
+ # TODO(future PR): why does packed_weight.unpack() not work?
150
+ (weight, _bias), _name = packed_weight.__getstate__()
151
+ return weight
152
+
153
+ def get_op_to_type_to_weight_extraction_fn() -> Dict[str, Dict[Callable, Callable]]:
154
+
155
+ op_to_type_to_weight_extraction_fn: Dict[str, Dict[Callable, Callable]] = {
156
+ 'call_module': {
157
+ # Conv1d
158
+ nn.Conv1d: mod_weight_detach,
159
+ nni.ConvReLU1d: mod_0_weight_detach,
160
+ nnq.Conv1d: mod_weight_bias_0,
161
+ nnqat.Conv1d: mod_weight_detach,
162
+ nniqat.ConvBn1d: mod_weight_detach,
163
+ nniqat.ConvBnReLU1d: mod_weight_detach,
164
+ nniqat.ConvReLU1d: mod_weight_detach,
165
+ nniq.ConvReLU1d: mod_weight_bias_0,
166
+ # Conv2d
167
+ nn.Conv2d: mod_weight_detach,
168
+ nni.ConvReLU2d: mod_0_weight_detach,
169
+ nnq.Conv2d: mod_weight_bias_0,
170
+ nnqat.Conv2d: mod_weight_detach,
171
+ nniqat.ConvBn2d: mod_weight_detach,
172
+ nniqat.ConvBnReLU2d: mod_weight_detach,
173
+ nniqat.ConvReLU2d: mod_weight_detach,
174
+ nniq.ConvReLU2d: mod_weight_bias_0,
175
+ # Conv3d
176
+ nn.Conv3d: mod_weight_detach,
177
+ nni.ConvReLU3d: mod_0_weight_detach,
178
+ nnq.Conv3d: mod_weight_bias_0,
179
+ nnqat.Conv3d: mod_weight_detach,
180
+ nniqat.ConvBn3d: mod_weight_detach,
181
+ nniqat.ConvBnReLU3d: mod_weight_detach,
182
+ nniqat.ConvReLU3d: mod_weight_detach,
183
+ nniq.ConvReLU3d: mod_weight_bias_0,
184
+ # Linear
185
+ nn.Linear: mod_weight_detach,
186
+ nnq.Linear: mod_weight_bias_0,
187
+ nni.LinearReLU: mod_0_weight_detach,
188
+ nniq.LinearReLU: mod_weight_bias_0,
189
+ nnqat.Linear: mod_weight_detach,
190
+ nnqd.Linear: mod_weight_bias_0,
191
+ nniqat.LinearReLU: mod_weight_detach,
192
+ nniqat.LinearBn1d: mod_weight_detach,
193
+ nn.modules.linear.NonDynamicallyQuantizableLinear: mod_weight_detach,
194
+ # LSTM
195
+ nn.LSTM: get_lstm_weight,
196
+ nnqd.LSTM: get_qlstm_weight,
197
+ },
198
+ 'call_function': {
199
+ # Conv
200
+ F.conv1d: get_conv_fun_weight,
201
+ F.conv2d: get_conv_fun_weight,
202
+ F.conv3d: get_conv_fun_weight,
203
+ toq.conv1d: get_qconv_fun_weight,
204
+ toq.conv2d: get_qconv_fun_weight,
205
+ toq.conv3d: get_qconv_fun_weight,
206
+ toq.conv1d_relu: get_qconv_fun_weight,
207
+ toq.conv2d_relu: get_qconv_fun_weight,
208
+ toq.conv3d_relu: get_qconv_fun_weight,
209
+ # Linear
210
+ F.linear: get_linear_fun_weight,
211
+ toq.linear: get_qlinear_fun_weight,
212
+ toq.linear_relu: get_qlinear_fun_weight,
213
+ },
214
+ }
215
+
216
+ return op_to_type_to_weight_extraction_fn
217
+
218
+ def extract_weight_from_node(
219
+ node: Node,
220
+ gm: GraphModule,
221
+ op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
222
+ ) -> Optional[NSSingleResultType]:
223
+ res_type = NSSingleResultValuesType.WEIGHT.value
224
+
225
+ # Not all graphmodules have _node_name_to_scope, so only fill it
226
+ # out if it exists.
227
+ fqn = None
228
+ if hasattr(gm, '_node_name_to_scope'):
229
+ fqn = gm._node_name_to_scope[node.name][0] # type: ignore[index]
230
+
231
+ if op_to_type_to_weight_extraction_fn is None:
232
+ op_to_type_to_weight_extraction_fn = get_op_to_type_to_weight_extraction_fn()
233
+
234
+ ref_node_type = get_target_type_str(node, gm)
235
+ # for extracting weights, these are always the same
236
+ prev_node_type = ref_node_type
237
+
238
+ if node.op == 'call_function':
239
+ function_mapping = op_to_type_to_weight_extraction_fn['call_function']
240
+ for target_fn_type, weight_extraction_fn in function_mapping.items():
241
+ if node.target == target_fn_type:
242
+ weight = weight_extraction_fn(node, gm)
243
+ return {
244
+ 'type': res_type,
245
+ 'values': [weight],
246
+ 'prev_node_name': node.name,
247
+ 'prev_node_target_type': prev_node_type,
248
+ 'ref_node_name': node.name,
249
+ 'ref_node_target_type': ref_node_type,
250
+ 'index_within_arg': 0,
251
+ 'index_of_arg': 0,
252
+ 'fqn': fqn,
253
+ }
254
+
255
+ elif node.op == 'call_module':
256
+ # for call_module, we need to look up the modules to do the type check
257
+ assert isinstance(node.target, str)
258
+ mod = getattr_from_fqn(gm, node.target)
259
+ module_mapping = op_to_type_to_weight_extraction_fn['call_module']
260
+ for target_mod_type, weight_extraction_fn in module_mapping.items():
261
+ if type(mod) == target_mod_type:
262
+ weight = weight_extraction_fn(mod)
263
+ return {
264
+ 'type': res_type,
265
+ 'values': [weight],
266
+ 'prev_node_name': node.name,
267
+ 'prev_node_target_type': prev_node_type,
268
+ 'ref_node_name': node.name,
269
+ 'ref_node_target_type': ref_node_type,
270
+ 'index_within_arg': 0,
271
+ 'index_of_arg': 0,
272
+ 'fqn': fqn,
273
+ }
274
+
275
+ return None
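A minimal sketch of weight extraction on a symbolically traced model (illustrative only):

    import torch.nn as nn
    from torch.fx import symbolic_trace
    from torch.ao.ns.fx.weight_utils import extract_weight_from_node

    gm = symbolic_trace(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Linear(8, 4)))
    for node in gm.graph.nodes:
        if node.op == "call_module":
            res = extract_weight_from_node(node, gm)
            if res is not None:
                # res["values"][0] holds the extracted weight tensor
                print(res["ref_node_name"], res["values"][0].shape)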
venv/lib/python3.10/site-packages/torch/ao/quantization/__init__.py ADDED
@@ -0,0 +1,189 @@
1
+ # flake8: noqa: F403
2
+
3
+ from .fake_quantize import * # noqa: F403
4
+ from .fuse_modules import fuse_modules # noqa: F403
5
+ from .fuse_modules import fuse_modules_qat # noqa: F403
6
+ from .fuser_method_mappings import * # noqa: F403
7
+ from .observer import * # noqa: F403
8
+ from .qconfig import * # noqa: F403
9
+ from .qconfig_mapping import * # noqa: F403
10
+ from .quant_type import * # noqa: F403
11
+ from .quantization_mappings import * # type: ignore[no-redef]
12
+ from .quantize import * # noqa: F403
13
+ from .quantize_jit import * # noqa: F403
14
+ from .stubs import * # noqa: F403
15
+ from .pt2e.export_utils import _move_exported_model_to_eval as move_exported_model_to_eval
16
+ from .pt2e.export_utils import _move_exported_model_to_train as move_exported_model_to_train
17
+ from .pt2e.export_utils import _allow_exported_model_train_eval as allow_exported_model_train_eval
18
+ from .pt2e.generate_numeric_debug_handle import generate_numeric_debug_handle # noqa: F401
19
+ from typing import Union, List, Callable, Tuple, Optional
20
+ from torch import Tensor
21
+ import torch
22
+
23
+ ObserverOrFakeQuantize = Union[ObserverBase, FakeQuantizeBase]
24
+ ObserverOrFakeQuantize.__module__ = "torch.ao.quantization"
25
+
26
+ __all__ = [
27
+ "DeQuantStub",
28
+ "FakeQuantize",
29
+ "FakeQuantizeBase",
30
+ "FixedQParamsFakeQuantize",
31
+ "FixedQParamsObserver",
32
+ "FusedMovingAvgObsFakeQuantize",
33
+ "HistogramObserver",
34
+ "MatchAllNode",
35
+ "MinMaxObserver",
36
+ "MovingAverageMinMaxObserver",
37
+ "MovingAveragePerChannelMinMaxObserver",
38
+ "NoopObserver",
39
+ "ObserverBase",
40
+ "ObserverOrFakeQuantize",
41
+ "Pattern",
42
+ "PerChannelMinMaxObserver",
43
+ "PlaceholderObserver",
44
+ "QConfig",
45
+ "QConfigAny",
46
+ "QConfigDynamic",
47
+ "QConfigMapping",
48
+ "QuantStub",
49
+ "QuantType",
50
+ "QuantWrapper",
51
+ "RecordingObserver",
52
+ "ReuseInputObserver",
53
+ "UniformQuantizationObserverBase",
54
+ "add_quant_dequant",
55
+ "convert",
56
+ "convert_dynamic_jit",
57
+ "convert_jit",
58
+ "default_affine_fixed_qparams_fake_quant",
59
+ "default_affine_fixed_qparams_observer",
60
+ "default_debug_observer",
61
+ "default_dynamic_fake_quant",
62
+ "default_dynamic_quant_observer",
63
+ "default_embedding_fake_quant",
64
+ "default_embedding_fake_quant_4bit",
65
+ "default_eval_fn",
66
+ "default_fake_quant",
67
+ "default_fixed_qparams_range_0to1_fake_quant",
68
+ "default_fixed_qparams_range_0to1_observer",
69
+ "default_fixed_qparams_range_neg1to1_fake_quant",
70
+ "default_fixed_qparams_range_neg1to1_observer",
71
+ "default_float_qparams_observer",
72
+ "default_float_qparams_observer_4bit",
73
+ "default_fused_act_fake_quant",
74
+ "default_fused_per_channel_wt_fake_quant",
75
+ "default_fused_wt_fake_quant",
76
+ "default_histogram_fake_quant",
77
+ "default_histogram_observer",
78
+ "default_observer",
79
+ "default_per_channel_weight_fake_quant",
80
+ "default_per_channel_weight_observer",
81
+ "default_placeholder_observer",
82
+ "default_reuse_input_observer",
83
+ "default_symmetric_fixed_qparams_fake_quant",
84
+ "default_symmetric_fixed_qparams_observer",
85
+ "default_weight_fake_quant",
86
+ "default_weight_observer",
87
+ "disable_fake_quant",
88
+ "disable_observer",
89
+ "enable_fake_quant",
90
+ "enable_observer",
91
+ "fuse_conv_bn",
92
+ "fuse_conv_bn_jit",
93
+ "fuse_conv_bn_relu",
94
+ "fuse_convtranspose_bn",
95
+ "fuse_linear_bn",
96
+ "fuse_modules",
97
+ "fuse_modules_qat",
98
+ "fused_per_channel_wt_fake_quant_range_neg_127_to_127",
99
+ "fused_wt_fake_quant_range_neg_127_to_127",
100
+ "get_combined_dict",
101
+ "get_default_compare_output_module_list",
102
+ "get_default_custom_config_dict",
103
+ "get_default_dynamic_quant_module_mappings",
104
+ "get_default_dynamic_sparse_quant_module_mappings",
105
+ "get_default_float_to_quantized_operator_mappings",
106
+ "get_default_qat_module_mappings",
107
+ "get_default_qat_qconfig",
108
+ "get_default_qat_qconfig_dict",
109
+ "get_default_qat_qconfig_mapping",
110
+ "get_default_qconfig",
111
+ "get_default_qconfig_dict",
112
+ "get_default_qconfig_mapping",
113
+ "get_default_qconfig_propagation_list",
114
+ "get_default_static_quant_module_mappings",
115
+ "get_default_static_quant_reference_module_mappings",
116
+ "get_default_static_sparse_quant_module_mappings",
117
+ "get_dynamic_quant_module_class",
118
+ "get_embedding_qat_module_mappings",
119
+ "get_embedding_static_quant_module_mappings",
120
+ "get_fuser_method",
121
+ "get_fuser_method_new",
122
+ "get_observer_state_dict",
123
+ "get_quantized_operator",
124
+ "get_static_quant_module_class",
125
+ "load_observer_state_dict",
126
+ "move_exported_model_to_eval",
127
+ "move_exported_model_to_train",
128
+ "allow_exported_model_train_eval",
129
+ "no_observer_set",
130
+ "per_channel_weight_observer_range_neg_127_to_127",
131
+ "prepare",
132
+ "prepare_dynamic_jit",
133
+ "prepare_jit",
134
+ "prepare_qat",
135
+ "propagate_qconfig_",
136
+ "qconfig_equals",
137
+ "quantize",
138
+ "quantize_dynamic",
139
+ "quantize_dynamic_jit",
140
+ "quantize_jit",
141
+ "quantize_qat",
142
+ "script_qconfig",
143
+ "script_qconfig_dict",
144
+ "swap_module",
145
+ "weight_observer_range_neg_127_to_127",
146
+ "generate_numeric_debug_handle",
147
+ ]
148
+
149
+ def default_eval_fn(model, calib_data):
150
+ r"""Define the default evaluation function.
151
+
152
+ Default evaluation function takes a torch.utils.data.Dataset or a list of
153
+ input Tensors and runs the model on the dataset
154
+ """
155
+ for data, target in calib_data:
156
+ model(data)
157
+
158
+ class _DerivedObserverOrFakeQuantize(ObserverBase):
159
+ r"""This observer is used to describe an observer whose quantization parameters
160
+ are derived from other observers
161
+ """
162
+
163
+ def __init__(
164
+ self,
165
+ dtype: torch.dtype,
166
+ obs_or_fqs: List[ObserverOrFakeQuantize],
167
+ derive_qparams_fn: Callable[[List[ObserverOrFakeQuantize]], Tuple[Tensor, Tensor]],
168
+ quant_min: Optional[int]=None,
169
+ quant_max: Optional[int]=None,
170
+ qscheme: Optional[torch.qscheme]=None,
171
+ ch_axis: Optional[int] = None
172
+ ):
173
+ super().__init__(dtype)
174
+ self.obs_or_fqs = obs_or_fqs
175
+ self.derive_qparams_fn = derive_qparams_fn
176
+ self.quant_min = quant_min
177
+ self.quant_max = quant_max
178
+ self.qscheme = qscheme
179
+ self.ch_axis = ch_axis
180
+
181
+ from .utils import is_per_channel
182
+ if is_per_channel(self.qscheme):
183
+ assert self.ch_axis is not None, "Must provide a valid ch_axis if qscheme is per channel"
184
+
185
+ def forward(self, x: Tensor) -> Tensor:
186
+ return x
187
+
188
+ def calculate_qparams(self):
189
+ return self.derive_qparams_fn(self.obs_or_fqs)
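A minimal sketch of default_eval_fn, which iterates over (input, target) pairs and simply runs the model on each input (illustrative only):

    import torch
    from torch.ao.quantization import default_eval_fn

    model = torch.nn.Linear(4, 2)
    calib_data = [(torch.randn(1, 4), torch.tensor([0])) for _ in range(3)]
    default_eval_fn(model, calib_data)  # forward passes only, targets are unused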
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.81 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc ADDED
Binary file (4.85 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (5.8 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_learnable_fake_quantize.cpython-310.pyc ADDED
Binary file (6.81 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc ADDED
Binary file (15.7 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc ADDED
Binary file (5.62 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc ADDED
Binary file (9.28 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc ADDED
Binary file (47.6 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig.cpython-310.pyc ADDED
Binary file (14.1 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
venv/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc ADDED
Binary file (992 Bytes).