applied-ai-018 committed

Commit a16a162 · verified · 1 Parent(s): b00fcdd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py +23 -0
  3. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py +637 -0
  17. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py +160 -0
  18. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py +659 -0
  19. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py +494 -0
  20. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py +116 -0
  21. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py +204 -0
  22. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py +0 -0
  23. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py +542 -0
  24. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py +160 -0
  25. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py +81 -0
  26. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py +279 -0
  27. venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py +113 -0
  28. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py +3 -0
  29. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py +925 -0
  30. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py +820 -0
  31. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py +1170 -0
  32. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py +1131 -0
  33. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py +161 -0
  34. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse_handler.py +120 -0
  35. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py +119 -0
  36. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_qnnpack.py +18 -0
  37. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/lstm_utils.py +183 -0
  38. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py +237 -0
  39. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py +87 -0
  40. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/prepare.py +1880 -0
  41. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py +343 -0
  42. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py +45 -0
  43. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py +885 -0
  44. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py +0 -0
  45. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/export_utils.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2129c796a503d48e2ff5300ecb2d6919f48fd02c874d0258fb2497679e53624
+ size 33555627
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py ADDED
@@ -0,0 +1,23 @@
+ from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, DTypeWithConstraints, ObservationType
+ from .fbgemm import get_fbgemm_backend_config
+ from .native import get_native_backend_config, get_native_backend_config_dict
+ from .qnnpack import get_qnnpack_backend_config
+ from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
+ from .executorch import get_executorch_backend_config
+ from .onednn import get_onednn_backend_config
+
+ __all__ = [
+     "get_fbgemm_backend_config",
+     "get_native_backend_config",
+     "get_native_backend_config_dict",
+     "get_qnnpack_backend_config",
+     "get_tensorrt_backend_config",
+     "get_tensorrt_backend_config_dict",
+     "get_executorch_backend_config",
+     "BackendConfig",
+     "BackendPatternConfig",
+     "DTypeConfig",
+     "DTypeWithConstraints",
+     "ObservationType",
+     "get_onednn_backend_config",
+ ]
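
Note (not part of the commit): the `__init__.py` above only re-exports backend-config constructors. As a rough orientation, a minimal sketch of how these getters are typically consumed with FX graph mode quantization might look like the following; the toy model, shapes, and qconfig mapping are illustrative assumptions, and the `backend_config=` keyword is assumed to be available as in recent PyTorch releases.

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
from torch.ao.quantization.backend_config import get_native_backend_config

# Toy model and inputs -- illustrative only.
model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

# The backend config tells prepare/convert which patterns, dtypes,
# and fusions the target backend supports.
backend_config = get_native_backend_config()

prepared = prepare_fx(
    model,
    get_default_qconfig_mapping(),
    example_inputs,
    backend_config=backend_config,
)
prepared(*example_inputs)  # calibration pass
quantized = convert_fx(prepared, backend_config=backend_config)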
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (886 Bytes)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc ADDED
Binary file (13 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc ADDED
Binary file (3.2 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc ADDED
Binary file (26.6 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc ADDED
Binary file (7.71 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc ADDED
Binary file (2.23 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc ADDED
Binary file (3.59 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc ADDED
Binary file (212 Bytes)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc ADDED
Binary file (10.7 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc ADDED
Binary file (2.63 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc ADDED
Binary file (2.09 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc ADDED
Binary file (9.6 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc ADDED
Binary file (2.19 kB)
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py ADDED
@@ -0,0 +1,637 @@
1
+ import copy
2
+ import operator
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torch.nn as nn
6
+ import torch.ao.nn.intrinsic as nni
7
+ import torch.ao.nn.intrinsic.qat as nniqat
8
+ import torch.ao.nn.qat as nnqat
9
+ import torch.ao.nn.quantized.reference as nnqr
10
+ from collections import namedtuple
11
+ from typing import Callable, Dict, List, Union
12
+ from .backend_config import (
13
+ BackendPatternConfig,
14
+ DTypeConfig,
15
+ DTypeWithConstraints,
16
+ ObservationType,
17
+ )
18
+ from ..fuser_method_mappings import (
19
+ _sequential_wrapper2,
20
+ fuse_conv_bn,
21
+ fuse_conv_bn_relu,
22
+ fuse_linear_bn,
23
+ fuse_convtranspose_bn,
24
+ )
25
+
26
+ __all__: List[str] = []
27
+
28
+ # TODO: rename to be more explicit, e.g. qat_conv_relu
29
+ _ConvMetadata = namedtuple(
30
+ "_ConvMetadata",
31
+ ["root", "transpose", "bn", "reference", "transpose_reference",
32
+ "fused_conv_relu", "fused_conv_bn", "fused_conv_bn_relu",
33
+ "qat", "relu_qat", "bn_qat", "bn_relu_qat",
34
+ "func", "func_transpose"])
35
+ _Conv1dMetadata = _ConvMetadata(
36
+ nn.Conv1d, nn.ConvTranspose1d, nn.BatchNorm1d, nnqr.Conv1d, nnqr.ConvTranspose1d,
37
+ nni.ConvReLU1d, nni.ConvBn1d, nni.ConvBnReLU1d,
38
+ nnqat.Conv1d, nniqat.ConvReLU1d, nniqat.ConvBn1d, nniqat.ConvBnReLU1d,
39
+ F.conv1d, F.conv_transpose1d)
40
+ _Conv2dMetadata = _ConvMetadata(
41
+ nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d, nnqr.Conv2d, nnqr.ConvTranspose2d,
42
+ nni.ConvReLU2d, nni.ConvBn2d, nni.ConvBnReLU2d,
43
+ nnqat.Conv2d, nniqat.ConvReLU2d, nniqat.ConvBn2d, nniqat.ConvBnReLU2d,
44
+ F.conv2d, F.conv_transpose2d)
45
+ _Conv3dMetadata = _ConvMetadata(
46
+ nn.Conv3d, nn.ConvTranspose3d, nn.BatchNorm3d, nnqr.Conv3d, nnqr.ConvTranspose3d,
47
+ nni.ConvReLU3d, nni.ConvBn3d, nni.ConvBnReLU3d,
48
+ nnqat.Conv3d, nniqat.ConvReLU3d, nniqat.ConvBn3d, nniqat.ConvBnReLU3d,
49
+ F.conv3d, F.conv_transpose3d)
50
+
51
+ # Add constraints for fixed qparams ops like sigmoid and tanh to ensure values
52
+ # fall within the proper ranges, e.g. [0, 1] for sigmoid, [-1, 1] for tanh
53
+ _FIXED_QPARAM_OP_0TO1_CONSTRAINTS = DTypeWithConstraints(
54
+ dtype=torch.quint8,
55
+ quant_min_lower_bound=0,
56
+ quant_max_upper_bound=255,
57
+ scale_exact_match=1.0 / 256.0,
58
+ zero_point_exact_match=0,
59
+ )
60
+ _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS = DTypeWithConstraints(
61
+ dtype=torch.quint8,
62
+ quant_min_lower_bound=0,
63
+ quant_max_upper_bound=255,
64
+ scale_exact_match=2.0 / 256.0,
65
+ zero_point_exact_match=128,
66
+ )
67
+ _FIXED_QPARAMS_OP_TO_CONSTRAINTS: Dict[Union[Callable, str], DTypeWithConstraints] = {
68
+ torch.nn.Hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
69
+ torch.nn.functional.hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
70
+ "hardsigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
71
+ "hardsigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
72
+ torch.nn.Sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
73
+ torch.sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
74
+ "sigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
75
+ "sigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
76
+ torch.nn.Softmax: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
77
+ torch.nn.Tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
78
+ torch.tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
79
+ "tanh": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
80
+ "tanh_": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
81
+ }
82
+
83
+ def _get_binary_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
84
+ binary_op_configs: List[BackendPatternConfig] = []
85
+ num_tensor_args_to_observation_type_mapping = {
86
+ # TODO: this is not used right now since we have extra check in prepare
87
+ # will need to change this to NO_OBSERVER later after we implemented
88
+ # Tensor dtype inference properly
89
+ 0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
90
+ 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
91
+ 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
92
+ }
93
+ for op_with_quantized_bop_scalar_variant in [operator.add, torch.add, operator.mul, torch.mul]:
94
+ bop_patterns = [
95
+ (op_with_quantized_bop_scalar_variant, nn.ReLU),
96
+ (op_with_quantized_bop_scalar_variant, F.relu),
97
+ (op_with_quantized_bop_scalar_variant, torch.relu),
98
+ op_with_quantized_bop_scalar_variant
99
+ ]
100
+ for bop_pattern in bop_patterns:
101
+ binary_op_configs.append(
102
+ BackendPatternConfig(bop_pattern)
103
+ .set_dtype_configs(dtype_configs) # noqa: E131
104
+ ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))
105
+ # matmul
106
+ binary_op_configs.append(
107
+ BackendPatternConfig(torch.matmul)
108
+ .set_dtype_configs(dtype_configs) # noqa: E131
109
+ )
110
+ return binary_op_configs
111
+
112
+ def _get_linear_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
113
+ """
114
+ Return all configs related to linear modules and ops.
115
+ """
116
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
117
+ linear_configs: List[BackendPatternConfig] = []
118
+
119
+ # (1) Single linear modules/functions
120
+ # -------------------------------------
121
+ # linear module
122
+ linear_configs.append(
123
+ BackendPatternConfig(torch.nn.Linear)
124
+ .set_observation_type(observation_type) # noqa: E131
125
+ .set_dtype_configs(dtype_configs)
126
+ .set_root_module(torch.nn.Linear)
127
+ .set_reference_quantized_module(nnqr.Linear)
128
+ .set_qat_module(nnqat.Linear))
129
+ # linear qat module
130
+ linear_configs.append(
131
+ BackendPatternConfig(nnqat.Linear)
132
+ .set_observation_type(observation_type) # noqa: E131
133
+ .set_dtype_configs(dtype_configs)
134
+ .set_root_module(torch.nn.Linear)
135
+ .set_reference_quantized_module(nnqr.Linear))
136
+ # functional linear
137
+ linear_configs.append(
138
+ BackendPatternConfig(torch.nn.functional.linear)
139
+ .set_observation_type(observation_type) # noqa: E131
140
+ .set_dtype_configs(dtype_configs)
141
+ ._set_input_type_to_index({"weight": 1, "bias": 2}))
142
+
143
+ # (2) Linear + relu
144
+ # -------------------
145
+ # 2.1 linear module + relu fusion config
146
+ # linear relu, linear module + relu module
147
+ linear_configs.append(
148
+ BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU))
149
+ .set_dtype_configs(dtype_configs) # noqa: E131
150
+ .set_fuser_method(_sequential_wrapper2(nni.LinearReLU))
151
+ .set_fused_module(nni.LinearReLU))
152
+ # linear relu, linear module + functional relu
153
+ linear_configs.append(
154
+ BackendPatternConfig((torch.nn.Linear, torch.nn.functional.relu))
155
+ .set_dtype_configs(dtype_configs) # noqa: E131
156
+ .set_fuser_method(_sequential_wrapper2(nni.LinearReLU))
157
+ .set_fused_module(nni.LinearReLU))
158
+
159
+ # 2.2 linear module + relu, fused module configs
160
+ # linear relu, fused module
161
+ linear_configs.append(
162
+ BackendPatternConfig(nni.LinearReLU)
163
+ .set_observation_type(observation_type) # noqa: E131
164
+ .set_dtype_configs(dtype_configs)
165
+ .set_root_module(torch.nn.Linear)
166
+ .set_reference_quantized_module(nnqr.Linear)
167
+ .set_qat_module(nniqat.LinearReLU))
168
+ # linear relu, qat fused module
169
+ linear_configs.append(
170
+ BackendPatternConfig(nniqat.LinearReLU)
171
+ .set_observation_type(observation_type) # noqa: E131
172
+ .set_dtype_configs(dtype_configs)
173
+ .set_root_module(torch.nn.Linear)
174
+ .set_reference_quantized_module(nnqr.Linear))
175
+ # 2.3 functional linear + relu configs
176
+ # linear relu, functional linear + relu module
177
+ linear_configs.append(
178
+ BackendPatternConfig((F.linear, torch.nn.ReLU))
179
+ .set_observation_type(observation_type) # noqa: E131
180
+ .set_dtype_configs(dtype_configs))
181
+ # linear relu, functional linear + functional relu
182
+ linear_configs.append(
183
+ BackendPatternConfig((F.linear, F.relu))
184
+ .set_observation_type(observation_type) # noqa: E131
185
+ .set_dtype_configs(dtype_configs))
186
+
187
+ # (3) Linear + batchnorm
188
+ # ------------------------
189
+ # 3.1 linear bn fusion
190
+ linear_configs.append(
191
+ BackendPatternConfig((nn.Linear, nn.BatchNorm1d))
192
+ .set_dtype_configs(dtype_configs) # noqa: E131
193
+ .set_fuser_method(fuse_linear_bn)
194
+ .set_fused_module(nni.LinearBn1d))
195
+
196
+ # 3.2 linear bn fused
197
+ # linear bn, fused module
198
+ linear_configs.append(
199
+ BackendPatternConfig(nni.LinearBn1d)
200
+ .set_observation_type(observation_type) # noqa: E131
201
+ .set_dtype_configs(dtype_configs)
202
+ .set_root_module(torch.nn.Linear)
203
+ .set_reference_quantized_module(nnqr.Linear)
204
+ .set_qat_module(nniqat.LinearBn1d))
205
+ # linear bn, qat fused module
206
+ linear_configs.append(
207
+ BackendPatternConfig(nniqat.LinearBn1d)
208
+ .set_observation_type(observation_type) # noqa: E131
209
+ .set_dtype_configs(dtype_configs)
210
+ .set_root_module(torch.nn.Linear)
211
+ .set_reference_quantized_module(nnqr.Linear))
212
+ return linear_configs
213
+
214
+ def _get_conv_configs(dtype_configs):
215
+ """
216
+ Return all configs related to conv modules and ops.
217
+ """
218
+ conv_configs = []
219
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
220
+ for convs in [_Conv1dMetadata, _Conv2dMetadata, _Conv3dMetadata]:
221
+
222
+ # (1) Single conv modules/functions
223
+ # -----------------------------------
224
+ # conv module
225
+ conv_configs.append(
226
+ BackendPatternConfig(convs.root)
227
+ .set_observation_type(observation_type) # noqa: E131
228
+ .set_dtype_configs(dtype_configs)
229
+ .set_root_module(convs.root)
230
+ .set_reference_quantized_module(convs.reference)
231
+ .set_qat_module(convs.qat))
232
+ # conv qat module
233
+ conv_configs.append(
234
+ BackendPatternConfig(convs.qat)
235
+ .set_observation_type(observation_type) # noqa: E131
236
+ .set_dtype_configs(dtype_configs)
237
+ .set_root_module(convs.root)
238
+ .set_reference_quantized_module(convs.reference))
239
+ # functional conv
240
+ conv_configs.append(
241
+ BackendPatternConfig(convs.func)
242
+ .set_observation_type(observation_type) # noqa: E131
243
+ .set_dtype_configs(dtype_configs)
244
+ ._set_input_type_to_index({"weight": 1, "bias": 2}))
245
+
246
+ # (2) Conv + relu
247
+ # -----------------
248
+ # 2.1 conv module + relu fusion configs
249
+ # conv relu fusion, conv module + relu module
250
+ conv_configs.append(
251
+ BackendPatternConfig((convs.root, torch.nn.ReLU))
252
+ .set_dtype_configs(dtype_configs) # noqa: E131
253
+ .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
254
+ .set_fused_module(convs.fused_conv_relu))
255
+ # conv relu fusion, conv module + functional relu
256
+ conv_configs.append(
257
+ BackendPatternConfig((convs.root, F.relu))
258
+ .set_dtype_configs(dtype_configs) # noqa: E131
259
+ .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
260
+ .set_fused_module(convs.fused_conv_relu))
261
+ # 2.2 conv module + relu fused module configs
262
+ # conv relu, fused module
263
+ conv_configs.append(
264
+ BackendPatternConfig(convs.fused_conv_relu)
265
+ .set_observation_type(observation_type) # noqa: E131
266
+ .set_dtype_configs(dtype_configs)
267
+ .set_root_module(convs.root)
268
+ .set_reference_quantized_module(convs.reference)
269
+ .set_qat_module(convs.relu_qat))
270
+ # conv relu, qat fused module
271
+ conv_configs.append(
272
+ BackendPatternConfig(convs.relu_qat)
273
+ .set_observation_type(observation_type) # noqa: E131
274
+ .set_dtype_configs(dtype_configs)
275
+ .set_root_module(convs.root)
276
+ .set_reference_quantized_module(convs.reference))
277
+ # 2.3 functional conv + relu configs
278
+ # conv relu, functional conv + relu module
279
+ conv_configs.append(
280
+ BackendPatternConfig((convs.func, torch.nn.ReLU))
281
+ .set_observation_type(observation_type) # noqa: E131
282
+ .set_dtype_configs(dtype_configs))
283
+ # conv relu, functional conv + functional relu
284
+ conv_configs.append(
285
+ BackendPatternConfig((convs.func, F.relu))
286
+ .set_observation_type(observation_type) # noqa: E131
287
+ .set_dtype_configs(dtype_configs))
288
+
289
+ # fused conv relu
290
+ conv_configs.append(
291
+ BackendPatternConfig(convs.fused_conv_relu)
292
+ .set_dtype_configs(dtype_configs) # noqa: E131
293
+ .set_qat_module(convs.relu_qat))
294
+
295
+ conv_configs.append(
296
+ BackendPatternConfig(convs.relu_qat)
297
+ .set_dtype_configs(dtype_configs) # noqa: E131
298
+ .set_root_module(convs.root)
299
+ .set_reference_quantized_module(convs.reference))
300
+
301
+ # (3) Conv + batchnorm (+ relu)
302
+ # -------------------------------
303
+ # 3.1 conv bn fusion configs
304
+ # conv + bn fusion
305
+ conv_configs.append(
306
+ BackendPatternConfig((convs.root, convs.bn))
307
+ .set_dtype_configs(dtype_configs) # noqa: E131
308
+ .set_fuser_method(fuse_conv_bn)
309
+ .set_fused_module(convs.fused_conv_bn))
310
+ # conv + bn + relu module fusion
311
+ conv_configs.append(
312
+ BackendPatternConfig((convs.root, convs.bn, nn.ReLU))
313
+ .set_dtype_configs(dtype_configs) # noqa: E131
314
+ .set_fuser_method(fuse_conv_bn_relu)
315
+ .set_fused_module(convs.fused_conv_bn_relu))
316
+ # conv + bn + relu functional fusion
317
+ conv_configs.append(
318
+ BackendPatternConfig((convs.root, convs.bn, F.relu))
319
+ .set_dtype_configs(dtype_configs) # noqa: E131
320
+ .set_root_module(convs.root)
321
+ .set_fuser_method(fuse_conv_bn_relu)
322
+ .set_fused_module(convs.fused_conv_bn_relu))
323
+ # TODO: we can add fusion for torch.relu as well
324
+
325
+ # 3.2 conv + bn (+ relu) fused module configs
326
+ # fused conv bn
327
+ conv_configs.append(
328
+ BackendPatternConfig(convs.fused_conv_bn)
329
+ .set_dtype_configs(dtype_configs) # noqa: E131
330
+ .set_qat_module(convs.bn_qat))
331
+
332
+ # fused conv bn relu
333
+ conv_configs.append(
334
+ BackendPatternConfig(convs.fused_conv_bn_relu)
335
+ .set_dtype_configs(dtype_configs) # noqa: E131
336
+ .set_qat_module(convs.bn_relu_qat))
337
+
338
+ # conv bn, qat fused module
339
+ conv_configs.append(
340
+ BackendPatternConfig(convs.bn_qat)
341
+ .set_observation_type(observation_type) # noqa: E131
342
+ .set_dtype_configs(dtype_configs)
343
+ .set_root_module(convs.root)
344
+ .set_reference_quantized_module(convs.reference))
345
+ # conv bn relu, qat fused module
346
+ conv_configs.append(
347
+ BackendPatternConfig(convs.bn_relu_qat)
348
+ .set_observation_type(observation_type) # noqa: E131
349
+ .set_dtype_configs(dtype_configs)
350
+ .set_root_module(convs.root)
351
+ .set_reference_quantized_module(convs.reference))
352
+
353
+ # (4) conv transpose and its fusion
354
+ # 4.1 conv transpose config
355
+ conv_configs.append(
356
+ BackendPatternConfig(convs.transpose)
357
+ .set_dtype_configs(dtype_configs) # noqa: E131
358
+ .set_root_module(convs.transpose)
359
+ .set_reference_quantized_module(convs.transpose_reference))
360
+
361
+ # 4.2 conv transpose + bn fusion
362
+ conv_configs.append(
363
+ BackendPatternConfig((convs.transpose, convs.bn))
364
+ .set_dtype_configs(dtype_configs) # noqa: E131
365
+ .set_fuser_method(fuse_convtranspose_bn)
366
+ .set_root_module(convs.transpose)
367
+ .set_reference_quantized_module(convs.transpose_reference))
368
+
369
+ # 4.3 functional conv transpose
370
+ conv_configs.append(
371
+ BackendPatternConfig(convs.func_transpose)
372
+ .set_dtype_configs(dtype_configs) # noqa: E131
373
+ ._set_input_type_to_index({"weight": 1, "bias": 2}))
374
+
375
+ return conv_configs
376
+
377
+ def _get_cat_config(dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
378
+ return BackendPatternConfig(torch.cat) \
379
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
380
+ .set_dtype_configs(dtype_configs)
381
+
382
+ def _get_ln_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
383
+ ln_configs = []
384
+ ln_configs.append(
385
+ BackendPatternConfig(torch.nn.LayerNorm)
386
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
387
+ .set_dtype_configs(dtype_configs)
388
+ )
389
+ ln_configs.append(
390
+ BackendPatternConfig(torch.nn.functional.layer_norm)
391
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
392
+ .set_dtype_configs(dtype_configs)
393
+ ._set_input_type_to_index({"weight": 2, "bias": 3})
394
+ )
395
+ return ln_configs
396
+
397
+ def _get_default_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
398
+ configs = []
399
+ default_ops = [
400
+ torch.nn.ELU,
401
+ torch.nn.LeakyReLU,
402
+ torch.nn.Hardswish,
403
+ torch.nn.InstanceNorm1d,
404
+ torch.nn.InstanceNorm2d,
405
+ torch.nn.InstanceNorm3d,
406
+ torch.nn.Dropout,
407
+ torch.nn.PReLU,
408
+ torch.nn.functional.elu,
409
+ torch.nn.functional.hardswish,
410
+ torch.nn.functional.leaky_relu,
411
+ torch.nn.functional.dropout,
412
+ ]
413
+ for op in default_ops:
414
+ configs.append(
415
+ BackendPatternConfig(op)
416
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
417
+ .set_dtype_configs(dtype_configs))
418
+
419
+ configs.append(
420
+ BackendPatternConfig(torch.nn.functional.group_norm)
421
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
422
+ .set_dtype_configs(dtype_configs)
423
+ ._set_input_type_to_index({"weight": 2, "bias": 3})
424
+ )
425
+
426
+ configs.append(
427
+ BackendPatternConfig(torch.nn.functional.instance_norm)
428
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
429
+ .set_dtype_configs(dtype_configs)
430
+ ._set_input_type_to_index({"weight": 3, "bias": 4})
431
+ )
432
+ return configs
433
+
434
+ def _add_fixed_qparams_to_dtype_configs(
435
+ dtype_configs: List[DTypeConfig],
436
+ constraints: DTypeWithConstraints,
437
+ ) -> List[DTypeConfig]:
438
+ """
439
+ Return a copy of the list of DTypeConfigs where activations are subject to the specified
440
+ constraints required for fixed qparams ops.
441
+
442
+ If the data type doesn't match the one in the constraints, simply leave the corresponding
443
+ DTypeConfig unchanged.
444
+
445
+ If `scale_min_lower_bound` or `scale_max_upper_bound` is specified in the activations,
446
+ throw an exception since these settings are incompatible with fixed qparams ops.
447
+ """
448
+ new_dtype_configs = []
449
+ for dtype_config in dtype_configs:
450
+ dc = copy.deepcopy(dtype_config)
451
+ for orig_constraints in [dc.input_dtype_with_constraints, dc.output_dtype_with_constraints]:
452
+ if orig_constraints.dtype != constraints.dtype:
453
+ continue
454
+ if orig_constraints.scale_min_lower_bound is not None:
455
+ raise ValueError(f"scale_min_lower_bound is invalid for fixed qparams ops: {dtype_config}")
456
+ if orig_constraints.scale_max_upper_bound is not None:
457
+ raise ValueError(f"scale_max_upper_bound is invalid for fixed qparams ops: {dtype_config}")
458
+ orig_constraints.quant_min_lower_bound = constraints.quant_min_lower_bound
459
+ orig_constraints.quant_max_upper_bound = constraints.quant_max_upper_bound
460
+ orig_constraints.scale_exact_match = constraints.scale_exact_match
461
+ orig_constraints.zero_point_exact_match = constraints.zero_point_exact_match
462
+ new_dtype_configs.append(dc)
463
+ return new_dtype_configs
464
+
465
+ def _get_fixed_qparams_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
466
+ fixed_qparams_op_configs = []
467
+ for fixed_qparam_op, constraints in _FIXED_QPARAMS_OP_TO_CONSTRAINTS.items():
468
+ new_dtype_configs = _add_fixed_qparams_to_dtype_configs(dtype_configs, constraints)
469
+ fixed_qparams_op_configs.append(
470
+ BackendPatternConfig(fixed_qparam_op)
471
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
472
+ .set_dtype_configs(new_dtype_configs))
473
+ return fixed_qparams_op_configs
474
+
475
+ def _get_share_qparams_op_configs(dtype_configs):
476
+ """ Get the operator config for the operators that works for both float and quantized input
477
+ if input is quantized, the output Tensor shares the same quantization parameter
478
+ with input.
479
+ Example operator: avgpool2d, reshape, transpose, maxpool2d
480
+ Example observed operator:
481
+ observer_0 - avgpool2d - observer_0 (same observer instance as input)
482
+ """
483
+
484
+ def _get_share_qprams_op_backend_config(op):
485
+ return BackendPatternConfig(op) \
486
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
487
+ .set_dtype_configs(dtype_configs)
488
+
489
+ share_qparams_ops = [
490
+ torch.nn.AdaptiveAvgPool1d,
491
+ torch.nn.AdaptiveAvgPool2d,
492
+ torch.nn.AdaptiveAvgPool3d,
493
+ torch.nn.AvgPool1d,
494
+ torch.nn.AvgPool2d,
495
+ torch.nn.AvgPool3d,
496
+ torch.nn.Hardtanh,
497
+ torch.nn.Identity,
498
+ torch.nn.MaxPool1d,
499
+ torch.nn.MaxPool2d,
500
+ torch.nn.MaxPool3d,
501
+ torch.nn.PixelShuffle,
502
+ torch.nn.PixelUnshuffle,
503
+ torch.nn.ReLU,
504
+ torch.nn.ReLU6,
505
+ torch.adaptive_avg_pool1d,
506
+ torch.nn.functional.adaptive_avg_pool2d,
507
+ torch.nn.functional.adaptive_avg_pool3d,
508
+ torch.nn.functional.hardtanh,
509
+ torch.nn.functional.hardtanh_,
510
+ torch.nn.functional.interpolate,
511
+ torch.nn.functional.max_pool1d,
512
+ torch.nn.functional.max_pool2d,
513
+ torch.nn.functional.max_pool3d,
514
+ torch.nn.functional.pixel_shuffle,
515
+ torch.nn.functional.pixel_unshuffle,
516
+ torch.nn.functional.relu,
517
+ torch.nn.functional.relu6,
518
+ torch.avg_pool1d,
519
+ torch._C._nn.avg_pool2d,
520
+ torch._C._nn.avg_pool3d,
521
+ torch.clamp,
522
+ torch.flatten,
523
+ torch.mean,
524
+ torch.narrow,
525
+ torch.repeat_interleave,
526
+ torch.transpose,
527
+ torch.squeeze,
528
+ torch.stack,
529
+ torch.unsqueeze,
530
+ operator.floordiv,
531
+ "contiguous",
532
+ "clamp",
533
+ "detach",
534
+ "detach_",
535
+ "mean",
536
+ "permute",
537
+ "repeat",
538
+ "repeat_interleave",
539
+ "reshape",
540
+ "resize_",
541
+ "relu",
542
+ "relu_",
543
+ "squeeze",
544
+ "squeeze_",
545
+ "transpose",
546
+ "unsqueeze",
547
+ "unsqueeze_",
548
+ "view"
549
+ ]
550
+ return [_get_share_qprams_op_backend_config(op) for op in share_qparams_ops]
551
+
552
+ def _get_bn_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
553
+ """ Get configs related to batchnorm. """
554
+ bn_configs = []
555
+ bn_to_fused_bn = {
556
+ torch.nn.BatchNorm2d: nni.BNReLU2d,
557
+ torch.nn.BatchNorm3d: nni.BNReLU3d,
558
+ }
559
+ for bn in bn_to_fused_bn.keys():
560
+ fused_bn = bn_to_fused_bn[bn]
561
+ # bn module + relu module fusion config
562
+ bn_configs.append(
563
+ BackendPatternConfig((bn, nn.ReLU))
564
+ .set_dtype_configs(dtype_configs) # noqa: E131
565
+ .set_fuser_method(_sequential_wrapper2(fused_bn))
566
+ .set_fused_module(fused_bn))
567
+ # bn module + F.relu fusion config
568
+ bn_configs.append(
569
+ BackendPatternConfig((bn, F.relu))
570
+ .set_dtype_configs(dtype_configs) # noqa: E131
571
+ .set_fuser_method(_sequential_wrapper2(fused_bn))
572
+ .set_fused_module(fused_bn))
573
+ bn_configs.append(
574
+ BackendPatternConfig(bn)
575
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
576
+ .set_dtype_configs(dtype_configs))
577
+
578
+ # fused bn configs
579
+ for fused_bn in bn_to_fused_bn.values():
580
+ bn_configs.append(
581
+ BackendPatternConfig(fused_bn)
582
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
583
+ .set_dtype_configs(dtype_configs))
584
+ return bn_configs
585
+
586
+ def _get_rnn_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
587
+ rnn_op_configs = []
588
+ for rnn_op, ref_rnn_op in [
589
+ (nn.GRUCell, nnqr.GRUCell),
590
+ (nn.LSTMCell, nnqr.LSTMCell),
591
+ (nn.RNNCell, nnqr.RNNCell),
592
+ (nn.LSTM, nnqr.LSTM),
593
+ (nn.GRU, nnqr.GRU)
594
+ ]:
595
+ rnn_op_configs.append(
596
+ BackendPatternConfig(rnn_op)
597
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
598
+ .set_dtype_configs(dtype_configs)
599
+ .set_root_module(rnn_op)
600
+ .set_reference_quantized_module(ref_rnn_op))
601
+ return rnn_op_configs
602
+
603
+ def _get_embedding_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
604
+ embedding_op_configs = []
605
+ for embedding_op, qat_embedding_op, ref_embedding_op in [
606
+ (nn.Embedding, nnqat.Embedding, nnqr.Embedding),
607
+ (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag),
608
+ ]:
609
+ embedding_op_configs.append(
610
+ BackendPatternConfig(embedding_op)
611
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
612
+ .set_dtype_configs(dtype_configs)
613
+ .set_qat_module(qat_embedding_op)
614
+ .set_root_module(embedding_op)
615
+ .set_reference_quantized_module(ref_embedding_op))
616
+
617
+ # config for qat op
618
+ embedding_op_configs.append(
619
+ BackendPatternConfig(qat_embedding_op)
620
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
621
+ .set_dtype_configs(dtype_configs)
622
+ .set_root_module(embedding_op)
623
+ .set_reference_quantized_module(ref_embedding_op))
624
+ return embedding_op_configs
625
+
626
+ def _get_tensor_info_op_configs(dtype_configs):
627
+ """
628
+ These ops work on tensors of different dtypes but return non-tensors
629
+ containing information about the input tensor.
630
+ """
631
+
632
+ def _get_config(op):
633
+ return BackendPatternConfig(op) \
634
+ .set_observation_type(ObservationType.INPUT_OUTPUT_NOT_OBSERVED) \
635
+ .set_dtype_configs(dtype_configs)
636
+
637
+ return [_get_config(op) for op in ("shape", "size")]
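
Note (not part of the file above): `_common_operator_config_utils.py` only defines shared helpers that return lists of `BackendPatternConfig` objects. The hedged sketch below shows how a concrete backend file in this same package (for example `fbgemm.py` or `x86.py`) typically stitches those helpers into a `BackendConfig`; the dtype values are assumptions chosen for illustration, and the private helper names are taken from the file above.

import torch
from torch.ao.quantization.backend_config import BackendConfig, DTypeConfig
from torch.ao.quantization.backend_config._common_operator_config_utils import (
    _get_conv_configs,
    _get_linear_configs,
    _get_share_qparams_op_configs,
)

# A single "weighted op" dtype config: quint8 activations, qint8 weights.
weighted_int8 = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

# Compose the per-operator config lists into one backend description.
example_backend_config = (
    BackendConfig("example_backend")
    .set_backend_pattern_configs(_get_conv_configs([weighted_int8]))
    .set_backend_pattern_configs(_get_linear_configs([weighted_int8]))
    .set_backend_pattern_configs(_get_share_qparams_op_configs([weighted_int8]))
)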
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py ADDED
@@ -0,0 +1,160 @@
+ import operator
+ import torch
+ from torch.ao.quantization.backend_config import (
+     BackendConfig,
+     DTypeConfig,
+     ObservationType,
+     BackendPatternConfig,
+ )
+
+ weighted_op_quint8_dtype_config = DTypeConfig(
+     input_dtype=torch.quint8,
+     output_dtype=torch.quint8,
+     weight_dtype=torch.qint8,
+     bias_dtype=torch.float,
+ )
+ from typing import List
+
+ def get_linear_configs():
+     linear_configs = []
+     observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+
+     # TODO: need to fix the way we insert observers for this pattern
+     # should be solved in the new fusion API
+     # reason that this doesn't work: the pattern is a bit complicated and we don't
+     # have a way to specify which input of the pattern we would like to observe
+     # pattern:
+     # bias input weight
+     #   \    |    /
+     #    \   |   t
+     #     \  |  /
+     #      addmm
+     # we want to observe "weight" as weight, but there is not way to convey this
+     # information with current pattern language
+     #
+     # right now:
+     # original:
+     #   weight - t \
+     #   input - addmm
+     # observed (no hack):
+     #   weight - t - observer \
+     #   input - observer - addmm
+     # target:
+     #   weight - observer - t \
+     #   input - observer - addmm
+
+     # def root_node_getter(node_pattern):
+     #     addmm, bias, act, weight = node_pattern
+     #     return addmm
+
+     # linear_configs.append(
+     #     BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.aten.t.default))
+     #     .set_observation_type(observation_type)  # noqa: E131
+     #     .set_dtype_configs(dtype_configs)
+     #     ._set_root_node_getter(root_node_getter))
+
+     linear_configs.append(
+         BackendPatternConfig(torch.ops.aten.addmm.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 2, "bias": 0})
+     )
+     # linear is decomposed to `t - mm` if bias is not present
+     linear_configs.append(
+         BackendPatternConfig(torch.ops.aten.mm.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1})
+     )
+     return linear_configs
+
+ def get_conv_configs():
+     conv_configs = []
+     observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     conv_configs.append(
+         BackendPatternConfig(torch.ops.aten.convolution.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     conv_configs.append(
+         BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu.default))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     # TODO: remove when functionalization is supported in PT2 mode
+     conv_configs.append(
+         BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu_.default))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     return conv_configs
+
+ def get_pooling_configs():
+     backend_pattern_configs = []
+     observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+
+     def root_node_getter(node_pattern):
+         getitem, maxpool, index = node_pattern
+         return maxpool
+
+     backend_pattern_configs.append(
+         BackendPatternConfig()
+         ._set_pattern_complex_format((operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_root_node_getter(root_node_getter)
+     )
+
+     return backend_pattern_configs
+
+ def get_relu_configs():
+     backend_pattern_configs = []
+     observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     backend_pattern_configs.append(
+         BackendPatternConfig(torch.ops.aten.relu.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs))
+     return backend_pattern_configs
+
+ def get_binary_op_configs():
+     binary_op_configs: List[BackendPatternConfig] = []
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     num_tensor_args_to_observation_type_mapping = {
+         # TODO: this is not used right now since we have extra check in prepare
+         # will need to change this to NO_OBSERVER later after we implemented
+         # Tensor dtype inference properly
+         0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
+         1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
+         2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
+     }
+     for op_with_quantized_bop_scalar_variant in [torch.ops.aten.add.Tensor, torch.ops.aten.add_.Tensor]:
+         bop_patterns = [
+             (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu.default),
+             op_with_quantized_bop_scalar_variant,
+             # TODO: remove when functionalization is supported in pt2_mode
+             (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default),
+         ]
+         for bop_pattern in bop_patterns:
+             binary_op_configs.append(
+                 BackendPatternConfig(bop_pattern)
+                 .set_dtype_configs(dtype_configs)  # noqa: E131
+                 ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))
+
+     return binary_op_configs
+
+ def get_qnnpack_pt2e_backend_config():
+     return (
+         BackendConfig("qnnpack_pytorch_2.0_export")
+         .set_backend_pattern_configs(get_linear_configs())
+         .set_backend_pattern_configs(get_binary_op_configs())
+         .set_backend_pattern_configs(get_conv_configs())
+         .set_backend_pattern_configs(get_pooling_configs())
+         .set_backend_pattern_configs(get_relu_configs())
+     )
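
Note (for orientation only, not part of the file): a small sketch of inspecting the PT2E QNNPACK backend config assembled by get_qnnpack_pt2e_backend_config above, assuming this private module path is importable from the installed torch build.

from torch.ao.quantization.backend_config._qnnpack_pt2e import (
    get_qnnpack_pt2e_backend_config,
)

backend_config = get_qnnpack_pt2e_backend_config()
print(backend_config.name)  # "qnnpack_pytorch_2.0_export"
for cfg in backend_config.configs:
    # One BackendPatternConfig per supported aten pattern; pattern is None
    # for configs registered via the complex (reversed nested tuple) format.
    print(cfg.pattern, cfg.observation_type)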
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py ADDED
@@ -0,0 +1,659 @@
1
+ from __future__ import annotations
2
+ from dataclasses import dataclass
3
+ from typing import Any, Callable, Dict, List, Optional, Type, Union
4
+
5
+ import torch
6
+ from torch.ao.quantization.utils import Pattern
7
+ from enum import Enum
8
+
9
+
10
+ __all__ = [
11
+ "BackendConfig",
12
+ "BackendPatternConfig",
13
+ "DTypeConfig",
14
+ "DTypeWithConstraints",
15
+ "ObservationType",
16
+ ]
17
+
18
+
19
+ # DTypeConfig dict keys
20
+ INPUT_DTYPE_DICT_KEY = "input_dtype"
21
+ OUTPUT_DTYPE_DICT_KEY = "output_dtype"
22
+ WEIGHT_DTYPE_DICT_KEY = "weight_dtype"
23
+ BIAS_DTYPE_DICT_KEY = "bias_dtype"
24
+ IS_DYNAMIC_DICT_KEY = "is_dynamic"
25
+
26
+ # BackendConfig dict keys
27
+ NAME_DICT_KEY = "name"
28
+ CONFIGS_DICT_KEY = "configs"
29
+
30
+ # BackendPatternConfig dict keys
31
+ PATTERN_DICT_KEY = "pattern"
32
+ PATTERN_COMPLEX_FORMAT_DICT_KEY = "pattern_complex_format"
33
+ OBSERVATION_TYPE_DICT_KEY = "observation_type"
34
+ DTYPE_CONFIGS_DICT_KEY = "dtype_configs"
35
+ ROOT_MODULE_DICT_KEY = "root_module"
36
+ QAT_MODULE_DICT_KEY = "qat_module"
37
+ REFERENCE_QUANTIZED_MODULE_DICT_KEY = "reference_quantized_module_for_root"
38
+ FUSED_MODULE_DICT_KEY = "fused_module"
39
+ FUSER_METHOD_DICT_KEY = "fuser_method"
40
+ ROOT_NODE_GETTER_DICT_KEY = "root_node_getter"
41
+ EXTRA_INPUTS_GETTER_DICT_KEY = "extra_inputs_getter"
42
+ NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY = "num_tensor_args_to_observation_type"
43
+ INPUT_TYPE_TO_INDEX_DICT_KEY = "input_type_to_index"
44
+
45
+
46
+ # TODO: maybe rename this to something that's not related to observer
47
+ # e.g. QParamsType
48
+ class ObservationType(Enum):
49
+ """ An enum that represents different ways of how an operator/operator pattern
50
+ should be observed
51
+ """
52
+
53
+ OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
54
+ """this means input and output are observed with different observers, based
55
+ on qconfig.activation
56
+ example: conv, linear, softmax
57
+ """
58
+
59
+ OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1
60
+ """this means the output will use the same observer instance as input, based
61
+ on qconfig.activation
62
+ example: torch.cat, maxpool
63
+ """
64
+
65
+ INPUT_OUTPUT_NOT_OBSERVED = 2
66
+ """this means the input and output are never observed
67
+ example: x.shape, x.size
68
+ """
69
+
70
+
71
+ @dataclass
72
+ class DTypeWithConstraints:
73
+ """
74
+ Config for specifying additional constraints for a given dtype, such as quantization
75
+ value ranges, scale value ranges, and fixed quantization params, to be used in
76
+ :class:`~torch.ao.quantization.backend_config.DTypeConfig`.
77
+
78
+ The constraints currently supported are:
79
+
80
+ * `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper
81
+ bounds for the minimum and maximum quantized values respectively. If
82
+ the QConfig’s `quant_min` and `quant_max` fall outside this range,
83
+ then the QConfig will be ignored.
84
+
85
+ * `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper
86
+ bounds for the minimum and maximum scale values respectively. If the
87
+ QConfig’s minimum scale value (currently exposed as `eps`) falls below
88
+ the lower bound, then the QConfig will be ignored. Note that the upper
89
+ bound is currently not enforced.
90
+
91
+ * `scale_exact_match` and `zero_point_exact_match`: Exact match requirements
92
+ for scale and zero point, to be used for operators with fixed quantization
93
+ parameters such as sigmoid and tanh. If the observer specified in the QConfig
94
+ is neither `FixedQParamsObserver` nor `FixedQParamsFakeQuantize`, or if
95
+ the quantization parameters don't match, then the QConfig will be ignored.
96
+ """
97
+ dtype: Optional[torch.dtype] = None
98
+ quant_min_lower_bound: Union[int, float, None] = None
99
+ quant_max_upper_bound: Union[int, float, None] = None
100
+ scale_min_lower_bound: Union[int, float, None] = None
101
+ scale_max_upper_bound: Union[int, float, None] = None
102
+ scale_exact_match: Optional[float] = None
103
+ zero_point_exact_match: Optional[int] = None
104
+
105
+
106
+ @dataclass
107
+ class DTypeConfig:
108
+ """
109
+ Config object that specifies the supported data types passed as arguments to
110
+ quantize ops in the reference model spec, for input and output activations,
111
+ weights, and biases.
112
+
113
+ For example, consider the following reference model:
114
+
115
+ quant1 - [dequant1 - fp32_linear - quant2] - dequant2
116
+
117
+ The pattern in the square brackets refers to the reference pattern of
118
+ statically quantized linear. Setting the input dtype as `torch.quint8`
119
+ in the DTypeConfig means we pass in `torch.quint8` as the dtype argument
120
+ to the first quantize op (quant1). Similarly, setting the output dtype as
121
+ `torch.quint8` means we pass in `torch.quint8` as the dtype argument to
122
+ the second quantize op (quant2).
123
+
124
+ Note that the dtype here does not refer to the interface dtypes of the
125
+ op. For example, the "input dtype" here is not the dtype of the input
126
+ tensor passed to the quantized linear op. Though it can still be the
127
+ same as the interface dtype, this is not always the case, e.g. the
128
+ interface dtype is fp32 in dynamic quantization but the "input dtype"
129
+ specified in the DTypeConfig would still be quint8. The semantics of
130
+ dtypes here are the same as the semantics of the dtypes specified in
131
+ the observers.
132
+
133
+ These dtypes are matched against the ones specified in the user’s
134
+ QConfig. If there is a match, and the QConfig satisfies the constraints
135
+ specified in the DTypeConfig (if any), then we will quantize the given
136
+ pattern using this DTypeConfig. Otherwise, the QConfig is ignored and
137
+ the pattern will not be quantized.
138
+
139
+ Example usage::
140
+
141
+ >>> # xdoctest: +SKIP(failing)
142
+ >>> dtype_config1 = DTypeConfig(
143
+ ... input_dtype=torch.quint8,
144
+ ... output_dtype=torch.quint8,
145
+ ... weight_dtype=torch.qint8,
146
+ ... bias_dtype=torch.float)
147
+
148
+ >>> dtype_config2 = DTypeConfig(
149
+ ... input_dtype=DTypeWithConstraints(
150
+ ... dtype=torch.quint8,
151
+ ... quant_min_lower_bound=0,
152
+ ... quant_max_upper_bound=255,
153
+ ... ),
154
+ ... output_dtype=DTypeWithConstraints(
155
+ ... dtype=torch.quint8,
156
+ ... quant_min_lower_bound=0,
157
+ ... quant_max_upper_bound=255,
158
+ ... ),
159
+ ... weight_dtype=DTypeWithConstraints(
160
+ ... dtype=torch.qint8,
161
+ ... quant_min_lower_bound=-128,
162
+ ... quant_max_upper_bound=127,
163
+ ... ),
164
+ ... bias_dtype=torch.float)
165
+
166
+ >>> dtype_config1.input_dtype
167
+ torch.quint8
168
+
169
+ >>> dtype_config2.input_dtype
170
+ torch.quint8
171
+
172
+ >>> dtype_config2.input_dtype_with_constraints
173
+ DTypeWithConstraints(dtype=torch.quint8, quant_min_lower_bound=0, quant_max_upper_bound=255, \
174
+ scale_min_lower_bound=None, scale_max_upper_bound=None)
175
+ """
176
+ input_dtype_with_constraints: DTypeWithConstraints
177
+ output_dtype_with_constraints: DTypeWithConstraints
178
+ weight_dtype_with_constraints: DTypeWithConstraints
179
+ bias_dtype: Optional[torch.dtype]
180
+ is_dynamic: Optional[bool]
181
+
182
+ def __init__(
183
+ self,
184
+ input_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
185
+ output_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
186
+ weight_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
187
+ bias_dtype: Optional[torch.dtype] = None,
188
+ is_dynamic: Optional[bool] = None,
189
+ ):
190
+ if isinstance(input_dtype, DTypeWithConstraints):
191
+ self.input_dtype_with_constraints = input_dtype
192
+ else:
193
+ self.input_dtype_with_constraints = DTypeWithConstraints(dtype=input_dtype)
194
+
195
+ if isinstance(output_dtype, DTypeWithConstraints):
196
+ self.output_dtype_with_constraints = output_dtype
197
+ else:
198
+ self.output_dtype_with_constraints = DTypeWithConstraints(dtype=output_dtype)
199
+
200
+ if isinstance(weight_dtype, DTypeWithConstraints):
201
+ self.weight_dtype_with_constraints = weight_dtype
202
+ else:
203
+ self.weight_dtype_with_constraints = DTypeWithConstraints(dtype=weight_dtype)
204
+
205
+ self.bias_dtype = bias_dtype
206
+ self.is_dynamic = is_dynamic
207
+
208
+ @property
209
+ def input_dtype(self) -> Optional[torch.dtype]:
210
+ return self.input_dtype_with_constraints.dtype
211
+
212
+ @property
213
+ def output_dtype(self) -> Optional[torch.dtype]:
214
+ return self.output_dtype_with_constraints.dtype
215
+
216
+ @property
217
+ def weight_dtype(self) -> Optional[torch.dtype]:
218
+ return self.weight_dtype_with_constraints.dtype
219
+
220
+ @classmethod
221
+ def from_dict(cls, dtype_config_dict: Dict[str, Any]) -> DTypeConfig:
222
+ """
223
+ Create a ``DTypeConfig`` from a dictionary with the following items (all optional):
224
+ "input_dtype": torch.dtype or ``DTypeWithConstraints``
225
+ "output_dtype": torch.dtype or ``DTypeWithConstraints``
226
+ "weight_dtype": torch.dtype or ``DTypeWithConstraints``
227
+ "bias_type": torch.dtype
228
+ "is_dynamic": bool
229
+ """
230
+ input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None)
231
+ if input_dtype is not None and not isinstance(input_dtype, (torch.dtype, DTypeWithConstraints)):
232
+ raise ValueError("Expected input_dtype to be a torch.dtype or DTypeWithConstraints")
233
+ output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None)
234
+ if output_dtype is not None and not isinstance(output_dtype, (torch.dtype, DTypeWithConstraints)):
235
+ raise ValueError("Expected output_dtype to be a torch.dtype or DTypeWithConstraints")
236
+ weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None)
237
+ if weight_dtype is not None and not isinstance(weight_dtype, (torch.dtype, DTypeWithConstraints)):
238
+ raise ValueError("Expected weight_dtype to be a torch.dtype or DTypeWithConstraints")
239
+ bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None)
240
+ is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None)
241
+ return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic)
242
+
243
+ def to_dict(self) -> Dict[str, Any]:
244
+ """
245
+ Convert this ``DTypeConfig`` to a dictionary with the items described in
246
+ :func:`~torch.ao.quantization.backend_config.DTypeConfig.from_dict`.
247
+ """
248
+ dtype_config_dict: Dict[str, Any] = {}
249
+ if self.input_dtype is not None:
250
+ dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype_with_constraints
251
+ if self.output_dtype is not None:
252
+ dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype_with_constraints
253
+ if self.weight_dtype is not None:
254
+ dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype_with_constraints
255
+ if self.bias_dtype is not None:
256
+ dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype
257
+ if self.is_dynamic is not None:
258
+ dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic
259
+ return dtype_config_dict
260
+
261
+
262
+ class BackendConfig:
263
+ # TODO: refer to NativeBackendConfig once that is implemented
264
+ """Config that defines the set of patterns that can be quantized on a given backend, and how reference
265
+ quantized models can be produced from these patterns.
266
+
267
+ A pattern in this context refers to a module, a functional, an operator, or a directed acyclic graph
268
+ of the above. Each pattern supported on the target backend can be individually configured through
269
+ :class:`~torch.ao.quantization.backend_config.BackendPatternConfig` in terms of:
270
+
271
+ (1) The supported input/output activation, weight, and bias data types
272
+
273
+ (2) How observers and quant/dequant ops are inserted in order to construct the reference pattern, and
274
+
275
+ (3) (Optionally) Fusion, QAT, and reference module mappings.
276
+
277
+ The format of the patterns is described in:
278
+ https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md
279
+
280
+ Example usage::
281
+
282
+ import torch
283
+ from torch.ao.quantization.backend_config import (
284
+ BackendConfig,
285
+ BackendPatternConfig,
286
+ DTypeConfig,
287
+ ObservationType,
288
+ )
289
+
290
+ weighted_int8_dtype_config = DTypeConfig(
291
+ input_dtype=torch.quint8,
292
+ output_dtype=torch.quint8,
293
+ weight_dtype=torch.qint8,
294
+ bias_dtype=torch.float)
295
+
296
+ def fuse_conv2d_relu(is_qat, conv, relu):
297
+ return torch.ao.nn.intrinsic.ConvReLU2d(conv, relu)
298
+
299
+ # For quantizing Linear
300
+ linear_config = BackendPatternConfig(torch.nn.Linear) \
301
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
302
+ .add_dtype_config(weighted_int8_dtype_config) \
303
+ .set_root_module(torch.nn.Linear) \
304
+ .set_qat_module(torch.ao.nn.qat.Linear) \
305
+ .set_reference_quantized_module(torch.ao.nn.quantized.reference.Linear)
306
+
307
+ # For fusing Conv2d + ReLU into ConvReLU2d
308
+ conv_relu_config = BackendPatternConfig((torch.nn.Conv2d, torch.nn.ReLU)) \
309
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
310
+ .add_dtype_config(weighted_int8_dtype_config) \
311
+ .set_fused_module(torch.ao.nn.intrinsic.ConvReLU2d) \
312
+ .set_fuser_method(fuse_conv2d_relu)
313
+
314
+ # For quantizing ConvReLU2d
315
+ fused_conv_relu_config = BackendPatternConfig(torch.ao.nn.intrinsic.ConvReLU2d) \
316
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
317
+ .add_dtype_config(weighted_int8_dtype_config) \
318
+ .set_root_module(torch.nn.Conv2d) \
319
+ .set_qat_module(torch.ao.nn.intrinsic.qat.ConvReLU2d) \
320
+ .set_reference_quantized_module(torch.ao.nn.quantized.reference.Conv2d)
321
+
322
+ backend_config = BackendConfig("my_backend") \
323
+ .set_backend_pattern_config(linear_config) \
324
+ .set_backend_pattern_config(conv_relu_config) \
325
+ .set_backend_pattern_config(fused_conv_relu_config)
326
+
327
+ """
328
+ def __init__(self, name: str = ""):
329
+ self.name = name
330
+ # Store all BackendPatternConfigs in a map to handle duplicates
331
+ # Note: the key in this map uses the complex reversed tuple format.
332
+ # This is intended only for internal use; users who wish to access
333
+ # the original patterns should go through `self.configs` instead.
334
+ self._pattern_complex_format_to_config: Dict[Pattern, BackendPatternConfig] = {}
335
+
336
+ def __repr__(self):
337
+ return f"BackendConfig({self.__dict__})"
338
+
339
+ def set_name(self, name: str) -> BackendConfig:
340
+ """
341
+ Set the name of the target backend.
342
+ """
343
+ self.name = name
344
+ return self
345
+
346
+ def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig:
347
+ """
348
+ Set the config for a pattern that can be run on the target backend.
349
+ This overrides any existing config for the given pattern.
350
+ """
351
+ # Avoid circular dependencies
352
+ pattern_complex_format = torch.ao.quantization.backend_config.utils \
353
+ ._get_pattern_in_reversed_nested_tuple_format(config) # type: ignore[attr-defined]
354
+ self._pattern_complex_format_to_config[pattern_complex_format] = config
355
+ return self
356
+
357
+ def set_backend_pattern_configs(self, configs: List[BackendPatternConfig]) -> BackendConfig:
358
+ """
359
+ Set the configs for patterns that can be run on the target backend.
360
+ This overrides any existing config for a given pattern that was previously registered.
361
+ """
362
+ for conf in configs:
363
+ self.set_backend_pattern_config(conf)
364
+ return self
365
+
366
+ @property
367
+ def configs(self) -> List[BackendPatternConfig]:
368
+ """
369
+ Return a copy of the list of configs set in this `BackendConfig`.
370
+ """
371
+ return list(self._pattern_complex_format_to_config.values())
372
+
373
+ @classmethod
374
+ def from_dict(cls, backend_config_dict: Dict[str, Any]) -> BackendConfig:
375
+ """
376
+ Create a ``BackendConfig`` from a dictionary with the following items:
377
+
378
+ "name": the name of the target backend
379
+
380
+ "configs": a list of dictionaries that each represents a `BackendPatternConfig`
381
+
382
+ """
383
+ conf = cls(backend_config_dict.get(NAME_DICT_KEY, ""))
384
+ for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):
385
+ if isinstance(d, BackendPatternConfig):
386
+ conf.set_backend_pattern_config(d)
387
+ elif isinstance(d, Dict):
388
+ conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))
389
+ else:
390
+ raise ValueError(f"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary")
391
+ return conf
392
+
393
+ def to_dict(self) -> Dict[str, Any]:
394
+ """
395
+ Convert this ``BackendConfig`` to a dictionary with the items described in
396
+ :func:`~torch.ao.quantization.backend_config.BackendConfig.from_dict`.
397
+ """
398
+ return {
399
+ NAME_DICT_KEY: self.name,
400
+ CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs],
401
+ }
402
+
403
+
404
+ class BackendPatternConfig:
405
+ """
406
+ Config object that specifies quantization behavior for a given operator pattern.
407
+ For a detailed example usage, see :class:`~torch.ao.quantization.backend_config.BackendConfig`.
408
+ """
409
+ def __init__(self, pattern: Optional[Pattern] = None):
410
+ self.pattern: Optional[Pattern] = pattern
411
+ self.observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
412
+ self.dtype_configs: List[DTypeConfig] = []
413
+ self.root_module: Optional[Type[torch.nn.Module]] = None
414
+ self.qat_module: Optional[Type[torch.nn.Module]] = None
415
+ self.reference_quantized_module: Optional[Type[torch.nn.Module]] = None
416
+ self.fused_module: Optional[Type[torch.nn.Module]] = None
417
+ self.fuser_method: Optional[Callable] = None
418
+
419
+ # Temporary/internal configs
420
+ self._root_node_getter: Optional[Callable] = None
421
+ self._extra_inputs_getter: Optional[Callable] = None
422
+ self._num_tensor_args_to_observation_type: Dict[int, ObservationType] = {}
423
+ self._input_type_to_index: Dict[str, int] = {}
424
+ self._pattern_complex_format: Optional[Pattern] = None
425
+
426
+ def __repr__(self):
427
+ dict_nonempty = {
428
+ k: v for k, v in self.__dict__.items()
429
+ if (
430
+ (not isinstance(v, (list, dict)) and v is not None)
431
+ or (isinstance(v, (list, dict)) and len(v) > 0)
432
+ )
433
+ }
434
+ return f"BackendPatternConfig({dict_nonempty})"
435
+
436
+ def set_pattern(self, pattern: Pattern) -> BackendPatternConfig:
437
+ """
438
+ Set the pattern to configure.
439
+
440
+ The pattern can be a float module, functional operator, pytorch operator, or a tuple
441
+ combination of the above. Tuple patterns are treated as sequential patterns, and
442
+ currently only tuples of 2 or 3 elements are supported.
443
+ """
444
+ if self._pattern_complex_format is not None:
445
+ raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
446
+ self.pattern = pattern
447
+ return self
448
+
449
+ def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig:
450
+ """
451
+ Set how observers should be inserted in the graph for this pattern.
452
+
453
+ Observation type here refers to how observers (or quant-dequant ops) will be placed
454
+ in the graph. This is used to produce the desired reference patterns understood by
455
+ the backend. Weighted ops such as linear and conv require different observers
456
+ (or quantization parameters passed to quantize ops in the reference model) for the
457
+ input and the output.
458
+
459
+ There are two observation types:
460
+
461
+ `OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT` (default): the output observer instance
462
+ will be different from the input. This is the most common observation type.
463
+
464
+ `OUTPUT_SHARE_OBSERVER_WITH_INPUT`: the output observer instance will be the
465
+ same as the input. This is useful for operators like `cat`.
466
+
467
+ Note: This will be renamed in the near future, since we will soon insert QuantDeQuantStubs
468
+ with observers (and fake quantizers) attached instead of observers themselves.
469
+ """
470
+ self.observation_type = observation_type
471
+ return self
472
+
473
+ def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig:
474
+ """
475
+ Add a set of supported data types passed as arguments to quantize ops in the
476
+ reference model spec.
477
+ """
478
+ self.dtype_configs.append(dtype_config)
479
+ return self
480
+
481
+ def set_dtype_configs(self, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
482
+ """
483
+ Set the supported data types passed as arguments to quantize ops in the
484
+ reference model spec, overriding all previously registered data types.
485
+ """
486
+ self.dtype_configs = dtype_configs
487
+ return self
488
+
489
+ def set_root_module(self, root_module: Type[torch.nn.Module]) -> BackendPatternConfig:
490
+ """
491
+ Set the module that represents the root for this pattern.
492
+
493
+ When we construct the reference quantized model during the convert phase,
494
+ the root modules (e.g. torch.nn.Linear for torch.ao.nn.intrinsic.LinearReLU)
495
+ will be swapped to the corresponding reference quantized modules (e.g.
496
+ torch.ao.nn.reference.quantized.Linear). This allows custom backends to
497
+ specify custom reference quantized module implementations to match the
498
+ numerics of their lowered operators. Since this is a one-to-one mapping,
499
+ both the root module and the reference quantized module must be specified
500
+ in the same BackendPatternConfig in order for the conversion to take place.
501
+ """
502
+ self.root_module = root_module
503
+ return self
504
+
505
+ def set_qat_module(self, qat_module: Type[torch.nn.Module]) -> BackendPatternConfig:
506
+ """
507
+ Set the module that represents the QAT implementation for this pattern.
508
+ """
509
+ self.qat_module = qat_module
510
+ return self
511
+
512
+ def set_reference_quantized_module(self, reference_quantized_module: Type[torch.nn.Module]) -> BackendPatternConfig:
513
+ """
514
+ Set the module that represents the reference quantized implementation for
515
+ this pattern's root module.
516
+
517
+ For more detail, see :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.set_root_module`.
518
+ """
519
+ self.reference_quantized_module = reference_quantized_module
520
+ return self
521
+
522
+ def set_fused_module(self, fused_module: Type[torch.nn.Module]) -> BackendPatternConfig:
523
+ """
524
+ Set the module that represents the fused implementation for this pattern.
525
+ """
526
+ self.fused_module = fused_module
527
+ return self
528
+
529
+ def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig:
530
+ """
531
+ Set the function that specifies how to fuse this BackendPatternConfig's pattern.
532
+
533
+ The first argument of this function should be `is_qat`, and the rest of the arguments
534
+ should be the items in the tuple pattern. The return value of this function should be
535
+ the resulting fused module.
536
+
537
+ For example, the fuser method for the pattern `(torch.nn.Linear, torch.nn.ReLU)` can be:
538
+
539
+ def fuse_linear_relu(is_qat, linear, relu):
540
+ return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
541
+
542
+ For a more complicated example, see https://gist.github.com/jerryzh168/8bea7180a8ba3c279f2c9b050f2a69a6.
543
+ """
544
+ self.fuser_method = fuser_method
545
+ return self
546
+
547
+ def _set_root_node_getter(self, root_node_getter: Callable) -> BackendPatternConfig:
548
+ self._root_node_getter = root_node_getter
549
+ return self
550
+
551
+ def _set_extra_inputs_getter(self, extra_inputs_getter: Callable) -> BackendPatternConfig:
552
+ self._extra_inputs_getter = extra_inputs_getter
553
+ return self
554
+
555
+ def _set_num_tensor_args_to_observation_type(
556
+ self, num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> BackendPatternConfig:
557
+ self._num_tensor_args_to_observation_type = num_tensor_args_to_observation_type
558
+ return self
559
+
560
+ def _set_input_type_to_index(self, input_type_to_index: Dict[str, int]) -> BackendPatternConfig:
561
+ self._input_type_to_index = input_type_to_index
562
+ return self
563
+
564
+ def _set_pattern_complex_format(self, pattern: Pattern) -> BackendPatternConfig:
565
+ """
566
+ Set the pattern to configure, using the reversed nested tuple format.
567
+
568
+ See the BackendConfig README for more detail:
569
+ https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md#advanced-pattern-specification
570
+ """
571
+ if self.pattern is not None:
572
+ raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
573
+ self._pattern_complex_format = pattern
574
+ return self
575
+
576
+ @classmethod
577
+ def from_dict(cls, backend_pattern_config_dict: Dict[str, Any]) -> BackendPatternConfig:
578
+ """
579
+ Create a ``BackendPatternConfig`` from a dictionary with the following items:
580
+
581
+ "pattern": the pattern being configured
582
+ "observation_type": the :class:`~torch.ao.quantization.backend_config.ObservationType` that specifies how
583
+ observers should be inserted for this pattern
584
+ "dtype_configs": a list of dictionaries that represents :class:`~torch.ao.quantization.backend_config.DTypeConfig` s
585
+ "root_module": a :class:`torch.nn.Module` that represents the root for this pattern
586
+ "qat_module": a :class:`torch.nn.Module` that represents the QAT implementation for this pattern
587
+ "reference_quantized_module": a :class:`torch.nn.Module` that represents the reference quantized
588
+ implementation for this pattern's root module.
589
+ "fused_module": a :class:`torch.nn.Module` that represents the fused implementation for this pattern
590
+ "fuser_method": a function that specifies how to fuse the pattern for this pattern
591
+ "pattern_complex_format": the pattern specified in the reversed nested tuple format (deprecated)
592
+
593
+ """
594
+ def _get_dtype_config(obj: Any) -> DTypeConfig:
595
+ """
596
+ Convert the given object into a ``DTypeConfig`` if possible, else throw an exception.
597
+ """
598
+ if isinstance(obj, DTypeConfig):
599
+ return obj
600
+ if isinstance(obj, Dict):
601
+ return DTypeConfig.from_dict(obj)
602
+ raise ValueError(
603
+ f"Expected a list of DTypeConfigs in "
604
+ f"backend_pattern_config_dict[\"{DTYPE_CONFIGS_DICT_KEY}\"], got '{type(obj)}'"
605
+ )
606
+
607
+ conf = cls()
608
+ if PATTERN_DICT_KEY in backend_pattern_config_dict:
609
+ conf.set_pattern(backend_pattern_config_dict[PATTERN_DICT_KEY])
610
+ if OBSERVATION_TYPE_DICT_KEY in backend_pattern_config_dict:
611
+ conf.set_observation_type(backend_pattern_config_dict[OBSERVATION_TYPE_DICT_KEY])
612
+ for d in backend_pattern_config_dict.get(DTYPE_CONFIGS_DICT_KEY, []):
613
+ conf.add_dtype_config(_get_dtype_config(d))
614
+ conf.set_root_module(backend_pattern_config_dict.get(ROOT_MODULE_DICT_KEY, None))
615
+ conf.set_qat_module(backend_pattern_config_dict.get(QAT_MODULE_DICT_KEY, None))
616
+ conf.set_reference_quantized_module(backend_pattern_config_dict.get(REFERENCE_QUANTIZED_MODULE_DICT_KEY, None))
617
+ conf.set_fused_module(backend_pattern_config_dict.get(FUSED_MODULE_DICT_KEY, None))
618
+ conf.set_fuser_method(backend_pattern_config_dict.get(FUSER_METHOD_DICT_KEY, None))
619
+ conf._set_root_node_getter(backend_pattern_config_dict.get(ROOT_NODE_GETTER_DICT_KEY, None))
620
+ conf._set_extra_inputs_getter(backend_pattern_config_dict.get(EXTRA_INPUTS_GETTER_DICT_KEY, None))
621
+ conf._set_num_tensor_args_to_observation_type(
622
+ backend_pattern_config_dict.get(NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY, {}))
623
+ conf._set_input_type_to_index(backend_pattern_config_dict.get(INPUT_TYPE_TO_INDEX_DICT_KEY, {}))
624
+ if PATTERN_COMPLEX_FORMAT_DICT_KEY in backend_pattern_config_dict:
625
+ conf._set_pattern_complex_format(backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY])
626
+ return conf
627
+
628
+ def to_dict(self) -> Dict[str, Any]:
629
+ """
630
+ Convert this ``BackendPatternConfig`` to a dictionary with the items described in
631
+ :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.from_dict`.
632
+ """
633
+ backend_pattern_config_dict: Dict[str, Any] = {
634
+ OBSERVATION_TYPE_DICT_KEY: self.observation_type,
635
+ DTYPE_CONFIGS_DICT_KEY: [c.to_dict() for c in self.dtype_configs],
636
+ }
637
+ if self.pattern is not None:
638
+ backend_pattern_config_dict[PATTERN_DICT_KEY] = self.pattern
639
+ if self.root_module is not None:
640
+ backend_pattern_config_dict[ROOT_MODULE_DICT_KEY] = self.root_module
641
+ if self.qat_module is not None:
642
+ backend_pattern_config_dict[QAT_MODULE_DICT_KEY] = self.qat_module
643
+ if self.reference_quantized_module is not None:
644
+ backend_pattern_config_dict[REFERENCE_QUANTIZED_MODULE_DICT_KEY] = self.reference_quantized_module
645
+ if self.fused_module is not None:
646
+ backend_pattern_config_dict[FUSED_MODULE_DICT_KEY] = self.fused_module
647
+ if self.fuser_method is not None:
648
+ backend_pattern_config_dict[FUSER_METHOD_DICT_KEY] = self.fuser_method
649
+ if self._root_node_getter is not None:
650
+ backend_pattern_config_dict[ROOT_NODE_GETTER_DICT_KEY] = self._root_node_getter
651
+ if self._extra_inputs_getter is not None:
652
+ backend_pattern_config_dict[EXTRA_INPUTS_GETTER_DICT_KEY] = self._extra_inputs_getter
653
+ if len(self._num_tensor_args_to_observation_type) > 0:
654
+ backend_pattern_config_dict[NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY] = self._num_tensor_args_to_observation_type
655
+ if len(self._input_type_to_index) > 0:
656
+ backend_pattern_config_dict[INPUT_TYPE_TO_INDEX_DICT_KEY] = self._input_type_to_index
657
+ if self._pattern_complex_format is not None:
658
+ backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY] = self._pattern_complex_format
659
+ return backend_pattern_config_dict
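For reference, the following minimal sketch (not part of the file being added) shows how a ``BackendConfig`` built from ``BackendPatternConfig`` s can be serialized and rebuilt with the ``to_dict``/``from_dict`` methods defined above::

    import torch
    from torch.ao.quantization.backend_config import (
        BackendConfig,
        BackendPatternConfig,
        DTypeConfig,
        ObservationType,
    )

    act_int8_dtype_config = DTypeConfig(input_dtype=torch.quint8, output_dtype=torch.quint8)
    relu_config = (
        BackendPatternConfig(torch.nn.ReLU)
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
        .add_dtype_config(act_int8_dtype_config)
    )
    backend_config = BackendConfig("my_backend").set_backend_pattern_config(relu_config)

    backend_config_dict = backend_config.to_dict()          # {"name": "my_backend", "configs": [...]}
    rebuilt = BackendConfig.from_dict(backend_config_dict)  # an equivalent BackendConfig
    assert rebuilt.name == "my_backend" and len(rebuilt.configs) == 1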
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py ADDED
@@ -0,0 +1,494 @@
1
+ # TODO: rename executorch to qnnpack_executorch since executorch is a general runtime
2
+ # not a specific backend
3
+
4
+ import operator
5
+ from typing import List
6
+
7
+ import torch
8
+ import torch.ao.nn.qat as nnqat
9
+ import torch.ao.nn.quantized.reference as nnqr
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from ..fuser_method_mappings import (
14
+ _sequential_wrapper2,
15
+ fuse_conv_bn,
16
+ fuse_conv_bn_relu,
17
+ )
18
+ from ._common_operator_config_utils import _Conv2dMetadata
19
+ from .backend_config import (
20
+ BackendConfig,
21
+ BackendPatternConfig,
22
+ DTypeConfig,
23
+ DTypeWithConstraints,
24
+ ObservationType,
25
+ )
26
+ from .qnnpack import (
27
+ qnnpack_default_op_qint8_symmetric_dtype_config,
28
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
29
+ )
30
+
31
+
32
+ __all__ = [
33
+ "get_executorch_backend_config",
34
+ ]
35
+
36
+
37
+ # ===================
38
+ # | DTYPE CONFIGS |
39
+ # ===================
40
+
41
+ executorch_weighted_op_int8_dtype_config = DTypeConfig(
42
+ input_dtype=torch.quint8,
43
+ output_dtype=torch.quint8,
44
+ weight_dtype=torch.qint8,
45
+ bias_dtype=torch.float,
46
+ )
47
+
48
+ executorch_default_op_quint8_dtype_config = DTypeConfig(
49
+ input_dtype=torch.quint8,
50
+ output_dtype=torch.quint8,
51
+ )
52
+
53
+ executorch_default_dynamic_quint8_dtype_config = DTypeConfig(
54
+ input_dtype=torch.quint8,
55
+ output_dtype=torch.float,
56
+ weight_dtype=torch.qint8,
57
+ bias_dtype=torch.float,
58
+ is_dynamic=True,
59
+ )
60
+
61
+ executorch_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
62
+ dtype=torch.qint8,
63
+ scale_min_lower_bound=2**-12,
64
+ )
65
+
66
+ executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
67
+ dtype=torch.qint8,
68
+ quant_min_lower_bound=-127,
69
+ quant_max_upper_bound=127,
70
+ scale_min_lower_bound=2**-12,
71
+ )
72
+
73
+ executorch_default_dynamic_qint8_dtype_config = DTypeConfig(
74
+ input_dtype=executorch_act_qint8_scale_min_2_neg_12,
75
+ output_dtype=torch.float,
76
+ weight_dtype=executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
77
+ bias_dtype=torch.float,
78
+ is_dynamic=True,
79
+ )
80
+
81
+ executorch_default_dynamic_float16_dtype_config = DTypeConfig(
82
+ input_dtype=torch.float16,
83
+ output_dtype=torch.float,
84
+ weight_dtype=torch.float16,
85
+ bias_dtype=torch.float,
86
+ is_dynamic=True,
87
+ )
88
+
89
+ executorch_weight_only_quint8_dtype_config = DTypeConfig(
90
+ input_dtype=torch.float,
91
+ output_dtype=torch.float,
92
+ weight_dtype=torch.quint8,
93
+ )
94
+
95
+
96
+ # =============================
97
+ # | BACKEND PATTERN CONFIGS |
98
+ # =============================
99
+
100
+
101
+ def _get_linear_configs() -> List[BackendPatternConfig]:
102
+ """
103
+ Return all configs related to linear modules and ops.
104
+ """
105
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
106
+ dtype_configs = [
107
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
108
+ executorch_weighted_op_int8_dtype_config,
109
+ executorch_default_dynamic_quint8_dtype_config,
110
+ executorch_default_dynamic_qint8_dtype_config,
111
+ executorch_default_dynamic_float16_dtype_config,
112
+ ]
113
+ linear_configs: List[BackendPatternConfig] = []
114
+ # linear module
115
+ linear_configs.append(
116
+ BackendPatternConfig(torch.nn.Linear)
117
+ .set_observation_type(observation_type) # noqa: E131
118
+ .set_dtype_configs(dtype_configs)
119
+ .set_root_module(torch.nn.Linear)
120
+ .set_reference_quantized_module(nnqr.Linear)
121
+ .set_qat_module(nnqat.Linear)
122
+ )
123
+ # linear qat module
124
+ linear_configs.append(
125
+ BackendPatternConfig(nnqat.Linear)
126
+ .set_observation_type(observation_type) # noqa: E131
127
+ .set_dtype_configs(dtype_configs)
128
+ .set_root_module(torch.nn.Linear)
129
+ .set_reference_quantized_module(nnqr.Linear)
130
+ )
131
+ # functional linear
132
+ linear_configs.append(
133
+ BackendPatternConfig(torch.nn.functional.linear)
134
+ .set_observation_type(observation_type) # noqa: E131
135
+ .set_dtype_configs(dtype_configs)
136
+ ._set_input_type_to_index({"weight": 1, "bias": 2})
137
+ )
138
+ return linear_configs
139
+
140
+
141
+ def _get_conv_configs() -> List[BackendPatternConfig]:
142
+ """
143
+ Return all configs related to conv modules and ops.
144
+ """
145
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
146
+ dtype_configs = [
147
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
148
+ executorch_weighted_op_int8_dtype_config,
149
+ ]
150
+ conv_configs = []
151
+ for convs in [_Conv2dMetadata]:
152
+ # (1) Single conv modules/functions
153
+ # -----------------------------------
154
+ # conv module
155
+ conv_configs.append(
156
+ BackendPatternConfig(convs.root)
157
+ .set_observation_type(observation_type) # noqa: E131
158
+ .set_dtype_configs(dtype_configs)
159
+ .set_root_module(convs.root)
160
+ .set_reference_quantized_module(convs.reference)
161
+ .set_qat_module(convs.qat)
162
+ )
163
+ # conv qat module
164
+ conv_configs.append(
165
+ BackendPatternConfig(convs.qat)
166
+ .set_observation_type(observation_type) # noqa: E131
167
+ .set_dtype_configs(dtype_configs)
168
+ .set_root_module(convs.root)
169
+ .set_reference_quantized_module(convs.reference)
170
+ )
171
+ # functional conv
172
+ conv_configs.append(
173
+ BackendPatternConfig(convs.func)
174
+ .set_observation_type(observation_type) # noqa: E131
175
+ .set_dtype_configs(dtype_configs)
176
+ ._set_input_type_to_index({"weight": 1, "bias": 2})
177
+ )
178
+
179
+ # (2) Conv + relu
180
+ # -----------------------------------
181
+ # conv module + relu module
182
+ conv_configs.append(
183
+ BackendPatternConfig((convs.root, nn.ReLU))
184
+ .set_dtype_configs(dtype_configs) # noqa: E131
185
+ .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
186
+ .set_fused_module(convs.fused_conv_relu)
187
+ )
188
+ # conv module + functional relu
189
+ conv_configs.append(
190
+ BackendPatternConfig((convs.root, F.relu))
191
+ .set_dtype_configs(dtype_configs) # noqa: E131
192
+ .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
193
+ .set_fused_module(convs.fused_conv_relu)
194
+ )
195
+ # fused conv relu module
196
+ conv_configs.append(
197
+ BackendPatternConfig(convs.fused_conv_relu)
198
+ .set_observation_type(observation_type) # noqa: E131
199
+ .set_dtype_configs(dtype_configs)
200
+ .set_root_module(convs.root)
201
+ .set_reference_quantized_module(convs.reference)
202
+ .set_qat_module(convs.relu_qat)
203
+ )
204
+ # conv relu, qat fused module
205
+ conv_configs.append(
206
+ BackendPatternConfig(convs.relu_qat)
207
+ .set_observation_type(observation_type) # noqa: E131
208
+ .set_dtype_configs(dtype_configs)
209
+ .set_root_module(convs.root)
210
+ .set_reference_quantized_module(convs.reference)
211
+ )
212
+ # functional conv + relu module
213
+ conv_configs.append(
214
+ BackendPatternConfig((convs.func, nn.ReLU))
215
+ .set_observation_type(observation_type) # noqa: E131
216
+ .set_dtype_configs(dtype_configs)
217
+ )
218
+ # functional conv + functional relu
219
+ conv_configs.append(
220
+ BackendPatternConfig((convs.func, F.relu))
221
+ .set_observation_type(observation_type) # noqa: E131
222
+ .set_dtype_configs(dtype_configs)
223
+ )
224
+ # fused conv relu
225
+ conv_configs.append(
226
+ BackendPatternConfig(convs.fused_conv_relu)
227
+ .set_dtype_configs(dtype_configs) # noqa: E131
228
+ .set_qat_module(convs.relu_qat)
229
+ )
230
+
231
+ conv_configs.append(
232
+ BackendPatternConfig(convs.relu_qat)
233
+ .set_dtype_configs(dtype_configs) # noqa: E131
234
+ .set_root_module(convs.root)
235
+ .set_reference_quantized_module(convs.reference)
236
+ )
237
+
238
+ # (3) Conv + batchnorm (+ relu)
239
+ # -------------------------------
240
+ # conv + batchnorm (+ relu)
241
+ conv_configs.append(
242
+ BackendPatternConfig((convs.root, convs.bn))
243
+ .set_dtype_configs(dtype_configs) # noqa: E131
244
+ .set_fuser_method(fuse_conv_bn)
245
+ .set_fused_module(convs.fused_conv_bn)
246
+ )
247
+ # conv + bn + relu module fusion
248
+ conv_configs.append(
249
+ BackendPatternConfig((convs.root, convs.bn, nn.ReLU))
250
+ .set_dtype_configs(dtype_configs) # noqa: E131
251
+ .set_fuser_method(fuse_conv_bn_relu)
252
+ .set_fused_module(convs.fused_conv_bn_relu)
253
+ )
254
+ # conv + bn + relu functional fusion
255
+ conv_configs.append(
256
+ BackendPatternConfig((convs.root, convs.bn, F.relu))
257
+ .set_dtype_configs(dtype_configs) # noqa: E131
258
+ .set_root_module(convs.root)
259
+ .set_fuser_method(fuse_conv_bn_relu)
260
+ .set_fused_module(convs.fused_conv_bn_relu)
261
+ )
262
+ # TODO: we can add fusion for torch.relu as well
263
+ # 3.2 conv + bn (+ relu) fused module configs
264
+ # fused conv bn
265
+ conv_configs.append(
266
+ BackendPatternConfig(convs.fused_conv_bn)
267
+ .set_dtype_configs(dtype_configs) # noqa: E131
268
+ .set_qat_module(convs.bn_qat)
269
+ )
270
+
271
+ # fused conv bn relu
272
+ conv_configs.append(
273
+ BackendPatternConfig(convs.fused_conv_bn_relu)
274
+ .set_dtype_configs(dtype_configs) # noqa: E131
275
+ .set_qat_module(convs.bn_relu_qat)
276
+ )
277
+
278
+ # conv bn, qat fused module
279
+ conv_configs.append(
280
+ BackendPatternConfig(convs.bn_qat)
281
+ .set_observation_type(observation_type) # noqa: E131
282
+ .set_dtype_configs(dtype_configs)
283
+ .set_root_module(convs.root)
284
+ .set_reference_quantized_module(convs.reference)
285
+ )
286
+ # conv bn relu, qat fused module
287
+ conv_configs.append(
288
+ BackendPatternConfig(convs.bn_relu_qat)
289
+ .set_observation_type(observation_type) # noqa: E131
290
+ .set_dtype_configs(dtype_configs)
291
+ .set_root_module(convs.root)
292
+ .set_reference_quantized_module(convs.reference)
293
+ )
294
+ return conv_configs
295
+
296
+
297
+ def _get_binary_ops_configs() -> List[BackendPatternConfig]:
298
+ """
299
+ Return all configs related to binary ops.
300
+ """
301
+ dtype_configs = [
302
+ qnnpack_default_op_qint8_symmetric_dtype_config,
303
+ executorch_weighted_op_int8_dtype_config,
304
+ ]
305
+ num_tensor_args_to_observation_type_mapping = {
306
+ # TODO: this is not used right now since we have extra check in prepare
307
+ # will need to change this to NO_OBSERVER later after we implemented
308
+ # Tensor dtype inference properly
309
+ 0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
310
+ 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
311
+ 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
312
+ }
313
+ binary_op_configs: List[BackendPatternConfig] = []
314
+ for op in [operator.add, torch.add, operator.sub, torch.sub, operator.mul, torch.mul]:
315
+ bop_patterns = [
316
+ (op, torch.nn.ReLU),
317
+ (op, torch.nn.functional.relu),
318
+ (op, torch.relu),
319
+ op
320
+ ]
321
+ for bop_pattern in bop_patterns:
322
+ binary_op_configs.append(
323
+ BackendPatternConfig(bop_pattern)
324
+ .set_dtype_configs(dtype_configs) # noqa: E131
325
+ ._set_num_tensor_args_to_observation_type(
326
+ num_tensor_args_to_observation_type_mapping
327
+ )
328
+ )
329
+ return binary_op_configs
330
+
331
+
332
+ def _get_share_qparams_ops_configs() -> List[BackendPatternConfig]:
333
+ """
334
+ Return the operator configs for the operators that works for both float and quantized
335
+ input if input is quantized, the output Tensor shares the same quantization parameter
336
+ with input.
337
+
338
+ Example operator: avgpool2d, reshape, transpose, maxpool2d
339
+ Example observed operator:
340
+ observer_0 - avgpool2d - observer_0 (same observer instance as input)
341
+ """
342
+ observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
343
+ dtype_configs = [
344
+ qnnpack_default_op_qint8_symmetric_dtype_config,
345
+ executorch_default_op_quint8_dtype_config,
346
+ ]
347
+ share_qparams_ops = [
348
+ torch.nn.Flatten,
349
+ F.adaptive_avg_pool2d,
350
+ F.elu,
351
+ F.hardtanh,
352
+ F.max_pool2d,
353
+ F.pad,
354
+ F.relu,
355
+ F.relu6,
356
+ F.leaky_relu,
357
+ F.leaky_relu_,
358
+ torch.nn.AdaptiveAvgPool2d,
359
+ torch.nn.ConstantPad2d,
360
+ torch.nn.ELU,
361
+ torch.nn.MaxPool2d,
362
+ torch.nn.ReLU6,
363
+ torch.nn.Hardtanh,
364
+ torch.nn.LeakyReLU,
365
+ torch.clamp,
366
+ torch.flatten,
367
+ torch.mean,
368
+ torch.permute,
369
+ torch.permute_copy,
370
+ torch.squeeze,
371
+ "clamp",
372
+ "mean",
373
+ "permute",
374
+ "reshape",
375
+ "relu",
376
+ "relu_",
377
+ "squeeze",
378
+ "squeeze_",
379
+ "leaky_relu",
380
+ ]
381
+ share_qparams_op_configs: List[BackendPatternConfig] = []
382
+ for op in share_qparams_ops:
383
+ share_qparams_op_configs.append(
384
+ BackendPatternConfig(op)
385
+ .set_observation_type(observation_type) # noqa: E131
386
+ .set_dtype_configs(dtype_configs)
387
+ )
388
+ return share_qparams_op_configs
389
+
390
+
391
+ def _get_bn_configs() -> List[BackendPatternConfig]:
392
+ """
393
+ Return all configs related to batchnorm.
394
+ """
395
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
396
+ dtype_configs = [
397
+ qnnpack_default_op_qint8_symmetric_dtype_config,
398
+ executorch_default_op_quint8_dtype_config,
399
+ ]
400
+ bn_configs = []
401
+ bn_configs.append(
402
+ BackendPatternConfig(nn.BatchNorm2d)
403
+ .set_observation_type(observation_type) # noqa: E131
404
+ .set_dtype_configs(dtype_configs)
405
+ )
406
+ return bn_configs
407
+
408
+
409
+ def _get_cat_configs() -> List[BackendPatternConfig]:
410
+ dtype_configs = [
411
+ qnnpack_default_op_qint8_symmetric_dtype_config,
412
+ executorch_default_op_quint8_dtype_config,
413
+ ]
414
+ cat_configs = []
415
+ cat_configs.append(
416
+ BackendPatternConfig(torch.cat)
417
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
418
+ .set_dtype_configs(dtype_configs)
419
+ )
420
+ cat_configs.append(
421
+ BackendPatternConfig(torch.concat)
422
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
423
+ .set_dtype_configs(dtype_configs)
424
+ )
425
+ cat_configs.append(
426
+ BackendPatternConfig(torch.concatenate)
427
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
428
+ .set_dtype_configs(dtype_configs)
429
+ )
430
+ return cat_configs
431
+
432
+
433
+ def _get_embedding_op_configs() -> List[BackendPatternConfig]:
434
+ dtype_configs = [
435
+ executorch_weight_only_quint8_dtype_config,
436
+ ]
437
+ embedding_op_configs = []
438
+ for embedding_op, qat_embedding_op, ref_embedding_op in [
439
+ (nn.Embedding, nnqat.Embedding, nnqr.Embedding),
440
+ (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag),
441
+ ]:
442
+ embedding_op_configs.append(
443
+ BackendPatternConfig(embedding_op)
444
+ .set_observation_type(
445
+ ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
446
+ ) # noqa: E131
447
+ .set_dtype_configs(dtype_configs)
448
+ .set_qat_module(qat_embedding_op)
449
+ .set_root_module(embedding_op)
450
+ .set_reference_quantized_module(ref_embedding_op)
451
+ )
452
+ # config for qat op
453
+ embedding_op_configs.append(
454
+ BackendPatternConfig(qat_embedding_op)
455
+ .set_observation_type(
456
+ ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
457
+ ) # noqa: E131
458
+ .set_dtype_configs(dtype_configs)
459
+ .set_root_module(embedding_op)
460
+ .set_reference_quantized_module(ref_embedding_op)
461
+ )
462
+
463
+ # config for functional embedding
464
+ embedding_op_configs.append(
465
+ BackendPatternConfig(torch.nn.functional.embedding)
466
+ .set_observation_type(
467
+ ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
468
+ ) # noqa: E131
469
+ .set_dtype_configs(dtype_configs)
470
+ ._set_input_type_to_index({"weight": 1})
471
+ )
472
+ return embedding_op_configs
473
+
474
+
475
+
476
+ # =====================
477
+ # | BACKEND CONFIGS |
478
+ # =====================
479
+
480
+
481
+ def get_executorch_backend_config() -> BackendConfig:
482
+ """
483
+ Return the `BackendConfig` for backends PyTorch lowers to through the Executorch stack.
484
+ """
485
+ return (
486
+ BackendConfig("executorch")
487
+ .set_backend_pattern_configs(_get_linear_configs())
488
+ .set_backend_pattern_configs(_get_conv_configs())
489
+ .set_backend_pattern_configs(_get_binary_ops_configs())
490
+ .set_backend_pattern_configs(_get_share_qparams_ops_configs())
491
+ .set_backend_pattern_configs(_get_bn_configs())
492
+ .set_backend_pattern_configs(_get_cat_configs())
493
+ .set_backend_pattern_configs(_get_embedding_op_configs())
494
+ )
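As a usage sketch (not part of the file being added, and assuming the FX graph mode quantization workflow with a placeholder model), the returned config is typically threaded through ``prepare_fx`` and ``convert_fx`` via their ``backend_config`` argument::

    import torch
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
    from torch.ao.quantization.backend_config import get_executorch_backend_config

    model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 16),)
    backend_config = get_executorch_backend_config()
    qconfig_mapping = get_default_qconfig_mapping("qnnpack")

    prepared = prepare_fx(model, qconfig_mapping, example_inputs, backend_config=backend_config)
    prepared(*example_inputs)  # calibrate with representative data
    quantized = convert_fx(prepared, backend_config=backend_config)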
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py ADDED
@@ -0,0 +1,116 @@
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_rnn_op_configs,
12
+ _get_share_qparams_op_configs,
13
+ _get_tensor_info_op_configs,
14
+ )
15
+ from .backend_config import BackendConfig, DTypeConfig
16
+
17
+ __all__ = [
18
+ "get_fbgemm_backend_config",
19
+ ]
20
+
21
+ # ===================
22
+ # | DTYPE CONFIGS |
23
+ # ===================
24
+
25
+ # TODO: For now, these DTypeConfigs are identical to the ones defined in native.py
26
+ # In the future, once we support specifying quant_min/quant_max and scale_min/scale_max,
27
+ # these will diverge. In particular, for FBGEMM, we will restrict the activation quantized
28
+ # values to within [0, 127].
29
+
30
+ fbgemm_weighted_op_quint8_dtype_config = DTypeConfig(
31
+ input_dtype=torch.quint8,
32
+ output_dtype=torch.quint8,
33
+ weight_dtype=torch.qint8,
34
+ bias_dtype=torch.float,
35
+ )
36
+
37
+ fbgemm_default_op_quint8_dtype_config = DTypeConfig(
38
+ input_dtype=torch.quint8,
39
+ output_dtype=torch.quint8,
40
+ )
41
+
42
+ fbgemm_default_op_fp16_dtype_config = DTypeConfig(
43
+ input_dtype=torch.float16,
44
+ output_dtype=torch.float16,
45
+ weight_dtype=torch.float16,
46
+ bias_dtype=torch.float16,
47
+ )
48
+
49
+ fbgemm_default_dynamic_int8_dtype_config = DTypeConfig(
50
+ input_dtype=torch.quint8,
51
+ output_dtype=torch.float,
52
+ weight_dtype=torch.qint8,
53
+ bias_dtype=torch.float,
54
+ is_dynamic=True,
55
+ )
56
+
57
+ fbgemm_default_dynamic_float16_dtype_config = DTypeConfig(
58
+ input_dtype=torch.float16,
59
+ output_dtype=torch.float,
60
+ weight_dtype=torch.float16,
61
+ bias_dtype=torch.float,
62
+ is_dynamic=True,
63
+ )
64
+
65
+ fbgemm_weight_only_quint8_dtype_config = DTypeConfig(
66
+ input_dtype=torch.float,
67
+ output_dtype=torch.float,
68
+ weight_dtype=torch.quint8,
69
+ )
70
+
71
+ fbgemm_weight_only_quint4x2_dtype_config = DTypeConfig(
72
+ input_dtype=torch.float,
73
+ output_dtype=torch.float,
74
+ weight_dtype=torch.quint4x2,
75
+ )
76
+
77
+
78
+ # =====================
79
+ # | BACKEND CONFIGS |
80
+ # =====================
81
+
82
+ def get_fbgemm_backend_config() -> BackendConfig:
83
+ """
84
+ Return the `BackendConfig` for PyTorch's native FBGEMM backend.
85
+ """
86
+ conv_dtype_configs = [fbgemm_weighted_op_quint8_dtype_config]
87
+ linear_dtype_configs = [
88
+ fbgemm_weighted_op_quint8_dtype_config,
89
+ fbgemm_default_dynamic_int8_dtype_config,
90
+ fbgemm_default_dynamic_float16_dtype_config,
91
+ ]
92
+ binary_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
93
+ default_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
94
+ fixed_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
95
+ share_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
96
+ tensor_info_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
97
+ rnn_op_dtype_configs = [
98
+ fbgemm_default_dynamic_int8_dtype_config,
99
+ fbgemm_default_dynamic_float16_dtype_config,
100
+ ]
101
+ embedding_op_dtype_configs = [
102
+ fbgemm_weight_only_quint8_dtype_config,
103
+ fbgemm_weight_only_quint4x2_dtype_config,
104
+ ]
105
+ return BackendConfig("fbgemm") \
106
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
107
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
108
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
109
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
110
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
111
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
112
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
113
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
114
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
115
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
116
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
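To see what the returned config covers, one option (a sketch, not part of the file being added) is to iterate over its ``configs`` property and inspect each pattern and its dtype configs::

    from torch.ao.quantization.backend_config import get_fbgemm_backend_config

    fbgemm_backend_config = get_fbgemm_backend_config()
    for pattern_config in fbgemm_backend_config.configs:
        # pattern_config.pattern is the configured module/functional/operator (or tuple pattern)
        print(pattern_config.pattern, [d.input_dtype for d in pattern_config.dtype_configs])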
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py ADDED
@@ -0,0 +1,204 @@
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_ln_configs,
12
+ _get_rnn_op_configs,
13
+ _get_share_qparams_op_configs,
14
+ _get_tensor_info_op_configs,
15
+ )
16
+ from .backend_config import BackendConfig, DTypeConfig
17
+
18
+ __all__ = [
19
+ "get_test_only_legacy_native_backend_config",
20
+ "default_op_quint8_dtype_config",
21
+ "default_op_fp16_dtype_config",
22
+ "default_dynamic_int8_dtype_config",
23
+ "default_dynamic_float16_dtype_config",
24
+ "input_output_only_quint8_dtype_config",
25
+ "weight_only_quint8_dtype_config",
26
+ "weight_only_quint4x2_dtype_config",
27
+ "get_native_backend_config",
28
+ "get_native_backend_config_dict",
29
+ "get_test_only_legacy_native_backend_config_dict",
30
+ ]
31
+
32
+ # ===================
33
+ # | DTYPE CONFIGS |
34
+ # ===================
35
+
36
+ # weighted op int8 dtype config
37
+ # this is config for ops that has quantized weights, like linear, conv
38
+ weighted_op_quint8_dtype_config = DTypeConfig(
39
+ input_dtype=torch.quint8,
40
+ output_dtype=torch.quint8,
41
+ weight_dtype=torch.qint8,
42
+ bias_dtype=torch.float,
43
+ )
44
+
45
+ default_op_quint8_dtype_config = DTypeConfig(
46
+ input_dtype=torch.quint8,
47
+ output_dtype=torch.quint8,
48
+ )
49
+
50
+ default_op_fp16_dtype_config = DTypeConfig(
51
+ input_dtype=torch.float16,
52
+ output_dtype=torch.float16,
53
+ weight_dtype=torch.float16,
54
+ bias_dtype=torch.float16,
55
+ )
56
+
57
+ default_dynamic_int8_dtype_config = DTypeConfig(
58
+ input_dtype=torch.quint8,
59
+ output_dtype=torch.float,
60
+ weight_dtype=torch.qint8,
61
+ bias_dtype=torch.float,
62
+ # currently the dtype check is not yet enabled, so we provided the dtype_configs but
63
+ # it is not really used yet,
64
+ # we will enable it a bit later after we moved everything to backend_config_dict
65
+ is_dynamic=True,
66
+ )
67
+
68
+ default_dynamic_float16_dtype_config = DTypeConfig(
69
+ input_dtype=torch.float16,
70
+ output_dtype=torch.float,
71
+ weight_dtype=torch.float16,
72
+ bias_dtype=torch.float,
73
+ # currently the dtype check is not yet enabled, so we provided the dtype_configs but
74
+ # it is not really used yet,
75
+ # we will enable it a bit later after we moved everything to backend_config_dict
76
+ is_dynamic=True,
77
+ )
78
+
79
+ # Needed for LayerNorm and f.layer_norm, since currently the kernel only supports float weights
80
+ input_output_only_quint8_dtype_config = DTypeConfig(
81
+ input_dtype=torch.quint8,
82
+ output_dtype=torch.quint8,
83
+ weight_dtype=torch.float,
84
+ bias_dtype=torch.float,
85
+ )
86
+
87
+ weight_only_quint8_dtype_config = DTypeConfig(
88
+ input_dtype=torch.float,
89
+ output_dtype=torch.float,
90
+ weight_dtype=torch.quint8,
91
+ )
92
+
93
+ weight_only_quint4x2_dtype_config = DTypeConfig(
94
+ input_dtype=torch.float,
95
+ output_dtype=torch.float,
96
+ weight_dtype=torch.quint4x2,
97
+ )
98
+
99
+
100
+ # =====================
101
+ # | BACKEND CONFIGS |
102
+ # =====================
103
+
104
+ def get_test_only_legacy_native_backend_config() -> BackendConfig:
105
+ """
106
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops.
107
+ """
108
+ conv_dtype_configs = [weighted_op_quint8_dtype_config]
109
+ linear_dtype_configs = [
110
+ weighted_op_quint8_dtype_config,
111
+ default_dynamic_int8_dtype_config,
112
+ default_dynamic_float16_dtype_config,
113
+ default_op_fp16_dtype_config,
114
+ ]
115
+ binary_op_dtype_configs = [
116
+ default_op_quint8_dtype_config,
117
+ default_op_fp16_dtype_config,
118
+ ]
119
+ default_op_dtype_configs = [default_op_quint8_dtype_config]
120
+ fixed_qparams_op_dtype_configs = [
121
+ default_op_quint8_dtype_config,
122
+ default_op_fp16_dtype_config,
123
+ ]
124
+ share_qparams_op_dtype_configs = [
125
+ default_op_quint8_dtype_config,
126
+ default_op_fp16_dtype_config
127
+ ]
128
+ tensor_info_op_dtype_configs = [
129
+ default_op_quint8_dtype_config,
130
+ ]
131
+ rnn_op_dtype_configs = [
132
+ default_dynamic_int8_dtype_config,
133
+ default_dynamic_float16_dtype_config,
134
+ ]
135
+ embedding_op_dtype_configs = [
136
+ weight_only_quint8_dtype_config,
137
+ weight_only_quint4x2_dtype_config,
138
+ ]
139
+ layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
140
+ return BackendConfig("_native_and_fp16") \
141
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
142
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
143
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
144
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
145
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
146
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
147
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
148
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
149
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
150
+ .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
151
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
152
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
153
+
154
+ def get_native_backend_config() -> BackendConfig:
155
+ """
156
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack).
157
+ """
158
+ # TODO: express this BackendConfig as a union of the FBGEMM and QNNPACK BackendConfigs
159
+ conv_dtype_configs = [weighted_op_quint8_dtype_config]
160
+ linear_dtype_configs = [
161
+ weighted_op_quint8_dtype_config,
162
+ default_dynamic_int8_dtype_config,
163
+ default_dynamic_float16_dtype_config,
164
+ ]
165
+ binary_op_dtype_configs = [default_op_quint8_dtype_config]
166
+ default_op_dtype_configs = [default_op_quint8_dtype_config]
167
+ fixed_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
168
+ share_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
169
+ tensor_info_op_dtype_configs = [default_op_quint8_dtype_config]
170
+ rnn_op_dtype_configs = [
171
+ default_dynamic_int8_dtype_config,
172
+ default_dynamic_float16_dtype_config,
173
+ ]
174
+ embedding_op_dtype_configs = [
175
+ weight_only_quint8_dtype_config,
176
+ weight_only_quint4x2_dtype_config,
177
+ ]
178
+ layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
179
+ return BackendConfig("native") \
180
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
181
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
182
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
183
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
184
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
185
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
186
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
187
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
188
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
189
+ .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
190
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
191
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
192
+
193
+ def get_native_backend_config_dict():
194
+ """
195
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.
196
+ """
197
+ return get_native_backend_config().to_dict()
198
+
199
+ def get_test_only_legacy_native_backend_config_dict():
200
+ """
201
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional
202
+ fp16 ops in dictionary form.
203
+ """
204
+ return get_test_only_legacy_native_backend_config().to_dict()
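For illustration (a sketch, not part of the file being added), the dictionary form produced by ``BackendConfig.to_dict`` (and returned by ``get_native_backend_config_dict``) follows the ``"name"``/``"configs"`` layout defined in backend_config.py::

    from torch.ao.quantization.backend_config import get_native_backend_config

    native_backend_config = get_native_backend_config()
    native_backend_config_dict = native_backend_config.to_dict()
    print(native_backend_config_dict["name"])           # "native"
    print(len(native_backend_config_dict["configs"]))   # number of configured patterns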
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py ADDED
@@ -0,0 +1,542 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.ao.nn.intrinsic as nni
4
+ import torch.nn.functional as F
5
+ import torch.ao.nn.quantized.reference as nnqr
6
+ from ._common_operator_config_utils import (
7
+ _get_conv_configs,
8
+ _get_linear_configs,
9
+ _get_binary_op_configs,
10
+ _get_bn_configs,
11
+ _get_cat_config,
12
+ _get_default_op_configs,
13
+ _get_embedding_op_configs,
14
+ _get_fixed_qparams_op_configs,
15
+ _get_ln_configs,
16
+ _get_rnn_op_configs,
17
+ _get_share_qparams_op_configs,
18
+ )
19
+ from .backend_config import (
20
+ BackendPatternConfig,
21
+ BackendConfig,
22
+ DTypeConfig,
23
+ ObservationType,
24
+ )
25
+ from ..fuser_method_mappings import (
26
+ _sequential_wrapper2,
27
+ )
28
+ import operator
29
+ from torch.ao.quantization.utils import MatchAllNode
30
+ import itertools
31
+
32
+ # ===================
33
+ # | DTYPE CONFIGS |
34
+ # ===================
35
+
36
+ onednn_weighted_op_int8_dtype_config = DTypeConfig(
37
+ input_dtype=torch.quint8,
38
+ output_dtype=torch.quint8,
39
+ weight_dtype=torch.qint8,
40
+ bias_dtype=torch.float,
41
+ )
42
+
43
+ onednn_op_quint8_dtype_config = DTypeConfig(
44
+ input_dtype=torch.quint8,
45
+ output_dtype=torch.quint8,
46
+ )
47
+
48
+ onednn_dynamic_int8_dtype_config = DTypeConfig(
49
+ input_dtype=torch.quint8,
50
+ output_dtype=torch.float,
51
+ weight_dtype=torch.qint8,
52
+ bias_dtype=torch.float,
53
+ is_dynamic=True,
54
+ )
55
+
56
+ onednn_weight_only_qint8_dtype_config = DTypeConfig(
57
+ input_dtype=torch.float,
58
+ output_dtype=torch.float,
59
+ weight_dtype=torch.qint8,
60
+ )
61
+
62
+ onednn_input_output_only_quint8_dtype_config = DTypeConfig(
63
+ input_dtype=torch.quint8,
64
+ output_dtype=torch.quint8,
65
+ weight_dtype=torch.float,
66
+ bias_dtype=torch.float,
67
+ )
68
+
69
+ # ===================
70
+ # | FUSER METHODS |
71
+ # ===================
72
+
73
+ def _fuse_linear_bn_leaky_relu(is_qat, linear, bn, leaky_relu):
74
+ r"""Given the linear, bn and leaky_relu modules, fuses them and returns the fused module
75
+ Args:
76
+ is_qat: a flag for whether we are using quantization aware training fusion
77
+ or post training quantization fusion
78
+ linear: Module instance of type Linear
79
+ bn: BatchNorm1d instance that needs to be fused with the linear layer
80
+ leaky_relu: LeakyReLU instance that needs to be fused with the linear layer
81
+ Examples::
82
+ >>> # xdoctest: +SKIP(failing)
83
+ >>> m1 = nn.Linear(20, 10)
84
+ >>> b1 = nn.BatchNorm1d(10)
85
+ >>> lr = nn.LeakyReLU(0.01)
86
+ >>> m2 = _fuse_linear_bn_leaky_relu(False, m1, b1, lr)
87
+ """
88
+ assert linear.training == bn.training and bn.training == leaky_relu.training, \
89
+ "Linear, BN and LeakyReLU all must be in the same mode (train or eval)."
90
+
91
+ if is_qat:
92
+ raise NotImplementedError(f"Cannot fuse train modules: {(linear, bn, leaky_relu)}")
93
+ else:
94
+ map_to_fused_module_eval = {
95
+ nn.Linear: nni.LinearLeakyReLU,
96
+ }
97
+ fused_module = map_to_fused_module_eval.get(type(linear), None)
98
+ if fused_module is not None:
99
+ fused_linear = nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
100
+ fm = fused_module(fused_linear, leaky_relu)
101
+ return fm
102
+ else:
103
+ raise NotImplementedError(f"Cannot fuse eval modules: {(linear, bn, leaky_relu)}")
104
+
105
+ # ======================
106
+ # | CONFIGS FOR CONV |
107
+ # ======================
108
+ observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
109
+
110
+ conv_dtype_configs = [onednn_weighted_op_int8_dtype_config]
111
+ conv_configs = _get_conv_configs(conv_dtype_configs)
112
+
113
+ # (1) Conv2d + Add
114
+
115
+ # conv2d Y
116
+ # \ /
117
+ # add
118
+
119
+ # include:
120
+ # conv2d conv2d
121
+ # \ /
122
+ # add
123
+
124
+ def _fuse_conv_add_left(is_qat, add, conv, _):
125
+ return nni.ConvAdd2d(conv, add)
126
+
127
+ def _conv_add_root_node_getter_left(pattern):
128
+ _, conv, _ = pattern
129
+ return conv
130
+
131
+ def _conv_add_extra_inputs_getter_left(pattern):
132
+ """ get inputs pattern for extra inputs, inputs for root node
133
+ are assumed to be copied over from root node to the fused node
134
+ """
135
+ _, conv, extra_input = pattern
136
+ return [extra_input]
137
+
138
+ # conv2d
139
+ # \
140
+ # bn Y
141
+ # \ /
142
+ # add
143
+
144
+ def _fuse_conv_bn_add_left(is_qat, add, bn_conv, _):
145
+ bn, conv = bn_conv
146
+ if is_qat:
147
+ raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}")
148
+ else:
149
+ fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
150
+ return nni.ConvAdd2d(fused_conv, add)
151
+
152
+ def _conv_bn_add_root_node_getter_left(add_pattern):
153
+ _, bn_conv, _ = add_pattern
154
+ bn, conv = bn_conv
155
+ return conv
156
+
157
+ def _conv_bn_add_extra_inputs_getter_left(add_pattern):
158
+ """ get inputs pattern for extra inputs, inputs for root node
159
+ are assumed to be copied over from root node to the fused node
160
+ """
161
+ _, bn_conv, extra_input = add_pattern
162
+ bn, conv = bn_conv
163
+ return [extra_input]
164
+
165
+ conv_add_left_options = itertools.product(
166
+ [True, False], # with_bn
167
+ [torch.add, operator.add], # add_op
168
+ )
169
+
170
+ for with_bn, add_op in conv_add_left_options:
171
+ if with_bn:
172
+ conv_configs.append(
173
+ BackendPatternConfig()
174
+ ._set_pattern_complex_format((add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode)) # noqa: E131
175
+ .set_observation_type(observation_type)
176
+ .set_dtype_configs(conv_dtype_configs)
177
+ .set_fuser_method(_fuse_conv_bn_add_left)
178
+ ._set_root_node_getter(_conv_bn_add_root_node_getter_left)
179
+ ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_left)
180
+ .set_fused_module(nni.ConvAdd2d))
181
+ else:
182
+ conv_configs.append(
183
+ BackendPatternConfig()
184
+ ._set_pattern_complex_format((add_op, nn.Conv2d, MatchAllNode)) # noqa: E131
185
+ .set_observation_type(observation_type)
186
+ .set_dtype_configs(conv_dtype_configs)
187
+ .set_fuser_method(_fuse_conv_add_left)
188
+ ._set_root_node_getter(_conv_add_root_node_getter_left)
189
+ ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_left)
190
+ .set_fused_module(nni.ConvAdd2d))
191
+
192
+ # Y conv2d
193
+ # \ /
194
+ # add
195
+
196
+ def _fuse_conv_add_right(is_qat, add, _, conv):
197
+ return nni.ConvAdd2d(conv, add)
198
+
199
+ def _conv_add_root_node_getter_right(pattern):
200
+ add, _, conv = pattern
201
+ return conv
202
+
203
+ def _conv_add_extra_inputs_getter_right(pattern):
204
+ """ get inputs pattern for extra inputs, inputs for root node
205
+ are assumed to be copied over from root node to the fused node
206
+ """
207
+ _, extra_input, conv = pattern
208
+ return [extra_input]
209
+
210
+ # conv2d
211
+ # /
212
+ # Y bn
213
+ # \ /
214
+ # add
215
+
216
+ def _fuse_conv_bn_add_right(is_qat, add, _, bn_conv):
217
+ bn, conv = bn_conv
218
+ if is_qat:
219
+ raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}")
220
+ else:
221
+ fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
222
+ return nni.ConvAdd2d(fused_conv, add)
223
+
224
+ def _conv_bn_add_root_node_getter_right(pattern):
225
+ add, _, bn_conv = pattern
226
+ bn, conv = bn_conv
227
+ return conv
228
+
229
+ def _conv_bn_add_extra_inputs_getter_right(pattern):
230
+ """ get inputs pattern for extra inputs, inputs for root node
231
+ are assumed to be copied over from root node to the fused node
232
+ """
233
+ _, extra_input, bn_conv = pattern
234
+ bn, conv = bn_conv
235
+ return [extra_input]
236
+
237
+ conv_add_options = itertools.product(
238
+ [True, False], # with_bn
239
+ [torch.add, operator.add], # add_op
240
+ )
241
+
242
+ for with_bn, add_op in conv_add_options:
243
+ if with_bn:
244
+ conv_configs.append(
245
+ BackendPatternConfig()
246
+ ._set_pattern_complex_format((add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))) # noqa: E131
247
+ .set_observation_type(observation_type)
248
+ .set_dtype_configs(conv_dtype_configs)
249
+ .set_fuser_method(_fuse_conv_bn_add_right)
250
+ ._set_root_node_getter(_conv_bn_add_root_node_getter_right)
251
+ ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_right)
252
+ .set_fused_module(nni.ConvAdd2d))
253
+ else:
254
+ conv_configs.append(
255
+ BackendPatternConfig()
256
+ ._set_pattern_complex_format((add_op, MatchAllNode, nn.Conv2d)) # noqa: E131
257
+ .set_observation_type(observation_type)
258
+ .set_dtype_configs(conv_dtype_configs)
259
+ .set_fuser_method(_fuse_conv_add_right)
260
+ ._set_root_node_getter(_conv_add_root_node_getter_right)
261
+ ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_right)
262
+ .set_fused_module(nni.ConvAdd2d))
263
+
264
+ conv_configs.append(
265
+ BackendPatternConfig(nni.ConvAdd2d)
266
+ .set_observation_type(observation_type) # noqa: E131
267
+ .set_dtype_configs(conv_dtype_configs)
268
+ .set_root_module(nn.Conv2d)
269
+ .set_reference_quantized_module(nnqr.Conv2d))
270
+
271
+ # (2) Conv2d + Add + Relu
272
+
273
+ # conv2d Y
274
+ # \ /
275
+ # add
276
+ # \
277
+ # relu
278
+
279
+ def _fuse_conv_add_relu_left(is_qat, relu, add_pattern):
280
+ add, conv, _ = add_pattern
281
+ return nni.ConvAddReLU2d(conv, add, relu)
282
+
283
+ def _conv_add_relu_root_node_getter_left(pattern):
284
+ relu, add_pattern = pattern
285
+ _, conv, _ = add_pattern
286
+ return conv
287
+
288
+ def _conv_add_relu_extra_inputs_getter_left(pattern):
289
+ """ get inputs pattern for extra inputs, inputs for root node
290
+ are assumed to be copied over from root node to the fused node
291
+ """
292
+ relu, add_pattern = pattern
293
+ _, conv, extra_input = add_pattern
294
+ return [extra_input]
295
+
296
+ # conv2d
297
+ # \
298
+ # bn Y
299
+ # \ /
300
+ # add
301
+ # \
302
+ # relu
303
+
304
+ def _fuse_conv_bn_add_relu_left(is_qat, relu, add_pattern):
305
+ add, bn_conv, _ = add_pattern
306
+ bn, conv = bn_conv
307
+ if is_qat:
308
+ raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
309
+ else:
310
+ fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
311
+ return nni.ConvAddReLU2d(fused_conv, add, relu)
312
+
313
+ def _conv_bn_add_relu_root_node_getter_left(pattern):
314
+ relu, add_pattern = pattern
315
+ _, bn_conv, _ = add_pattern
316
+ bn, conv = bn_conv
317
+ return conv
318
+
319
+ def _conv_bn_add_relu_extra_inputs_getter_left(pattern):
320
+ """ get inputs pattern for extra inputs, inputs for root node
321
+ are assumed to be copied over from root node to the fused node
322
+ """
323
+ relu, add_pattern = pattern
324
+ _, bn_conv, extra_input = add_pattern
325
+ bn, conv = bn_conv
326
+ return [extra_input]
327
+
328
+ conv_add_relu_left_options = itertools.product(
329
+ [True, False], # with_bn
330
+ [torch.add, operator.add], # add_op
331
+ )
332
+
333
+ for with_bn, add_op in conv_add_relu_left_options:
334
+ if with_bn:
335
+ conv_configs.append(
336
+ BackendPatternConfig()
337
+ ._set_pattern_complex_format((nn.ReLU, (add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))) # noqa: E131
338
+ .set_observation_type(observation_type)
339
+ .set_dtype_configs(conv_dtype_configs)
340
+ .set_fuser_method(_fuse_conv_bn_add_relu_left)
341
+ ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_left)
342
+ ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_left)
343
+ .set_fused_module(nni.ConvAddReLU2d))
344
+ else:
345
+ conv_configs.append(
346
+ BackendPatternConfig()
347
+ ._set_pattern_complex_format((nn.ReLU, (add_op, nn.Conv2d, MatchAllNode))) # noqa: E131
348
+ .set_observation_type(observation_type)
349
+ .set_dtype_configs(conv_dtype_configs)
350
+ .set_fuser_method(_fuse_conv_add_relu_left)
351
+ ._set_root_node_getter(_conv_add_relu_root_node_getter_left)
352
+ ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_left)
353
+ .set_fused_module(nni.ConvAddReLU2d))
354
+
355
+ # Y conv2d
356
+ # \ /
357
+ # add
358
+ # \
359
+ # relu
360
+
361
+ def _fuse_conv_add_relu_right(is_qat, relu, add_pattern):
362
+ add, _, conv = add_pattern
363
+ return nni.ConvAddReLU2d(conv, add, relu)
364
+
365
+ def _conv_add_relu_root_node_getter_right(pattern):
366
+ relu, add_pattern = pattern
367
+ _, _, conv = add_pattern
368
+ return conv
369
+
370
+ def _conv_add_relu_extra_inputs_getter_right(pattern):
371
+ """ get inputs pattern for extra inputs, inputs for root node
372
+ are assumed to be copied over from root node to the fused node
373
+ """
374
+ relu, add_pattern = pattern
375
+ _, extra_input, conv = add_pattern
376
+ return [extra_input]
377
+
378
+ # conv2d
379
+ # /
380
+ # Y bn
381
+ # \ /
382
+ # add
383
+ # \
384
+ # relu
385
+
386
+ def _fuse_conv_bn_add_relu_right(is_qat, relu, add_pattern):
387
+ add, _, bn_conv = add_pattern
388
+ bn, conv = bn_conv
389
+ if is_qat:
390
+ raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
391
+ else:
392
+ fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
393
+ return nni.ConvAddReLU2d(fused_conv, add, relu)
394
+
395
+ def _conv_bn_add_relu_root_node_getter_right(pattern):
396
+ relu, add_pattern = pattern
397
+ _, _, bn_conv = add_pattern
398
+ bn, conv = bn_conv
399
+ return conv
400
+
401
+ def _conv_bn_add_relu_extra_inputs_getter_right(pattern):
402
+ """ get inputs pattern for extra inputs, inputs for root node
403
+ are assumed to be copied over from root node to the fused node
404
+ """
405
+ relu, add_pattern = pattern
406
+ _, extra_input, bn_conv = add_pattern
407
+ bn, conv = bn_conv
408
+ return [extra_input]
409
+
410
+ conv_add_relu_options = itertools.product(
411
+ [True, False], # with_bn
412
+ [torch.add, operator.add], # add_op
413
+ )
414
+
415
+ for with_bn, add_op in conv_add_relu_options:
416
+ if with_bn:
417
+ conv_configs.append(
418
+ BackendPatternConfig()
419
+ ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d)))) # noqa: E131
420
+ .set_observation_type(observation_type)
421
+ .set_dtype_configs(conv_dtype_configs)
422
+ .set_fuser_method(_fuse_conv_bn_add_relu_right)
423
+ ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_right)
424
+ ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_right)
425
+ .set_fused_module(nni.ConvAddReLU2d))
426
+ else:
427
+ conv_configs.append(
428
+ BackendPatternConfig()
429
+ ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, nn.Conv2d))) # noqa: E131
430
+ .set_observation_type(observation_type)
431
+ .set_dtype_configs(conv_dtype_configs)
432
+ .set_fuser_method(_fuse_conv_add_relu_right)
433
+ ._set_root_node_getter(_conv_add_relu_root_node_getter_right)
434
+ ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_right)
435
+ .set_fused_module(nni.ConvAddReLU2d))
436
+
437
+ conv_configs.append(
438
+ BackendPatternConfig(nni.ConvAddReLU2d)
439
+ .set_observation_type(observation_type) # noqa: E131
440
+ .set_dtype_configs(conv_dtype_configs)
441
+ .set_root_module(nn.Conv2d)
442
+ .set_reference_quantized_module(nnqr.Conv2d))
443
+
444
+ # ========================
445
+ # | CONFIGS FOR LINEAR |
446
+ # ========================
447
+
448
+ linear_dtype_configs = [
449
+ onednn_weighted_op_int8_dtype_config,
450
+ onednn_dynamic_int8_dtype_config,
451
+ ]
452
+ linear_configs = _get_linear_configs(linear_dtype_configs)
453
+
454
+ def _add_eltwise_fusion_configs(configs, root_module, root_op, post_module, post_op,
455
+ dtype_configs, fuser_method, fused_module, observation_type,
456
+ ref_quant_module):
457
+ # 1 base module + op module fusion config
458
+ configs.append(
459
+ BackendPatternConfig((root_module, post_module))
460
+ .set_dtype_configs(dtype_configs) # noqa: E131
461
+ .set_fuser_method(fuser_method)
462
+ .set_fused_module(fused_module))
463
+ # base module + functional post op
464
+ configs.append(
465
+ BackendPatternConfig((root_module, post_op))
466
+ .set_dtype_configs(dtype_configs) # noqa: E131
467
+ .set_fuser_method(fuser_method)
468
+ .set_fused_module(fused_module))
469
+
470
+ # 2 fused module configs
471
+ configs.append(
472
+ BackendPatternConfig(fused_module)
473
+ .set_observation_type(observation_type) # noqa: E131
474
+ .set_dtype_configs(dtype_configs)
475
+ .set_root_module(root_module)
476
+ .set_reference_quantized_module(ref_quant_module))
477
+
478
+ # 3 functional base op + post op configs
479
+ configs.append(
480
+ BackendPatternConfig((root_op, post_module))
481
+ .set_observation_type(observation_type) # noqa: E131
482
+ .set_dtype_configs(dtype_configs))
483
+ configs.append(
484
+ BackendPatternConfig((root_op, post_op))
485
+ .set_observation_type(observation_type) # noqa: E131
486
+ .set_dtype_configs(dtype_configs))
487
+
488
+ # Configs for linear + leaky_relu fusion
489
+ _add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear,
490
+ nn.LeakyReLU, F.leaky_relu, linear_dtype_configs,
491
+ _sequential_wrapper2(nni.LinearLeakyReLU),
492
+ nni.LinearLeakyReLU, observation_type, nnqr.Linear)
493
+
494
+ # Configs for linear module + batchnorm + leaky_relu
495
+ linear_configs.append(
496
+ BackendPatternConfig((nn.Linear, nn.BatchNorm1d, nn.LeakyReLU))
497
+ .set_dtype_configs(linear_dtype_configs) # noqa: E131
498
+ .set_fuser_method(_fuse_linear_bn_leaky_relu)
499
+ .set_fused_module(nni.LinearLeakyReLU))
500
+
501
+ # Configs for linear + tanh fusion
502
+ _add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear,
503
+ nn.Tanh, torch.tanh, linear_dtype_configs,
504
+ _sequential_wrapper2(nni.LinearTanh),
505
+ nni.LinearTanh, observation_type, nnqr.Linear)
506
+
507
+ # ===========================
508
+ # | CONFIGS FOR OTHER OPS |
509
+ # ===========================
510
+
511
+ binary_op_dtype_configs = [onednn_op_quint8_dtype_config]
512
+ default_op_dtype_configs = [onednn_op_quint8_dtype_config]
513
+ fixed_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config]
514
+ share_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config]
515
+ rnn_op_dtype_configs = [onednn_dynamic_int8_dtype_config]
516
+ embedding_op_dtype_configs = [onednn_weight_only_qint8_dtype_config]
517
+ layer_norm_op_dtype_configs = [onednn_input_output_only_quint8_dtype_config]
518
+
519
+ # =====================
520
+ # | BACKEND CONFIGS |
521
+ # =====================
522
+
523
+ def get_onednn_backend_config() -> BackendConfig:
524
+ """
525
+ Return the `BackendConfig` for PyTorch's native ONEDNN backend.
526
+ """
527
+ return BackendConfig("onednn") \
528
+ .set_backend_pattern_configs(conv_configs) \
529
+ .set_backend_pattern_configs(linear_configs) \
530
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
531
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
532
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
533
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
534
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
535
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
536
+ .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
537
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
538
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
539
+
540
+ __all__ = [
541
+ "get_onednn_backend_config",
542
+ ]
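
For orientation, the Conv2d + Add (+ ReLU) patterns registered above match eval-mode graphs shaped like the toy module below. The module, channel counts, and input shapes are illustrative assumptions, not part of the onednn config; fusion driven by this BackendConfig would collapse the conv/bn/add/relu chain into nni.ConvAdd2d or nni.ConvAddReLU2d.

    import torch
    import torch.nn as nn

    class ConvBnAddReLU(nn.Module):
        """Toy graph matching (nn.ReLU, (add, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))."""
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
            self.bn = nn.BatchNorm2d(8)
            self.relu = nn.ReLU()

        def forward(self, x, y):
            # conv2d -> bn feeds the left input of add; y is the MatchAllNode input
            return self.relu(self.bn(self.conv(x)) + y)

    m = ConvBnAddReLU().eval()
    out = m(torch.randn(1, 3, 16, 16), torch.randn(1, 8, 16, 16))
    print(out.shape)  # torch.Size([1, 8, 16, 16])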
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py ADDED
@@ -0,0 +1,160 @@
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_rnn_op_configs,
12
+ _get_share_qparams_op_configs,
13
+ )
14
+ from .backend_config import BackendConfig, DTypeConfig, DTypeWithConstraints
15
+
16
+ __all__ = [
17
+ "get_qnnpack_backend_config",
18
+ ]
19
+
20
+ # ===================
21
+ # | DTYPE CONFIGS |
22
+ # ===================
23
+
24
+ qnnpack_weighted_op_quint8_dtype_config = DTypeConfig(
25
+ input_dtype=torch.quint8,
26
+ output_dtype=torch.quint8,
27
+ weight_dtype=torch.qint8,
28
+ bias_dtype=torch.float,
29
+ )
30
+
31
+ qnnpack_default_op_quint8_dtype_config = DTypeConfig(
32
+ input_dtype=torch.quint8,
33
+ output_dtype=torch.quint8,
34
+ )
35
+
36
+ qnnpack_default_op_fp16_dtype_config = DTypeConfig(
37
+ input_dtype=torch.float16,
38
+ output_dtype=torch.float16,
39
+ weight_dtype=torch.float16,
40
+ bias_dtype=torch.float16,
41
+ )
42
+
43
+ qnnpack_default_dynamic_int8_dtype_config = DTypeConfig(
44
+ input_dtype=torch.quint8,
45
+ output_dtype=torch.float,
46
+ weight_dtype=torch.qint8,
47
+ bias_dtype=torch.float,
48
+ is_dynamic=True,
49
+ )
50
+
51
+ qnnpack_default_dynamic_float16_dtype_config = DTypeConfig(
52
+ input_dtype=torch.float16,
53
+ output_dtype=torch.float,
54
+ weight_dtype=torch.float16,
55
+ bias_dtype=torch.float,
56
+ is_dynamic=True,
57
+ )
58
+
59
+ qnnpack_weight_only_quint8_dtype_config = DTypeConfig(
60
+ input_dtype=torch.float,
61
+ output_dtype=torch.float,
62
+ weight_dtype=torch.quint8,
63
+ )
64
+
65
+ qnnpack_weight_only_quint4x2_dtype_config = DTypeConfig(
66
+ input_dtype=torch.float,
67
+ output_dtype=torch.float,
68
+ weight_dtype=torch.quint4x2,
69
+ )
70
+
71
+ # xnnpack compatible dtype configs
72
+
73
+ # We restrict scale values to be at least 2 ** -12 to ensure the
74
+ # requantization scale never falls below the xnnpack lower
75
+ # threshold. Additionally, for qint8 weight, we restrict
76
+ # the quantization values to [-127, +127], excluding -128.
77
+ # For more detail, refer to the description of
78
+ # `default_symmetric_qnnpack_qconfig`.
79
+
80
+ # TODO: add additional restriction on qscheme to ensure it
81
+ # is either per_tensor_symmetric or per_channel_symmetric
82
+
83
+ qnnpack_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
84
+ dtype=torch.qint8,
85
+ scale_min_lower_bound=2 ** -12,
86
+ )
87
+
88
+ qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
89
+ dtype=torch.qint8,
90
+ quant_min_lower_bound=-127,
91
+ quant_max_upper_bound=127,
92
+ scale_min_lower_bound=2 ** -12,
93
+ )
94
+
95
+ qnnpack_weighted_op_qint8_symmetric_dtype_config = DTypeConfig(
96
+ input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
97
+ output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
98
+ weight_dtype=qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
99
+ bias_dtype=torch.float,
100
+ )
101
+
102
+ qnnpack_default_op_qint8_symmetric_dtype_config = DTypeConfig(
103
+ input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
104
+ output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
105
+ )
106
+
107
+
108
+ # =====================
109
+ # | BACKEND CONFIGS |
110
+ # =====================
111
+
112
+ def get_qnnpack_backend_config() -> BackendConfig:
113
+ """
114
+ Return the `BackendConfig` for PyTorch's native QNNPACK backend.
115
+ """
116
+ conv_dtype_configs = [
117
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
118
+ qnnpack_weighted_op_quint8_dtype_config,
119
+ ]
120
+ linear_dtype_configs = [
121
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
122
+ qnnpack_weighted_op_quint8_dtype_config,
123
+ qnnpack_default_dynamic_int8_dtype_config,
124
+ qnnpack_default_dynamic_float16_dtype_config,
125
+ ]
126
+ binary_op_dtype_configs = [
127
+ qnnpack_default_op_qint8_symmetric_dtype_config,
128
+ qnnpack_default_op_quint8_dtype_config,
129
+ ]
130
+ default_op_dtype_configs = [
131
+ qnnpack_default_op_qint8_symmetric_dtype_config,
132
+ qnnpack_default_op_quint8_dtype_config,
133
+ ]
134
+ fixed_qparams_op_dtype_configs = [
135
+ qnnpack_default_op_qint8_symmetric_dtype_config,
136
+ qnnpack_default_op_quint8_dtype_config,
137
+ ]
138
+ share_qparams_op_dtype_configs = [
139
+ qnnpack_default_op_qint8_symmetric_dtype_config,
140
+ qnnpack_default_op_quint8_dtype_config,
141
+ ]
142
+ rnn_op_dtype_configs = [
143
+ qnnpack_default_dynamic_int8_dtype_config,
144
+ qnnpack_default_dynamic_float16_dtype_config,
145
+ ]
146
+ embedding_op_dtype_configs = [
147
+ qnnpack_weight_only_quint8_dtype_config,
148
+ qnnpack_weight_only_quint4x2_dtype_config,
149
+ ]
150
+ return BackendConfig("qnnpack") \
151
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
152
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
153
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
154
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
155
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
156
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
157
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
158
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
159
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
160
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
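
A minimal sketch of how the returned config and the constraints above can be inspected; the attribute names used here (.name, .configs, .scale_min_lower_bound) follow the BackendConfig and DTypeWithConstraints classes in backend_config.py and are assumptions for illustration rather than guarantees made by this file.

    from torch.ao.quantization.backend_config import get_qnnpack_backend_config
    from torch.ao.quantization.backend_config.qnnpack import (
        qnnpack_act_qint8_scale_min_2_neg_12,
    )

    backend_config = get_qnnpack_backend_config()
    print(backend_config.name)          # "qnnpack"
    print(len(backend_config.configs))  # number of registered pattern configs

    # The xnnpack-compatible activation constraint keeps scales at or above 2 ** -12
    print(qnnpack_act_qint8_scale_min_2_neg_12.scale_min_lower_bound)  # 0.000244140625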
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py ADDED
@@ -0,0 +1,81 @@
1
+ import torch
2
+ from .backend_config import (
3
+ BackendConfig,
4
+ BackendPatternConfig,
5
+ DTypeConfig,
6
+ ObservationType
7
+ )
8
+ from ._common_operator_config_utils import (
9
+ _get_binary_op_configs,
10
+ _get_linear_configs,
11
+ _get_conv_configs,
12
+ _get_share_qparams_op_configs,
13
+ _get_tensor_info_op_configs,
14
+ )
15
+
16
+ __all__ = [
17
+ "get_tensorrt_backend_config",
18
+ "get_tensorrt_backend_config_dict",
19
+ ]
20
+
21
+ def get_tensorrt_backend_config() -> BackendConfig:
22
+ """
23
+ Return the `BackendConfig` for the TensorRT backend.
24
+ NOTE: The current API will change in the future; for now it only exists to unblock experimentation with
25
+ new backends, so please don't rely on it yet.
26
+ TODO: add a README when it's more stable
27
+ """
28
+ # dtype configs
29
+ weighted_op_qint8_dtype_config = DTypeConfig(
30
+ input_dtype=torch.qint8,
31
+ output_dtype=torch.qint8,
32
+ weight_dtype=torch.qint8,
33
+ bias_dtype=torch.float,
34
+ )
35
+ non_weighted_op_qint8_dtype_config = DTypeConfig(
36
+ input_dtype=torch.qint8,
37
+ output_dtype=torch.qint8,
38
+ )
39
+
40
+ addmm_config = BackendPatternConfig(torch.addmm) \
41
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
42
+ .add_dtype_config(weighted_op_qint8_dtype_config) \
43
+ ._set_input_type_to_index({
44
+ "bias": 0,
45
+ "input": 1,
46
+ "weight": 2,
47
+ })
48
+ cat_config = BackendPatternConfig(torch.cat) \
49
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
50
+ .add_dtype_config(non_weighted_op_qint8_dtype_config)
51
+ conv_dtype_configs = [
52
+ weighted_op_qint8_dtype_config,
53
+ ]
54
+ linear_dtype_configs = [
55
+ weighted_op_qint8_dtype_config,
56
+ ]
57
+ binary_op_dtype_configs = [
58
+ weighted_op_qint8_dtype_config,
59
+ ]
60
+ share_qparams_op_dtype_configs = [
61
+ non_weighted_op_qint8_dtype_config,
62
+ ]
63
+ tensor_info_op_dtype_configs = [
64
+ non_weighted_op_qint8_dtype_config,
65
+ ]
66
+ # some ops might not be supported in fx2trt yet; those will error out
67
+ # during fx2trt conversion and support can be added afterwards
68
+ return BackendConfig("tensorrt") \
69
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
70
+ .set_backend_pattern_config(addmm_config) \
71
+ .set_backend_pattern_config(cat_config) \
72
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
73
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
74
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
75
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs))
76
+
77
+ def get_tensorrt_backend_config_dict():
78
+ """
79
+ Return the `BackendConfig` for the TensorRT backend in dictionary form.
80
+ """
81
+ return get_tensorrt_backend_config().to_dict()
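
The _set_input_type_to_index mapping in addmm_config above records which positional argument of torch.addmm plays which role (argument 0 is the additive, bias-like input). A small check of that argument order, included purely for illustration:

    import torch

    bias = torch.randn(4)        # positional arg 0 -> "bias" in addmm_config
    inp = torch.randn(2, 3)      # positional arg 1 -> "input"
    weight = torch.randn(3, 4)   # positional arg 2 -> "weight"

    out = torch.addmm(bias, inp, weight)            # computes bias + inp @ weight
    assert torch.allclose(out, bias + inp @ weight)
    print(out.shape)  # torch.Size([2, 4])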
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py ADDED
@@ -0,0 +1,279 @@
1
+ from typing import Dict, Any, List, Callable, Union, Tuple, Type
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from .backend_config import (
7
+ BackendConfig,
8
+ BackendPatternConfig,
9
+ DTypeConfig,
10
+ )
11
+ from ..utils import Pattern
12
+ from ..fuser_method_mappings import (
13
+ _reverse2,
14
+ _reverse3,
15
+ )
16
+
17
+ __all__ = [
18
+ "get_pattern_to_dtype_configs",
19
+ "get_qat_module_classes",
20
+ "get_fused_module_classes",
21
+ "get_pattern_to_input_type_to_index",
22
+ "get_root_module_to_quantized_reference_module",
23
+ "get_fuser_method_mapping",
24
+ "get_module_to_qat_module",
25
+ "get_fusion_pattern_to_root_node_getter",
26
+ "get_fusion_pattern_to_extra_inputs_getter",
27
+ "remove_boolean_dispatch_from_name",
28
+ "pattern_to_human_readable",
29
+ "entry_to_pretty_str",
30
+ ]
31
+
32
+ def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]:
33
+ pattern_to_dtype_configs: Dict[Pattern, List[DTypeConfig]] = {}
34
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
35
+ pattern_to_dtype_configs[pattern] = config.dtype_configs
36
+ return pattern_to_dtype_configs
37
+
38
+ def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
39
+ qat_module_classes = []
40
+ for config in backend_config.configs:
41
+ if config.qat_module is not None:
42
+ qat_module_classes.append(config.qat_module)
43
+ return tuple(set(qat_module_classes))
44
+
45
+ def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
46
+ fused_module_classes = []
47
+ for config in backend_config.configs:
48
+ if config.fused_module is not None:
49
+ fused_module_classes.append(config.fused_module)
50
+ return tuple(set(fused_module_classes))
51
+
52
+ def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]:
53
+ pattern_to_input_type_to_index: Dict[Pattern, Dict[str, int]] = {}
54
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
55
+ pattern_to_input_type_to_index[pattern] = config._input_type_to_index
56
+ return pattern_to_input_type_to_index
57
+
58
+ def get_root_module_to_quantized_reference_module(
59
+ backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]:
60
+ mapping: Dict[Type[torch.nn.Module], Type[torch.nn.Module]] = {}
61
+ for config in backend_config.configs:
62
+ if config.root_module is not None and config.reference_quantized_module is not None:
63
+ mapping[config.root_module] = config.reference_quantized_module
64
+ return mapping
65
+
66
+ def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]:
67
+ fuser_method_mapping : Dict[Pattern, Union[nn.Sequential, Callable]] = {}
68
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
69
+ if config.fuser_method is not None:
70
+ # Note: both the fuser method and the pattern are specified in forward order in the
71
+ # BackendConfig, but the internal pattern matching code uses the reversed nested tuple
72
+ # format, so we need to convert both to the internal format
73
+ fuser_method = _get_fuser_method_in_reversed_nested_tuple_format(config)
74
+ fuser_method_mapping[pattern] = fuser_method
75
+ return fuser_method_mapping
76
+
77
+ def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]:
78
+ module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]] = {}
79
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
80
+ if config.qat_module is not None:
81
+ module_to_qat_module[pattern] = config.qat_module
82
+ return module_to_qat_module
83
+
84
+ def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
85
+ """ Get a map from fusion pattern to a function that returns the root node
86
+ from the fusion pattern, e.g. the most common one is:
87
+ def get_root_node(node_pattern):
88
+ while not isinstance(node_pattern[-1], Node):
89
+ node_pattern = node_pattern[-1]
90
+ return node_pattern[-1]
91
+ This can work for all patterns whose root node is the "last node" in the pattern,
92
+ e.g. (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d))
93
+ """
94
+ root_node_getter_mapping: Dict[Pattern, Callable] = {}
95
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
96
+ if config._root_node_getter is not None:
97
+ root_node_getter_mapping[pattern] = config._root_node_getter
98
+ return root_node_getter_mapping
99
+
100
+ def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
101
+ """ Get a map from fusion pattern to a function that returns extra input nodes
102
+ from the fusion pattern, in the order required by the root node. This is optional;
103
+ if not specified, we will not copy over any extra inputs for the root node.
104
+ Example:
105
+ # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
106
+ # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra
107
+ # argument to the fused module, we can unpack the pattern and return the node at
108
+ # MatchAllNode here
109
+ # we can implement extra_inputs_getter as follows:
110
+ def extra_inputs_getter(pattern) -> List[Any]:
111
+ add, extra_input, conv_pattern = pattern
112
+ return [extra_input]
113
+ """
114
+ extra_inputs_getter_mapping: Dict[Pattern, Callable] = {}
115
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
116
+ if config._extra_inputs_getter is not None:
117
+ extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter
118
+ return extra_inputs_getter_mapping
119
+
120
+ def remove_boolean_dispatch_from_name(p) -> Any:
121
+ """
122
+ Some ops have a default string representation such as
123
+ '<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
124
+ this function replaces them with the hardcoded function names.
125
+ """
126
+ if p is F.fractional_max_pool2d:
127
+ return "torch.nn.functional.fractional_max_pool2d"
128
+ elif p is F.fractional_max_pool3d:
129
+ return "torch.nn.functional.fractional_max_pool3d"
130
+ elif p is F.max_pool1d:
131
+ return "torch.nn.functional.max_pool1d"
132
+ elif p is F.max_pool2d:
133
+ return "torch.nn.functional.max_pool2d"
134
+ elif p is F.max_pool3d:
135
+ return "torch.nn.functional.max_pool3d"
136
+ elif p is F.adaptive_max_pool1d:
137
+ return "torch.nn.functional.adaptive_max_pool1d"
138
+ elif p is F.adaptive_max_pool2d:
139
+ return "torch.nn.functional.adaptive_max_pool2d"
140
+ elif p is F.adaptive_max_pool3d:
141
+ return "torch.nn.functional.adaptive_max_pool3d"
142
+ assert "boolean_dispatch" not in str(p), \
143
+ f"{p} does not have a human readable representation in " + \
144
+ "quantization documentation"
145
+ return p
146
+
147
+ def pattern_to_human_readable(p) -> Any:
148
+ if isinstance(p, tuple):
149
+ # nested patterns, recurse
150
+ return tuple(pattern_to_human_readable(inner_p) for inner_p in p)
151
+ elif isinstance(p, str):
152
+ # method names are already human readable
153
+ return p
154
+ else:
155
+ p = remove_boolean_dispatch_from_name(p)
156
+ return p
157
+
158
+ # TODO(future PR): move backend_config_dict to use dataclass and move this logic to
159
+ # the corresponding __str__ function
160
+ def entry_to_pretty_str(entry) -> str:
161
+ """
162
+ Given a backend_config_dict entry, returns a string with the human readable
163
+ representation of it.
164
+ """
165
+ s = "{\n"
166
+
167
+ # always output the pattern first
168
+ if "pattern" in entry:
169
+ pattern_str = pattern_to_human_readable(entry["pattern"])
170
+
171
+ s += f" 'pattern': {pattern_str},\n"
172
+
173
+ # custom output for dtype_configs to make it look nice
174
+ if "dtype_configs" in entry:
175
+ s += " 'dtype_configs': [\n"
176
+ for dtype_config in entry["dtype_configs"]:
177
+ s += " {\n"
178
+ for k, v in dtype_config.items():
179
+ s += f" '{k}': {v},\n"
180
+ s += " },\n"
181
+ s += " ],\n"
182
+
183
+ # custom output for num_tensor_args_to_observation_type to make it look nice
184
+ if "num_tensor_args_to_observation_type" in entry:
185
+ s += " 'num_tensor_args_to_observation_type': {\n"
186
+ for k, v in entry["num_tensor_args_to_observation_type"].items():
187
+ s += f" {k}: {v},\n"
188
+ s += " },\n"
189
+
190
+ # output all the other fields
191
+ custom_handled_fields = [
192
+ "pattern",
193
+ "dtype_configs",
194
+ "num_tensor_args_to_observation_type",
195
+ ]
196
+ for field_name in entry:
197
+ if field_name in custom_handled_fields:
198
+ continue
199
+ s += f" '{field_name}': {entry[field_name]},\n"
200
+
201
+ s += "}"
202
+ return s
203
+
204
+ def _get_pattern_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Pattern:
205
+ """
206
+ Return the pattern specified in the given config in the reversed nested tuple format
207
+ used internally in the quantization pattern matching code.
208
+
209
+ If the pattern is not a tuple, or the pattern is already specified in the reversed
210
+ nested tuple format, return the pattern as is. Otherwise:
211
+
212
+ For 2-tuples (a, b), return (b, a).
213
+ For 3-tuples (a, b, c), return (c, (b, a)).
214
+
215
+ For example:
216
+ * Given nn.Linear, return nn.Linear
217
+ * Given (nn.Linear, nn.ReLU), return (nn.ReLU, nn.Linear)
218
+ * Given (nn.Conv2d, nn.BatchNorm2d, nn.ReLU), return
219
+ (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
220
+
221
+ For context, the reason why this is needed is that the user-facing BackendConfig
222
+ API accepts the flat 2-or-3-tuple format in forward order. While this simple
223
+ format handles the vast majority of use cases, it does not handle the more
224
+ complex ones, and so the internal pattern matching code for quantization uses
225
+ the following, more general reversed nested tuple format instead:
226
+
227
+ operator = module_type | functional | torch op | native op | MatchAllNode
228
+ Pattern = (operator, Pattern, Pattern, ...) | operator
229
+
230
+ In the future, we expect to replace the above complex format with the one used
231
+ by the subgraph rewriter in torch.fx, so we don't have to maintain our own
232
+ complex pattern matching code. Then we won't need this helper function anymore.
233
+ """
234
+ if config._pattern_complex_format is not None:
235
+ return config._pattern_complex_format
236
+ if config.pattern is None:
237
+ raise ValueError("Either 'pattern' or 'pattern_complex_format' must be specified")
238
+ if not isinstance(config.pattern, tuple):
239
+ return config.pattern
240
+
241
+ # Pattern is specified in the simple tuple format, need to convert
242
+ if len(config.pattern) == 2:
243
+ (a, b) = config.pattern
244
+ return (b, a)
245
+ elif len(config.pattern) == 3:
246
+ (a, b, c) = config.pattern
247
+ return (c, (b, a))
248
+ else:
249
+ raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
250
+
251
+ def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:
252
+ """
253
+ Return the fuser method specified in the given config in the reversed nested
254
+ tuple format used internally in the quantization pattern matching code.
255
+
256
+ If pattern is specified in the reversed nested tuple format, we assume the
257
+ fuser method is also specified in this format and simply return it as is.
258
+ Otherwise, we convert the fuser method as follows:
259
+
260
+ * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv)
261
+ * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv),
262
+ where bn_conv is a 2-tuple (bn, conv)
263
+
264
+ The first argument of a fuser method is always `is_qat` and is not affected
265
+ in the conversion. We currently only support functions with 3 or 4 arguments.
266
+ """
267
+ assert config.fuser_method is not None
268
+ if config._pattern_complex_format is not None:
269
+ return config.fuser_method
270
+ if not isinstance(config.pattern, tuple):
271
+ raise ValueError("Expected pattern to be a tuple, got: ", config.pattern)
272
+
273
+ # Pattern is specified in the simple tuple format, need to convert
274
+ if len(config.pattern) == 2:
275
+ return _reverse2(config.fuser_method)
276
+ elif len(config.pattern) == 3:
277
+ return _reverse3(config.fuser_method)
278
+ else:
279
+ raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
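
As a concrete illustration of the forward-to-reversed conversion described in the docstring above, here is a minimal sketch with a toy (Conv2d, BatchNorm2d, ReLU) pattern; the private helper is imported directly for demonstration only.

    import torch.nn as nn
    from torch.ao.quantization.backend_config import BackendPatternConfig
    from torch.ao.quantization.backend_config.utils import (
        _get_pattern_in_reversed_nested_tuple_format,
    )

    config = BackendPatternConfig((nn.Conv2d, nn.BatchNorm2d, nn.ReLU))

    # The forward 3-tuple (a, b, c) becomes the reversed nested tuple (c, (b, a))
    print(_get_pattern_in_reversed_nested_tuple_format(config))
    # (ReLU, (BatchNorm2d, Conv2d)), expressed as module classes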
venv/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py ADDED
@@ -0,0 +1,113 @@
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_rnn_op_configs,
12
+ _get_share_qparams_op_configs,
13
+ _get_tensor_info_op_configs,
14
+ )
15
+ from .backend_config import BackendConfig, DTypeConfig
16
+
17
+ __all__ = [
18
+ "get_x86_backend_config",
19
+ ]
20
+
21
+ # ===================
22
+ # | DTYPE CONFIGS |
23
+ # ===================
24
+
25
+ # X86 aligns with FBGEMM for now
26
+
27
+ x86_weighted_op_int8_dtype_config = DTypeConfig(
28
+ input_dtype=torch.quint8,
29
+ output_dtype=torch.quint8,
30
+ weight_dtype=torch.qint8,
31
+ bias_dtype=torch.float,
32
+ )
33
+
34
+ x86_default_op_quint8_dtype_config = DTypeConfig(
35
+ input_dtype=torch.quint8,
36
+ output_dtype=torch.quint8,
37
+ )
38
+
39
+ x86_default_op_fp16_dtype_config = DTypeConfig(
40
+ input_dtype=torch.float16,
41
+ output_dtype=torch.float16,
42
+ weight_dtype=torch.float16,
43
+ bias_dtype=torch.float16,
44
+ )
45
+
46
+ x86_default_dynamic_int8_dtype_config = DTypeConfig(
47
+ input_dtype=torch.quint8,
48
+ output_dtype=torch.float,
49
+ weight_dtype=torch.qint8,
50
+ bias_dtype=torch.float,
51
+ is_dynamic=True,
52
+ )
53
+
54
+ x86_default_dynamic_float16_dtype_config = DTypeConfig(
55
+ input_dtype=torch.float16,
56
+ output_dtype=torch.float,
57
+ weight_dtype=torch.float16,
58
+ bias_dtype=torch.float,
59
+ is_dynamic=True,
60
+ )
61
+
62
+ x86_weight_only_quint8_dtype_config = DTypeConfig(
63
+ input_dtype=torch.float,
64
+ output_dtype=torch.float,
65
+ weight_dtype=torch.quint8,
66
+ )
67
+
68
+ x86_weight_only_quint4x2_dtype_config = DTypeConfig(
69
+ input_dtype=torch.float,
70
+ output_dtype=torch.float,
71
+ weight_dtype=torch.quint4x2,
72
+ )
73
+
74
+
75
+ # =====================
76
+ # | BACKEND CONFIGS |
77
+ # =====================
78
+
79
+ def get_x86_backend_config() -> BackendConfig:
80
+ """
81
+ Return the `BackendConfig` for PyTorch's native x86 backend.
82
+ """
83
+ conv_dtype_configs = [x86_weighted_op_int8_dtype_config]
84
+ linear_dtype_configs = [
85
+ x86_weighted_op_int8_dtype_config,
86
+ x86_default_dynamic_int8_dtype_config,
87
+ x86_default_dynamic_float16_dtype_config,
88
+ ]
89
+ binary_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
90
+ default_op_dtype_configs = [x86_default_op_quint8_dtype_config]
91
+ fixed_qparams_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
92
+ share_qparams_op_dtype_configs = [x86_default_op_quint8_dtype_config]
93
+ tensor_info_op_dtype_configs = [x86_default_op_quint8_dtype_config]
94
+ rnn_op_dtype_configs = [
95
+ x86_default_dynamic_int8_dtype_config,
96
+ x86_default_dynamic_float16_dtype_config,
97
+ ]
98
+ embedding_op_dtype_configs = [
99
+ x86_weight_only_quint8_dtype_config,
100
+ x86_weight_only_quint4x2_dtype_config,
101
+ ]
102
+ return BackendConfig("x86") \
103
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
104
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
105
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
106
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
107
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
108
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
109
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
110
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
111
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
112
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
113
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
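
A small sketch showing how the helpers in utils.py (earlier in this commit) consume this config; the expectation that nn.Linear and nn.Conv2d map to reference quantized modules follows from the _get_linear_configs/_get_conv_configs calls above, but the exact contents printed are an assumption for illustration.

    import torch.nn as nn
    from torch.ao.quantization.backend_config import get_x86_backend_config
    from torch.ao.quantization.backend_config.utils import (
        get_root_module_to_quantized_reference_module,
    )

    backend_config = get_x86_backend_config()
    root_to_ref = get_root_module_to_quantized_reference_module(backend_config)
    print(root_to_ref.get(nn.Linear))  # reference quantized Linear class (or None)
    print(root_to_ref.get(nn.Conv2d))  # reference quantized Conv2d class (or None)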
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .prepare import prepare
2
+ from .convert import convert
3
+ from .fuse import fuse
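
prepare, convert, and fuse re-exported here are FX graph mode quantization internals; user code normally goes through the wrappers in torch.ao.quantization.quantize_fx, optionally passing one of the BackendConfig objects defined in the files above. A minimal end-to-end sketch, with the model, example inputs, and backend choice as assumptions:

    import torch
    import torch.nn as nn
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
    from torch.ao.quantization.backend_config import get_qnnpack_backend_config

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 3, 16, 16),)

    qconfig_mapping = get_default_qconfig_mapping("qnnpack")
    backend_config = get_qnnpack_backend_config()

    prepared = prepare_fx(model, qconfig_mapping, example_inputs,
                          backend_config=backend_config)
    prepared(*example_inputs)  # calibrate with representative data
    quantized = convert_fx(prepared, backend_config=backend_config)
    print(quantized(*example_inputs).shape)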
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py ADDED
@@ -0,0 +1,925 @@
1
+ import math
2
+ from typing import Optional, Tuple
3
+
4
+ import torch
5
+ from torch.library import Library, impl
6
+ from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
7
+ from torch._refs import _unsqueeze_multiple
8
+
9
+
10
+ # Note: decomposed means decomposed quantized tensor, using decomposed so that the
11
+ # name is not too long
12
+ quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
13
+
14
+ _DTYPE_TO_QVALUE_BOUNDS = {
15
+ torch.uint8: (0, 255),
16
+ torch.int8: (-128, 127),
17
+ torch.int16: (-(2**15), 2**15 - 1),
18
+ torch.int32: (-(2**31), 2**31 - 1)
19
+ }
20
+
21
+ # Helper to check the passed in quant min and max are valid for the dtype
22
+ def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
23
+ if dtype not in _DTYPE_TO_QVALUE_BOUNDS:
24
+ raise ValueError(f"Unsupported dtype: {dtype}")
25
+ quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype]
26
+
27
+ assert quant_min >= quant_min_lower_bound, \
28
+ "quant_min out of bound for dtype, " \
29
+ f"quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}"
30
+
31
+ assert quant_max <= quant_max_upper_bound, \
32
+ "quant_max out of bound for dtype, " \
33
+ f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
34
+
35
+ quantized_decomposed_lib.define(
36
+ "quantize_per_tensor(Tensor input, float scale, int zero_point, "
37
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
38
+
39
+ @impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
40
+ def quantize_per_tensor(
41
+ input: torch.Tensor,
42
+ scale: float,
43
+ zero_point: int,
44
+ quant_min: int,
45
+ quant_max: int,
46
+ dtype: torch.dtype
47
+ ) -> torch.Tensor:
48
+ """ Affine quantization for the Tensor using the same quantization parameters to map
49
+ from floating point to quantized values
50
+
51
+ Args:
52
+ input (torch.Tensor): original float32 or bfloat16 Tensor
53
+ scale (float): quantization parameter for affine quantization
54
+ zero_point (int): quantization parameter for affine quantization
55
+ quant_min (int): minimum quantized value for output Tensor
56
+ quant_max (int): maximum quantized value for output Tensor
57
+ dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor
58
+
59
+ Returns:
60
+ Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
61
+ are not stored in the Tensor, we are storing them in function arguments instead
62
+ """
63
+ if input.dtype == torch.bfloat16:
64
+ input = input.to(torch.float32)
65
+
66
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
67
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
68
+
69
+ inv_scale = 1.0 / scale
70
+ return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
71
+
72
+ quantized_decomposed_lib.define(
73
+ "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
74
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
75
+
76
+ @impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
77
+ def quantize_per_tensor_tensor(
78
+ input: torch.Tensor,
79
+ scale: torch.Tensor,
80
+ zero_point: torch.Tensor,
81
+ quant_min: int,
82
+ quant_max: int,
83
+ dtype: torch.dtype
84
+ ) -> torch.Tensor:
85
+ """ Affine quantization for the Tensor using the same quantization parameters to map
86
+ from floating point to quantized values
87
+ Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
88
+ scalar values
89
+ """
90
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
91
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
92
+ return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
93
+
94
+ @impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
95
+ def quantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype):
96
+ if input.dtype == torch.bfloat16:
97
+ input = input.to(torch.float32)
98
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
99
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
100
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
101
+ return torch.empty_like(input, dtype=dtype)
102
+
103
+ # TODO: remove other variants and keep this one
104
+ quantized_decomposed_lib.define(
105
+ "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
106
+ "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")
107
+
108
+ @impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd")
109
+ def quantize_per_tensor_tensor2(
110
+ input: torch.Tensor,
111
+ scale: torch.Tensor,
112
+ zero_point: torch.Tensor,
113
+ quant_min: torch.Tensor,
114
+ quant_max: torch.Tensor,
115
+ dtype: torch.dtype
116
+ ) -> torch.Tensor:
117
+ """ Affine quantization for the Tensor using the same quantization parameters to map
118
+ from floating point to quantized values
119
+ Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
120
+ scalar values
121
+ """
122
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
123
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
124
+ return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
125
+
126
+ @impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta")
127
+ def quantize_per_tensor_tensor2_meta(input, scale, zero_point, quant_min, quant_max, dtype):
128
+ return quantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype)
129
+
130
+ # Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in
131
+ # the signature as metadata for the input Tensor; this might be useful for pattern
132
+ # matching in the future
133
+ # We will revisit this later if we find there are no use cases for it
134
+ quantized_decomposed_lib.define(
135
+ "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
136
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
137
+
138
+ @impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
139
+ def dequantize_per_tensor(
140
+ input: torch.Tensor,
141
+ scale: float,
142
+ zero_point: int,
143
+ quant_min: int,
144
+ quant_max: int,
145
+ dtype: torch.dtype
146
+ ) -> torch.Tensor:
147
+ """ Affine dequantization for the Tensor using the same quantization parameters to map
148
+ from quantized values to floating point values
149
+
150
+ Args:
151
+ input (torch.Tensor): Tensor with dtype matching `dtype` argument,
152
+ e.g. (`torch.uint8`), it is a per tensor quantized Tensor if combined with
153
+ quantization parameters in the argument of this function (scale/zero_point)
154
+
155
+ scale (float): quantization parameter for affine quantization
156
+
157
+ zero_point (int): quantization parameter for affine quantization
158
+
159
+ quant_min (int): minimum quantized value for input Tensor (not used in computation,
160
+ reserved for pattern matching)
161
+
162
+ quant_max (int): maximum quantized value for input Tensor (not used in computation,
163
+ reserved for pattern matching)
164
+
165
+ dtype (torch.dtype): dtype for input Tensor (not used in computation,
166
+ reserved for pattern matching)
167
+
168
+ Returns:
169
+ dequantized float32 Tensor
170
+ """
171
+ assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}, but got {input.dtype}"
172
+ if dtype in _DTYPE_TO_QVALUE_BOUNDS:
173
+ # TODO: investigate why
174
+ # (input - zero_point).to(torch.float32) * scale
175
+ # failed the test
176
+ return (input.to(torch.float32) - zero_point) * scale
177
+ else:
178
+ raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
179
+
180
+
181
+ quantized_decomposed_lib.define(
182
+ "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
183
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
184
+
185
+ @impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
186
+ def dequantize_per_tensor_tensor(
187
+ input: torch.Tensor,
188
+ scale: torch.Tensor,
189
+ zero_point: torch.Tensor,
190
+ quant_min: int,
191
+ quant_max: int,
192
+ dtype: torch.dtype
193
+ ) -> torch.Tensor:
194
+ """ Affine dequantization for the Tensor using the same quantization parameters to map
195
+ from quantized values to floating point values
196
+ Same as `dequantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
197
+ scalar values
198
+ """
199
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
200
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
201
+ return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
202
+
203
+ @impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
204
+ def dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype):
205
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
206
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
207
+ assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}"
208
+ if dtype in _DTYPE_TO_QVALUE_BOUNDS:
209
+ return torch.empty_like(input, dtype=torch.float32)
210
+ else:
211
+ raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
212
+
213
+ # TODO: remove other variants and keep this one
214
+ quantized_decomposed_lib.define(
215
+ "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
216
+ "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")
217
+
218
+ @impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd")
219
+ def dequantize_per_tensor_tensor2(
220
+ input: torch.Tensor,
221
+ scale: torch.Tensor,
222
+ zero_point: torch.Tensor,
223
+ quant_min: torch.Tensor,
224
+ quant_max: torch.Tensor,
225
+ dtype: torch.dtype
226
+ ) -> torch.Tensor:
227
+ """ Affine dequantization for the Tensor using the same quantization parameters to map
228
+ from quantized values to floating point values
229
+ Same as `dequantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
230
+ scalar values
231
+ """
232
+ assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
233
+ assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
234
+ return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
235
+
236
+ @impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta")
237
+ def dequantize_per_tensor_tensor2_meta(input, scale, zero_point, quant_min, quant_max, dtype):
238
+ return dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype)
239
+
240
+ quantized_decomposed_lib.define(
241
+ "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
242
+ "float eps, ScalarType dtype) -> (Tensor, Tensor)")
243
+
244
+ @impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
245
+ def choose_qparams_tensor(
246
+ input: torch.Tensor,
247
+ qmin: int,
248
+ qmax: int,
249
+ eps: float,
250
+ dtype: torch.dtype
251
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
252
+ """ Given an input Tensor, derive the per tensor affine quantization parameter
253
+ (scale and zero_point) for target quantized Tensor from the Tensor
254
+
255
+ Args:
256
+ input (torch.Tensor): floating point input Tensor
257
+ quant_min (int): minimum quantized value for target quantized Tensor
258
+ quant_max (int): maximum quantized value for target quantized Tensor
259
+ dtype (torch.dtype): dtype for target quantized Tensor
260
+
261
+ Returns:
262
+ scale (float): quantization parameter for the target quantized Tensor
263
+ zero_point (int): quantization parameter for the target quantized Tensor
264
+ """
265
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
266
+ assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \
267
+ f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}"
268
+ validate_qmin_qmax(qmin, qmax)
269
+
270
+ min_val, max_val = torch.aminmax(input)
271
+
272
+ return determine_qparams(
273
+ min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False)
274
+
275
+ quantized_decomposed_lib.define(
276
+ "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
277
+ "float eps, ScalarType dtype) -> (Tensor, Tensor)")
278
+
279
+ @impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
280
+ def choose_qparams_symmetric_tensor(
281
+ input: torch.Tensor,
282
+ qmin: int,
283
+ qmax: int,
284
+ eps: float,
285
+ dtype: torch.dtype
286
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
287
+ """ Given an input Tensor, derive the per tensor affine quantization parameter
288
+ (scale and zero_point) for target quantized Tensor from the Tensor
289
+
290
+ Args:
291
+ input (torch.Tensor): floating point input Tensor
292
+ quant_min (int): minimum quantized value for target quantized Tensor
293
+ quant_max (int): maximum quantized value for target quantized Tensor
294
+ dtype (torch.dtype): dtype for target quantized Tensor
295
+
296
+ Returns:
297
+ scale (float): quantization parameter for the target quantized Tensor
298
+ zero_point (int): quantization parameter for the target quantized Tensor
299
+ """
300
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
301
+ assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \
302
+ f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}"
303
+ validate_qmin_qmax(qmin, qmax)
304
+
305
+ min_val, max_val = torch.aminmax(input)
306
+ return determine_qparams(
307
+ min_val,
308
+ max_val,
309
+ qmin,
310
+ qmax,
311
+ dtype,
312
+ torch.Tensor([eps]),
313
+ has_customized_qrange=False,
314
+ qscheme=torch.per_tensor_symmetric
315
+ )
316
+
317
+ @impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
318
+ def choose_qparams_tensor_meta(
319
+ input: torch.Tensor,
320
+ quant_min: int,
321
+ quant_max: int,
322
+ eps: float,
323
+ dtype: torch.dtype
324
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
325
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
326
+ assert quant_min < quant_max, f"Expecting quant_min to be smaller than quant_max but received min: \
327
+ {quant_min} max: {quant_max}"
328
+ return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device)
329
+
330
+ @impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
331
+ def choose_qparams_symmetric_tensor_meta(
332
+ input: torch.Tensor,
333
+ quant_min: int,
334
+ quant_max: int,
335
+ eps: float,
336
+ dtype: torch.dtype
337
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
338
+ return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device)
339
+
340
+ # Helper function used to implement per-channel quantization against any axis
341
+ def _permute_to_axis_zero(x, axis):
342
+ new_axis_list = list(range(x.dim()))
343
+ new_axis_list[axis] = 0
344
+ new_axis_list[0] = axis
345
+ y = x.permute(tuple(new_axis_list))
346
+ return y, new_axis_list
347
+
348
+ quantized_decomposed_lib.define(
349
+ "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
350
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
351
+
352
+ @impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
353
+ def quantize_per_channel(
354
+ input: torch.Tensor,
355
+ scales: torch.Tensor,
356
+ zero_points: torch.Tensor,
357
+ axis: int,
358
+ quant_min: int,
359
+ quant_max: int,
360
+ dtype: torch.dtype
361
+ ) -> torch.Tensor:
362
+ """ Affine per channel quantization for the Tensor using the same quantization
363
+ parameters for each channel/axis to map from floating point to quantized values
364
+
365
+ Args:
366
+ input (torch.Tensor): original float32 or bfloat16 Tensor
367
+ scales (torch.Tensor): a list of scale quantization parameters for
368
+ affine quantization, one per channel
369
+ zero_points (torch.Tensor): a list of zero_point quantization parameters for
370
+ affine quantization, one per channel
371
+ quant_min (int): minimum quantized value for output Tensor
372
+ quant_max (int): maximum quantized value for output Tensor
373
+ dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor
374
+
375
+ Returns:
376
+ Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
377
+ are not stored in the Tensor, we are storing them in function arguments instead
378
+ """
379
+ if input.dtype == torch.bfloat16:
380
+ input = input.to(torch.float32)
381
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
382
+ assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
383
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
384
+ input, permute_axis_list = _permute_to_axis_zero(input, axis)
385
+ res = torch.zeros_like(input)
386
+
387
+ for i in range(input.size(0)):
388
+ res[i] = torch.clamp(
389
+ torch.round(input[i] * (1.0 / scales[i])) + zero_points[i],
390
+ quant_min,
391
+ quant_max
392
+ )
393
+
394
+ out = res.permute(tuple(permute_axis_list))
395
+ return out.to(dtype)
396
+
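+ # A short sketch of per-channel quantization for a conv-style weight, one
+ # (scale, zero_point) pair per output channel (axis=0). The op is assumed to be
+ # exposed as `torch.ops.quantized_decomposed.quantize_per_channel`; the scale
+ # heuristic below is only illustrative.
+ def _quantize_per_channel_example():
+     weight = torch.randn(4, 3, 3, 3)                    # (out_channels, in_channels, kH, kW)
+     scales = weight.abs().amax(dim=(1, 2, 3)) / 127.0   # one scale per output channel
+     zero_points = torch.zeros(4, dtype=torch.int64)     # symmetric, so zero_points stay 0
+     return torch.ops.quantized_decomposed.quantize_per_channel(
+         weight, scales, zero_points, 0, -128, 127, torch.int8)
+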
397
+ @impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
398
+ def quantize_per_channel_meta(
399
+ input: torch.Tensor,
400
+ scales: torch.Tensor,
401
+ zero_points: torch.Tensor,
402
+ axis: int,
403
+ quant_min: int,
404
+ quant_max: int,
405
+ dtype: torch.dtype
406
+ ) -> torch.Tensor:
407
+ if input.dtype == torch.bfloat16:
408
+ input = input.to(torch.float32)
409
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
410
+ assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
411
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
412
+ return torch.empty_like(input, dtype=dtype)
413
+
414
+ # Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in
415
+ # the signature as metadata for the input Tensor, this might be useful for pattern
416
+ # matching in the future
417
+ # We will revisit this later if we found there are no use cases for it
418
+ quantized_decomposed_lib.define(
419
+ "dequantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
420
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
421
+
422
+ @impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
423
+ def dequantize_per_channel(
424
+ input: torch.Tensor,
425
+ scales: torch.Tensor,
426
+ zero_points: torch.Tensor,
427
+ axis: int,
428
+ quant_min: int,
429
+ quant_max: int,
430
+ dtype: torch.dtype
431
+ ) -> torch.Tensor:
432
+ """ Affine per channel dequantization for the Tensor using the same quantization
433
+ parameters for each channel/axis to map from quantized values to floating point values
434
+
435
+ Args:
436
+ input (torch.Tensor): Tensor with dtype matching `dtype` argument,
437
+ e.g. (`torch.uint8`), it is a per channel quantized Tensor if combined with
438
+ quantization parameter in the argument of this function (scales/zero_points/axis)
439
+
440
+ scales (torch.Tensor): a list of scale quantization parameter for
441
+ affine quantization, one per channel
442
+
443
+ zero_points (torch.Tensor): a list of zero_point quantization parameter for
444
+ affine quantization, one per channel
445
+
446
+ quant_min (int): minimum quantized value for output Tensor (not used in computation,
447
+ reserved for pattern matching)
448
+
449
+ quant_max (int): maximum quantized value for output Tensor (not used in computation,
450
+ reserved for pattern matching)
451
+
452
+ dtype (torch.dtype): requested dtype for output Tensor (not used in computation,
453
+ reserved for pattern matching)
454
+
455
+ Returns:
456
+ dequantized float32 Tensor
457
+ """
458
+ assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
459
+ assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
460
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
461
+ input, permute_axis_list = _permute_to_axis_zero(input, axis)
462
+ res = torch.zeros_like(input, dtype=torch.float32)
463
+
464
+ for i in range(input.size(0)):
465
+ # TODO: investigate why
466
+ # (input[i] - zero_points[i]).to(torch.float32) * scales[i]
467
+ # failed the test
468
+ res[i] = (input[i].to(torch.float32) - zero_points[i]) * scales[i]
469
+
470
+ out = res.permute(tuple(permute_axis_list))
471
+ return out
472
+
473
+ @impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
474
+ def dequantize_per_channel_meta(
475
+ input: torch.Tensor,
476
+ scales: torch.Tensor,
477
+ zero_points: torch.Tensor,
478
+ axis: int,
479
+ quant_min: int,
480
+ quant_max: int,
481
+ dtype: torch.dtype
482
+ ) -> torch.Tensor:
483
+ assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
484
+ assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
485
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
486
+ return torch.empty_like(input, dtype=torch.float32)
487
+
488
+
489
+ quantized_decomposed_lib.define(
490
+ "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
491
+ )
492
+
493
+
494
+ @impl(
495
+ quantized_decomposed_lib,
496
+ "choose_qparams_per_token",
497
+ "CompositeExplicitAutograd",
498
+ )
499
+ def choose_qparams_per_token(
500
+ input: torch.Tensor,
501
+ dtype: torch.dtype,
502
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
503
+ """Choose quantization parameters for per token quantization. This means for a N dimension Tensor
504
+ (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
505
+ every N elements with the same quantization parameter. The dimension for scales/zero_points
506
+ will be (M1 * M2 ... * Mn)
507
+
508
+ Args:
509
+ input (torch.Tensor): original float32/float16 Tensor
510
+ dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
511
+
512
+ Returns:
513
+ scales and zero_points, both float32 Tensors
514
+ """
515
+
516
+ scales = input.abs().amax(dim=-1, keepdim=True)
517
+ if scales.dtype == torch.float16:
518
+ scales = (
519
+ scales.float()
520
+ ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)
521
+ if dtype == torch.int8:
522
+ n_bits = 8
523
+ quant_max = 2 ** (n_bits - 1) - 1
524
+ else:
525
+ raise Exception(f"unsupported dtype in choose_qparams_per_token: {dtype}")
526
+
527
+ scales = scales.clamp(min=1e-5).div(quant_max)
528
+ zero_points = torch.zeros_like(scales)
529
+ return scales, zero_points
530
+
531
+
532
+ @impl(
533
+ quantized_decomposed_lib,
534
+ "choose_qparams_per_token",
535
+ "Meta",
536
+ )
537
+ def choose_qparams_per_token_meta(
538
+ input: torch.Tensor,
539
+ dtype: torch.dtype,
540
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
541
+ size = (1, input.size(-1))
542
+ return torch.empty(size, dtype=torch.double, device=input.device), torch.empty(
543
+ size, dtype=torch.int64, device=input.device
544
+ )
545
+
546
+
547
+ # TODO: move this to https://github.com/pytorch/pytorch/blob/main/torch/ao/quantization/fx/_decomposed.py
548
+ quantized_decomposed_lib.define(
549
+ "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
550
+ )
551
+
552
+
553
+ @impl(
554
+ quantized_decomposed_lib,
555
+ "choose_qparams_per_token_asymmetric",
556
+ "CompositeExplicitAutograd",
557
+ )
558
+ def choose_qparams_per_token_asymmetric(
559
+ input: torch.Tensor,
560
+ dtype: torch.dtype,
561
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
562
+ """Choose quantization parameters for per token quantization. This means for a N dimension Tensor
563
+ (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
564
+ every N elements with the same quantization parameter. The dimension for scales/zero_points
565
+ will be (M1 * M2 ... * Mn)
566
+
567
+ Args:
568
+ input (torch.Tensor): original float32/float16 Tensor
569
+ dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
570
+
571
+ Returns:
572
+ scales and zero_points, both float32 Tensors
573
+ """
574
+ # Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
575
+ qmin, qmax = -128, 127
576
+ min_val, max_val = torch.aminmax(input, dim=-1, keepdim=True)
577
+ min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
578
+ max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
579
+ eps = torch.finfo(torch.float32).eps # use xnnpack eps?
580
+
581
+ # scale
582
+ scale = (max_val_pos - min_val_neg) / float(qmax - qmin)
583
+ scale = scale.clamp(min=eps)
584
+
585
+ # zero point
586
+ descaled_min = min_val_neg / scale
587
+ descaled_max = max_val_pos / scale
588
+ zero_point_from_min_error = qmin + descaled_min
589
+ zero_point_from_max_error = qmax + descaled_max
590
+ zero_point = torch.where(
591
+ zero_point_from_min_error + zero_point_from_max_error > 0,
592
+ qmin - descaled_min,
593
+ qmax - descaled_max,
594
+ )
595
+ zero_point = torch.clamp(zero_point, qmin, qmax).round()
596
+
597
+ return scale.to(torch.float32), zero_point.to(torch.float32)
598
+
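+ # A tiny worked sketch of the asymmetric selection above: for a single token
+ # spanning [-1.0, 3.0] mapped onto int8 [-128, 127], the scale is
+ # (3 - (-1)) / 255 and the zero_point lands near -64, so -1.0 quantizes to -128
+ # and 3.0 quantizes to 127. The `torch.ops.quantized_decomposed` namespace is assumed.
+ def _choose_qparams_per_token_asymmetric_example():
+     x = torch.tensor([[-1.0, 0.0, 3.0]])
+     scale, zero_point = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(
+         x, torch.int8)
+     return scale, zero_point   # roughly (4 / 255, -64)
+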
599
+
600
+ @impl(
601
+ quantized_decomposed_lib,
602
+ "choose_qparams_per_token_asymmetric",
603
+ "Meta",
604
+ )
605
+ def choose_qparams_per_token_asymmetric_meta(
606
+ input: torch.Tensor,
607
+ dtype: torch.dtype,
608
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
609
+ size = (1, input.size(-1))
610
+ return torch.empty(size, dtype=torch.double, device=input.device), torch.empty(
611
+ size, dtype=torch.int64, device=input.device
612
+ )
613
+
614
+
615
+ def _per_token_quant_qparam_dim_check(input, scales, zero_points):
616
+ num_tokens = math.prod(list(input.size())[:-1])
617
+ assert (
618
+ num_tokens == scales.numel()
619
+ ), f"num_tokens: {num_tokens} scales: {scales.size()}"
620
+ assert (
621
+ num_tokens == zero_points.numel()
622
+ ), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}"
623
+
624
+
625
+ quantized_decomposed_lib.define(
626
+ "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
627
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
628
+ )
629
+
630
+
631
+ @impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd")
632
+ def quantize_per_token(
633
+ input: torch.Tensor,
634
+ scales: torch.Tensor,
635
+ zero_points: torch.Tensor,
636
+ quant_min: int,
637
+ quant_max: int,
638
+ dtype: torch.dtype,
639
+ ):
640
+ """Per token quantization for the Tensor using the quantization parameters to map
641
+ from floating point to quantized values. This means for an N dimension Tensor
642
+ (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
643
+ every N elements with the same quantization parameter. The dimension for scales/zero_points
644
+ will be (M1 * M2 ... * Mn)
645
+
646
+ Args:
647
+ input (torch.Tensor): original float32 or bfloat16 Tensor
648
+ scales (float32 torch.Tensor): quantization parameter for per token affine quantization
649
+ zero_points (int32 torch.Tensor): quantization parameter for per token affine quantization
650
+ quant_min (int): minimum quantized value for output Tensor
651
+ quant_max (int): maximum quantized value for output Tensor
652
+ dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor
653
+
654
+ Returns:
655
+ Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
656
+ are not stored in the Tensor, we are storing them in function arguments instead
657
+ """
658
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
659
+ _per_token_quant_qparam_dim_check(input, scales, zero_points)
660
+ input = (
661
+ torch.round(input / scales + zero_points).clamp(quant_min, quant_max).to(dtype)
662
+ )
663
+ return input
664
+
665
+
666
+ @impl(quantized_decomposed_lib, "quantize_per_token", "Meta")
667
+ def quantize_per_token_meta(
668
+ input: torch.Tensor,
669
+ scales: torch.Tensor,
670
+ zero_points: torch.Tensor,
671
+ quant_min: int,
672
+ quant_max: int,
673
+ dtype: torch.dtype,
674
+ ):
675
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
676
+ return torch.empty_like(input, dtype=dtype)
677
+
678
+
679
+ quantized_decomposed_lib.define(
680
+ "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
681
+ "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
682
+ )
683
+
684
+
685
+ @impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd")
686
+ def dequantize_per_token(
687
+ input: torch.Tensor,
688
+ scales: torch.Tensor,
689
+ zero_points: torch.Tensor,
690
+ quant_min: int,
691
+ quant_max: int,
692
+ dtype: torch.dtype,
693
+ output_dtype: torch.dtype = torch.float32,
694
+ ):
695
+ """Per token dequantization for the Tensor using the quantization parameters to map
696
+ from quantized values back to floating point values. This means for an N dimension Tensor
697
+ (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
698
+ every N elements with the same quantization parameter. The dimension for scales/zero_points
699
+ will be (M1 * M2 ... * Mn)
700
+
701
+ Args:
702
+ input (torch.Tensor): quantized Tensor (uint8, int8 etc.)
703
+ scales (float32 torch.Tensor): quantization parameter for per token affine quantization
704
+ zero_points (int32 torch.Tensor): quantization parameter for per token affine quantization
705
+ quant_min (int): minimum quantized value for input Tensor
706
+ quant_max (int): maximum quantized value for input Tensor
707
+ dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
708
+ output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor
709
+
710
+ Returns:
711
+ dequantized Tensor with dtype `output_dtype`
712
+ """
713
+ input = input - zero_points
714
+ input = input.to(output_dtype) * scales
715
+ return input
716
+
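+ # A round-trip sketch for the per-token ops above: parameters are chosen per
+ # token, the activation is quantized to int8 and dequantized back, and the
+ # reconstruction error should be on the order of one quantization step. The
+ # `torch.ops.quantized_decomposed` namespace is assumed.
+ def _per_token_round_trip_example():
+     qd = torch.ops.quantized_decomposed
+     x = torch.randn(2, 5, 16)   # e.g. (batch, seq_len, hidden) -> 10 tokens
+     scales, zero_points = qd.choose_qparams_per_token_asymmetric(x, torch.int8)
+     xq = qd.quantize_per_token(x, scales, zero_points, -128, 127, torch.int8)
+     x_dq = qd.dequantize_per_token(xq, scales, zero_points, -128, 127, torch.int8, torch.float32)
+     return (x - x_dq).abs().max()
+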
717
+
718
+ @impl(quantized_decomposed_lib, "dequantize_per_token", "Meta")
719
+ def dequantize_per_token_meta(
720
+ input: torch.Tensor,
721
+ scales: torch.Tensor,
722
+ zero_points: torch.Tensor,
723
+ quant_min: int,
724
+ quant_max: int,
725
+ dtype: torch.dtype,
726
+ output_dtype: torch.dtype = torch.float32,
727
+ ):
728
+ _quant_min_max_bounds_check(quant_min, quant_max, dtype)
729
+ # TODO: support fp16
730
+ return torch.empty_like(input, dtype=output_dtype)
731
+
732
+
733
+ quantized_decomposed_lib.define(
734
+ "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, "
735
+ "int quant_max, ScalarType dtype, int group_size) -> Tensor"
736
+ )
737
+
738
+
739
+ # TODO: dtype is ignored for now
740
+ @impl(
741
+ quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd"
742
+ )
743
+ def quantize_per_channel_group(
744
+ input: torch.Tensor,
745
+ scales: torch.Tensor,
746
+ zero_points: torch.Tensor,
747
+ quant_min: int,
748
+ quant_max: int,
749
+ dtype: torch.dtype,
750
+ group_size=128,
751
+ ):
752
+ assert group_size > 1
753
+ # needed for GPTQ single column quantize
754
+ if group_size > input.shape[-1] and scales.shape[-1] == 1:
755
+ group_size = input.shape[-1]
756
+
757
+ assert input.shape[-1] % group_size == 0
758
+ assert input.dim() == 2
759
+
760
+ # TODO: check for dtype, currently we can't express torch.int4 so it's omitted
761
+ to_quant = input.reshape(-1, group_size)
762
+ assert torch.isnan(to_quant).sum() == 0
763
+
764
+ scales = scales.reshape(-1, 1)
765
+ zero_points = zero_points.reshape(-1, 1)
766
+
767
+ input_int8 = (
768
+ to_quant.div(scales)
769
+ .add(zero_points)
770
+ .round()
771
+ .clamp_(quant_min, quant_max)
772
+ .to(dtype)
773
+ .reshape_as(input)
774
+ )
775
+
776
+ return input_int8
777
+
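+ # A small sketch of the groupwise layout used above: a (2, 8) weight with
+ # group_size=4 is viewed as four groups of 4 values, so scales/zero_points carry
+ # one entry per group, i.e. shape (M, N // group_size) = (2, 2). Values are kept
+ # in an int4-style range [-8, 7] but stored as int8, matching the note above.
+ # The `torch.ops.quantized_decomposed` namespace is assumed.
+ def _quantize_per_channel_group_example():
+     w = torch.randn(2, 8)
+     scales = w.reshape(2, 2, 4).abs().amax(dim=-1) / 7.0
+     zero_points = torch.zeros_like(scales, dtype=torch.int64)
+     return torch.ops.quantized_decomposed.quantize_per_channel_group(
+         w, scales, zero_points, -8, 7, torch.int8, 4)
+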
778
+
779
+ @impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta")
780
+ def quantize_per_channel_group_meta(
781
+ input: torch.Tensor,
782
+ scales: torch.Tensor,
783
+ zero_points: torch.Tensor,
784
+ quant_min: int,
785
+ quant_max: int,
786
+ dtype: torch.dtype,
787
+ group_size=128,
788
+ ):
789
+ """Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters
790
+ to map from floating point to quantized values. This means for each row of a 2-d Tensor
791
+ (M, N), we calculate scales/zero_points for each `group_size` elements
792
+ and quantize every `group_size` elements with the same quantization parameter.
793
+ The dimension for scales/zero_points will be (M * ceil(N, group_size),)
794
+
795
+ Args:
796
+ input (torch.Tensor): original float32 or bfloat16 Tensor
797
+ scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization
798
+ zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization
799
+ quant_min (int): minimum quantized value for output Tensor
800
+ quant_max (int): maximum quantized value for output Tensor
801
+ dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor
802
+
803
+ Returns:
804
+ Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
805
+ are not stored in the Tensor, we are storing them in function arguments instead
806
+ """
807
+ assert group_size > 1
808
+ # needed for GPTQ single column quantize
809
+ if group_size > input.shape[-1] and scales.shape[-1] == 1:
810
+ group_size = input.shape[-1]
811
+
812
+ assert input.shape[-1] % group_size == 0
813
+ assert input.dim() == 2
814
+ return torch.empty_like(input, dtype=dtype)
815
+
816
+
817
+ quantized_decomposed_lib.define(
818
+ "dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, "
819
+ "int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
820
+ )
821
+
822
+
823
+ @impl(
824
+ quantized_decomposed_lib,
825
+ "dequantize_per_channel_group",
826
+ "CompositeExplicitAutograd",
827
+ )
828
+ def dequantize_per_channel_group(
829
+ w_int8: torch.Tensor,
830
+ scales: torch.Tensor,
831
+ zero_points: Optional[torch.Tensor],
832
+ quant_min: int,
833
+ quant_max: int,
834
+ dtype: torch.dtype,
835
+ group_size: int = 128,
836
+ output_dtype: torch.dtype = torch.float32,
837
+ ):
838
+ """Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters
839
+ to map from floating point to quantized values. This means for each row of a 2-d Tensor
840
+ (M, N), we calculate scales/zero_points for each `group_size` elements
841
+ and quantize every `group_size` elements with the same quantization parameter.
842
+ The dimension for scales/zero_points will be (M * ceil(N, group_size),)
843
+
844
+ Args:
845
+ input (torch.Tensor): quantized Tensor (uint8/int8 etc.)
846
+ scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization
847
+ zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization
848
+ quant_min (int): minimum quantized value for input Tensor
849
+ quant_max (int): maximum quantized value for input Tensor
850
+ dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
851
+ output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor
852
+
853
+ Returns:
854
+ dequantized Tensor with dtype `output_dtype`
855
+ """
856
+
857
+ assert group_size > 1
858
+ # needed for GPTQ single column dequantize
859
+ if group_size > w_int8.shape[-1] and scales.shape[-1] == 1:
860
+ group_size = w_int8.shape[-1]
861
+ assert w_int8.shape[-1] % group_size == 0
862
+ assert w_int8.dim() == 2
863
+
864
+ w_int8_grouped = w_int8.reshape(-1, group_size)
865
+ scales = scales.reshape(-1, 1)
866
+ if zero_points is not None:
867
+ zp = zero_points.reshape(-1, 1)
868
+ else:
869
+ zp = torch.zeros([], dtype=torch.int32, device=scales.device)
870
+ w_dq = w_int8_grouped.sub(zp).mul(scales).reshape_as(w_int8).to(output_dtype)
871
+ return w_dq
872
+
873
+
874
+ quantized_decomposed_lib.define(
875
+ "fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
876
+ "int quant_min, int quant_max) -> Tensor")
877
+
878
+ class FakeQuantPerChannel(torch.autograd.Function):
879
+ @staticmethod
880
+ def forward(ctx, input, scales, zero_points, axis, quant_min, quant_max):
881
+ with torch._C._AutoDispatchBelowAutograd():
882
+ if input.dtype == torch.bfloat16:
883
+ input = input.to(torch.float32)
884
+ if scales.dtype != torch.float32:
885
+ scales = scales.to(torch.float32)
886
+ if zero_points.dtype != torch.int32:
887
+ zero_points = zero_points.to(torch.int32)
888
+ assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
889
+ assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
890
+ broadcast_dims = list(range(0, axis)) + list(range(axis + 1, input.ndim))
891
+ unsqueeze_scales = _unsqueeze_multiple(scales, broadcast_dims)
892
+ unsqueeze_zero_points = _unsqueeze_multiple(zero_points, broadcast_dims)
893
+ temp = torch.round(input * (1.0 / unsqueeze_scales)) + unsqueeze_zero_points
894
+ out = (torch.clamp(temp, quant_min, quant_max) - unsqueeze_zero_points) * unsqueeze_scales
895
+ mask = torch.logical_and((temp >= quant_min), (temp <= quant_max))
896
+
897
+ ctx.save_for_backward(mask)
898
+ return out
899
+
900
+ @staticmethod
901
+ def backward(ctx, gy):
902
+ mask, = ctx.saved_tensors
903
+ return gy * mask, None, None, None, None, None
904
+
905
+ @impl(quantized_decomposed_lib, "fake_quant_per_channel", "AutogradCPU")
906
+ def fake_quant_per_channel(
907
+ input: torch.Tensor,
908
+ scales: torch.Tensor,
909
+ zero_points: torch.Tensor,
910
+ axis: int,
911
+ quant_min: int,
912
+ quant_max: int,
913
+ ) -> torch.Tensor:
914
+ return FakeQuantPerChannel.apply(input, scales, zero_points, axis, quant_min, quant_max)
915
+
916
+ @impl(quantized_decomposed_lib, "fake_quant_per_channel", "Meta")
917
+ def fake_quant_per_channel_meta(
918
+ input: torch.Tensor,
919
+ scales: torch.Tensor,
920
+ zero_points: torch.Tensor,
921
+ axis: int,
922
+ quant_min: int,
923
+ quant_max: int,
924
+ ) -> torch.Tensor:
925
+ return torch.empty_like(input)
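+ # A brief sketch of the straight-through behaviour implemented above: gradients
+ # pass through unchanged for elements whose quantized value stays inside
+ # [quant_min, quant_max] and are zeroed for elements that clip. The op is
+ # assumed to be reachable as `torch.ops.quantized_decomposed.fake_quant_per_channel`
+ # on CPU tensors.
+ def _fake_quant_per_channel_example():
+     x = torch.randn(3, 4, requires_grad=True)
+     scales = torch.full((3,), 0.1)
+     zero_points = torch.zeros(3, dtype=torch.int32)
+     y = torch.ops.quantized_decomposed.fake_quant_per_channel(
+         x, scales, zero_points, 0, -128, 127)
+     y.sum().backward()
+     return x.grad   # 1.0 where the value stayed in range, 0.0 where it clipped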
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_equalize.py ADDED
@@ -0,0 +1,820 @@
1
+ import warnings
2
+
3
+ from collections import namedtuple
4
+ from typing import Any, Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torch.ao.nn.intrinsic as nni
10
+ from torch.fx import GraphModule
11
+ from torch.fx.graph import Node
12
+ from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr
13
+
14
+ from ..observer import _with_args, ObserverBase, PerChannelMinMaxObserver
15
+ from ..utils import _parent_name, check_min_max_valid
16
+
17
+ from .utils import (
18
+ get_new_attr_name_with_prefix,
19
+ maybe_get_next_module,
20
+ node_arg_is_weight,
21
+ )
22
+
23
+ CUSTOM_MODULE_SUPP_LIST: List[Any] = []
24
+
25
+ def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor:
26
+ """Reshapes the scale so that we can multiply it to the input by the given axis.
27
+ """
28
+ new_shape = [1] * input.ndim
29
+ new_shape[axis] = input.size(axis)
30
+ return scale.view(new_shape)
31
+
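+ # A quick sketch of the helper above: for a per-channel scale of shape (C,) and
+ # a 4-d activation, reshaping along axis=1 yields a (1, C, 1, 1) view that
+ # broadcasts over the batch and spatial dimensions.
+ def _reshape_scale_example():
+     scale = torch.arange(3, dtype=torch.float32)
+     x = torch.randn(2, 3, 8, 8)
+     return reshape_scale(scale, 1, x).shape   # torch.Size([1, 3, 1, 1])
+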
32
+ qsheme_mapping_per_tensor_to_per_channel = {
33
+ torch.per_tensor_affine: torch.per_channel_affine,
34
+ torch.per_tensor_symmetric: torch.per_channel_symmetric,
35
+ }
36
+
37
+
38
+ class _InputEqualizationObserver(nn.Module):
39
+ r"""Observer for tracking the running min/max values of input columns, and
40
+ computing the quantization parameters for the overall min/max input values.
41
+
42
+ Args:
43
+ dtype: Quantized data type
44
+ qscheme: Quantization scheme
45
+ quant_min: Minimum quantization value. If unspecified, it will
46
+ follow the 8-bit setup.
47
+ quant_max: Maximum quantization value. If unspecified, it will
48
+ follow the 8-bit setup.
49
+
50
+ The running minimum/maximum :math:`x_\text{min/max}` are computed in the
51
+ same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`,
52
+ with the difference that the running min/max values are stored per column.
53
+ This observer is intended to be used along with a WeightEqualizationObserver
54
+ to calculate the equalization scale.
55
+ """
56
+
57
+ def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
58
+ quant_min=None, quant_max=None, factory_kwargs=None) -> None:
59
+ super().__init__()
60
+
61
+ if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
62
+ raise TypeError("Input qscheme must be per-tensor")
63
+
64
+ self.dtype = dtype
65
+ self.qscheme = qscheme
66
+
67
+ per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme]
68
+ self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
69
+ qscheme=per_channel_qscheme,
70
+ quant_min=quant_min,
71
+ quant_max=quant_max,
72
+ factory_kwargs=factory_kwargs)
73
+
74
+ self.equalization_scale = torch.tensor(1)
75
+ self.equalization_shape: List[int] = []
76
+
77
+ def forward(self, x_orig):
78
+ if not (x_orig.ndim >= 2 and x_orig.ndim <= 5):
79
+ raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")
80
+
81
+ # Calculate the shape needed to reshape the equalization scale later (needed for Conv layers)
82
+ self.equalization_shape = [1] * x_orig.ndim
83
+ self.equalization_shape[1] = x_orig.size(1)
84
+
85
+ return self.input_obs(x_orig)
86
+
87
+ def get_input_minmax(self):
88
+ return (self.input_obs.min_val, self.input_obs.max_val)
89
+
90
+ def set_equalization_scale(self, equalization_scale):
91
+ # Reshape the equalization scale along axis=1 so that it can be
92
+ # multiplied with the input along axis=1
93
+ if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
94
+ return
95
+ self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape)
96
+
97
+ def calculate_scaled_minmax(self):
98
+ r""" Returns the scaled min/max inputs
99
+ """
100
+ if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1):
101
+ warnings.warn(
102
+ "Must call calculate_equalization_scale before calling calculate_scaled_minmax. " +
103
+ "Will not scale the next quantization observer."
104
+ )
105
+ return None, None
106
+
107
+ # Calculate qparams for the scaled min/max inputs
108
+ # Scale the input by the equalization scale located at the same column
109
+ # index
110
+ (min_inputs, max_inputs) = self.get_input_minmax()
111
+ equalization_scale_reshaped = reshape_scale(self.equalization_scale, 0, min_inputs)
112
+ min_input_scaled = torch.min(torch.mul(min_inputs, equalization_scale_reshaped))
113
+ max_input_scaled = torch.max(torch.mul(max_inputs, equalization_scale_reshaped))
114
+
115
+ return min_input_scaled, max_input_scaled
116
+
117
+ with_args = classmethod(_with_args)
118
+
119
+
120
+ class _WeightEqualizationObserver(nn.Module):
121
+ r"""Observer for tracking the running min/max values of weight columns and
122
+ rows, and computing the quantization parameters for the weight rows.
123
+
124
+ Args:
125
+ dtype: Quantized data type
126
+ qscheme: Quantization scheme
127
+ quant_min: Minimum quantization value. If unspecified, it will
128
+ follow the 8-bit setup.
129
+ quant_max: Maximum quantization value. If unspecified, it will
130
+ follow the 8-bit setup.
131
+
132
+ This observer is made up of 1 PerChannelMinMaxObserver `weight_col_obs` used
133
+ to record the running minimum and maximum of columns of incoming weight
134
+ tensors. This observer is intended to be used along with an
135
+ InputEqualizationObserver to calculate the equalization scale.
136
+
137
+ The running minimum/maximum :math:`w_\text{min/max}` are computed in the
138
+ same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`.
139
+ """
140
+
141
+ def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None,
142
+ quant_max=None, factory_kwargs=None) -> None:
143
+ super().__init__()
144
+
145
+ self.dtype = dtype
146
+ self.qscheme = qscheme
147
+ self.ch_axis = 1
148
+
149
+ per_channel_qscheme = qscheme
150
+ if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
151
+ per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme]
152
+ self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
153
+ qscheme=per_channel_qscheme,
154
+ quant_min=quant_min,
155
+ quant_max=quant_max,
156
+ factory_kwargs=factory_kwargs)
157
+
158
+ self.equalization_scale = torch.tensor(1)
159
+
160
+ def forward(self, w_orig):
161
+ if not (w_orig.ndim >= 2 and w_orig.ndim <= 5):
162
+ raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")
163
+
164
+ return self.weight_col_obs(w_orig)
165
+
166
+ def get_weight_col_minmax(self):
167
+ return (self.weight_col_obs.min_val, self.weight_col_obs.max_val)
168
+
169
+ def set_equalization_scale(self, equalization_scale):
170
+ self.equalization_scale = equalization_scale
171
+
172
+ with_args = classmethod(_with_args)
173
+
174
+
175
+ def calculate_equalization_scale(input_obs: _InputEqualizationObserver,
176
+ weight_obs: _WeightEqualizationObserver) -> torch.Tensor:
177
+ r""" Calculates the equalization scale and sets the equalization_scale value
178
+ in the observers.
179
+
180
+ Args:
181
+ input_obs: Observer that tracks the ranges for the input columns
182
+ weight_obs: Observer that tracks the ranges for the weight columns
183
+ """
184
+
185
+ (min_inputs, max_inputs) = input_obs.get_input_minmax()
186
+ (min_weights, max_weights) = weight_obs.get_weight_col_minmax()
187
+
188
+ if not (check_min_max_valid(min_inputs, max_inputs) and check_min_max_valid(min_weights, max_weights)):
189
+ warnings.warn(
190
+ "Must run observer before calling calculate_equalization_scale. " +
191
+ "Returning default equalization scale torch.tensor(1)."
192
+ )
193
+ return torch.tensor(1)
194
+
195
+ if not (min_inputs.shape == min_weights.shape):
196
+ raise ValueError(
197
+ "Input and Weight must have the same column dimension. " +
198
+ f"Found {min_inputs.shape} and {min_weights.shape} shapes instead."
199
+ )
200
+
201
+ equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs))
202
+ # Replace all 'inf', 'nan', 0's with 1s to prevent errors
203
+ equalization_scale[equalization_scale == 0.] = 1
204
+ equalization_scale = torch.nan_to_num(equalization_scale, nan=1, posinf=1, neginf=1)
205
+ return equalization_scale
206
+
207
+
208
+ class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])):
209
+ """
210
+ Describes how to quantize a layer or a part of the network specifically for
211
+ input-weight equalization by providing settings (observer classes) for
212
+ inputs, outputs, and weights.
213
+
214
+ Note that EqualizationQConfig needs to contain observer **classes** (like
215
+ MinMaxObserver) or a callable that returns instances on invocation, not the
216
+ concrete observer instances themselves.
217
+ Quantization function will instantiate observers multiple times for each of
218
+ the layers.
219
+
220
+ Observer classes have usually reasonable default arguments, but they can be
221
+ overwritten with `with_args` method (that behaves like functools.partial):
222
+
223
+ my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8),
224
+ weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8))
225
+ """
226
+ def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity):
227
+ if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module):
228
+ raise ValueError("EqualizationQConfig received observer instance, please pass observer class instead. " +
229
+ "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
230
+ self = super().__new__(cls, input_activation, weight)
231
+ return self
232
+
233
+
234
+ input_equalization_observer = _InputEqualizationObserver.with_args(
235
+ dtype=torch.quint8, qscheme=torch.per_tensor_symmetric)
236
+ weight_equalization_observer = _WeightEqualizationObserver.with_args(
237
+ dtype=torch.qint8, qscheme=torch.per_channel_symmetric)
238
+ default_equalization_qconfig = EqualizationQConfig(input_activation=input_equalization_observer,
239
+ weight=weight_equalization_observer)
240
+
241
+
242
+ def fused_module_supports_equalization(module) -> bool:
243
+ """ Checks if the fused node supports equalization. """
244
+ return type(module) in [nni.LinearReLU, nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d]
245
+
246
+ def nn_module_supports_equalization(module) -> bool:
247
+ """ Checks if the torch.nn node supports equalization. """
248
+ return type(module) in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d]
249
+
250
+ def custom_module_supports_equalization(module) -> bool:
251
+ """ Checks if the custom node supports equalization. """
252
+ return type(module) in CUSTOM_MODULE_SUPP_LIST
253
+
254
+
255
+ def node_supports_equalization(node: Node, modules) -> bool:
256
+ """ Checks if the current node supports equalization
257
+ Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers
258
+ """
259
+ if node.op == 'call_module':
260
+ return nn_module_supports_equalization(modules[str(node.target)]) or \
261
+ fused_module_supports_equalization(modules[str(node.target)]) or \
262
+ custom_module_supports_equalization(modules[str(node.target)])
263
+ elif node.op == 'call_function':
264
+ return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d]
265
+ return False
266
+
267
+ def is_equalization_observer(observer: nn.Module) -> bool:
268
+ return (isinstance(observer, (_InputEqualizationObserver, _WeightEqualizationObserver)))
269
+
270
+
271
+ ###############################################################################
272
+ # Functions for equalization during convert #
273
+ ###############################################################################
274
+
275
+ def get_op_node_and_weight_eq_obs(
276
+ input_eq_obs_node: Node,
277
+ model: GraphModule,
278
+ modules: Dict[str, nn.Module]
279
+ ) -> Tuple[Optional[Node], Optional[_WeightEqualizationObserver]]:
280
+ """ Gets the following weight equalization observer. There should always
281
+ exist a weight equalization observer after an input equalization observer.
282
+
283
+ Returns the operation node that follows the input equalization observer node
284
+ and the weight equalization observer
285
+ """
286
+
287
+ # Find the op node that comes directly after the input equalization observer
288
+ op_node = None
289
+ for user in input_eq_obs_node.users.keys():
290
+ if node_supports_equalization(user, modules):
291
+ op_node = user
292
+ break
293
+
294
+ assert op_node is not None
295
+ if op_node.op == 'call_module':
296
+ # If the op_node is a nn.Linear layer, then it must have a
297
+ # WeightEqualizationObserver configuration
298
+ maybe_equalization_node_name_to_config = _get_observed_graph_module_attr(model, "equalization_node_name_to_qconfig")
299
+ assert maybe_equalization_node_name_to_config is not None
300
+ equalization_node_name_to_qconfig: Dict[str, Any] = maybe_equalization_node_name_to_config # type: ignore[assignment]
301
+ assert equalization_node_name_to_qconfig.get(op_node.name, None) is not None
302
+ weight_eq_obs = equalization_node_name_to_qconfig.get(op_node.name, None).weight()
303
+
304
+ assert isinstance(weight_eq_obs, _WeightEqualizationObserver)
305
+ return op_node, weight_eq_obs
306
+
307
+ elif op_node.op == 'call_function':
308
+ weight_node = maybe_get_weight_eq_obs_node(op_node, modules)
309
+ if weight_node is not None:
310
+ weight_eq_obs = modules[str(weight_node.target)]
311
+ assert isinstance(weight_eq_obs, _WeightEqualizationObserver)
312
+ return op_node, weight_eq_obs
313
+
314
+ return None, None
315
+
316
+ def maybe_get_weight_eq_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> Optional[Node]:
317
+ """ Gets the weight equalization observer node if it exists.
318
+ """
319
+ assert op_node.op == 'call_function'
320
+ for node_arg in op_node.args:
321
+ if node_arg_is_weight(op_node, node_arg):
322
+ assert (isinstance(node_arg, Node) and node_arg.op == 'call_module' and
323
+ isinstance(modules[str(node_arg.target)], _WeightEqualizationObserver))
324
+ return node_arg
325
+ return None
326
+
327
+ def maybe_get_next_input_eq_obs(node: Node, modules: Dict[str, nn.Module]) -> Optional[_InputEqualizationObserver]:
328
+ """ Gets the following input equalization observer if it exists.
329
+
330
+ For example, in the case of connecting linear layers:
331
+ x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
332
+ If the node being passed in is the linear1 node, then we want to return eq_obs2,
333
+ the following equalization observer for linear2.
334
+
335
+ However, if there are no connecting layers:
336
+ x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> add
337
+ Then we want to return None.
338
+
339
+ In the case of an unfused linear-relu layer with a connecting linear layer:
340
+ linear1 -> relu -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
341
+ Since it is unfused, we want to skip over the relu layer and return eq_obs2,
342
+ the following equalization observer for linear2.
343
+ """
344
+
345
+ assert node_supports_equalization(node, modules)
346
+
347
+ # Locate the following nn.ReLU or F.relu node if it exists
348
+ maybe_relu_node = maybe_get_next_module(node, modules, nn.ReLU)
349
+ if maybe_relu_node is None:
350
+ maybe_relu_node = maybe_get_next_module(node, modules, target_functional_type=F.relu)
351
+
352
+ # Locate the following output observer if it exists.
353
+ # We will skip the relu node if it exists.
354
+ maybe_obs_node = (
355
+ maybe_get_next_module(node, modules, ObserverBase)
356
+ if maybe_relu_node is None
357
+ else maybe_get_next_module(maybe_relu_node, modules, ObserverBase)
358
+ )
359
+ if maybe_obs_node is None:
360
+ return None
361
+
362
+ maybe_eq_obs_node = maybe_get_next_module(maybe_obs_node, modules, _InputEqualizationObserver)
363
+ if maybe_eq_obs_node is None:
364
+ return None
365
+
366
+ maybe_eq_obs = modules[str(maybe_eq_obs_node)]
367
+ assert isinstance(maybe_eq_obs, _InputEqualizationObserver)
368
+ return maybe_eq_obs
369
+
370
+ def maybe_get_next_equalization_scale(node: Node, modules: Dict[str, nn.Module]) -> Optional[torch.Tensor]:
371
+ """ If the next next node is an InputEqualizationObserver then we want to
372
+ return its equalization scale, else we return 1
373
+
374
+ This is used in the case where there are two connecting linear layers:
375
+ linear1 -> LinearOutObs -> InputEqObs -> linear2
376
+ In this case, the node given is linear1 and we want to locate the InputEqObs.
377
+ """
378
+ next_inp_eq_obs = maybe_get_next_input_eq_obs(node, modules)
379
+ if next_inp_eq_obs:
380
+ if next_inp_eq_obs.equalization_scale.nelement() == 1 and \
381
+ next_inp_eq_obs.equalization_scale == torch.tensor(1):
382
+ return None
383
+ return next_inp_eq_obs.equalization_scale
384
+ return None
385
+
386
+ def scale_input_observer(node: Node, modules: Dict[str, nn.Module]) -> None:
387
+ """ Scales the following input quantization observer's min/max values by
388
+ updating the values with the scaled min/max values calculated by the input
389
+ equalization observer
390
+ """
391
+ input_eq_obs = modules[str(node.target)]
392
+ assert isinstance(input_eq_obs, _InputEqualizationObserver)
393
+
394
+ input_quant_obs_node = node.args[0]
395
+ assert isinstance(input_quant_obs_node, Node)
396
+
397
+ input_quant_obs = modules[str(input_quant_obs_node.target)]
398
+ if not isinstance(input_quant_obs, ObserverBase):
399
+ return
400
+
401
+ min_input_scaled, max_input_scaled = input_eq_obs.calculate_scaled_minmax()
402
+ if min_input_scaled is None and max_input_scaled is None:
403
+ return
404
+ input_quant_obs.min_val = min_input_scaled
405
+ input_quant_obs.max_val = max_input_scaled
406
+
407
+ def scale_weight_node(
408
+ node: Node,
409
+ modules: Dict[str, nn.Module],
410
+ equalization_scale: torch.Tensor,
411
+ next_equalization_scale: Optional[torch.Tensor],
412
+ ) -> None:
413
+ """ Scale the weights for input-weight equalization by multiplying the
414
+ weight by 1/equalization_scale and next_equalization_scale
415
+
416
+ Args:
417
+ node: Current node whose weights we want to scale
418
+ equalization_scale: Current node's calculated equalization scale
419
+ next_equalization_scale: Next node's calculated equalization scale if
420
+ the following node needs to be equalized, 1 otherwise
421
+ """
422
+ if equalization_scale is None:
423
+ return
424
+
425
+ if fused_module_supports_equalization(modules[str(node.target)]):
426
+ op_module = modules[str(node.target)][0] # type: ignore[index]
427
+ else:
428
+ op_module = modules[str(node.target)]
429
+ assert nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module)
430
+
431
+ # Scale the weights for input-weight equalization
432
+ # If the following layer needs to be equalized then we will multiply its scale
433
+ weight = op_module.weight
434
+ assert isinstance(weight, torch.Tensor)
435
+
436
+ # Scale the weights by the reciprocal of the equalization scale
437
+ # Reshape the equalization scale so that we can multiply it to the weight along axis=1
438
+ equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)
439
+ scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))
440
+
441
+ if next_equalization_scale is None:
442
+ op_module.weight = nn.Parameter(scaled_weight)
443
+ return
444
+
445
+ # Multiply the weights row wise by the next equalization scale
446
+ # Reshape the equalization scale so that we can multiply it to the weight along axis=0
447
+ next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, weight)
448
+ scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)
449
+
450
+ op_module.weight = nn.Parameter(scaled_weight)
451
+
452
+ # Multiply the bias element wise by the next equalization scale
453
+ bias = op_module.bias
454
+ if bias is None:
455
+ return
456
+ assert isinstance(bias, torch.Tensor)
457
+
458
+ # Reshape the equalization scale so that we can multiply it element-wise to the bias
459
+ next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)
460
+ scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)
461
+ op_module.bias = nn.Parameter(scaled_bias)
462
+
463
+ def scale_weight_functional(
464
+ op_node: Node,
465
+ model: GraphModule,
466
+ modules: Dict[str, nn.Module],
467
+ equalization_scale: torch.Tensor,
468
+ next_equalization_scale: Optional[torch.Tensor],
469
+ ) -> None:
470
+ """ Scales the weight value for functional layers
471
+ """
472
+ if equalization_scale is None:
473
+ return
474
+
475
+ # From the given op_node, the path looks like:
476
+ # get_attr(weight) -> weight_quant_obs -> weight_eq_obs -> op_node
477
+ # So we want to trace back from the op_node to get the equalization observer
478
+ # node, then the quantization observer node, and then finally the weight
479
+ # node which contains the weight values.
480
+
481
+ # Get the equalization observer node
482
+ weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
483
+ if weight_eq_obs_node is None:
484
+ return
485
+
486
+ # Get the quantization observer node
487
+ weight_quant_obs_node = weight_eq_obs_node.args[0]
488
+ if weight_quant_obs_node is None:
489
+ return
490
+ assert (isinstance(weight_quant_obs_node, Node) and
491
+ isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase))
492
+
493
+ # Get the get_attr(weight) node
494
+ weight_node = weight_quant_obs_node.args[0]
495
+ if weight_node is None:
496
+ return
497
+ assert isinstance(weight_node, Node) and weight_node.op == 'get_attr'
498
+
499
+ weight_parent_name, weight_name = _parent_name(weight_node.target)
500
+ weight = getattr(modules[weight_parent_name], weight_name)
501
+
502
+ # Scale the weights for input-weight equalization
503
+ # If the following layer needs to be equalized then we will multiply its scale
504
+ # Reshape the equalization scale so that we can multiply it to the weight along axis=1
505
+ equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)
506
+ scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))
507
+
508
+ if next_equalization_scale is None:
509
+ setattr(modules[weight_parent_name], weight_name, scaled_weight)
510
+ return
511
+
512
+ # Multiply the weights row wise by the next equalization scale
513
+ # Reshape the equalization scale so that we can multiply it to the weight along axis=1
514
+ next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, scaled_weight)
515
+ scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)
516
+
517
+ setattr(modules[weight_parent_name], weight_name, scaled_weight)
518
+ assert torch.allclose(model.get_buffer(str(weight_node.target)), scaled_weight)
519
+
520
+ # Multiply the bias element wise by the next equalization scale
521
+ bias_node = None
522
+ for node in op_node.args:
523
+ # Find the node containing the weight values
524
+ if isinstance(node, Node) and node.op == 'get_attr' and 'bias' in node.name:
525
+ bias_node = node
526
+ break
527
+ if bias_node is None:
528
+ return
529
+
530
+ bias_parent_name, bias_name = _parent_name(bias_node.target)
531
+ bias = getattr(modules[bias_parent_name], bias_name)
532
+
533
+ # Reshape the equalization scale so that we can multiply it element-wise to the bias
534
+ next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)
535
+ scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)
536
+ setattr(modules[bias_parent_name], bias_name, scaled_bias)
537
+
538
+ def clear_weight_quant_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> None:
539
+ """ Given the operation node, we want find the corresponding quantization
540
+ observer and reset its min/max values
541
+ """
542
+ weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
543
+ if weight_eq_obs_node is None:
544
+ return
545
+
546
+ weight_quant_obs_node = weight_eq_obs_node.args[0]
547
+ if weight_quant_obs_node is None:
548
+ return
549
+ assert isinstance(weight_quant_obs_node, Node)
550
+
551
+ weight_quant_obs = modules[str(weight_quant_obs_node.target)]
552
+ assert isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase)
553
+ weight_quant_obs.reset_min_max_vals() # type: ignore[operator]
554
+
555
+ def remove_node(model: GraphModule, node: Node, prev_node: Node):
556
+ """ Removes the given node from the model by replacing all of its users with
557
+ the given previous node
558
+ """
559
+ # For all of the current node's users, replace the current node with
560
+ # the input quantization observer node
561
+ orig_users = list(node.users.keys())
562
+ for user_node in orig_users:
563
+ user_node.replace_input_with(node, prev_node)
564
+
565
+ # Erase the InputEqualizationObserver node
566
+ model.graph.erase_node(node)
567
+
568
+ def update_obs_for_equalization(model: GraphModule, modules: Dict[str, nn.Module]) -> Dict[str, _WeightEqualizationObserver]:
569
+ """ Update all of the observer's equalization scale. For each
570
+ InputEqualizationObserver, we will find the location of the next
571
+ WeightEqualizationObserver, create it, and calculate the equalization scale
572
+ based on the two observers.
573
+
574
+ We will then return a dictionary mapping operation node names to
575
+ the corresponding WeightEqualizationObservers for that operation.
576
+ """
577
+ weight_eq_obs_dict = {}
578
+ for node in model.graph.nodes:
579
+ if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
580
+ input_eq_obs = modules[node.target]
581
+ assert isinstance(input_eq_obs, _InputEqualizationObserver)
582
+ op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)
583
+
584
+ if op_node is None or weight_eq_obs is None:
585
+ continue
586
+
587
+ if op_node.op == 'call_module':
588
+ # Calibrate the weight equalization observer since it has just
589
+ # been created
590
+ if fused_module_supports_equalization(modules[str(op_node.target)]):
591
+ module = modules[str(op_node.target)][0] # type: ignore[index]
592
+ assert nn_module_supports_equalization(module)
593
+ weight_eq_obs(module.weight)
594
+ else:
595
+ weight_eq_obs(modules[str(op_node.target)].weight)
596
+
597
+ # Calculate and set the equalization scale values
598
+ equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)
599
+ input_eq_obs.set_equalization_scale(equalization_scale)
600
+ weight_eq_obs.set_equalization_scale(equalization_scale)
601
+
602
+ weight_eq_obs_dict[op_node.name] = weight_eq_obs
603
+
604
+ return weight_eq_obs_dict
605
+
606
+ def convert_eq_obs(
607
+ model: GraphModule,
608
+ modules: Dict[str, nn.Module],
609
+ weight_eq_obs_dict: Dict[str, _WeightEqualizationObserver],
610
+ ) -> None:
611
+ """ Converts the equalization operations and updates the other nodes in the
612
+ following way:
613
+ - Removes the input equalization observers and inserts a mul operator
614
+ along with an equalization scale node wherever applicable (we do not
615
+ want to insert a mul operator between connecting linear layers).
616
+ - Updates the input quantization observers with the scaled input min/max
617
+ values.
618
+ - Scales the weights by the current and next equalization scales.
619
+ - Removes the weight equalization observer node if it exists.
620
+
621
+ Before (after prepare):
622
+ weight values
623
+ |
624
+ WeightQuantObs
625
+ |
626
+ WeightEqObs
627
+ |
628
+ x -> InpQuantObs -> InpEqObs -> linear -> OutQuantObs
629
+
630
+ After this function:
631
+ scaled weight values
632
+ |
633
+ equalization scale WeightQuantObs
634
+ | |
635
+ x -> mul -> InpQuantObs (scaled min/max) -> linear -> OutQuantObs
636
+
637
+ After convert:
638
+ equalization scale scaled weight values
639
+ | |
640
+ x -> mul -> quantize_per_tensor -> quantized::linear
641
+
642
+ Note that although the equalization observer appeared after the quantization
643
+ observer after prepare_fx, the mul node appears before the quantization node
644
+ after convert_fx. This is because placing the equalization observer after
645
+ the quantization observer in prepare_fx would allow us to keep the invariant
646
+ that the graph before the current node inserts its observers is not
647
+ modified.
648
+
649
+ Having the equalization observer before the quantization observer would also
650
+ cause some inconsistences between the ordering of the quantization and
651
+ equalization observers.
652
+ For example, a single linear layer would look like:
653
+ x -> InpEqObs1 -> InpQuantObs1 -> linear1 -> OutQuantObs1
654
+ But between two connected linear layers, it would look like:
655
+ linear1 -> OutQuantObs1 -> InpEqObs2 -> linear2 -> OutQuantObs2
656
+ """
657
+ for node in model.graph.nodes:
658
+ if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
659
+ inp_quant_obs_node = node.args[0]
660
+ prev_node = inp_quant_obs_node.args[0]
661
+
662
+ # If the previous node is a layer that needs to be equalized, then
663
+ # we will remove the current node because we do not need to add any
664
+ # equalization nodes between two layers that need to be equalized
665
+
666
+ # Before: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> input_eq_obs2 (node) -> linear2
667
+ # After: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> linear2
668
+ if node_supports_equalization(prev_node, modules) or "relu" in prev_node.name:
669
+ remove_node(model, node, inp_quant_obs_node)
670
+ continue
671
+
672
+ # Update the following input quantization observer's min/max values
673
+ scale_input_observer(node, modules)
674
+
675
+ # Remove the InputEqualization node and add a mul operator before
676
+ # the quantization observer node that appears before the equalization node
677
+ # Before: x -> input_quant_obs -> input_eq_obs -> linear
678
+ # After: x -> mul -> input_quant_obs -> linear
679
+
680
+ # Create a node containing the equalization scale
681
+ with model.graph.inserting_before(inp_quant_obs_node):
682
+ get_new_eq_scale_name = get_new_attr_name_with_prefix(prev_node.name + '_equalization_scale')
683
+ name = get_new_eq_scale_name(modules)
684
+ setattr(model, name, modules[node.target].equalization_scale)
685
+ eq_scale_node = model.graph.create_node('get_attr', name)
686
+
687
+ # Create a node multiplying the input with the equalization scale
688
+ with model.graph.inserting_after(eq_scale_node):
689
+ inputs = (prev_node, eq_scale_node)
690
+ mul_node = model.graph.create_node("call_function", torch.mul, inputs)
691
+
692
+ # Set the mul nod to be the input_quant_obs_node's input instead of
693
+ # the previous node
694
+ inp_quant_obs_node.replace_input_with(prev_node, mul_node)
695
+ remove_node(model, node, inp_quant_obs_node)
696
+
697
+ elif weight_eq_obs_dict.get(node.name, None) is not None:
698
+ weight_eq_obs = weight_eq_obs_dict.get(node.name)
699
+ assert isinstance(weight_eq_obs, _WeightEqualizationObserver)
700
+ equalization_scale = weight_eq_obs.equalization_scale
701
+
702
+ if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
703
+ equalization_scale = None # type: ignore[assignment]
704
+ maybe_next_equalization_scale = maybe_get_next_equalization_scale(node, modules)
705
+
706
+ # Scale the weight nodes
707
+ if node.op == 'call_module':
708
+ scale_weight_node(node, modules, equalization_scale, maybe_next_equalization_scale)
709
+ elif node.op == 'call_function':
710
+ scale_weight_functional(node, model, modules, equalization_scale, maybe_next_equalization_scale)
711
+
712
+ weight_eq_obs_node = maybe_get_weight_eq_obs_node(node, modules)
713
+ if weight_eq_obs_node is None:
714
+ return
715
+ assert isinstance(modules[str(weight_eq_obs_node.target)], _WeightEqualizationObserver)
716
+
717
+ # Clear the quantization observer's min/max values so that they
718
+ # can get updated later based on the new scale values
719
+ clear_weight_quant_obs_node(node, modules)
720
+
721
+ # Erase the weight equalization observer node
722
+ prev_node = weight_eq_obs_node.args[0]
723
+ remove_node(model, weight_eq_obs_node, prev_node)
724
+ else:
725
+ raise ValueError("Expected operation node to be 'call_module' or 'call_function" +
726
+ f"Instead got node {node.name} as '{node.op}'.")
727
+
728
+ def _convert_equalization_ref(model: GraphModule):
729
+ """ Reference function which applies changes needed for equalization, but
730
+ does not quantize the nodes
731
+ """
732
+ modules = dict(model.named_modules(remove_duplicate=False))
733
+
734
+ # Calculate the equalization scale, update the observers with the scaled
735
+ # inputs, and scale the weight
736
+ weight_eq_obs_dict = update_obs_for_equalization(model, modules)
737
+ convert_eq_obs(model, modules, weight_eq_obs_dict)
738
+
739
+ return GraphModule(model, model.graph)
740
+
741
+
742
+ ###############################################################################
743
+ # Functions for running the equalized model on the Numeric Suite #
744
+ ###############################################################################
745
+
746
+ def get_layer_sqnr_dict(model_a: nn.Module, model_b: nn.Module, x: torch.Tensor) -> Dict[str, float]:
747
+ """ Runs the Numeric Suite on model_a and model_b and returns a dictionary
748
+ containing the SQNR between layers in model_a and model_b.
749
+
750
+ Note: In order to support equalized models, this function has a hacky fix in
751
+ which we do not match any torch.mul operators. This is because equalized
752
+ models contain extra mul operators to scale the input by the equalization
753
+ scale, but this edge case has not been resolved yet within the numeric suite code.
754
+
755
+ Args:
756
+ model_a: A float model
757
+ model_b: A quantized model
758
+ x: Inputs to use during calibration
759
+ """
760
+ import torch.ao.ns._numeric_suite_fx as ns
761
+ from torch.ao.ns.fx.mappings import get_unmatchable_types_map
762
+
763
+ unmatchable_types_map = get_unmatchable_types_map()
764
+ unmatchable_types_map["funs_unmatchable"].add(torch.mul)
765
+
766
+ model_a_ns, model_b_ns = ns.add_loggers(
767
+ 'fp32', model_a,
768
+ 'int8', model_b,
769
+ ns.OutputLogger,
770
+ unmatchable_types_map=unmatchable_types_map
771
+ )
772
+
773
+ model_a_ns(x)
774
+ model_b_ns(x)
775
+
776
+ activation_comparison_dict = ns.extract_logger_info(
777
+ model_a_ns,
778
+ model_b_ns,
779
+ ns.OutputLogger,
780
+ 'int8')
781
+ ns.extend_logger_results_with_comparison(
782
+ activation_comparison_dict,
783
+ 'fp32', 'int8',
784
+ torch.ao.ns.fx.utils.compute_sqnr, 'sqnr'
785
+ )
786
+
787
+ # Construct a dictionary mapping layer names to the SQNR values
788
+ layer_sqnr_dict = {}
789
+ for key in activation_comparison_dict:
790
+ layer = activation_comparison_dict[key]['node_output']['int8'][0]['fqn']
791
+ sqnr = activation_comparison_dict[key]['node_output']['int8'][0]['sqnr'][0]
792
+ layer_sqnr_dict[layer] = sqnr
793
+
794
+ return layer_sqnr_dict
795
+
796
+ def get_equalization_qconfig_dict(
797
+ layer_sqnr_dict: Dict[str, float],
798
+ num_layers_to_equalize: int
799
+ ) -> Any:
800
+ """ Given the layer to SQNR dictionary, find the layers with the highest
801
+ quantization errors, and return an equalization_qconfig_dict
802
+ specifying to only equalize those top layers.
803
+
804
+ Args:
805
+ layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found
806
+ when comparing an equalized model against a float model)
807
+ num_layers_to_equalize: Number of layers with the highest quantization
808
+ errors to equalize
809
+ """
810
+
811
+ # Sort the layer_sqnr_dictionary values and get the layers with the lowest
812
+ # SQNR values (aka highest quantization errors)
813
+ layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=lambda item: item[1])
814
+ layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]
815
+
816
+ # Constructs an equalization_qconfig_dict that specifies to only equalize
817
+ # the layers with the highest quantization errors
818
+ module_to_qconfig_list = [(item[0], default_equalization_qconfig) for item in layers_to_equalize]
819
+ equalization_qconfig_dict = {"module_name": module_to_qconfig_list}
820
+ return equalization_qconfig_dict
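+ # Illustrative workflow (a sketch, not part of this module's API): assuming a float
+ # model `model_fp32`, an equalized-and-quantized model `model_int8`, and a sample
+ # calibration input `calib_x` (hypothetical names), the two helpers above can be
+ # combined to target only the layers with the worst quantization error:
+ #
+ #     layer_sqnr_dict = get_layer_sqnr_dict(model_fp32, model_int8, calib_x)
+ #     eq_qconfig_dict = get_equalization_qconfig_dict(layer_sqnr_dict, num_layers_to_equalize=3)
+ #
+ # The resulting dict can then be passed to the FX prepare step as its equalization
+ # config (the exact parameter name may differ across PyTorch versions).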
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py ADDED
@@ -0,0 +1,1170 @@
1
+ import torch
2
+ from torch.fx import map_arg, Node
3
+ from torch.fx.graph import Graph
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import torch.ao.nn.intrinsic as nni
7
+ import torch.ao.nn.intrinsic.quantized as nniq
8
+ import torch.ao.nn.intrinsic.quantized.dynamic as nniqd
9
+ import torch.ao.nn.quantized as nnq
10
+ import torch.ao.nn.quantized.dynamic as nnqd
11
+ import torch.ao.nn.quantized.reference as nnqr
12
+ from torch.ao.nn.quantized.modules.utils import WeightedQuantizedModule
13
+ from torch.fx import GraphModule
14
+ from .utils import (
15
+ collect_producer_nodes,
16
+ get_linear_prepack_op_for_dtype,
17
+ get_new_attr_name_with_prefix,
18
+ get_qconv_prepack_op,
19
+ graph_module_from_producer_nodes,
20
+ )
21
+ from ..utils import _parent_name
22
+ from ..qconfig import QConfigAny
23
+ from ..quantization_mappings import get_quantized_operator
24
+ from .utils import create_node_from_old_node_preserve_meta
25
+ from typing import Dict, Tuple, Type, List, Callable, Any, Union, Set, Optional
26
+ import operator
27
+
28
+ QOP_TO_ARG_NAMES_TO_SKIP = {
29
+ torch._ops.ops.quantized.hardswish: ['inplace'],
30
+ torch._ops.ops.quantized.elu: ['inplace'],
31
+ torch._ops.ops.quantized.dropout: ['inplace'],
32
+ torch._ops.ops.quantized.instance_norm:
33
+ ['running_mean', 'running_var', 'use_input_stats', 'momentum'],
34
+ }
35
+
36
+ def _is_node_in_list(node, modules, func_list, method_list, module_type_list):
37
+ is_call_function = node.op == "call_function" and node.target in func_list
38
+ is_call_method = node.op == "call_method" and node.target in method_list
39
+ is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
40
+ return is_call_function, is_call_method, is_call_module
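+ # The is_*_node helpers below classify an FX node against fixed lists of functions,
+ # method names, and module types, returning the same 3-tuple of booleans as
+ # _is_node_in_list: (is_call_function, is_call_method, is_call_module). They are
+ # used, directly or via is_special_pattern_node, by special_pattern_replacement
+ # further down to decide how a reference pattern should be lowered.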
41
+
42
+ def is_fixed_qparams_node(node, modules):
43
+ func_list = [
44
+ torch.nn.functional.hardsigmoid,
45
+ torch.nn.functional.sigmoid,
46
+ torch.sigmoid,
47
+ torch.tanh,
48
+ ]
49
+ method_list = [
50
+ "hardsigmoid",
51
+ "hardsigmoid_",
52
+ "sigmoid",
53
+ "sigmoid_",
54
+ "tanh",
55
+ "tanh_",
56
+ ]
57
+ module_type_list = [
58
+ torch.nn.Hardsigmoid,
59
+ torch.nn.Sigmoid,
60
+ torch.nn.Tanh,
61
+ torch.nn.Softmax,
62
+ ]
63
+ return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
64
+
65
+ def is_default_node(node, modules):
66
+ func_list = [
67
+ torch.nn.functional.elu,
68
+ torch.nn.functional.hardswish,
69
+ torch.nn.functional.instance_norm,
70
+ torch.nn.functional.layer_norm,
71
+ torch.nn.functional.leaky_relu,
72
+ torch.nn.functional.dropout,
73
+ ]
74
+ method_list: List[Any] = []
75
+ module_type_list = [
76
+ nnqr.ConvTranspose1d,
77
+ nnqr.ConvTranspose2d,
78
+ nnqr.ConvTranspose3d,
79
+ torch.nn.ELU,
80
+ torch.nn.LeakyReLU,
81
+ torch.nn.Hardswish,
82
+ torch.nn.InstanceNorm1d,
83
+ torch.nn.InstanceNorm2d,
84
+ torch.nn.InstanceNorm3d,
85
+ torch.nn.LayerNorm,
86
+ torch.nn.Dropout,
87
+ torch.nn.PReLU,
88
+ torch.nn.BatchNorm2d,
89
+ torch.nn.BatchNorm3d,
90
+ torch.ao.nn.intrinsic.BNReLU2d,
91
+ torch.ao.nn.intrinsic.BNReLU3d,
92
+ ]
93
+ return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
94
+
95
+ def is_copy_node(node, modules):
96
+ func_list = [
97
+ torch.adaptive_avg_pool1d,
98
+ torch.nn.functional.adaptive_avg_pool2d,
99
+ torch.nn.functional.adaptive_avg_pool3d,
100
+ torch.nn.functional.hardtanh,
101
+ torch.nn.functional.hardtanh_,
102
+ torch.nn.functional.interpolate,
103
+ torch.nn.functional.max_pool1d,
104
+ torch.nn.functional.max_pool2d,
105
+ torch.nn.functional.max_pool3d,
106
+ torch.nn.functional.relu,
107
+ torch.nn.functional.relu6,
108
+ torch.avg_pool1d,
109
+ torch._C._nn.avg_pool2d,
110
+ torch._C._nn.avg_pool3d,
111
+ torch.clamp,
112
+ torch.flatten,
113
+ torch.mean,
114
+ operator.floordiv,
115
+ # F.channel_shuffle and torch.channel_shuffle are essentially the same thing
116
+ # so we only need to put one of them here
117
+ torch.channel_shuffle,
118
+ ]
119
+ method_list = [
120
+ "clamp",
121
+ "mean",
122
+ "relu",
123
+ "relu_",
124
+ ]
125
+ module_type_list = [
126
+ torch.nn.AdaptiveAvgPool1d,
127
+ torch.nn.AdaptiveAvgPool2d,
128
+ torch.nn.AdaptiveAvgPool3d,
129
+ torch.nn.AvgPool1d,
130
+ torch.nn.AvgPool2d,
131
+ torch.nn.AvgPool3d,
132
+ torch.nn.Hardtanh,
133
+ torch.nn.MaxPool1d,
134
+ torch.nn.MaxPool2d,
135
+ torch.nn.MaxPool3d,
136
+ torch.nn.ReLU,
137
+ torch.nn.ReLU6,
138
+ torch.nn.ChannelShuffle,
139
+ ]
140
+ return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
141
+
142
+ def is_general_tensor_shape_node(node, modules):
143
+ func_list = [
144
+ torch.narrow,
145
+ torch.transpose,
146
+ torch.repeat_interleave,
147
+ torch.squeeze,
148
+ torch.stack,
149
+ torch.unsqueeze,
150
+ torch.nn.functional.pixel_shuffle,
151
+ torch.nn.functional.pixel_unshuffle,
152
+ ]
153
+ method_list = [
154
+ "contiguous",
155
+ "detach",
156
+ "detach_",
157
+ "permute",
158
+ "repeat",
159
+ "repeat_interleave",
160
+ "reshape",
161
+ "resize_",
162
+ "shape",
163
+ "size",
164
+ "squeeze",
165
+ "squeeze_",
166
+ "transpose",
167
+ "unsqueeze",
168
+ "unsqueeze_",
169
+ "view",
170
+ ]
171
+ module_type_list = [
172
+ torch.nn.Identity,
173
+ torch.nn.PixelShuffle,
174
+ torch.nn.PixelUnshuffle,
175
+ ]
176
+ return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
177
+
178
+ def is_other_node(node, modules):
179
+ func_list = [
180
+ torch.cat,
181
+ ]
182
+ method_list: List[Any] = []
183
+ module_type_list: List[Any] = []
184
+ return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
185
+
186
+ def is_special_pattern_node(node, modules):
187
+ res_function, res_method, res_module = False, False, False
188
+ for checker in [is_fixed_qparams_node, is_default_node, is_copy_node, is_general_tensor_shape_node, is_other_node]:
189
+ is_call_function, is_call_method, is_call_module = checker(node, modules)
190
+ res_function = res_function or is_call_function
191
+ res_method = res_method or is_call_method
192
+ res_module = res_module or is_call_module
193
+ return res_function, res_method, res_module
194
+
195
+ def is_dequantize_node(node):
196
+ return isinstance(node, Node) and node.op == "call_method" and node.target == "dequantize"
197
+
198
+ def is_getattr_tensor_metadata_node(node):
199
+ return node.op == "call_function" and \
200
+ node.target == getattr and \
201
+ node.args[1] in ["shape"]
202
+
203
+ def is_get_tensor_info_node(node):
204
+ return node.op == "call_method" and \
205
+ node.target in ["shape", "size"]
206
+
207
+ def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: Dict[str, QConfigAny]):
208
+ """
209
+ Return True if the op is configured with a None qconfig, False otherwise.
210
+ Note: maybe need to generalize this to also check for the dtype, and we
211
+ only lower when dtype matches, but right now fbgemm/qnnpack only support
212
+ a single dtype, so it is OK for now.
213
+ """
214
+ return op.name in qconfig_map and qconfig_map[op.name] is None
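+ # For example, with qconfig_map = {"linear1": None}, should_skip_lowering returns
+ # True for the op named "linear1" (its qconfig was explicitly set to None), and
+ # False for any op missing from the map or configured with a non-None qconfig.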
215
+
216
+ # Mapping from reference module class to the replacement static quantized module class for lowering
217
+ STATIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[WeightedQuantizedModule]] = {
218
+ nnqr.Linear: nnq.Linear,
219
+ nnqr.Conv1d: nnq.Conv1d,
220
+ nnqr.Conv2d: nnq.Conv2d,
221
+ nnqr.Conv3d: nnq.Conv3d,
222
+ }
223
+
224
+ # Mapping from reference module class to the replacement dynamic quantized module class for lowering
225
+ DYNAMIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
226
+ nnqr.Linear: nnqd.Linear,
227
+ nnqr.GRUCell: nnqd.GRUCell,
228
+ nnqr.LSTMCell: nnqd.LSTMCell,
229
+ nnqr.RNNCell: nnqd.RNNCell,
230
+ nnqr.LSTM: nnqd.LSTM,
231
+ nnqr.GRU: nnqd.GRU,
232
+ }
233
+
234
+ # Mapping from reference module class to the replacement weight only quantized module class for lowering
235
+ # TODO: correct the namespace for these modules
236
+ WEIGHT_ONLY_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
237
+ nnqr.Embedding: nnq.Embedding,
238
+ nnqr.EmbeddingBag: nnq.EmbeddingBag,
239
+ }
240
+
241
+ # TODO: merge with STATIC_LOWER_MODULE_MAP after we merge
242
+ # _lower_static_weighted_ref_module and special_pattern_replacement
243
+ SPECIAL_PATTERN_LOWER_MODULE_MAP = {
244
+ nn.BatchNorm2d: nnq.BatchNorm2d,
245
+ nn.BatchNorm3d: nnq.BatchNorm3d,
246
+ nnqr.ConvTranspose1d: nnq.ConvTranspose1d,
247
+ nnqr.ConvTranspose2d: nnq.ConvTranspose2d,
248
+ nnqr.ConvTranspose3d: nnq.ConvTranspose3d,
249
+ nn.ELU: nnq.ELU,
250
+ nn.LeakyReLU: nnq.LeakyReLU,
251
+ nn.Hardswish: nnq.Hardswish,
252
+ nn.InstanceNorm1d: nnq.InstanceNorm1d,
253
+ nn.InstanceNorm2d: nnq.InstanceNorm2d,
254
+ nn.InstanceNorm3d: nnq.InstanceNorm3d,
255
+ nn.LayerNorm: nnq.LayerNorm,
256
+ nn.Dropout: nnq.Dropout,
257
+ nn.Softmax: nnq.Softmax,
258
+ nn.PReLU: nnq.PReLU,
259
+ nni.BNReLU2d: nniq.BNReLU2d,
260
+ nni.BNReLU3d: nniq.BNReLU3d,
261
+ }
262
+
263
+ # Mapping from fused module class to a 2-tuple of:
264
+ # 1) The inner reference module class
265
+ # 2) The replacement static quantized module class for lowering
266
+ STATIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
267
+ nni.LinearReLU: (nnqr.Linear, nniq.LinearReLU),
268
+ # TODO: LinearLeakyReLU is registered as global but it is only fused and
269
+ # lowered when onednn's backend config is used. Maybe need to separate
270
+ # registration and lowering functions for different backends in the future.
271
+ nni.LinearLeakyReLU: (nnqr.Linear, nniq.LinearLeakyReLU),
272
+ nni.LinearTanh: (nnqr.Linear, nniq.LinearTanh),
273
+ nni.ConvReLU1d: (nnqr.Conv1d, nniq.ConvReLU1d),
274
+ nni.ConvReLU2d: (nnqr.Conv2d, nniq.ConvReLU2d),
275
+ nni.ConvReLU3d: (nnqr.Conv3d, nniq.ConvReLU3d),
276
+ }
277
+
278
+ # The difference between STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP and STATIC_LOWER_FUSED_MODULE_MAP:
279
+ # The reference node inside STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP has 2 inputs.
280
+ # Mapping from fused module class to a 2-tuple of:
281
+ # 1) The inner reference module class
282
+ # 2) The replacement static quantized module class for lowering
283
+ STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
284
+ nni.ConvAdd2d: (nnqr.Conv2d, nniq.ConvAdd2d),
285
+ nni.ConvAddReLU2d: (nnqr.Conv2d, nniq.ConvAddReLU2d),
286
+ }
287
+
288
+ # Mapping from fused module class to a 2-tuple of:
289
+ # 1) The inner reference module class
290
+ # 2) The replacement dynamic quantized module class for lowering
291
+ DYNAMIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[nn.Module]]] = {
292
+ nni.LinearReLU: (nnqr.Linear, nniqd.LinearReLU),
293
+ }
294
+
295
+ # Mapping from a functional to lower to a 2-tuple of
296
+ # 1) The quantized version of the op
297
+ # 2) The quantized version of the op fused with relu, if it exists, else None
298
+ STATIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Tuple[Callable, Optional[Callable]]] = {
299
+ F.linear: (torch.ops.quantized.linear, torch.ops.quantized.linear_relu),
300
+ F.conv1d: (torch.ops.quantized.conv1d, torch.ops.quantized.conv1d_relu),
301
+ F.conv2d: (torch.ops.quantized.conv2d, torch.ops.quantized.conv2d_relu),
302
+ F.conv3d: (torch.ops.quantized.conv3d, torch.ops.quantized.conv3d_relu),
303
+ F.conv_transpose1d: (torch.ops.quantized.conv_transpose1d, None),
304
+ F.conv_transpose2d: (torch.ops.quantized.conv_transpose2d, None),
305
+ F.conv_transpose3d: (torch.ops.quantized.conv_transpose3d, None),
306
+ }
307
+
308
+ WEIGHT_PREPACK_OPS: Set[Callable] = {
309
+ torch._ops.ops.quantized.linear_prepack,
310
+ torch._ops.ops.quantized.linear_prepack_fp16,
311
+ torch._ops.ops.quantized.conv1d_prepack,
312
+ torch._ops.ops.quantized.conv2d_prepack,
313
+ torch._ops.ops.quantized.conv3d_prepack,
314
+ torch.ops.quantized.conv_transpose1d_prepack,
315
+ torch.ops.quantized.conv_transpose2d_prepack,
316
+ torch.ops.quantized.conv_transpose3d_prepack,
317
+ }
318
+
319
+ # Mapping from a functional to a dictionary, where the key is a 2-tuple of
320
+ # (input_activation_dtype, weight_dtype) and the value is a 2-tuple of
321
+ # 1) The dynamically quantized version of the op
322
+ # 2) The dynamically quantized version of the op fused with relu, if it exists, else None
323
+ DYNAMIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Dict[Tuple[torch.dtype, torch.dtype], Tuple[Callable, Optional[Callable]]]] = {
324
+ F.linear: {
325
+ (torch.quint8, torch.qint8): (torch.ops.quantized.linear_dynamic,
326
+ torch.ops.quantized.linear_relu_dynamic),
327
+ (torch.float16, torch.float16): (torch.ops.quantized.linear_dynamic_fp16,
328
+ torch.ops.quantized.linear_relu_dynamic_fp16)
329
+ },
330
+ # dynamic conv + relu is not available yet
331
+ F.conv1d: {
332
+ (torch.quint8, torch.qint8): (torch.ops.quantized.conv1d_dynamic, None),
333
+ },
334
+ F.conv2d: {
335
+ (torch.quint8, torch.qint8): (torch.ops.quantized.conv2d_dynamic, None),
336
+ },
337
+ F.conv3d: {
338
+ (torch.quint8, torch.qint8): (torch.ops.quantized.conv3d_dynamic, None),
339
+ },
340
+ }
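+ # For example, DYNAMIC_LOWER_FUNCTIONAL_MAP[F.linear][(torch.quint8, torch.qint8)]
+ # is (torch.ops.quantized.linear_dynamic, torch.ops.quantized.linear_relu_dynamic),
+ # while the conv entries currently have no fused-relu variant (None).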
341
+
342
+ CONV_FUNCTIONAL_OPS: Set[Callable] = {
343
+ F.conv1d,
344
+ F.conv2d,
345
+ F.conv3d,
346
+ }
347
+
348
+ CONV_TRANSPOSE_FUNCTIONAL_OPS: Set[Callable] = {
349
+ F.conv_transpose1d,
350
+ F.conv_transpose2d,
351
+ F.conv_transpose3d,
352
+ }
353
+
354
+ # TODO: add tests for lowering these ops
355
+ QBIN_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
356
+ operator.add: torch.ops.quantized.add,
357
+ torch.add: torch.ops.quantized.add,
358
+ operator.mul: torch.ops.quantized.mul,
359
+ operator.matmul: torch.ops.quantized.matmul,
360
+ torch.mul: torch.ops.quantized.mul,
361
+ torch.matmul: torch.ops.quantized.matmul,
362
+ }
363
+ QBIN_RELU_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
364
+ operator.add: torch.ops.quantized.add_relu,
365
+ torch.add: torch.ops.quantized.add_relu,
366
+ operator.mul: torch.ops.quantized.mul_relu,
367
+ torch.mul: torch.ops.quantized.mul_relu,
368
+ }
369
+
370
+ def _save_packed_weight(self, destination, prefix, keep_vars):
371
+ for attr_name in dir(self):
372
+ if "_packed_weight" in attr_name and \
373
+ isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined]
374
+ packed_weight = getattr(self, attr_name)
375
+ destination[prefix + attr_name] = packed_weight
376
+
377
+ def _load_packed_weight(self, state_dict, prefix, local_metadata, strict,
378
+ missing_keys, unexpected_keys, error_msgs):
379
+ attrs_to_pop = []
380
+ for attr_name in state_dict:
381
+ if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950
382
+ setattr(self, attr_name, state_dict[attr_name])
383
+ attrs_to_pop.append(attr_name)
384
+
385
+ # pop the packed param attributes
386
+ for attr_name in attrs_to_pop:
387
+ state_dict.pop(attr_name)
388
+
389
+ def fold_weight(
390
+ quantized_model: GraphModule,
391
+ node_name_to_scope: Dict[str, Tuple[str, type]]
392
+ ) -> GraphModule:
393
+ """
394
+ Trace back from the weight node until we hit getattr, reconstruct the
395
+ graph module with the traced nodes and run the graph module to pack the
396
+ weight, then replace the original chain of ops with the packed weight.
397
+ """
398
+ packed_weights = {}
399
+ # map from folded node name to the prepacked weight name
400
+ folded_nodes = {}
401
+ # get packed weights
402
+ for node in quantized_model.graph.nodes:
403
+ if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS:
404
+ nodes_to_fold = collect_producer_nodes(node)
405
+ if nodes_to_fold is not None:
406
+ for node_to_fold in nodes_to_fold:
407
+ folded_nodes[node_to_fold.name] = node
408
+
409
+ prepacking_module = graph_module_from_producer_nodes(
410
+ quantized_model, nodes_to_fold)
411
+ packed_weight = prepacking_module()
412
+ packed_weights[node.name] = packed_weight
413
+
414
+ # remove folded nodes and replace the prepacking node with getattr
415
+ folded_graph = Graph()
416
+ env: Dict[Any, Any] = {}
417
+
418
+ def load_arg(a):
419
+ return map_arg(a, lambda node: env[node.name])
420
+
421
+ for node in quantized_model.graph.nodes:
422
+ prepack_node = folded_nodes.get(node.name, None)
423
+ if prepack_node is node:
424
+ packed_weight = packed_weights[node.name]
425
+ # add a prepacked attribute to root
426
+ op_node = next(iter(prepack_node.users))
427
+ module_path, _ = node_name_to_scope[op_node.name]
428
+ get_new_packed_weight_name = \
429
+ get_new_attr_name_with_prefix(module_path + '_packed_weight_')
430
+ packed_weight_name = get_new_packed_weight_name(quantized_model)
431
+ setattr(quantized_model, packed_weight_name, packed_weight)
432
+ # replace prepack node with a getattr node
433
+ env[node.name] = folded_graph.create_node(
434
+ 'get_attr', packed_weight_name, (), {})
435
+ elif prepack_node is not None:
436
+ # remove the folded node
437
+ continue
438
+ else:
439
+ # copy other nodes
440
+ env[node.name] = folded_graph.node_copy(node, load_arg)
441
+
442
+ quantized_model = GraphModule(quantized_model, folded_graph)
443
+ quantized_model._register_state_dict_hook(_save_packed_weight)
444
+ quantized_model._register_load_state_dict_pre_hook(_load_packed_weight, with_module=True)
445
+ return quantized_model
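+ # The two hooks registered above keep the prepacked weights in the state_dict:
+ # _save_packed_weight serializes any `_packed_weight*` script-object attributes on
+ # save, and _load_packed_weight restores them (and pops them from the state_dict)
+ # on load, so a weight-folded model can be saved and reloaded.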
446
+
447
+ def _get_module(node: Node, modules: Dict[str, nn.Module]) -> Optional[nn.Module]:
448
+ """
449
+ Return the `torch.nn.Module` that corresponds to the specified node's target.
450
+ If no such module exists, return None.
451
+ """
452
+ if node.op == "call_module" and str(node.target) in modules:
453
+ return modules[str(node.target)]
454
+ else:
455
+ return None
456
+
457
+ def _match_static_pattern(
458
+ node: Node,
459
+ modules: Dict[str, nn.Module],
460
+ qconfig_map: Dict[str, QConfigAny],
461
+ matching_modules_or_ops: List[Callable],
462
+ dequantize_node_arg_indices: List[int]
463
+ ) -> Union[Tuple[Node, Node, Node], Tuple[None, None, None]]:
464
+ """
465
+ Match the pattern (dequantize - ref node - quantize) against the node provided.
466
+
467
+ If there is a match, return a 3-tuple of:
468
+ 1) q_node: the quantize node,
469
+ 2) relu_node: a relu node wrapping the ref_node, and
470
+ 3) ref_node: a reference module or functional node to replace with its quantized counterpart
471
+ Otherwise, if there is no match, return a 3-tuple of (None, None, None).
472
+
473
+ Parameters:
474
+ node: The `torch.fx.Node` to match against.
475
+ modules: A mapping from node names to modules in the model graph, used for module lookup.
476
+ qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
477
+ If the corresponding qconfig for the reference node is None, then return no match.
478
+ matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
479
+ If the reference node is not in this list, then return no match.
480
+ dequantize_node_arg_indices: A list of indices in the reference node args where dequantize
481
+ nodes may be present. An empty list means skipping the check for dequantize nodes.
482
+ """
483
+ SKIP_LOWERING_VALUE = (None, None, None)
484
+
485
+ # Match quantize node
486
+ if node.op != "call_function" or node.target != torch.quantize_per_tensor:
487
+ return SKIP_LOWERING_VALUE
488
+ q_node = node
489
+ ref_node = q_node.args[0]
490
+ assert isinstance(ref_node, Node)
491
+
492
+ # Handle cases where the node is wrapped in a ReLU
493
+ if (ref_node.op == "call_function" and ref_node.target in (F.relu, torch.relu)) or\
494
+ (ref_node.op == "call_module" and type(_get_module(ref_node, modules)) == nn.ReLU):
495
+ relu_node = ref_node
496
+ ref_node = relu_node.args[0]
497
+ assert isinstance(ref_node, Node)
498
+ else:
499
+ relu_node = None
500
+ if should_skip_lowering(ref_node, qconfig_map):
501
+ return SKIP_LOWERING_VALUE
502
+
503
+ # Match reference module or functional
504
+ if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):
505
+ expected_op = "call_module"
506
+ match_key = type(_get_module(ref_node, modules))
507
+ else:
508
+ expected_op = "call_function"
509
+ match_key = ref_node.target
510
+ if ref_node.op != expected_op or match_key not in matching_modules_or_ops:
511
+ return SKIP_LOWERING_VALUE
512
+
513
+ # Match dequantize node(s). Both of the following conditions must pass:
514
+ # (1) All `torch.fx.Node`s at the matching indices must be a dequantize node
515
+ # (2) There must be at least one dequantize node
516
+ matched_dequantize = False
517
+ for i in dequantize_node_arg_indices:
518
+ assert i < len(ref_node.args), \
519
+ f"Dequantize index {i} exceeded reference node's arg length {len(ref_node.args)}"
520
+ arg = ref_node.args[i]
521
+ if is_dequantize_node(arg):
522
+ matched_dequantize = True
523
+ elif isinstance(arg, Node):
524
+ return SKIP_LOWERING_VALUE
525
+ if not matched_dequantize:
526
+ return SKIP_LOWERING_VALUE
527
+
528
+ return (q_node, relu_node, ref_node)
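+ # Illustration: a successful match corresponds to a graph fragment such as
+ #     dequantize -> ref_node (e.g. a reference linear/conv) [-> relu] -> quantize_per_tensor
+ # and the returned tuple is (quantize node, relu node or None, reference node).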
529
+
530
+ def _match_static_pattern_with_two_inputs(
531
+ node: Node,
532
+ modules: Dict[str, nn.Module],
533
+ qconfig_map: Dict[str, QConfigAny],
534
+ matching_modules_or_ops: List[Callable]
535
+ ) -> Union[Tuple[Node, Node], Tuple[None, None]]:
536
+ """
537
+ Match the pattern ((dequantize, dequantize) - ref node - quantize) against the
538
+ node provided, where the reference node consumes two dequantized inputs.
539
+
540
+ If there is a match, return a 2-tuple of:
541
+ 1) q_node: the quantize node,
542
+ 2) ref_node: a reference module or functional node to replace with its quantized counterpart
543
+ Otherwise, if there is no match, return a 2-tuple of (None, None).
544
+
545
+ Parameters:
546
+ node: The `torch.fx.Node` to match against.
547
+ modules: A mapping from node names to modules in the model graph, used for module lookup.
548
+ qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
549
+ If the corresponding qconfig for the reference node is None, then return no match.
550
+ matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
551
+ If the reference node is not in this list, then return no match.
552
+ """
553
+ SKIP_LOWERING_VALUE = (None, None)
554
+
555
+ # Match quantize node
556
+ if node.op != "call_function" or node.target != torch.quantize_per_tensor:
557
+ return SKIP_LOWERING_VALUE
558
+ q_node = node
559
+ ref_node = q_node.args[0]
560
+ assert isinstance(ref_node, Node)
561
+
562
+ if should_skip_lowering(ref_node, qconfig_map):
563
+ return SKIP_LOWERING_VALUE
564
+
565
+ # Match reference module or functional
566
+ if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):
567
+ expected_op = "call_module"
568
+ match_key = type(_get_module(ref_node, modules))
569
+ else:
570
+ # This pass only supports reference nodes with op "call_module"
571
+ return SKIP_LOWERING_VALUE
572
+
573
+ if ref_node.op != expected_op or match_key not in matching_modules_or_ops:
574
+ return SKIP_LOWERING_VALUE
575
+
576
+ # Check that ref_node has 2 input nodes, both of which are dequantize nodes.
577
+ if len(ref_node.args) != 2:
578
+ return SKIP_LOWERING_VALUE
579
+ for i in range(len(ref_node.args)):
580
+ arg = ref_node.args[i]
581
+ if not is_dequantize_node(arg):
582
+ return SKIP_LOWERING_VALUE
583
+
584
+ return (q_node, ref_node)
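+ # Illustration: a successful match corresponds to a graph fragment such as
+ #     (dequantize, dequantize) -> ref_module (e.g. a reference nni.ConvAdd2d) -> quantize_per_tensor
+ # and the returned tuple is (quantize node, reference node).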
585
+
586
+ def _lower_static_weighted_ref_module(
587
+ model: GraphModule,
588
+ qconfig_map: Dict[str, QConfigAny]):
589
+ """
590
+ Traverse the graph and find dequantize - ref module - quantize patterns
591
+ and replace them with the quantized version of the ref module.
592
+ """
593
+ modules = dict(model.named_modules(remove_duplicate=False))
594
+ nodes = list(model.graph.nodes)
595
+ for n in model.graph.nodes:
596
+ # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize)
597
+ matching_modules = list(STATIC_LOWER_MODULE_MAP.keys()) + list(STATIC_LOWER_FUSED_MODULE_MAP.keys())
598
+ (q_node, relu_node, ref_node) = _match_static_pattern(
599
+ n, modules, qconfig_map, matching_modules, dequantize_node_arg_indices=[0]) # type: ignore[arg-type]
600
+ if q_node is None:
601
+ continue
602
+ assert ref_node is not None
603
+ (_, scale_node, zero_point_node, _) = q_node.args
604
+ ref_module = _get_module(ref_node, modules)
605
+ ref_class = type(ref_module)
606
+ assert isinstance(scale_node, Node)
607
+ assert isinstance(zero_point_node, Node)
608
+ assert issubclass(ref_class, nn.Module)
609
+
610
+ # Step 1: Change this pattern to use the corresponding quantized module
611
+ # For fused modules, we also check whether the inner module is a reference module
612
+ # If so, we replace the entire fused module with the corresponding quantized module
613
+ if ref_class in STATIC_LOWER_FUSED_MODULE_MAP:
614
+ inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_MAP[ref_class]
615
+ if type(ref_module[0]) != inner_ref_class: # type: ignore[index]
616
+ continue
617
+ else:
618
+ q_class = STATIC_LOWER_MODULE_MAP[ref_class]
619
+ output_scale = getattr(model, scale_node.target)
620
+ output_zero_point = getattr(model, zero_point_node.target)
621
+ q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
622
+ # replace reference module with quantized module
623
+ parent_name, module_name = _parent_name(ref_node.target)
624
+ setattr(modules[parent_name], module_name, q_module)
625
+
626
+ # Step 2: Reroute around dq_node, and remove q_node and its args
627
+ assert len(ref_node.args) == 1
628
+ dq_node = ref_node.args[0]
629
+ assert isinstance(dq_node, Node)
630
+ ref_node.replace_input_with(dq_node, dq_node.args[0])
631
+ q_node.replace_all_uses_with(ref_node)
632
+ model.graph.erase_node(q_node)
633
+ model.graph.erase_node(scale_node)
634
+ model.graph.erase_node(zero_point_node)
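+ # Net effect of this pass on a matched pattern: a fragment such as
+ #     x_q -> dequantize -> reference module (e.g. nnqr.Linear) -> quantize_per_tensor(scale, zp)
+ # is rewritten so the quantized module consumes the quantized tensor directly:
+ #     x_q -> quantized module (e.g. nnq.Linear built via from_reference with scale/zp)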
635
+
636
+ def _lower_static_weighted_ref_module_with_two_inputs(
637
+ model: GraphModule,
638
+ qconfig_map: Dict[str, QConfigAny]):
639
+ """
640
+ Traverse the graph and find patterns
641
+ dequantize dequantize
642
+ \\ //
643
+ ref module
644
+ \\
645
+ quantize
646
+ and replace them with the quantized version of the ref module.
647
+ """
648
+ modules = dict(model.named_modules(remove_duplicate=False))
649
+ nodes = list(model.graph.nodes)
650
+ for n in model.graph.nodes:
651
+ # Step 0: Find nodes that match this pattern:
652
+ # (dequantize, dequantize) - ref module - quantize
653
+ matching_modules = list(STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP.keys())
654
+ (q_node, ref_node) = _match_static_pattern_with_two_inputs(
655
+ n, modules, qconfig_map, matching_modules) # type: ignore[arg-type]
656
+ if q_node is None:
657
+ continue
658
+ assert ref_node is not None
659
+ (_, scale_node, zero_point_node, _) = q_node.args
660
+ ref_module = _get_module(ref_node, modules)
661
+ ref_class = type(ref_module)
662
+ assert isinstance(scale_node, Node)
663
+ assert isinstance(zero_point_node, Node)
664
+ assert issubclass(ref_class, nn.Module)
665
+
666
+ # Step 1: Change this pattern to use the corresponding quantized module
667
+ # For fused modules, we also check whether the inner module is a reference module
668
+ # If so, we replace the entire fused module with the corresponding quantized module
669
+ if ref_class in STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP:
670
+ inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP[ref_class]
671
+ if type(ref_module[0]) != inner_ref_class: # type: ignore[index]
672
+ continue
673
+ else:
674
+ continue
675
+ output_scale = getattr(model, scale_node.target)
676
+ output_zero_point = getattr(model, zero_point_node.target)
677
+ q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
678
+ # replace reference module with quantized module
679
+ parent_name, module_name = _parent_name(ref_node.target)
680
+ setattr(modules[parent_name], module_name, q_module)
681
+
682
+ # Step 2: Reroute around dq_node, and remove q_node and its args
683
+ assert len(ref_node.args) == 2
684
+ for arg in ref_node.args:
685
+ if not is_dequantize_node(arg):
686
+ continue
687
+ dq_node = arg
688
+ assert isinstance(dq_node, Node)
689
+ ref_node.replace_input_with(dq_node, dq_node.args[0])
690
+
691
+ q_node.replace_all_uses_with(ref_node)
692
+ model.graph.erase_node(q_node)
693
+ model.graph.erase_node(scale_node)
694
+ model.graph.erase_node(zero_point_node)
695
+
696
+ def _lower_dynamic_weighted_ref_module(model: GraphModule):
697
+ """
698
+ Traverse the graph and find quantize_per_tensor_dynamic - dequantize - ref_module patterns
699
+ and replace them with the dynamically quantized version of the ref module.
700
+ """
701
+ named_modules = dict(model.named_modules(remove_duplicate=False))
702
+ for n in model.graph.nodes:
703
+ if n.op != "call_module" or \
704
+ type(named_modules[str(n.target)]) not in \
705
+ set(DYNAMIC_LOWER_MODULE_MAP.keys()).union(
706
+ set(DYNAMIC_LOWER_FUSED_MODULE_MAP.keys())):
707
+ continue
708
+ ref_node = n
709
+ dq_node = ref_node.args[0]
710
+ if dq_node.op != "call_method" or dq_node.target != "dequantize":
711
+ continue
712
+
713
+ input_dynamic_q_node = dq_node.args[0]
714
+
715
+ if input_dynamic_q_node.op != "call_function" or \
716
+ input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
717
+ continue
718
+
719
+ activation_dtype = input_dynamic_q_node.args[1]
720
+ is_fp16 = activation_dtype == torch.float16
721
+ is_int8 = activation_dtype in [torch.quint8, torch.qint8]
722
+ if not is_int8 and not is_fp16:
723
+ continue
724
+
725
+ ref_module = named_modules[str(ref_node.target)]
726
+ ref_class = type(ref_module)
727
+ if ref_class in DYNAMIC_LOWER_FUSED_MODULE_MAP:
728
+ inner_ref_class, q_class = DYNAMIC_LOWER_FUSED_MODULE_MAP[ref_class]
729
+ if type(ref_module[0]) != inner_ref_class:
730
+ continue
731
+ else:
732
+ q_class = DYNAMIC_LOWER_MODULE_MAP.get(ref_class) # type: ignore[assignment]
733
+ # TODO: maybe define a WeightedDynamicallyQuantizedModule
734
+ q_module = q_class.from_reference(ref_module) # type: ignore[attr-defined]
735
+
736
+ # replace reference module with dynamically quantized module
737
+ parent_name, module_name = _parent_name(ref_node.target)
738
+ setattr(named_modules[parent_name], module_name, q_module)
739
+ ref_node.replace_input_with(dq_node, input_dynamic_q_node.args[0])
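+ # Net effect of this pass: a fragment such as
+ #     x -> quantize_per_tensor_dynamic -> dequantize -> reference module (e.g. nnqr.Linear)
+ # is rewired so the dynamically quantized module (e.g. nnqd.Linear) consumes the
+ # float input directly; the now-unused quantize/dequantize nodes are typically
+ # removed later by dead code elimination in _lower_to_native_backend.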
740
+
741
+ def _lower_weight_only_weighted_ref_module(model: GraphModule):
742
+ """
743
+ Traverse the graph and find ref_module patterns
744
+ and replace them with the weight only quantized version of the ref module.
745
+ """
746
+ named_modules = dict(model.named_modules(remove_duplicate=False))
747
+ for n in model.graph.nodes:
748
+ if n.op != "call_module" or \
749
+ type(named_modules[str(n.target)]) not in \
750
+ set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()):
751
+ continue
752
+ ref_node = n
753
+ ref_module = named_modules[str(ref_node.target)]
754
+ ref_class = type(ref_module)
755
+ q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class)
756
+ # TODO: WeightedQuantizedModule is currently assuming static quant apis
757
+ # with output_scale, output_zero_point in from_reference, we may want to
758
+ # relax that, or rename this
759
+ # TODO: maybe define a WeightedWeightOnlyQuantizedModule
760
+ q_module = q_class.from_reference(ref_module) # type: ignore[union-attr]
761
+
762
+ # replace reference module with dynamically quantized module
763
+ parent_name, module_name = _parent_name(ref_node.target)
764
+ setattr(named_modules[parent_name], module_name, q_module)
765
+
766
+ def _lower_static_weighted_ref_functional(
767
+ model: GraphModule,
768
+ qconfig_map: Dict[str, QConfigAny]):
769
+ """
770
+ Traverse the graph and replace functional reference patterns with their quantized versions.
771
+ """
772
+ modules = dict(model.named_modules(remove_duplicate=False))
773
+ nodes = list(model.graph.nodes)
774
+ for n in model.graph.nodes:
775
+ # Step 0: Find nodes that match this pattern (dequantize - functional op - quantize)
776
+ matching_ops = list(STATIC_LOWER_FUNCTIONAL_MAP.keys())
777
+ (q_node, relu_node, func_node) = _match_static_pattern(
778
+ n, modules, qconfig_map, matching_ops, dequantize_node_arg_indices=[0, 1])
779
+ if q_node is None:
780
+ continue
781
+ assert func_node is not None
782
+ (_, output_scale_node, output_zp_node, _) = q_node.args
783
+ (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
784
+ assert isinstance(output_zp_node, Node)
785
+ assert isinstance(input_dq_node, Node)
786
+ assert isinstance(weight_dq_node, Node)
787
+ quantized_weight = weight_dq_node.args[0]
788
+ assert isinstance(quantized_weight, Node)
789
+ if quantized_weight.op != "call_function" or\
790
+ quantized_weight.target not in (torch.quantize_per_tensor, torch.quantize_per_channel):
791
+ continue
792
+
793
+ # Step 1: Replace quantized weights with packed weights, which will be folded later
794
+ # Use the right prepack op and prepare the corresponding args
795
+ # Linear prepack args: (quantized weights[, bias])
796
+ # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
797
+ prepack_args = [quantized_weight] + remaining_func_args
798
+ if func_node.target == F.linear:
799
+ weight_dtype = quantized_weight.args[-1]
800
+ prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
801
+ elif func_node.target in CONV_FUNCTIONAL_OPS:
802
+ prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type]
803
+ # For conv1d, the stride, padding, and dilation args may be ints,
804
+ # in which case we need to convert them to tuples
805
+ if func_node.target == F.conv1d:
806
+ for i in [2, 3, 4]:
807
+ if len(prepack_args) > i and isinstance(prepack_args[i], int):
808
+ prepack_args[i] = (prepack_args[i],)
809
+ elif func_node.target in CONV_TRANSPOSE_FUNCTIONAL_OPS:
810
+ prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type]
811
+ # For conv_transpose1d, the stride, padding, and dilation args may be ints,
812
+ # in which case we need to convert them to tuples
813
+ if func_node.target == F.conv_transpose1d:
814
+ # Note prepack_args[5] is groups.
815
+ for i in [2, 3, 4, 6]:
816
+ if len(prepack_args) > i and isinstance(prepack_args[i], int):
817
+ prepack_args[i] = (prepack_args[i],)
818
+ # swap dilation and groups
819
+ # prepack op has arguments: {w, b, stride, padding, output_padding, dilation, groups}
820
+ # transposed conv op has arguments: {x, w, b, stride, padding, output_padding, groups, dilation}
821
+ if (len(prepack_args) > 6):
822
+ prepack_args[5], prepack_args[6] = prepack_args[6], prepack_args[5]
823
+ else:
824
+ raise ValueError(f"Lowering is not supported for op '{func_node.target}'")
825
+ with model.graph.inserting_before(output_scale_node):
826
+ # kwargs of the func node are needed for prepack op (i.e., quantized::linear_prepack)
827
+ # They are not needed for compute op (i.e., quantized::linear)
828
+ kwargs = func_node.kwargs
829
+ # F.linear uses 'bias' key for bias while qlinear_prepack uses 'B' for bias
830
+ if func_node.target == F.linear and 'bias' in kwargs:
831
+ kwargs = kwargs.copy()
832
+ kwargs['B'] = kwargs['bias']
833
+ del kwargs['bias']
834
+ packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), kwargs)
835
+
836
+ # Step 2: Replace reference pattern with the corresponding quantized op
837
+ (q_func, q_relu_func) = STATIC_LOWER_FUNCTIONAL_MAP[func_node.target] # type: ignore[index]
838
+ # conv_transpose does not support fusion with relu yet. q_relu_func is None in such cases
839
+ if q_relu_func is not None:
840
+ func_node.target = q_relu_func if relu_node is not None else q_func
841
+ else:
842
+ func_node.target = q_func
843
+ func_node.args = (input_dq_node.args[0], packed_weight, output_scale_node, output_zp_node)
844
+ # kwargs for func_node has been moved to kwargs for prepack op
845
+ func_node.kwargs = {}
846
+ q_node.replace_all_uses_with(func_node)
847
+ # Move func_node after output_zp_node in the graph
848
+ output_zp_node.append(func_node)
849
+
850
+ # Clean up: Remove quantize node, and the relu node if it exists
851
+ model.graph.erase_node(q_node)
852
+ if relu_node is not None and q_relu_func is not None:
853
+ model.graph.erase_node(relu_node)
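+ # Net effect for e.g. F.linear: a reference fragment such as
+ #     dequantize(x_q), dequantize(w_q) -> F.linear [-> relu] -> quantize_per_tensor(scale, zp)
+ # becomes
+ #     quantized.linear_prepack(w_q, bias) -> quantized.linear[_relu](x_q, packed_w, scale, zp)
+ # with the prepack call folded into a packed-weight attribute later by fold_weight.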
854
+
855
+ def _lower_dynamic_weighted_ref_functional(
856
+ model: GraphModule,
857
+ qconfig_map: Dict[str, QConfigAny]):
858
+ """
859
+ Traverse the graph and replace functional reference patterns with their dynamically
860
+ quantized versions.
861
+ Examples:
862
+ quantize_per_tensor_dynamic - dequantize - functional linear --> linear_dynamic
863
+ to(torch.float16) - dequantize - functional linear --> linear_dynamic_fp16
864
+ """
865
+ modules = dict(model.named_modules(remove_duplicate=False))
866
+ nodes = list(model.graph.nodes)
867
+ # we want to search in reversed order so that we can match the larger patterns first
868
+ # e.g. we want to match linear - relu before linear.
869
+ for n in reversed(model.graph.nodes):
870
+
871
+ # Step 0: Find nodes that match this pattern
872
+ # (quantize_per_tensor_dynamic - dequantize - dynamically quantized op)
873
+ # We search for the pattern backwards, starting with the functional op
874
+ # (or the relu node wrapping it), rather than with a quantize node as in the static case
875
+ func_node = n
876
+ # Handle cases where the functional op is wrapped in a ReLU
877
+ if func_node.op == "call_function" and func_node.target == F.relu or \
878
+ func_node.op == "call_module" and \
879
+ type(modules[str(func_node.target)]) == torch.nn.ReLU:
880
+ relu_node = func_node
881
+ func_node = relu_node.args[0]
882
+ else:
883
+ relu_node = None
884
+ if should_skip_lowering(func_node, qconfig_map):
885
+ continue
886
+ # Linear args: (dequantized inputs, dequantized weights[, bias])
887
+ # Conv args: (dequantized inputs, dequantized weights[, bias, stride, padding, dilation, groups])
888
+ if func_node.op != "call_function" or func_node.target not in DYNAMIC_LOWER_FUNCTIONAL_MAP:
889
+ continue
890
+ (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
891
+ if input_dq_node.op != "call_method" or input_dq_node.target != "dequantize" or \
892
+ weight_dq_node.op != "call_method" or weight_dq_node.target != "dequantize":
893
+ continue
894
+
895
+ input_dynamic_q_node = input_dq_node.args[0]
896
+
897
+ if input_dynamic_q_node.op != "call_function" or \
898
+ input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
899
+ continue
900
+
901
+ reduce_range_node = None
902
+ (pattern_input, activation_dtype, reduce_range_node) = input_dynamic_q_node.args
903
+ is_fp16 = activation_dtype == torch.float16
904
+ is_int8 = activation_dtype in [torch.quint8, torch.qint8]
905
+ if not is_int8 and not is_fp16:
906
+ continue
907
+
908
+ quantized_weight = weight_dq_node.args[0]
909
+ weight_dtype = quantized_weight.args[-1]
910
+
911
+ # Step 1: Try to select reference pattern with the corresponding quantized op
912
+ dynamic_quant_dtype_key = (activation_dtype, weight_dtype)
913
+ if dynamic_quant_dtype_key not in DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target]:
914
+ print(f"Didn't find dtype combination {dynamic_quant_dtype_key} during "
915
+ f"dynamic quantized op lowering for {func_node.target}")
916
+ continue
917
+ (q_func, q_relu_func) = DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target][dynamic_quant_dtype_key]
918
+
919
+ if q_func is None or q_relu_func is None:
920
+ print("Didn't find corresponding quantized function or quantized relu function "
921
+ f"for {func_node.target}, {dynamic_quant_dtype_key}")
922
+ continue
923
+
924
+ # Step 2: Replace quantized weights with packed weights, which will be folded later
925
+ # Use the right prepack op and prepare the corresponding args
926
+ # Linear prepack args: (quantized weights[, bias])
927
+ # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
928
+ prepack_args = [quantized_weight] + remaining_func_args
929
+ if func_node.target == F.linear:
930
+ prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
931
+ elif func_node.target in CONV_FUNCTIONAL_OPS:
932
+ prepack_op = get_qconv_prepack_op(func_node.target)
933
+ # For conv1d, the stride, padding, and dilation args may be ints,
934
+ # in which case we need to convert them to tuples
935
+ if func_node.target == F.conv1d:
936
+ for i in [2, 3, 4]:
937
+ if len(prepack_args) > i and isinstance(prepack_args[i], int):
938
+ prepack_args[i] = (prepack_args[i],)
939
+ else:
940
+ raise ValueError(f"Lowering is not supported for op '{func_node.target}'")
941
+ with model.graph.inserting_before(func_node):
942
+ packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), {})
943
+
944
+ # Step 3: Replace reference pattern with the corresponding quantized op
945
+ func_node.target = q_relu_func if relu_node is not None else q_func
946
+ if is_int8:
947
+ func_node.args = (pattern_input, packed_weight, reduce_range_node)
948
+ else:
949
+ func_node.args = (pattern_input, packed_weight)
950
+
951
+ if relu_node is not None:
952
+ relu_node.replace_all_uses_with(func_node)
953
+
954
+ # Step 4: Remove the relu node if it exists
955
+ if relu_node is not None:
956
+ model.graph.erase_node(relu_node)
957
+
958
+ def _lower_quantized_binary_op(
959
+ model: GraphModule,
960
+ qconfig_map: Dict[str, QConfigAny]):
961
+ binary_ops_to_lower: List[Callable] = [operator.add, torch.add, operator.mul, torch.mul, torch.matmul]
962
+ modules = dict(model.named_modules(remove_duplicate=False))
963
+ for n in model.graph.nodes:
964
+ # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize)
965
+ (q_node, relu_node, bop_node) = _match_static_pattern(
966
+ n, modules, qconfig_map, binary_ops_to_lower, dequantize_node_arg_indices=[0, 1])
967
+ if q_node is None:
968
+ continue
969
+ assert bop_node is not None
970
+ (_, scale_node, zero_point_node, _) = q_node.args
971
+
972
+ # Step 1: Remove dequant nodes
973
+ num_dq_nodes = 0
974
+ for arg in bop_node.args:
975
+ if not is_dequantize_node(arg):
976
+ continue
977
+ dq_node = arg
978
+ assert isinstance(dq_node, Node)
979
+ dn_input = dq_node.args[0]
980
+ bop_node.replace_input_with(dq_node, dn_input)
981
+ num_dq_nodes += 1
982
+ assert num_dq_nodes > 0
983
+
984
+ # Step 2: Swap binary op to quantized binary op
985
+ assert bop_node.target in QBIN_OP_MAPPING
986
+ binop_to_qbinop = QBIN_OP_MAPPING if relu_node is None else QBIN_RELU_OP_MAPPING
987
+ qbin_op = binop_to_qbinop[bop_node.target]
988
+ # prepare the args for quantized binary op
989
+ # (x, y)
990
+ qop_node_args = list(bop_node.args)
991
+ # (x, y, scale, zero_point)
992
+ # add scale and zero_point arguments for Tensor - Tensor operation
993
+ if num_dq_nodes == 2:
994
+ qop_node_args.extend([scale_node, zero_point_node])
995
+ # insert a call to quantized binary op and remove the original binary op
996
+ with model.graph.inserting_after(q_node):
997
+ qop_node = create_node_from_old_node_preserve_meta(
998
+ model.graph,
999
+ ("call_function", qbin_op, tuple(qop_node_args), {}),
1000
+ bop_node)
1001
+ q_node.replace_all_uses_with(qop_node)
1002
+
1003
+ # Step 3: Remove quantize node, binary op node, and relu node if any
1004
+ model.graph.erase_node(q_node)
1005
+ if relu_node is not None:
1006
+ model.graph.erase_node(relu_node)
1007
+ model.graph.erase_node(bop_node)
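+ # Example: dequantize(a_q), dequantize(b_q) -> torch.add [-> relu] -> quantize_per_tensor(scale, zp)
+ # is rewritten to torch.ops.quantized.add[_relu](a_q, b_q, scale, zp); in the
+ # Tensor-scalar case (only one dequantize input), scale/zp are not appended.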
1008
+
1009
+ def special_pattern_replacement(model: GraphModule):
1010
+ modules = dict(model.named_modules(remove_duplicate=False))
1011
+ for n in model.graph.nodes:
1012
+ q_node = n
1013
+ is_quantize = q_node.target == torch.quantize_per_tensor
1014
+ is_to_fp16 = q_node.op == "call_method" and q_node.target == "to" and \
1015
+ len(q_node.args) == 2 and q_node.args[1] == torch.float16
1016
+ if not (is_quantize or is_to_fp16):
1017
+ continue
1018
+ ref_node = q_node.args[0]
1019
+ # get output scale/zero_point/dtype from the quantize node
1020
+ # ref_node, scale_node, zero_point_node, dtype = q_node.args
1021
+ # TODO: add safety checks that the ref_node and dq_node each have only one user
1022
+ is_call_function, is_call_method, is_call_module = is_fixed_qparams_node(ref_node, modules)
1023
+ if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
1024
+ # TODO: add a warning or error out here? (bc-breaking if error out)
1025
+ # warnings.warn(
1026
+ # "Only reference patterns are currently supported for {dtype} dtype with {op} op"
1027
+ # "".format(dtype=dtypes, op=ref_node))
1028
+ continue
1029
+
1030
+ is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
1031
+ if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
1032
+ # TODO: add a warning or error out here? (bc-breaking if error out)
1033
+ continue
1034
+
1035
+ # This check includes all supported ops
1036
+ is_call_function, is_call_method, is_call_module = is_special_pattern_node(ref_node, modules)
1037
+ if not (is_call_module or is_call_function or is_call_method):
1038
+ continue
1039
+ assert len(ref_node.args) > 0 or len(ref_node.kwargs) > 0
1040
+ dq_node_or_nodes = ref_node.args[0] if len(ref_node.args) > 0 else next(iter(ref_node.kwargs.values()))
1041
+ assert isinstance(dq_node_or_nodes, (Node, tuple, list))
1042
+ is_dequantize = False
1043
+ if isinstance(dq_node_or_nodes, Node):
1044
+ is_dequantize = dq_node_or_nodes.op == 'call_method' and \
1045
+ dq_node_or_nodes.target == 'dequantize'
1046
+ elif isinstance(dq_node_or_nodes, (tuple, list)):
1047
+ is_dequantize = all(
1048
+ x.op == 'call_method' and x.target == 'dequantize'
1049
+ for x in dq_node_or_nodes)
1050
+
1051
+ if not is_dequantize:
1052
+ continue
1053
+
1054
+ # TODO: enable when we have patterns that need to swap the modules
1055
+ if is_call_module:
1056
+ ref_module = modules[ref_node.target]
1057
+ if type(ref_module) in SPECIAL_PATTERN_LOWER_MODULE_MAP and is_quantize:
1058
+ qmodule_cls = SPECIAL_PATTERN_LOWER_MODULE_MAP.get(type(ref_module))
1059
+ scale_node = q_node.args[1]
1060
+ zero_point_node = q_node.args[2]
1061
+ output_scale = getattr(model, scale_node.target)
1062
+ output_zero_point = getattr(model, zero_point_node.target)
1063
+
1064
+ qmodule = qmodule_cls.from_reference(ref_module, output_scale, output_zero_point) # type:ignore[union-attr]
1065
+ # replace reference module with quantized module
1066
+ parent_name, module_name = _parent_name(ref_node.target)
1067
+ setattr(modules[parent_name], module_name, qmodule)
1068
+
1069
+ # reroute around dq node:
1070
+ dq_nodes: List[Node] = []
1071
+ if isinstance(dq_node_or_nodes, Node):
1072
+ dq_nodes = [dq_node_or_nodes]
1073
+ elif isinstance(dq_node_or_nodes, (tuple, list)):
1074
+ dq_nodes = list(dq_node_or_nodes)
1075
+
1076
+ for dq_node in dq_nodes:
1077
+ dn_input = dq_node.args[0]
1078
+ ref_node.replace_input_with(dq_node, dn_input)
1079
+
1080
+ # store q node args
1081
+ qnode_qparams = list(q_node.args)[1:]
1082
+ # replace uses of q node with input and remove q node
1083
+ q_node_input = q_node.args[0]
1084
+ q_node.replace_all_uses_with(q_node_input)
1085
+ model.graph.erase_node(q_node)
1086
+
1087
+ is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
1088
+ if is_call_function:
1089
+ # pass scale/zero_point arguments from quantize_per_tensor to the default node operator
1090
+ # insert an op after the zero_point node so that the scale/zero_point
1091
+ # nodes are available
1092
+ qop = get_quantized_operator(ref_node.target)
1093
+ args = list(ref_node.args)
1094
+ kwargs = dict(ref_node.kwargs)
1095
+ if qop in QOP_TO_ARG_NAMES_TO_SKIP:
1096
+ args_to_skip = QOP_TO_ARG_NAMES_TO_SKIP[qop]
1097
+ for arg in args_to_skip:
1098
+ if arg in kwargs:
1099
+ kwargs.pop(arg)
1100
+ kwargs["output_scale"] = qnode_qparams[0]
1101
+ kwargs["output_zero_point"] = qnode_qparams[1]
1102
+ with model.graph.inserting_after(qnode_qparams[1]):
1103
+ qop_node = create_node_from_old_node_preserve_meta(
1104
+ model.graph,
1105
+ ("call_function", qop, tuple(args), kwargs),
1106
+ ref_node)
1107
+ ref_node.replace_all_uses_with(qop_node)
1108
+ model.graph.erase_node(ref_node)
1109
+ else:
1110
+ # remove scale/zero_point node for quantize node
1111
+ for n in qnode_qparams:
1112
+ if isinstance(n, Node):
1113
+ model.graph.erase_node(n)
1114
+
1115
+ return model
1116
+
1117
+ def _lower_getattr_tensor_metadta_op(model: GraphModule):
1118
+ """ Modified the graph of the model inplace, to skip extra dequantize op before
1119
+ the general tensor shape ops when possible
1120
+ """
1121
+ for n in model.graph.nodes:
1122
+ if is_getattr_tensor_metadata_node(n):
1123
+ maybe_dq = n.args[0]
1124
+ if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize":
1125
+ continue
1126
+ # skip the dequantize node
1127
+ args = list(n.args)
1128
+ args[0] = n.args[0].args[0]
1129
+ n.args = tuple(args)
1130
+
1131
+ def _lower_get_tensor_info_op(model: GraphModule):
1132
+ """ Modified the graph of the model inplace, to skip extra dequantize op before
1133
+ the general tensor shape ops when possible
1134
+ """
1135
+ for n in model.graph.nodes:
1136
+ if not is_get_tensor_info_node(n):
1137
+ continue
1138
+ maybe_dq = n.args[0]
1139
+ if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize":
1140
+ continue
1141
+ # skip the dequantize node
1142
+ args = list(n.args)
1143
+ args[0] = n.args[0].args[0]
1144
+ n.args = tuple(args)
1145
+
1146
+ def _lower_to_native_backend(
1147
+ model: GraphModule,
1148
+ qconfig_map: Dict[str, QConfigAny],
1149
+ node_name_to_scope: Dict[str, Tuple[str, type]]
1150
+ ) -> GraphModule:
1151
+ """ Lower a quantized reference model (with reference quantized operator patterns)
1152
+ to the native backend in PyTorch (fbgemm/qnnpack). Both backends share the same
1153
+ operator signature so they can be lowered with the same function
1154
+ """
1155
+ _lower_static_weighted_ref_module(model, qconfig_map)
1156
+ _lower_static_weighted_ref_module_with_two_inputs(model, qconfig_map)
1157
+ _lower_dynamic_weighted_ref_module(model)
1158
+ _lower_weight_only_weighted_ref_module(model)
1159
+ _lower_static_weighted_ref_functional(model, qconfig_map)
1160
+ _lower_dynamic_weighted_ref_functional(model, qconfig_map)
1161
+ _lower_quantized_binary_op(model, qconfig_map)
1162
+ _lower_getattr_tensor_metadta_op(model)
1163
+ _lower_get_tensor_info_op(model)
1164
+ special_pattern_replacement(model)
1165
+ model.graph.eliminate_dead_code()
1166
+ model = fold_weight(model, node_name_to_scope)
1167
+ model.graph.eliminate_dead_code()
1168
+ model.recompile()
1169
+ model.graph.lint()
1170
+ return model
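+
+ # Hedged usage sketch (the toy model and example input below are illustrative
+ # assumptions, not taken from this file): the lowering passes above are normally
+ # reached through the public FX quantization API rather than called directly.
+ #
+ #     import torch
+ #     from torch.ao.quantization import get_default_qconfig_mapping
+ #     from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
+ #
+ #     float_model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU()).eval()
+ #     example_inputs = (torch.randn(1, 16),)
+ #     prepared = prepare_fx(float_model, get_default_qconfig_mapping("fbgemm"), example_inputs)
+ #     prepared(*example_inputs)        # calibration
+ #     quantized = convert_fx(prepared) # eventually runs _lower_to_native_backend()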
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py ADDED
@@ -0,0 +1,1131 @@
1
+ # mypy: ignore-errors
2
+
3
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type, Callable
4
+ from torch.ao.quantization.quant_type import QuantType
5
+ import torch
6
+ import copy
7
+ import warnings
8
+ from torch.fx import (
9
+ GraphModule,
10
+ )
11
+ from torch.fx.graph import (
12
+ Graph,
13
+ Node,
14
+ Argument,
15
+ )
16
+ from ..utils import (
17
+ activation_is_statically_quantized,
18
+ weight_is_quantized,
19
+ get_qparam_dict,
20
+ _parent_name,
21
+ get_swapped_custom_module_class,
22
+ )
23
+ from ..qconfig import (
24
+ QConfigAny,
25
+ qconfig_equals
26
+ )
27
+ from ..qconfig_mapping import QConfigMapping
28
+ from .qconfig_mapping_utils import (
29
+ _generate_node_name_to_qconfig,
30
+ _compare_prepare_convert_qconfig_mappings,
31
+ _update_qconfig_for_fusion,
32
+ _is_qconfig_supported_by_dtype_configs,
33
+ _update_qconfig_for_qat,
34
+ )
35
+ from torch.ao.quantization.backend_config.utils import (
36
+ get_root_module_to_quantized_reference_module,
37
+ get_pattern_to_dtype_configs,
38
+ get_fused_module_classes,
39
+ get_qat_module_classes,
40
+ )
41
+ from torch.ao.quantization.backend_config import (
42
+ BackendConfig,
43
+ get_native_backend_config,
44
+ )
45
+ from torch.ao.quantization.observer import _is_activation_post_process
46
+ from .graph_module import (
47
+ _is_observed_module,
48
+ _is_observed_standalone_module,
49
+ )
50
+ from ._equalize import update_obs_for_equalization, convert_eq_obs
51
+ from torch.nn.utils.parametrize import type_before_parametrizations
52
+ from .utils import (
53
+ _get_module,
54
+ _is_custom_module_lstm,
55
+ _is_custom_module_mha,
56
+ assert_and_get_unique_device,
57
+ get_custom_module_class_keys,
58
+ create_getattr_from_value,
59
+ collect_producer_nodes,
60
+ graph_module_from_producer_nodes,
61
+ node_arg_is_weight,
62
+ )
63
+ from torch.ao.quantization.utils import (
64
+ is_per_channel,
65
+ to_underlying_dtype,
66
+ )
67
+ from torch.ao.quantization.quantize import (
68
+ _remove_qconfig,
69
+ )
70
+ from torch.ao.quantization.stubs import DeQuantStub
71
+ from .custom_config import (
72
+ ConvertCustomConfig,
73
+ PrepareCustomConfig,
74
+ )
75
+ from .lower_to_fbgemm import lower_to_fbgemm
76
+ # importing the lib so that the quantized_decomposed ops are registered
77
+ from ._decomposed import quantized_decomposed_lib # noqa: F401
78
+ import operator
79
+
80
+ __all__ = [
81
+ "convert",
82
+ "convert_custom_module",
83
+ "convert_standalone_module",
84
+ "convert_weighted_module",
85
+ ]
86
+
87
+ _QSCHEME_TO_CHOOSE_QPARAMS_OP = {
88
+ torch.per_tensor_affine: torch.ops.quantized_decomposed.choose_qparams.tensor,
89
+ torch.per_tensor_symmetric: torch.ops.quantized_decomposed.choose_qparams_symmetric.tensor,
90
+ }
91
+
92
+ def _replace_observer_with_quantize_dequantize_node_decomposed(
93
+ model: torch.fx.GraphModule,
94
+ node: Node,
95
+ modules: Dict[str, torch.nn.Module],
96
+ node_name_to_scope: Dict[str, Tuple[str, type]],
97
+ node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
98
+ """ Replace activation_post_process module call node with quantize and
99
+ dequantize node working with decomposed Tensor
100
+
101
+ Before:
102
+ ... -> observer_0(x) -> ...
103
+ After:
104
+ ... -> torch.ops.quantized_decomposed.quantize_per_tensor(x, ...) ->
105
+ torch.ops.quantized_decomposed.dequantize_per_tensor() -> ...
106
+
107
+ or quantize_per_channel and dequantize_per_channel
108
+ """
109
+ graph = model.graph
110
+ assert modules is not None
111
+ assert isinstance(node.target, str)
112
+ module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig)
113
+ activation_post_process = modules[node.target]
114
+ if hasattr(activation_post_process, "convert"):
115
+ activation_post_process.convert(model, node)
116
+ return
117
+ # skip replacing observers to quant/dequant nodes if the qconfigs of all
118
+ # consumers and producers of this observer are None
119
+ skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in
120
+ list(node.args) + list(node.users.keys()))
121
+ if skip_replacement or not _is_conversion_supported(activation_post_process):
122
+ # didn't find corresponding quantize op and info for the activation_post_process
123
+ # so we just remove the observer
124
+ with graph.inserting_before(node):
125
+ node.replace_all_uses_with(node.args[0])
126
+ graph.erase_node(node)
127
+ return
128
+
129
+ # otherwise, we can convert the activation_post_process module call to quantize/dequantize node
130
+
131
+ # 1. extract the information from activation_post_process module for generating
132
+ # the quantize and dequantize operator
133
+ dtype = activation_post_process.dtype # type: ignore[attr-defined]
134
+
135
+ is_dynamic = False
136
+ if hasattr(activation_post_process, "is_dynamic"):
137
+ is_dynamic = activation_post_process.is_dynamic # type: ignore[assignment]
138
+
139
+ if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.uint8, torch.int8, torch.int16, torch.int32] and \
140
+ (not is_dynamic):
141
+ # TODO: probably should cleanup this condition check, it's hard
142
+ # to reason about this if and the following elif
143
+
144
+ # uint8/int8/int32 static quantization branch
145
+
146
+ # 1. extract information for inserting q/dq node from activation_post_process
147
+ node_type = "call_function"
148
+ quantize_op : Optional[Callable] = None
149
+ scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined, operator]
150
+ if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined]
151
+ ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined, arg-type]
152
+ quantize_op = torch.ops.quantized_decomposed.quantize_per_channel.default
153
+ dequantize_op = torch.ops.quantized_decomposed.dequantize_per_channel.default
154
+ quant_min = activation_post_process.quant_min
155
+ quant_max = activation_post_process.quant_max
156
+ dtype_ = to_underlying_dtype(dtype)
157
+ qparams = {
158
+ "_scale_": scale,
159
+ "_zero_point_": zero_point,
160
+ "_axis_": ch_axis,
161
+ "_quant_min_": quant_min,
162
+ "_quant_max_": quant_max,
163
+ "_dtype_": dtype_
164
+ }
165
+ else:
166
+ quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.default
167
+ dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.default
168
+ scale = float(scale)
169
+ zero_point = int(zero_point)
170
+ quant_min = activation_post_process.quant_min # type: ignore[attr-defined]
171
+ quant_max = activation_post_process.quant_max # type: ignore[attr-defined]
172
+ dtype_ = to_underlying_dtype(dtype)
173
+ qparams = {
174
+ "_scale_": scale,
175
+ "_zero_point_": zero_point,
176
+ "_quant_min_": quant_min,
177
+ "_quant_max_": quant_max,
178
+ "_dtype_": dtype_
179
+ }
180
+
181
+ # 2. replace activation_post_process node with quantize and dequantize
182
+ with graph.inserting_before(node):
183
+ input_node = node.args[0]
184
+ quantize_op_inputs = [input_node]
185
+ for key, value_or_node in qparams.items():
186
+ # TODO: we can add the information of whether a value needs to
187
+ # be registered as an attribute in qparams dict itself
188
+ if key in ['_scale_', '_zero_point_'] and (not isinstance(value_or_node, (float, int))):
189
+ # For scale and zero_point values we register them as buffers in the root module.
190
+ # However, note that when the values are not tensors, as in the case of
191
+ # per_tensor quantization, they will be treated as literals.
192
+ # Registering them as nodes, however, seems to cause issues with dynamo
193
+ # tracing, where it may pick the tensor overload instead of the default one.
194
+ # With the extra check that scale and zero_point are scalars, it makes
195
+ # sure that the default overload can be used.
196
+ # TODO: maybe need more complex attr name here
197
+ qparam_node = create_getattr_from_value(
198
+ model, graph, module_path + prefix + key, value_or_node)
199
+ quantize_op_inputs.append(qparam_node)
200
+ else:
201
+ # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
202
+ quantize_op_inputs.append(value_or_node)
203
+
204
+ quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
205
+ # use the same qparams from quantize op
206
+ dq_inputs = [quantized_node] + quantize_op_inputs[1:]
207
+ dequantized_node = graph.call_function(
208
+ dequantize_op,
209
+ tuple(dq_inputs),
210
+ {}
211
+ )
212
+
213
+ def remap_fn(x):
214
+ return dequantized_node if x is node else x
215
+
216
+ # remap numeric_debug_handle
217
+ for user_node in node.users:
218
+ if "numeric_debug_handle" in user_node.meta:
219
+ numeric_debug_handle = user_node.meta["numeric_debug_handle"]
220
+ user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()}
221
+ node.replace_all_uses_with(dequantized_node)
222
+ graph.erase_node(node)
223
+ elif is_dynamic:
224
+
225
+ # uint8/int8/fp16 dynamic quantization
226
+
227
+ # 1. extract information for inserting q/dq node from activation_post_process
228
+ node_type = "call_function"
229
+ quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.tensor
230
+ # we only use choose_qparams for is_decomposed now,
231
+ # but we should probably align the non-decomposed path with this as well,
232
+ # and that can be done after we remove reduce_range flag
233
+ # 1. extract qparams from activation_post_process module
234
+ dtype_ = to_underlying_dtype(dtype)
235
+ assert dtype_ in [torch.uint8, torch.int8], \
236
+ "only uint8 and int8 are supported in reference flow for " \
237
+ "dynamic quantization right now"
238
+ quant_min = activation_post_process.quant_min # type: ignore[attr-defined]
239
+ quant_max = activation_post_process.quant_max # type: ignore[attr-defined]
240
+ qscheme = getattr(activation_post_process, "qscheme", torch.per_tensor_affine) # type: ignore[attr-defined]
241
+ eps = getattr(activation_post_process, "eps", torch.finfo(torch.float32).eps) # type: ignore[attr-defined]
242
+ # note: scale and zero_point are missing for quantize_per_tensor op
243
+ # we'll need to get this from choose_qparams op, which we'll add after
244
+ # this step
245
+ qparams = {
246
+ "_quant_min_": quant_min,
247
+ "_quant_max_": quant_max,
248
+ "_eps_": eps,
249
+ "_dtype_": dtype_
250
+ }
251
+
252
+ choose_qparams_op = _QSCHEME_TO_CHOOSE_QPARAMS_OP[qscheme]
253
+ # 2. insert choose_qparams op and update the qparams list
254
+ with graph.inserting_before(node):
255
+ input_node = node.args[0]
256
+ choose_qparams_op_inputs = [node.args[0]]
257
+ for key, value in qparams.items():
258
+ # we have quant_min, quant_max and dtype, all should be stored
259
+ # as literals
260
+ choose_qparams_op_inputs.append(value)
261
+ choose_qparams_node = graph.create_node(
262
+ "call_function",
263
+ choose_qparams_op,
264
+ tuple(choose_qparams_op_inputs),
265
+ {}
266
+ )
267
+ # choose_qparams returns (scale, zero_point)
268
+ scale_node = graph.create_node(
269
+ "call_function",
270
+ operator.getitem,
271
+ (choose_qparams_node, 0),
272
+ {}
273
+ )
274
+ zero_point_node = graph.create_node(
275
+ "call_function",
276
+ operator.getitem,
277
+ (choose_qparams_node, 1),
278
+ {}
279
+ )
280
+ quant_min = qparams["_quant_min_"]
281
+ quant_max = qparams["_quant_max_"]
282
+ dtype = qparams["_dtype_"]
283
+ qparams = {
284
+ "_scale_": scale_node,
285
+ "_zero_point_": zero_point_node,
286
+ "_quant_min_": quant_min,
287
+ "_quant_max_": quant_max,
288
+ "_dtype_": dtype
289
+ }
290
+
291
+ # 3. replace activation_post_process node to quantize and dequantize node
292
+ with graph.inserting_before(node):
293
+ input_node = node.args[0]
294
+ quantize_op_inputs = [input_node]
295
+ for key, value_or_node in qparams.items():
296
+ # TODO: we can add the information of whether a value needs to
297
+ # be registered as an attribute in qparams dict itself
298
+ if key in ['_scale_', '_zero_point_']:
299
+ # in this case we have a node in the graph since it's dynamically
300
+ # computed from the input, with choose_qparams op
301
+ qparam_node = value_or_node
302
+ quantize_op_inputs.append(qparam_node)
303
+ else:
304
+ # for qparams that are not scale/zero_point (like axis, dtype) we
305
+ # store them as literals in the graph.
306
+ quantize_op_inputs.append(value_or_node)
307
+
308
+ quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
309
+ # use the same qparams from quantize op
310
+ dq_inputs = [quantized_node] + quantize_op_inputs[1:]
311
+ # need to use the tensor variant of this op, since scale and zero_point
312
+ # from choose_qparam are Tensors, instead of float/int, this is to
313
+ # prevent these nodes being traced away by downstream systems
314
+ dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.tensor
315
+ dequantized_node = graph.call_function(
316
+ dequantize_op,
317
+ tuple(dq_inputs),
318
+ {}
319
+ )
320
+
321
+ def remap_fn(x):
322
+ return dequantized_node if x is node else x
323
+
324
+ # remap numeric_debug_handle
325
+ for user_node in node.users:
326
+ if "numeric_debug_handle" in user_node.meta:
327
+ numeric_debug_handle = user_node.meta["numeric_debug_handle"]
328
+ user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()}
329
+ node.replace_all_uses_with(dequantized_node)
330
+ graph.erase_node(node)
331
+ elif dtype == torch.float16:
332
+ raise NotImplementedError("decomposed to float16 op not implemented yet")
333
+
334
+ # should not reach since we have checks in the beginning to make sure the
335
+ # activation_post_process is supported
336
+
337
+ def _replace_observer_with_quantize_dequantize_node(
338
+ model: torch.fx.GraphModule,
339
+ node: Node,
340
+ modules: Dict[str, torch.nn.Module],
341
+ node_name_to_scope: Dict[str, Tuple[str, type]],
342
+ node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
343
+ """ Replace activation_post_process module call node with quantize and
344
+ dequantize node
345
+
346
+ Before:
347
+ ... -> observer_0(x) -> ...
348
+ After:
349
+ ... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ...
350
+ """
351
+ assert modules is not None
352
+ assert isinstance(node.target, str)
353
+ graph = model.graph
354
+ module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig)
355
+ activation_post_process = modules[node.target]
356
+ # skip replacing observers to quant/dequant nodes if the qconfigs of all
357
+ # consumers and producers of this observer are None
358
+ skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in
359
+ list(node.args) + list(node.users.keys()))
360
+ if skip_replacement or not _is_conversion_supported(activation_post_process):
361
+ # didn't find corresponding quantize op and info for the activation_post_process
362
+ # so we just remove the observer
363
+ with graph.inserting_before(node):
364
+ node.replace_all_uses_with(node.args[0])
365
+ graph.erase_node(node)
366
+ return
367
+
368
+ # otherwise, we can convert the activation_post_process module call to quantize/dequantize node
369
+ dtype = activation_post_process.dtype # type: ignore[attr-defined]
370
+
371
+ is_dynamic = False
372
+ if hasattr(activation_post_process, "is_dynamic"):
373
+ is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment]
374
+
375
+ if dtype in [torch.quint8, torch.qint8, torch.qint32] and \
376
+ (not is_dynamic):
377
+ # TODO: probably should cleanup this condition check, it's hard
378
+ # to reason about this if and the following elif
379
+
380
+ # uint8/int8/int32 static quantization branch
381
+
382
+ # 1. extract the information from activation_post_process module for generating
383
+ # the quantize and dequantize operator
384
+ node_type = "call_function"
385
+ quantize_op : Optional[Callable] = None
386
+ scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined, operator]
387
+ if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined]
388
+ ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined, arg-type]
389
+ qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}
390
+ quantize_op = torch.quantize_per_channel
391
+ else:
392
+ scale = float(scale)
393
+ zero_point = int(zero_point)
394
+ qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
395
+ quantize_op = torch.quantize_per_tensor
396
+
397
+ # 2. replace activation_post_process node with quantize and dequantize
398
+ with graph.inserting_before(node):
399
+ input_node = node.args[0]
400
+ quantize_op_inputs = [input_node]
401
+ for key, value_or_node in qparams.items():
402
+ # TODO: we can add the information of whether a value needs to
403
+ # be registered as an attribute in qparams dict itself
404
+ if key in ['_scale_', '_zero_point_']:
405
+ # For scale and zero_point values we register them as buffers in the root module.
406
+ # TODO: maybe need more complex attr name here
407
+ qparam_node = create_getattr_from_value(
408
+ model, graph, module_path + prefix + key, value_or_node)
409
+ quantize_op_inputs.append(qparam_node)
410
+ else:
411
+ # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
412
+ quantize_op_inputs.append(value_or_node)
413
+
414
+ quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
415
+ dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
416
+ node.replace_all_uses_with(dequantized_node)
417
+ graph.erase_node(node)
418
+ elif is_dynamic:
419
+
420
+ # uint8/int8/fp16 dynamic quantization branch
421
+
422
+ node_type = "call_function"
423
+ quantize_op = torch.quantize_per_tensor_dynamic
424
+ # TODO: get reduce range from observer
425
+ # reduce_range = activation_post_process.reduce_range
426
+ reduce_range = torch.backends.quantized.engine in ("fbgemm", "x86")
427
+ qparams = {"_dtype_": dtype, "_reduce_range_": reduce_range}
428
+
429
+ with graph.inserting_before(node):
430
+ input_node = node.args[0]
431
+ quantize_op_inputs = [input_node]
432
+ for key, value in qparams.items():
433
+ quantize_op_inputs.append(value)
434
+
435
+ quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
436
+ dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
437
+ node.replace_all_uses_with(dequantized_node)
438
+ graph.erase_node(node)
439
+ elif dtype == torch.float16:
440
+ node_type = "call_method"
441
+ quantize_op = "to" # type: ignore[assignment]
442
+ qparams = {"_dtype_": dtype}
443
+ with graph.inserting_before(node):
444
+ input_node = node.args[0]
445
+ quantize_op_inputs = [input_node]
446
+ for key, value in qparams.items():
447
+ # TODO: we can add the information of whether a value needs to
448
+ # be registered as an attribute in qparams dict itself
449
+ quantize_op_inputs.append(value)
450
+
451
+ quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
452
+ dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
453
+ node.replace_all_uses_with(dequantized_node)
454
+ graph.erase_node(node)
455
+
456
+ # should not reach since we have checks in the beginning to make sure the
457
+ # activation_post_process is supported
458
+
459
+ # this is a temporary hack for custom module, we may want to implement
460
+ # this properly after the custom module class design is finalized
461
+ # TODO: DeQuantStubs are currently inserted only after custom module LSTM, while observers are inserted
462
+ # after all other custom modules. In the future, we should simply insert QuantStubs before and DeQuantStubs
463
+ # after custom modules in general, and replace these with "quantize" and "dequantize" nodes respectively.
464
+ def _replace_observer_or_dequant_stub_with_dequantize_node(node: Node, graph: Graph) -> None:
465
+ call_custom_module_node = node.args[0]
466
+ assert isinstance(call_custom_module_node, Node), \
467
+ f"Expecting the custom module call node to be a Node, but got {call_custom_module_node}"
468
+ node.replace_all_uses_with(call_custom_module_node)
469
+ graph.erase_node(node)
470
+ _insert_dequantize_node(call_custom_module_node, graph)
471
+
472
+ def _is_conversion_supported(activation_post_process: torch.nn.Module) -> bool:
473
+ dtype = activation_post_process.dtype # type: ignore[attr-defined]
474
+
475
+ is_dynamic = False
476
+ if hasattr(activation_post_process, "is_dynamic"):
477
+ is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment]
478
+
479
+ return (
480
+ (dtype in [
481
+ torch.quint8,
482
+ torch.qint8,
483
+ torch.qint32,
484
+ torch.uint8,
485
+ torch.int8,
486
+ torch.int16,
487
+ torch.int32
488
+ ] and (not is_dynamic)) or # type: ignore[return-value]
489
+ is_dynamic or
490
+ dtype == torch.float16
491
+ )
492
+
493
+ def _has_none_qconfig(node: Argument, node_name_to_qconfig: Dict[str, QConfigAny]) -> bool:
494
+ """ Check if a node has a qconfig of None, i.e. user requested to not quantize
495
+ the node
496
+ """
497
+ return isinstance(node, Node) and node.name in node_name_to_qconfig and node_name_to_qconfig[node.name] is None
498
+
499
+ def _run_weight_observers(observed: GraphModule, backend_config: BackendConfig) -> None:
500
+ """ Extract the subgraph that produces the weight for dynamic quant
501
+ or weight only quant node and run the subgraph to observe the weight.
502
+ Note that the observers of dynamic quant or weight only quant ops are
503
+ run during the convert step.
504
+ """
505
+ for node in observed.graph.nodes:
506
+ if node.op != "call_function":
507
+ continue
508
+ for node_arg in node.args:
509
+ # node_arg is weight
510
+ if node_arg and node_arg_is_weight(node, node_arg):
511
+ weight_observer_nodes = collect_producer_nodes(node_arg)
512
+ if weight_observer_nodes is None:
513
+ continue
514
+ weight_observer_module = \
515
+ graph_module_from_producer_nodes(
516
+ observed, weight_observer_nodes)
517
+ # run the weight observer
518
+ weight_observer_module()
519
+
520
+ def _maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph) -> None:
521
+ """ If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
522
+ we'll recursively remove the dequantize Node
523
+ """
524
+ if isinstance(arg, Node) and \
525
+ arg.op == "call_method" and \
526
+ arg.target == "dequantize":
527
+ quantize_node = arg.args[0]
528
+ # we only replace the specific use since dequantize could be used by other nodes
529
+ # as well
530
+ node.replace_input_with(arg, quantize_node)
531
+ elif isinstance(arg, (list, tuple)):
532
+ for arg_element in arg:
533
+ _maybe_recursive_remove_dequantize(arg_element, node, graph)
534
+ elif isinstance(arg, dict):
535
+ for arg_element in arg.values():
536
+ _maybe_recursive_remove_dequantize(arg_element, node, graph)
537
+ else:
538
+ warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")
539
+
540
+ def _get_module_path_and_prefix(
541
+ obs_node: Node,
542
+ node_name_to_scope: Dict[str, Tuple[str, type]],
543
+ node_name_to_qconfig: Dict[str, QConfigAny]) -> Tuple[str, str]:
544
+ """ Given an observer node, get the `Scope` or the fully qualified name for
545
+ the submodule containing the observed node, also return a prefix of "_input"
546
+ when the observed node is an input of a F.linear op, and not the output of another
547
+ quantized op.
548
+ TODO: this logic is hacky, we should think about how to remove it or make it more
549
+ general
550
+ """
551
+ observed_node = obs_node.args[0]
552
+ # an observer can be inserted both for the input of the next operator and for the output of the previous
553
+ # operator (they can be the same)
554
+ # this flag identifies if the observer is inserted only because the observed node is
555
+ # the input of the next operator
556
+ assert isinstance(observed_node, Node), \
557
+ f"Expecting observed node to be a Node, but got {observed_node}"
558
+ is_input_observer_only = node_name_to_qconfig[observed_node.name] is None \
559
+ if observed_node.name in node_name_to_qconfig else None
560
+ if is_input_observer_only:
561
+ # if the quantize function is at the input of op, then we find the first user of the observer_node
562
+ # to get the path. If a linear call_function is in the user list, we return the first instance
563
+ # of linear node to get the FQN.
564
+ users = list(obs_node.users)
565
+ first_linear_use_or_first_use = users[0] if users else None
566
+ linear_node = None
567
+ for n in users:
568
+ if n.op == "call_function" and n.target == torch.nn.functional.linear:
569
+ linear_node = n
570
+ break
571
+ if linear_node:
572
+ first_linear_use_or_first_use = linear_node
573
+ prefix = "_input"
574
+ else:
575
+ # if the quantize function is at the output of the op, we use the observer input node to get the path
576
+ first_linear_use_or_first_use = observed_node
577
+ prefix = ""
578
+
579
+ if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
580
+ module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
581
+ else:
582
+ # TODO: it's not used, so actually we can skip quantization
583
+ # but this requires changing return type of quantize_node
584
+ # we can fix it later if needed
585
+ module_path = ""
586
+ return module_path, prefix
587
+
588
+ def _insert_dequantize_node(
589
+ node: Node,
590
+ graph: Graph) -> None:
591
+ """ Inserts dequantize node for `node` in `graph`
592
+ """
593
+ with graph.inserting_after(node):
594
+ dequantize_node = graph.call_method("dequantize", (node,))
595
+ for user_node in dict(node.users):
596
+ if user_node is not dequantize_node:
597
+ user_node.replace_input_with(node, dequantize_node)
598
+
599
+ def _maybe_get_observer_for_node(
600
+ node: Node,
601
+ modules: Dict[str, torch.nn.Module]
602
+ ) -> Optional[torch.nn.Module]:
603
+ """
604
+ If the node is observed, return the observer
605
+ instance. Otherwise, return None.
606
+ """
607
+ for maybe_obs_node in node.users.keys():
608
+ if maybe_obs_node.op == 'call_module':
609
+ maybe_obs = modules[str(maybe_obs_node.target)]
610
+ if _is_activation_post_process(maybe_obs):
611
+ return maybe_obs
612
+ return None
613
+
614
+ def convert_standalone_module(
615
+ node: Node,
616
+ modules: Dict[str, torch.nn.Module],
617
+ model: torch.fx.GraphModule,
618
+ is_reference: bool,
619
+ backend_config: Optional[BackendConfig]) -> None:
620
+ """ Converts an observed standalone module to a quantized standalone module by calling
621
+ the fx convert api, currently using the same `is_reference` flag as the parent, but we may
622
+ change this behavior in the future (e.g. separating quantization and lowering for
623
+ standalone module as well)
624
+
625
+ Args:
626
+ - node: The call_module node of the observed standalone module
627
+ - modules: named_module of original model
628
+ - model: original model
629
+ - is_reference: a flag from parent provided by user to decide if we want to
630
+ produce a reference model or a fbgemm/qnnpack model
631
+ - backend_config: backend configuration of the target backend of quantization
632
+ """
633
+ # TODO: remove is_reference flag
634
+ if is_reference:
635
+ convert_fn = torch.ao.quantization.quantize_fx.convert_to_reference_fx
636
+ else:
637
+ convert_fn = torch.ao.quantization.quantize_fx.convert_fx # type: ignore[attr-defined]
638
+ # We know that observed standalone module is a GraphModule since
639
+ # it's produced by us
640
+ observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]
641
+ sm_input_quantized_idxs = \
642
+ observed_standalone_module \
643
+ .meta["_observed_graph_module_attrs"].standalone_module_input_quantized_idxs
644
+ # remove the dequantize nodes for inputs
645
+ args = list(node.args)
646
+ for idx in range(len(args)):
647
+ if idx in sm_input_quantized_idxs:
648
+ arg = args[idx]
649
+ if arg.op == "call_method" and arg.target == "dequantize": # type: ignore[union-attr]
650
+ quantize_node = arg.args[0] # type: ignore[union-attr]
651
+ node.replace_input_with(arg, quantize_node)
652
+ if len(arg.users) == 0: # type: ignore[union-attr]
653
+ model.graph.erase_node(arg)
654
+ # add dequantize node for output
655
+ sm_output_quantized_idxs = \
656
+ observed_standalone_module \
657
+ .meta["_observed_graph_module_attrs"].standalone_module_output_quantized_idxs
658
+ if len(sm_output_quantized_idxs) > 0:
659
+ assert sm_output_quantized_idxs[0] == 0, "Currently only quantized " \
660
+ "output idxs = [0] is supported"
661
+
662
+ # if it's non-empty, then it means the output is kept in quantized form
663
+ # we'll just add a dequantize node after this node
664
+ _insert_dequantize_node(node, model.graph)
665
+
666
+ # TODO: allow convert_custom_config to override backend_config
667
+ # for standalone module
668
+ quantized_standalone_module = convert_fn(
669
+ observed_standalone_module,
670
+ backend_config=backend_config)
671
+ parent_name, name = _parent_name(node.target)
672
+ # update the modules dict
673
+ setattr(modules[parent_name], name, quantized_standalone_module)
674
+ modules[str(node.target)] = quantized_standalone_module
675
+
676
+ def convert_weighted_module(
677
+ node: Node,
678
+ modules: Dict[str, torch.nn.Module],
679
+ observed_node_names: Set[str],
680
+ node_name_to_qconfig: Dict[str, QConfigAny],
681
+ backend_config: BackendConfig,
682
+ is_decomposed: bool = False,
683
+ is_reference: bool = False,
684
+ ) -> None:
685
+ """ Convert a weighted module to reference quantized module in the model
686
+ If the QConfig of a QAT module is not set, the module will still be converted to
687
+ a float module.
688
+
689
+ Args:
690
+ - node: The call_module node of the observed standalone module
691
+ - modules: named_module of original model
692
+ - observed_node_names: names for the set of observed fx nodes; we can skip
693
+ this conversion if the node is not observed
694
+ """
695
+ original_module = modules[str(node.target)]
696
+ qconfig: QConfigAny = original_module.qconfig # type: ignore[assignment]
697
+ weight_post_process = None
698
+ qat_module_classes = get_qat_module_classes(backend_config)
699
+
700
+ if isinstance(
701
+ original_module,
702
+ qat_module_classes):
703
+ # Converting qat module to a float module, we need to attach
704
+ # weight fake_quant to the module, weight fake_quant is assumed to be run during
705
+ # QAT so we don't need to run it again here
706
+ weight_post_process = original_module.weight_fake_quant
707
+ original_module = original_module.to_float() # type: ignore[operator]
708
+ # change qat module to float module
709
+ parent_name, name = _parent_name(node.target)
710
+ setattr(modules[parent_name], name, original_module)
711
+
712
+ is_observed = node.name in observed_node_names
713
+ # If a qconfig is not defined for this node, then skip converting to a reference module
714
+ if qconfig is None or _has_none_qconfig(node, node_name_to_qconfig) or not is_observed:
715
+ return
716
+
717
+ # skip converting to reference quantized module if the qconfig is not supported
718
+ pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
719
+ dtype_configs = pattern_to_dtype_configs.get(type(original_module), [])
720
+ if not _is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs):
721
+ return
722
+
723
+ # TODO: rename weight_is_statically_quantized to weight_is_int8_quantized
724
+ is_weight_quantized = weight_is_quantized(qconfig)
725
+
726
+ # the condition for swapping the module to reference quantized module is:
727
+ # weights need to be quantized
728
+ if not is_weight_quantized:
729
+ return
730
+
731
+ fused_module = None
732
+ float_module = original_module
733
+ # extract the individual float_module and fused module
734
+ if isinstance(original_module, torch.ao.nn.intrinsic._FusedModule):
735
+ fused_module = float_module
736
+ float_module = fused_module[0] # type: ignore[index]
737
+
738
+ # TODO: move this to the reference quantized module
739
+ # weight_qparams or weight_qparams dict
740
+ wq_or_wq_dict = {"is_decomposed": is_decomposed}
741
+ if isinstance(float_module, torch.nn.RNNCellBase):
742
+ weight_post_process_ih = qconfig.weight() # type: ignore[union-attr, operator]
743
+ weight_post_process_hh = qconfig.weight() # type: ignore[union-attr, operator]
744
+ weight_post_process_ih(float_module.weight_ih)
745
+ weight_post_process_hh(float_module.weight_hh)
746
+ weight_qparams_ih = get_qparam_dict(weight_post_process_ih)
747
+ weight_qparams_hh = get_qparam_dict(weight_post_process_hh)
748
+ wq_or_wq_dict.update({
749
+ "weight_ih": weight_qparams_ih,
750
+ "weight_hh": weight_qparams_hh,
751
+ })
752
+ elif isinstance(float_module, (torch.nn.LSTM, torch.nn.GRU)):
753
+ # format for wq_or_wq_dict (flattened attributes):
754
+ # {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...}
755
+ for wn in float_module._flat_weights_names:
756
+ if hasattr(float_module, wn) and wn.startswith("weight"):
757
+ weight = getattr(float_module, wn)
758
+ weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
759
+ if weight_post_process.dtype == torch.qint8: # type: ignore[union-attr]
760
+ weight_post_process(weight) # type: ignore[operator, misc]
761
+ wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process)
762
+ else:
763
+ # weight_post_process is None means the original module is not a QAT module
764
+ # we need to get weight_post_process from qconfig in this case
765
+ is_ptq = weight_post_process is None
766
+ if is_ptq:
767
+ weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
768
+ device = assert_and_get_unique_device(float_module)
769
+ if device:
770
+ weight_post_process.to(device)
771
+
772
+ # Call weight observer/fake_quant at least once to ensure the scales and zero points
773
+ # have the right shapes. Note: there are two cases where we don't have to do this:
774
+ #
775
+ # (1) QAT: The model's forward method already calls the weight observer/fake_quant,
776
+ # and this typically happens during training, so we don't need to do it here.
777
+ #
778
+ # (2) Non-reference (lowered) case: The quantized module's from_float method already
779
+ # calls the weight observer/fake_quant, so we don't have to do it here.
780
+ #
781
+ # Currently we ignore both cases and call the weight observer/fake_quant here
782
+ # regardless, which is technically incorrect. For (1), this is mainly to preserve BC
783
+ # in test code, which may not always train before convert. In the future, we should
784
+ # break BC for these two cases. See https://github.com/pytorch/pytorch/issues/73941.
785
+ #
786
+ # For PT2, however, we don't need to preserve BC here, so we can skip this hack
787
+ # for QAT. We identify this case as (is_decomposed + is_reference + is_qat).
788
+ # Note that we still need it for PTQ in the PT2 flow since the model's forward
789
+ # method doesn't call the weight observer.
790
+ is_qat = not is_ptq
791
+ if not (is_decomposed and is_reference and is_qat):
792
+ weight_post_process(float_module.weight) # type: ignore[operator]
793
+
794
+ wq_or_wq_dict.update(get_qparam_dict(weight_post_process))
795
+
796
+ # We use the same reference module for all modes of quantization: static, dynamic, weight_only
797
+ # root_module_to_quantized_reference_module: module mapping from root (floating point) module class
798
+ # to quantized reference module class, e.g. nn.Conv2d to nn.quantized._reference.Conv2d
799
+ root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
800
+ ref_qmodule_cls = root_module_to_quantized_reference_module.get(type_before_parametrizations(float_module), None)
801
+ assert (
802
+ ref_qmodule_cls is not None
803
+ ), f"No reference quantized module class configured for {type_before_parametrizations(float_module)}"
804
+ ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict) # type: ignore[attr-defined]
805
+ if fused_module is not None:
806
+ fused_module[0] = ref_qmodule # type: ignore[operator]
807
+ else:
808
+ parent_name, name = _parent_name(node.target)
809
+ setattr(modules[parent_name], name, ref_qmodule)
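+
+ # Hedged example (the module and variable names below are illustrative, not taken
+ # from this file): for a plain nn.Linear with a per-tensor int8 weight observer,
+ # the swap above roughly amounts to
+ #     wq = get_qparam_dict(weight_observer)   # qscheme / dtype / scale / zero_point
+ #     wq["is_decomposed"] = False
+ #     ref_linear = torch.ao.nn.quantized.reference.Linear.from_float(float_linear, wq)
+ # after which the reference module replaces the float module on its parent.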
810
+
811
+ def _remove_previous_dequantize_in_custom_module(node: Node, prev_node: Node, graph: Graph) -> None:
812
+ """
813
+ Given a custom module `node`, if the previous node is a dequantize, reroute the custom as follows:
814
+
815
+ Before: quantize - dequantize - custom_module
816
+ After: quantize - custom_module
817
+ \\ - dequantize
818
+ """
819
+ # expecting the input node for a custom module node to be a Node
820
+ assert isinstance(prev_node, Node), \
821
+ f"Expecting the argument for custom module node to be a Node, but got {prev_node}"
822
+ if prev_node.op == "call_method" and prev_node.target == "dequantize":
823
+ node.replace_input_with(prev_node, prev_node.args[0])
824
+ # Remove the dequantize node if it doesn't have other users
825
+ if len(prev_node.users) == 0:
826
+ graph.erase_node(prev_node)
827
+
828
+ def convert_custom_module(
829
+ node: Node,
830
+ graph: Graph,
831
+ modules: Dict[str, torch.nn.Module],
832
+ custom_module_class_mapping: Dict[QuantType, Dict[Type, Type]],
833
+ statically_quantized_custom_module_nodes: Set[Node]) -> None:
834
+ """ Converts an observed custom module to a quantized custom module based on
835
+ `custom_module_class_mapping`
836
+ For static quantization, we'll also remove the previous `dequantize` node and
837
+ attach the output observer to the module; the observer for the node
838
+ will be converted to a dequantize node instead of quantize-dequantize pairs
839
+ later in the graph. In the end we would have a quantized custom module that
840
+ has the same interface as a default quantized module in nn.quantized namespace,
841
+ i.e. quantized input and quantized output.
842
+
843
+ Args:
844
+ - node: The call_module node of the observed standalone module
845
+ - graph: The graph containing the node
846
+ - modules: named_module of original model
847
+ - custom_module_class_mapping: mapping from observed custom module class to
848
+ quantized custom module class, used to swap custom modules
849
+ - statically_quantized_custom_module_nodes: we'll add the custom module node
850
+ if we find it is statically quantized, this will be used later when converting
851
+ observers to quant/dequant node pairs, if the observed node is a statically
852
+ quantized custom module node, we'll convert the observer to a dequantize node,
853
+ this is to keep the interface the same as the default quantized module.
854
+ TODO: maybe we want to redesign this part to align with reference model design
855
+ as well, but there has been some discussions around the interface, so we can do
856
+ it later.
857
+ """
858
+ observed_custom_module = modules[str(node.target)]
859
+ maybe_obs = _maybe_get_observer_for_node(node, modules)
860
+ qconfig = observed_custom_module.qconfig
861
+ if activation_is_statically_quantized(qconfig):
862
+ statically_quantized_custom_module_nodes.add(node)
863
+ if _is_custom_module_lstm(node, modules):
864
+ # The inputs are tuples in the form (input, (hidden0, hidden1))
865
+ # Ensure all three input nodes are quantized
866
+ assert (
867
+ len(node.args) == 2 and
868
+ isinstance(node.args[1], tuple) and
869
+ len(node.args[1]) == 2
870
+ )
871
+ (inputs, (hidden0, hidden1)) = node.args # type: ignore[misc]
872
+ assert isinstance(inputs, Node)
873
+ assert isinstance(hidden0, Node)
874
+ assert isinstance(hidden1, Node)
875
+ _remove_previous_dequantize_in_custom_module(node, inputs, graph)
876
+ _remove_previous_dequantize_in_custom_module(node, hidden0, graph)
877
+ _remove_previous_dequantize_in_custom_module(node, hidden1, graph)
878
+ elif _is_custom_module_mha(node, modules):
879
+ # Inputs are in the form (query, key, value)
880
+ # TODO: This is the first step in enabling the full fx custom module
881
+ # quantization path for MultiheadAttention, and only covers the inputs
882
+ # to the module.
883
+ # Additional handling is yet to be implemented for the outputs, similar
884
+ # to LSTM custom module
885
+ assert len(node.args) == 3
886
+ query, key, value = node.args
887
+ assert isinstance(query, Node)
888
+ assert isinstance(key, Node)
889
+ assert isinstance(value, Node)
890
+ _remove_previous_dequantize_in_custom_module(node, query, graph)
891
+ _remove_previous_dequantize_in_custom_module(node, key, graph)
892
+ _remove_previous_dequantize_in_custom_module(node, value, graph)
893
+ else:
894
+ # remove the previous dequant node to ensure the inputs are quantized
895
+ arg = node.args[0]
896
+ assert isinstance(arg, Node)
897
+ _remove_previous_dequantize_in_custom_module(node, arg, graph)
898
+ # absorb the following observer into the module conversion
899
+ activation_post_process = _maybe_get_observer_for_node(node, modules)
900
+ assert activation_post_process is not None
901
+ observed_custom_module.activation_post_process = activation_post_process
902
+
903
+ # swap the observed custom module to quantized custom module
904
+ quantized_custom_module_class = get_swapped_custom_module_class(
905
+ observed_custom_module, custom_module_class_mapping, qconfig)
906
+ quantized_custom_module = \
907
+ quantized_custom_module_class.from_observed(observed_custom_module)
908
+ parent_name, name = _parent_name(node.target)
909
+ setattr(modules[parent_name], name, quantized_custom_module)
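+
+ # Hedged usage sketch (MyObservedCustomModule / MyQuantizedCustomModule are
+ # placeholder class names): the mapping consumed here is supplied by the user
+ # through ConvertCustomConfig, e.g.
+ #     from torch.ao.quantization.fx.custom_config import ConvertCustomConfig
+ #     convert_custom_config = ConvertCustomConfig().set_observed_to_quantized_mapping(
+ #         MyObservedCustomModule, MyQuantizedCustomModule)
+ #     quantized = convert_fx(prepared, convert_custom_config=convert_custom_config)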
910
+
911
+ def convert(
912
+ model: GraphModule, is_reference: bool = False,
913
+ convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
914
+ is_standalone_module: bool = False,
915
+ _remove_qconfig_flag: bool = True,
916
+ qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
917
+ backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
918
+ is_decomposed: bool = False) -> GraphModule:
919
+ """
920
+ We will convert an observed model (a module with observer calls) to a reference
921
+ quantized model. The rule is simple:
922
+ 1. for each observer module call in the graph, we'll convert it to calls to
923
+ quantize and dequantize functions based on the observer instance
924
+ 2. for weighted operations like linear/conv, we need to convert them to reference
925
+ quantized module, this requires us to know whether the dtype configured for the
926
+ weight is supported in the backend, this is done in prepare step and the result
927
+ is stored in observed_node_names, we can decide whether we need to swap the
928
+ module based on this set
929
+
930
+ Args:
931
+ * `is_standalone_module`: when this flag is True, it means we are quantizing
932
+ a submodule that is not inlined in parent module, and will be quantized
933
+ separately as one unit.
934
+
935
+ * `is_decomposed`: a boolean flag to indicate whether we want to use the
936
+ quantize operator for decomposed quantized tensor
937
+ (torch.ops.quantized_decomposed.quantize_per_tensor) or default/standalone
938
+ quantized tensor (torch.quantize_per_tensor)
939
+
940
+ Returns:
941
+ a quantized standalone module, whether input/output is quantized is
942
+ specified by prepare_custom_config, with
943
+ input_quantized_idxs, output_quantized_idxs, please
944
+ see docs for :func:`~torch.ao.quantization.prepare_fx` for details
945
+ """
946
+ if convert_custom_config is None:
947
+ convert_custom_config = ConvertCustomConfig()
948
+
949
+ if isinstance(convert_custom_config, Dict):
950
+ warnings.warn(
951
+ "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
952
+ "in a future version. Please pass in a ConvertCustomConfig instead.")
953
+ convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)
954
+
955
+ if isinstance(qconfig_mapping, Dict):
956
+ warnings.warn(
957
+ "Passing a QConfig dictionary to convert is deprecated and will not be supported "
958
+ "in a future version. Please pass in a QConfigMapping instead.")
959
+ qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
960
+ qconfig_mapping = copy.deepcopy(qconfig_mapping)
961
+ assert qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping)
962
+
963
+ if isinstance(backend_config, Dict):
964
+ warnings.warn(
965
+ "Passing a backend_config_dict to prepare is deprecated and will not be supported "
966
+ "in a future version. Please pass in a BackendConfig instead.")
967
+ backend_config = BackendConfig.from_dict(backend_config)
968
+
969
+ if backend_config is None:
970
+ backend_config = get_native_backend_config()
971
+
972
+ assert _is_observed_module(model), \
973
+ 'incoming model must be produced by prepare_fx'
974
+ observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"]
975
+ node_name_to_scope: Dict[str, Tuple[str, type]] = observed_graph_module_attrs.node_name_to_scope
976
+ prepare_custom_config: PrepareCustomConfig = observed_graph_module_attrs.prepare_custom_config
977
+ observed_node_names: Set[str] = observed_graph_module_attrs.observed_node_names
978
+ node_name_to_qconfig: Dict[str, QConfigAny] = observed_graph_module_attrs.node_name_to_qconfig # type: ignore[assignment]
979
+
980
+ # mapping from fully qualified module name to module instance
981
+ # for example,
982
+ # {
983
+ # '': Model(...),
984
+ # 'linear': Linear(...),
985
+ # 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
986
+ # }
987
+ # We use remove_duplicate=False here because torch.cat uses
988
+ # the same activation_post_process module instance but different names
989
+ modules = dict(model.named_modules(remove_duplicate=False))
990
+
991
+ # TODO refactor this code once we update the prepare logic to have additional information on
992
+ # which graph nodes have been observed and share that with convert to decide which observers to ignore.
993
+ if qconfig_mapping:
994
+ prepare_qconfig_mapping: QConfigMapping = observed_graph_module_attrs.qconfig_mapping # type: ignore[assignment]
995
+ modules_copy = copy.deepcopy(modules)
996
+
997
+ if observed_graph_module_attrs.is_qat:
998
+ _update_qconfig_for_qat(qconfig_mapping, backend_config)
999
+ _update_qconfig_for_fusion(model, qconfig_mapping)
1000
+
1001
+ _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping, qconfig_mapping) # type: ignore[arg-type]
1002
+ convert_node_name_to_qconfig = _generate_node_name_to_qconfig(
1003
+ model, modules_copy, model.graph, qconfig_mapping, node_name_to_scope)
1004
+ # check the convert_node_name_to_qconfig generated and ensure that
1005
+ # all the values either match what was set in prepare node_name_to_qconfig
1006
+ # or are set to None in the convert_node_name_to_qconfig.
1007
+ for k, v in node_name_to_qconfig.items():
1008
+ assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig'
1009
+ if convert_node_name_to_qconfig[k] is not None:
1010
+ assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \
1011
+ f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \
1012
+ f"but {v} was updated to {convert_node_name_to_qconfig[k]}"
1013
+ node_name_to_qconfig = convert_node_name_to_qconfig
1014
+
1015
+ custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)
1016
+ custom_module_class_mapping = convert_custom_config.observed_to_quantized_mapping
1017
+
1018
+ if observed_graph_module_attrs.equalization_node_name_to_qconfig is not None:
1019
+ # If we want to do equalization then do the following:
1020
+ # Calculate the equalization scale, update the observers with the scaled
1021
+ # inputs, and scale the weight
1022
+ weight_eq_obs_dict = update_obs_for_equalization(model, modules)
1023
+ convert_eq_obs(model, modules, weight_eq_obs_dict)
1024
+
1025
+ # always run weight observers in the top level forward method
1026
+ # for dynamic quant ops or weight only quant ops
1027
+ _run_weight_observers(model, backend_config)
1028
+
1029
+ graph_inputs: List[str] = []
1030
+ for node in model.graph.nodes:
1031
+ if node.op == 'placeholder':
1032
+ graph_inputs.append(node.name)
1033
+
1034
+ # additional state to override inputs to be quantized, if specified
1035
+ # by the user
1036
+ placeholder_node_seen_cnt = 0
1037
+ input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
1038
+ output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
1039
+
1040
+ root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
1041
+ # convert tuples so that it can work with isinstance(module, tuple_of_classes)
1042
+ root_module_classes = tuple(root_module_to_quantized_reference_module.keys())
1043
+ qat_module_classes = get_qat_module_classes(backend_config)
1044
+ fused_module_classes = get_fused_module_classes(backend_config)
1045
+ statically_quantized_custom_module_nodes: Set[Node] = set()
1046
+
1047
+ for node in list(model.graph.nodes):
1048
+ if node.op == 'placeholder':
1049
+ cur_placeholder_node_idx = placeholder_node_seen_cnt
1050
+ placeholder_node_seen_cnt += 1
1051
+ if cur_placeholder_node_idx in input_quantized_idxs:
1052
+ # Inputs are assumed to be quantized if the user specified the
1053
+ # input_quantized_idxs override.
1054
+ # we need to dequantize the inputs since all operators took
1055
+ # floating point inputs in reference quantized models
1056
+ _insert_dequantize_node(node, model.graph)
1057
+ elif node.op == "output":
1058
+ # If the argument is empty we don't need to do anything
1059
+ if len(output_quantized_idxs) == 0:
1060
+ continue
1061
+ # Results are kept quantized if the user specified the
1062
+ # output_quantized_idxs override.
1063
+ # Remove the dequantize operator for the node in the end if any
1064
+ return_node = node
1065
+ output = node.args[0]
1066
+ # outputs can be Node, list, tuple, dict, other cases are not supported yet
1067
+ if isinstance(output, (list, tuple)):
1068
+ for idx in output_quantized_idxs:
1069
+ _maybe_recursive_remove_dequantize(output[idx], return_node, model.graph)
1070
+ elif isinstance(output, (Node, dict)):
1071
+ # we treat dict as a single argument currently, but it can be extended
1072
+ # to support {"key": dtype} after we change output_quantized_idxs to
1073
+ # dict
1074
+ if 0 in output_quantized_idxs:
1075
+ _maybe_recursive_remove_dequantize(output, return_node, model.graph)
1076
+ else:
1077
+ warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}")
1078
+ elif node.op == "call_module":
1079
+ mod = _get_module(node, modules)
1080
+ assert mod is not None
1081
+ if _is_activation_post_process(mod):
1082
+ observed_node = node.args[0]
1083
+ if observed_node in statically_quantized_custom_module_nodes:
1084
+ _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
1085
+ else:
1086
+ if is_decomposed:
1087
+ _replace_observer_with_quantize_dequantize_node_decomposed(
1088
+ model, node, modules, node_name_to_scope,
1089
+ node_name_to_qconfig)
1090
+ else:
1091
+ _replace_observer_with_quantize_dequantize_node(
1092
+ model, node, modules, node_name_to_scope,
1093
+ node_name_to_qconfig)
1094
+ elif isinstance(mod, DeQuantStub):
1095
+ _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
1096
+ elif _is_observed_standalone_module(mod):
1097
+ convert_standalone_module(
1098
+ node, modules, model, is_reference, backend_config)
1099
+ # below this point `type_before_parametrizations` is used
1100
+ # instead of `type` to handle situations with fx quant + sparsity
1101
+ elif type_before_parametrizations(mod) in set(
1102
+ root_module_classes).union(qat_module_classes).union(fused_module_classes):
1103
+ # extra check for fused module classes to make sure they are fused module classes
1104
+ # of target modules
1105
+ if type_before_parametrizations(mod) in fused_module_classes and \
1106
+ type_before_parametrizations(mod[0]) not in root_module_classes: # type: ignore[index]
1107
+ continue
1108
+ convert_weighted_module(
1109
+ node, modules, observed_node_names, node_name_to_qconfig, backend_config,
1110
+ is_decomposed, is_reference)
1111
+ elif type_before_parametrizations(mod) in custom_module_classes:
1112
+ convert_custom_module(
1113
+ node, model.graph, modules, custom_module_class_mapping,
1114
+ statically_quantized_custom_module_nodes)
1115
+
1116
+ # remove deadcode after converting observers to quant/dequant ops
1117
+ model.graph.eliminate_dead_code()
1118
+ model = GraphModule(model, model.graph)
1119
+
1120
+ # TODO: maybe move this to quantize_fx.py
1121
+ if not is_reference:
1122
+ model = lower_to_fbgemm(model, node_name_to_qconfig, node_name_to_scope)
1123
+
1124
+ # TODO: this looks hacky, we want to check why we need this and see if we can
1125
+ # remove this
1126
+ # removes qconfig and activation_post_process modules
1127
+ if _remove_qconfig_flag:
1128
+ _remove_qconfig(model)
1129
+ model.delete_all_unused_submodules()
1130
+ model.meta.pop("_observed_graph_module_attrs", None)
1131
+ return model
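The convert pass above is normally reached through the public FX quantization entry points rather than called directly. A minimal sketch of that flow, assuming only the standard torch.ao.quantization APIs (the toy model and shapes are illustrative, not part of the diff):

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

# toy float model
m = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(2, 4),)

# insert observers, then run calibration data through them
prepared = prepare_fx(m, get_default_qconfig_mapping("fbgemm"), example_inputs)
prepared(*example_inputs)

# observers are replaced by quantize/dequantize nodes and, since is_reference
# defaults to False, the reference model is then lowered to fbgemm kernels
quantized = convert_fx(prepared)
print(quantized.graph)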
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py ADDED
@@ -0,0 +1,161 @@
1
+ from torch.fx import (
2
+ GraphModule,
3
+ Node,
4
+ map_arg
5
+ )
6
+ from torch.fx.graph import Graph
7
+ from .match_utils import (
8
+ _is_match,
9
+ MatchAllNode,
10
+ )
11
+ from .pattern_utils import (
12
+ _sorted_patterns_dict,
13
+ )
14
+
15
+ from ..backend_config import (
16
+ BackendConfig,
17
+ get_native_backend_config,
18
+ )
19
+ from ..backend_config.utils import (
20
+ get_fuser_method_mapping,
21
+ get_fusion_pattern_to_root_node_getter,
22
+ get_fusion_pattern_to_extra_inputs_getter,
23
+ )
24
+
25
+ from .custom_config import FuseCustomConfig
26
+
27
+ from .fuse_handler import (
28
+ _get_fusion_pattern_to_fuse_handler_cls,
29
+ FuseHandler,
30
+ )
31
+
32
+ from typing import Any, Callable, Dict, List, Tuple, Union
33
+ import warnings
34
+
35
+ from torch.ao.quantization.utils import Pattern, NodePattern
36
+
37
+
38
+ __all__ = [
39
+ "fuse",
40
+ # TODO: We should make this private in the future
41
+ # This is currently needed for test_public_bindings for some reason
42
+ "FuseHandler",
43
+ ]
44
+
45
+
46
+ def fuse(
47
+ model: GraphModule,
48
+ is_qat: bool,
49
+ fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
50
+ backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
51
+ ) -> GraphModule:
52
+ if fuse_custom_config is None:
53
+ fuse_custom_config = FuseCustomConfig()
54
+
55
+ if isinstance(fuse_custom_config, Dict):
56
+ warnings.warn(
57
+ "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
58
+ "in a future version. Please pass in a FuseCustomConfig instead.")
59
+ fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)
60
+
61
+ if isinstance(backend_config, Dict):
62
+ warnings.warn(
63
+ "Passing a backend_config_dict to prepare is deprecated and will not be supported "
64
+ "in a future version. Please pass in a BackendConfig instead.")
65
+ backend_config = BackendConfig.from_dict(backend_config)
66
+
67
+ named_modules = dict(model.named_modules())
68
+
69
+ if backend_config is None:
70
+ backend_config = get_native_backend_config()
71
+
72
+ fusion_pattern_to_fuse_handler_cls = _sorted_patterns_dict(_get_fusion_pattern_to_fuse_handler_cls(backend_config))
73
+ fuser_method_mapping = get_fuser_method_mapping(backend_config)
74
+ fusion_pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
75
+ fusion_pattern_to_extra_inputs_getter = get_fusion_pattern_to_extra_inputs_getter(backend_config)
76
+
77
+ # find fusion
78
+ fusion_pairs = _find_matches(
79
+ model, model.graph, fusion_pattern_to_fuse_handler_cls)
80
+ # TODO: change this to inplace changes to graph, since we no longer construct
81
+ # new GraphModule anymore
82
+ fused_graph = Graph()
83
+ env: Dict[Any, Any] = {}
84
+
85
+ def load_arg(a):
86
+ return map_arg(a, lambda node: env[node.name])
87
+
88
+ def default_root_node_getter(node_pattern):
89
+ while not isinstance(node_pattern[-1], Node):
90
+ node_pattern = node_pattern[-1]
91
+ return node_pattern[-1]
92
+
93
+ for node in model.graph.nodes:
94
+ maybe_last_node, pattern, matched_node_pattern, obj, node_to_subpattern = \
95
+ fusion_pairs.get(node.name, (None, None, None, None, None))
96
+ # get the corresponding subpattern for the current node
97
+ if node_to_subpattern is not None:
98
+ node_subpattern = node_to_subpattern.get(node, None)
99
+ else:
100
+ node_subpattern = None
101
+ if maybe_last_node is node:
102
+ assert obj is not None
103
+ root_node_getter = fusion_pattern_to_root_node_getter.get(pattern, default_root_node_getter)
104
+ root_node = root_node_getter(matched_node_pattern) # type: ignore[index]
105
+ extra_inputs_getter = fusion_pattern_to_extra_inputs_getter.get(pattern, None)
106
+ extra_inputs = []
107
+ if extra_inputs_getter is not None:
108
+ extra_inputs = extra_inputs_getter(matched_node_pattern)
109
+ # TODO: add validation that root_node is a module and has the same type
110
+ # as the root_module in the configuration
111
+ env[node.name] = obj.fuse(
112
+ load_arg, named_modules, fused_graph, root_node, extra_inputs, matched_node_pattern, # type: ignore[arg-type]
113
+ fuse_custom_config, fuser_method_mapping, is_qat)
114
+ elif maybe_last_node is None or node_subpattern is MatchAllNode:
115
+ env[node.name] = fused_graph.node_copy(node, load_arg)
116
+ # node matched in patterns and is not root is removed here
117
+
118
+ model = GraphModule(model, fused_graph)
119
+ return model
120
+
121
+ def _find_matches(
122
+ root: GraphModule,
123
+ graph: Graph,
124
+ pattern_to_fuse_handler_cls: Dict[Pattern, Callable],
125
+ ) -> Dict[str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]]:
126
+ modules = dict(root.named_modules())
127
+ # node name -> (root_node, match_value)
128
+ match_map : Dict[
129
+ str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]] = {}
130
+ # a map from node to the matched subpattern
131
+ node_to_subpattern: Dict[Node, Any] = {}
132
+
133
+ # TODO: dedup with quantization matching function in match_utils.py
134
+ def apply_match(pattern, node, match, matched_node_pattern, node_to_subpattern):
135
+ if isinstance(pattern, tuple):
136
+ s, *args = pattern
137
+ current_node_pattern: List[Node] = []
138
+ apply_match(s, node, match, current_node_pattern, node_to_subpattern)
139
+ for subpattern, arg in zip(args, node.args):
140
+ apply_match(subpattern, arg, match, current_node_pattern, node_to_subpattern)
141
+ matched_node_pattern.append(tuple(current_node_pattern))
142
+ else:
143
+ # the first pattern matches will take precedence
144
+ if node.name not in match_map:
145
+ matched_node_pattern.append(node)
146
+ # MatchAllNode here is actually MatchAllInputNode which should not
147
+ # be added to match_map
148
+ if pattern is not MatchAllNode:
149
+ node_to_subpattern[node] = pattern
150
+ root_node, pattern, handler = match
151
+ match_map[node.name] = (root_node, pattern, matched_node_pattern, handler, node_to_subpattern)
152
+
153
+ for node in reversed(graph.nodes):
154
+ if node.name not in match_map:
155
+ for pattern, fuse_handler_cls in pattern_to_fuse_handler_cls.items():
156
+ matched_node_pattern: List[Node] = []
157
+ if _is_match(modules, node, pattern):
158
+ apply_match(pattern, node, (node, pattern, fuse_handler_cls(node)), matched_node_pattern, node_to_subpattern)
159
+ break
160
+
161
+ return match_map
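fuse() is an internal helper that prepare_fx invokes right after symbolic tracing, but it can also be exercised directly on a traced GraphModule. A rough sketch under that assumption (module definition is illustrative):

import torch
import torch.nn as nn
from torch.fx import symbolic_trace
from torch.ao.quantization.fx.fuse import fuse

class M(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 3)
        self.bn = nn.BatchNorm2d(3)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

traced = symbolic_trace(M().eval())
# with the default (native) backend config, conv + bn + relu collapses
# into a single fused call_module node
fused = fuse(traced, is_qat=False)
print(fused.graph)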
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse_handler.py ADDED
@@ -0,0 +1,120 @@
1
+ import torch
2
+ from torch.ao.quantization.backend_config import BackendConfig
3
+ from torch.fx.graph import Node, Graph
4
+ from ..utils import _parent_name, NodePattern, Pattern
5
+ from ..fuser_method_mappings import get_fuser_method_new
6
+ from abc import ABC, abstractmethod
7
+ from typing import Any, Callable, Dict, List, Union
8
+ from .custom_config import FuseCustomConfig
9
+ from .match_utils import MatchAllNode
10
+ from torch.nn.utils.parametrize import type_before_parametrizations
11
+
12
+ __all__ = [
13
+ "DefaultFuseHandler",
14
+ "FuseHandler",
15
+ ]
16
+
17
+
18
+ # ----------------------------
19
+ # Fusion Pattern Registrations
20
+ # ----------------------------
21
+
22
+ # Base Pattern Handler
23
+ class FuseHandler(ABC):
24
+ """ Base handler class for the fusion patterns
25
+ """
26
+ @abstractmethod
27
+ def __init__(self, node: Node):
28
+ pass
29
+
30
+ @abstractmethod
31
+ def fuse(self,
32
+ load_arg: Callable,
33
+ named_modules: Dict[str, torch.nn.Module],
34
+ fused_graph: Graph,
35
+ root_node: Node,
36
+ extra_inputs: List[Any],
37
+ matched_node_pattern: NodePattern,
38
+ fuse_custom_config: FuseCustomConfig,
39
+ fuser_method_mapping: Dict[Pattern, Union[torch.nn.Sequential, Callable]],
40
+ is_qat: bool) -> Node:
41
+ pass
42
+
43
+ class DefaultFuseHandler(FuseHandler):
44
+ def __init__(
45
+ self,
46
+ node: Node):
47
+ super().__init__(node)
48
+
49
+ def fuse(self,
50
+ load_arg: Callable,
51
+ named_modules: Dict[str, torch.nn.Module],
52
+ fused_graph: Graph,
53
+ root_node: Node,
54
+ extra_inputs: List[Any],
55
+ matched_node_pattern: NodePattern,
56
+ fuse_custom_config: FuseCustomConfig,
57
+ fuser_method_mapping: Dict[Pattern, Union[torch.nn.Sequential, Callable]],
58
+ is_qat: bool) -> Node:
59
+ assert root_node.op == "call_module", "Expecting module node to be a call_module Node"
60
+ root_module = named_modules[str(root_node.target)]
61
+
62
+ def get_modules(pattern):
63
+ """ Given a node pattern, extract the corresponding modules
64
+ e.g. input: (relu_node, (bn_node, conv_node))
65
+ output: (relu_module, (bn_module, conv_module))
66
+ """
67
+ if isinstance(pattern, (tuple, list)):
68
+ n, *args = pattern
69
+ modules: List[torch.nn.Module] = []
70
+ modules.append(get_modules(n))
71
+ for a in args:
72
+ modules.append(get_modules(a))
73
+ return tuple(modules)
74
+ else:
75
+ n = pattern
76
+ if n.op == "call_module":
77
+ return named_modules[n.target]
78
+ elif n.op == "call_function" and n.target == torch.nn.functional.relu:
79
+ relu = torch.nn.ReLU()
80
+ relu.training = root_module.training
81
+ return relu
82
+ elif n.op == "call_function" or n.op == "call_method":
83
+ return n.target
84
+ else:
85
+ return MatchAllNode
86
+
87
+ # since relu can be used multiple times, we'll need to create a relu module for each match
88
+ matched_modules = get_modules(matched_node_pattern)
89
+
90
+ def get_matched_types(m):
91
+ if isinstance(m, tuple):
92
+ return tuple(map(get_matched_types, m))
93
+ if isinstance(m, torch.nn.Module):
94
+ return type_before_parametrizations(m)
95
+ return m
96
+
97
+ matched_module_types = get_matched_types(matched_modules)
98
+ module_parent_name, module_name = _parent_name(root_node.target)
99
+ fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping)
100
+ # TODO: change the signature for fuser_method to take matched module patterns
101
+ # as input
102
+ fused_module = fuser_method(is_qat, *matched_modules)
103
+ setattr(named_modules[module_parent_name], module_name, fused_module)
104
+ extra_args = []
105
+ for input in extra_inputs:
106
+ extra_args.append(load_arg(input))
107
+ node = fused_graph.node_copy(root_node, load_arg)
108
+ args = list(node.args)
109
+ args.extend(extra_args)
110
+ node.args = tuple(args)
111
+ return node
112
+
113
+ def _get_fusion_pattern_to_fuse_handler_cls(
114
+ backend_config: BackendConfig) -> Dict[Pattern, Callable]:
115
+ fusion_pattern_to_fuse_handlers: Dict[Pattern, Callable] = {}
116
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
117
+ if config.fuser_method is not None:
118
+ # TODO: is this logic right?
119
+ fusion_pattern_to_fuse_handlers[pattern] = DefaultFuseHandler
120
+ return fusion_pattern_to_fuse_handlers
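_get_fusion_pattern_to_fuse_handler_cls simply maps every BackendConfig pattern that carries a fuser_method to DefaultFuseHandler. A small inspection sketch (these are private helpers, shown only for illustration):

from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.fx.fuse_handler import _get_fusion_pattern_to_fuse_handler_cls

handlers = _get_fusion_pattern_to_fuse_handler_cls(get_native_backend_config())
# every fusable pattern, e.g. (ReLU, (BatchNorm2d, Conv2d)), maps to DefaultFuseHandler
for pattern, handler_cls in list(handlers.items())[:5]:
    print(pattern, "->", handler_cls.__name__)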
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py ADDED
@@ -0,0 +1,119 @@
1
+ import torch
2
+ import copy
3
+ from torch.fx import GraphModule
4
+ from torch.fx.graph import Graph
5
+ from typing import Union, Dict, Any, Set
6
+
7
+ __all__ = [
8
+ "FusedGraphModule",
9
+ "ObservedGraphModule",
10
+ "ObservedStandaloneGraphModule",
11
+ "QuantizedGraphModule",
12
+ ]
13
+
14
+ class FusedGraphModule(GraphModule):
15
+ def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
16
+ self.preserved_attr_names = preserved_attr_names
17
+ preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
18
+ super().__init__(root, graph)
19
+ for attr in preserved_attrs:
20
+ setattr(self, attr, preserved_attrs[attr])
21
+
22
+ # GraphModule does not copy attributes which are not in the __dict__
23
+ # of vanilla nn.Module. So, we override __deepcopy__ in order
24
+ # to copy the quantization specific attributes correctly.
25
+ def __deepcopy__(self, memo):
26
+ fake_mod = torch.nn.Module()
27
+ fake_mod.__dict__ = copy.deepcopy(self.__dict__)
28
+ return FusedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
29
+
30
+ class ObservedGraphModule(GraphModule):
31
+
32
+ def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
33
+ self.preserved_attr_names = {
34
+ '_activation_post_process_map',
35
+ '_activation_post_process_indexes',
36
+ '_patterns',
37
+ '_node_name_to_qconfig',
38
+ '_prepare_custom_config',
39
+ '_equalization_node_name_to_qconfig',
40
+ '_node_name_to_scope',
41
+ '_qconfig_mapping',
42
+ '_is_qat',
43
+ '_observed_node_names'}.union(preserved_attr_names)
44
+ preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
45
+ super().__init__(root, graph)
46
+ for attr in preserved_attrs:
47
+ setattr(self, attr, preserved_attrs[attr])
48
+
49
+ # GraphModule does not copy attributes which are not in the __dict__
50
+ # of vanilla nn.Module. So, we override __deepcopy__ in order
51
+ # to copy the quantization specific attributes correctly.
52
+ def __deepcopy__(self, memo):
53
+ fake_mod = torch.nn.Module()
54
+ fake_mod.__dict__ = copy.deepcopy(self.__dict__)
55
+ return ObservedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
56
+
57
+ def _is_observed_module(module: Any) -> bool:
58
+ return hasattr(module, "meta") and "_observed_graph_module_attrs" in module.meta
59
+
60
+ def _get_observed_graph_module_attr(model: Union[torch.nn.Module, GraphModule], attr_name: str) -> Any:
61
+ if hasattr(model, "meta") and "_observed_graph_module_attrs" in model.meta: # type: ignore[operator, index]
62
+ return getattr(model.meta["_observed_graph_module_attrs"], attr_name) # type: ignore[index]
63
+ return None
64
+
65
+ class ObservedStandaloneGraphModule(ObservedGraphModule):
66
+ def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
67
+ preserved_attr_names = preserved_attr_names.union({
68
+ "_standalone_module_input_quantized_idxs",
69
+ "_standalone_module_output_quantized_idxs"})
70
+ super().__init__(root, graph, preserved_attr_names)
71
+
72
+ def __deepcopy__(self, memo):
73
+ fake_mod = torch.nn.Module()
74
+ fake_mod.__dict__ = copy.deepcopy(self.__dict__)
75
+ return ObservedStandaloneGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
76
+
77
+ def _is_observed_standalone_module(module: Any) -> bool:
78
+ return _is_observed_module(module) and module.meta["_observed_graph_module_attrs"].is_observed_standalone_module
79
+
80
+ def _save_packed_weight(self, destination, prefix, keep_vars):
81
+ for attr_name in dir(self):
82
+ if "_packed_weight" in attr_name and \
83
+ isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined]
84
+ packed_weight = getattr(self, attr_name)
85
+ destination[prefix + attr_name] = packed_weight
86
+
87
+ class QuantizedGraphModule(GraphModule):
88
+ """ This class is created to make sure PackedParams
89
+ (e.g. LinearPackedParams, Conv2dPackedParams) to appear in state_dict
90
+ so that we can serialize and deserialize quantized graph module with
91
+ torch.save(m.state_dict()) and m.load_state_dict(state_dict)
92
+ """
93
+ def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
94
+ self.preserved_attr_names = preserved_attr_names
95
+ preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
96
+ super().__init__(root, graph)
97
+ for attr in preserved_attrs:
98
+ setattr(self, attr, preserved_attrs[attr])
99
+ self._register_state_dict_hook(_save_packed_weight)
100
+
101
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
102
+ missing_keys, unexpected_keys, error_msgs):
103
+ attrs_to_pop = []
104
+ for attr_name in state_dict:
105
+ if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950
106
+ setattr(self, attr_name, state_dict[attr_name])
107
+ attrs_to_pop.append(attr_name)
108
+
109
+ # pop the packed param attributes
110
+ for attr_name in attrs_to_pop:
111
+ state_dict.pop(attr_name)
112
+
113
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
114
+
115
+
116
+ def __deepcopy__(self, memo):
117
+ fake_mod = torch.nn.Module()
118
+ fake_mod.__dict__ = copy.deepcopy(self.__dict__)
119
+ return QuantizedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
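The observed-graph metadata that these helpers read lives under model.meta["_observed_graph_module_attrs"] once prepare_fx has run. A sketch of poking at it (private helpers; the "is_qat" attribute name follows the ObservedGraphModuleAttrs dataclass used by prepare):

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx
from torch.ao.quantization.fx.graph_module import (
    _is_observed_module,
    _get_observed_graph_module_attr,
)

m = torch.nn.Sequential(torch.nn.Linear(4, 4)).eval()
observed = prepare_fx(m, get_default_qconfig_mapping(), (torch.randn(1, 4),))

print(_is_observed_module(observed))                        # True
print(_get_observed_graph_module_attr(observed, "is_qat"))  # False for prepare_fx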
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_qnnpack.py ADDED
@@ -0,0 +1,18 @@
1
+ from ._lower_to_native_backend import _lower_to_native_backend
2
+ from ..qconfig import QConfigAny
3
+ from torch.fx import GraphModule
4
+ from typing import Dict, Tuple
5
+
6
+ __all__ = [
7
+ "lower_to_qnnpack"
8
+ ]
9
+
10
+ def lower_to_qnnpack(
11
+ model: GraphModule,
12
+ qconfig_map: Dict[str, QConfigAny],
13
+ node_name_to_scope: Dict[str, Tuple[str, type]]
14
+ ) -> GraphModule:
15
+ """ Lower a quantized reference model (with reference quantized operator patterns)
16
+ to qnnpack
17
+ """
18
+ return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/lstm_utils.py ADDED
@@ -0,0 +1,183 @@
1
+ import copy
2
+ import operator
3
+ import torch
4
+ from typing import Any, Callable, Optional, Tuple
5
+ from torch.ao.quantization import (
6
+ default_weight_observer,
7
+ default_weight_fake_quant,
8
+ FakeQuantizeBase,
9
+ QConfig,
10
+ QConfigMapping,
11
+ )
12
+ from torch.ao.quantization.backend_config import BackendConfig
13
+ from torch.ao.quantization.observer import _PartialWrapper
14
+ from torch.ao.quantization.quantize_fx import (
15
+ convert_to_reference_fx,
16
+ prepare_fx,
17
+ )
18
+
19
+ # TODO: move all LSTM util functions from fx/utils.py to this file
20
+ def _get_lstm_with_individually_observed_parts(
21
+ float_lstm: torch.nn.LSTM,
22
+ example_inputs: Tuple[Any, ...],
23
+ backend_config: Optional[BackendConfig] = None,
24
+ linear_output_obs_ctr: Optional[_PartialWrapper] = None,
25
+ sigmoid_obs_ctr: Optional[_PartialWrapper] = None,
26
+ tanh_obs_ctr: Optional[_PartialWrapper] = None,
27
+ cell_state_obs_ctr: Optional[_PartialWrapper] = None,
28
+ hidden_state_obs_ctr: Optional[_PartialWrapper] = None,
29
+ ) -> torch.ao.nn.quantizable.LSTM:
30
+ """
31
+ Return an observed `torch.ao.nn.quantizable.LSTM` created from a `torch.nn.LSTM`
32
+ with specific observers or fake quantizes assigned to the inner ops or submodules.
33
+
34
+ In both eager and FX graph mode quantization, `torch.ao.nn.quantizable.LSTM` is
35
+ used as an observed custom module, which is responsible for inserting its own
36
+ observers. By default, all inner ops inherit the parent custom module's QConfig.
37
+ Users who wish to override this behavior may extend `torch.ao.nn.quantizable.LSTM`
38
+ and use this helper function to customize the observer insertion logic.
39
+
40
+ This is meant to be used to convert a float module to an observed module in the
41
+ custom module flow.
42
+
43
+ Args:
44
+ `float_lstm`: The float LSTM module
45
+ `example_inputs`: example inputs for the forward function of the LSTM module
46
+ `backend_config`: BackendConfig to use to observe the LSTM module
47
+ `linear_output_obs_ctr`: observer or fake quantize for linear outputs Wx + b,
48
+ where W is the weight matrix, b is the bias, and x is either the inputs
49
+ or the hidden state from the previous layer (if any)
50
+ `sigmoid_obs_ctr`: observer or fake quantize for sigmoid activations
51
+ `tanh_obs_ctr`: observer or fake quantize for tanh activations
52
+ `cell_state_obs_ctr`: observer or fake quantize for the cell state
53
+ `hidden_state_obs_ctr`: observer or fake quantize for the hidden state and
54
+ the output
55
+
56
+ Return:
57
+ A `torch.ao.nn.quantizable.LSTM` with the specified observers or fake quantizes
58
+ assigned to the inner ops.
59
+ """
60
+ def make_qconfig(obs_ctr: _PartialWrapper) -> QConfig:
61
+ """
62
+ Make a QConfig with fixed qparams observers or fake quantizes.
63
+ """
64
+ if isinstance(obs_ctr(), FakeQuantizeBase):
65
+ weight = default_weight_fake_quant
66
+ else:
67
+ weight = default_weight_observer
68
+ return QConfig(activation=obs_ctr, weight=weight)
69
+
70
+ quantizable_lstm = torch.ao.nn.quantizable.LSTM(
71
+ float_lstm.input_size, float_lstm.hidden_size, float_lstm.num_layers, float_lstm.bias,
72
+ float_lstm.batch_first, float_lstm.dropout, float_lstm.bidirectional)
73
+ quantizable_lstm.qconfig = float_lstm.qconfig
74
+
75
+ for idx in range(float_lstm.num_layers):
76
+ quantizable_lstm.layers[idx] = torch.ao.nn.quantizable.modules.rnn._LSTMLayer.from_float(float_lstm,
77
+ idx,
78
+ float_lstm.qconfig,
79
+ batch_first=False)
80
+
81
+ # Build QConfigMapping for the LSTM cell
82
+ # Note: FloatFunctional qconfigs will be configured separately below
83
+ cell_qm = QConfigMapping().set_global(float_lstm.qconfig) # type: ignore[arg-type]
84
+ if sigmoid_obs_ctr is not None:
85
+ cell_qm.set_module_name("input_gate", make_qconfig(sigmoid_obs_ctr))
86
+ cell_qm.set_module_name("forget_gate", make_qconfig(sigmoid_obs_ctr))
87
+ cell_qm.set_module_name("output_gate", make_qconfig(sigmoid_obs_ctr))
88
+ if tanh_obs_ctr is not None:
89
+ cell_qm.set_module_name("cell_gate", make_qconfig(tanh_obs_ctr))
90
+
91
+ # Insert observers into each LSTM cell
92
+ # TODO: maybe make this work for layer_bw as well
93
+ for layer in quantizable_lstm.layers:
94
+ cell = layer.layer_fw.cell
95
+ cell = prepare_fx(cell, cell_qm, example_inputs, backend_config=backend_config)
96
+ # HACK: Manually replace the activation_post_process following these ops.
97
+ # This is needed for FloatFunctional ops because there is currently no way
98
+ # to configure these ops in FX graph mode quantization today. This is because
99
+ # the FloatFunctional modules simply disappear from the graph after tracing.
100
+ # In the future, we should rewrite quantizable LSTM without FloatFunctionals.
101
+ op_index_to_activation_post_process_ctr = {
102
+ (torch.add, 0): linear_output_obs_ctr, # gates.add
103
+ (torch.mul, 0): cell_state_obs_ctr, # fgate_cx.mul
104
+ (torch.mul, 1): cell_state_obs_ctr, # igate_cgate.mul
105
+ (torch.add, 1): cell_state_obs_ctr, # fgate_cx_igate_cgate.add
106
+ (torch.mul, 2): hidden_state_obs_ctr, # ogate_cy.mul
107
+ }
108
+ add_count = 0
109
+ mul_count = 0
110
+ for node in cell.graph.nodes:
111
+ op_index: Optional[Tuple[Callable, int]] = None # e.g. (torch.add, 1)
112
+ if node.target == torch.add:
113
+ op_index = (torch.add, add_count)
114
+ add_count += 1
115
+ elif node.target == torch.mul:
116
+ op_index = (torch.mul, mul_count)
117
+ mul_count += 1
118
+ else:
119
+ # Neither torch.add nor torch.mul
120
+ continue
121
+ if op_index not in op_index_to_activation_post_process_ctr:
122
+ continue
123
+ assert len(node.users) == 1
124
+ activation_post_process_name = next(iter(node.users.keys())).name
125
+ activation_post_process_ctr = op_index_to_activation_post_process_ctr[op_index]
126
+ if activation_post_process_ctr is not None:
127
+ setattr(cell, activation_post_process_name, activation_post_process_ctr())
128
+ layer.layer_fw.cell = cell
129
+ return quantizable_lstm
130
+
131
+ def _get_reference_quantized_lstm_module(
132
+ observed_lstm: torch.ao.nn.quantizable.LSTM,
133
+ backend_config: Optional[BackendConfig] = None,
134
+ ) -> torch.ao.nn.quantized.LSTM:
135
+ """
136
+ Return a `torch.ao.nn.quantized.LSTM` created from a `torch.ao.nn.quantizable.LSTM`
137
+ with observers or fake quantizes inserted through `prepare_fx`, e.g. from
138
+ `_get_lstm_with_individually_observed_parts`.
139
+
140
+ This is meant to be used to convert an observed module to a quantized module in the
141
+ custom module flow.
142
+
143
+ Args:
144
+ `observed_lstm`: a `torch.ao.nn.quantizable.LSTM` observed through `prepare_fx`
145
+ `backend_config`: BackendConfig to use to produce the reference quantized model
146
+
147
+ Return:
148
+ A reference `torch.ao.nn.quantized.LSTM` module.
149
+ """
150
+ quantized_lstm = torch.ao.nn.quantized.LSTM(
151
+ observed_lstm.input_size, observed_lstm.hidden_size, observed_lstm.num_layers,
152
+ observed_lstm.bias, observed_lstm.batch_first, observed_lstm.dropout,
153
+ observed_lstm.bidirectional)
154
+
155
+ for i, layer in enumerate(quantized_lstm.layers):
156
+ cell = copy.deepcopy(observed_lstm.layers.get_submodule(str(i)).layer_fw.cell) # type: ignore[union-attr]
157
+ cell = convert_to_reference_fx(cell, backend_config=backend_config) # type: ignore[arg-type]
158
+ assert isinstance(cell, torch.fx.GraphModule)
159
+ # HACK: Manually remove input quantize nodes and output dequantize nodes,
160
+ # since custom modules expect quint8 inputs and outputs for now. Note that
161
+ # this functionality is supposedly handled through PrepareCustomConfig's
162
+ # `set_input_quantized_indexes` and `set_output_quantized_indexes`, but that
163
+ # API doesn't currently handle tuple inputs and outputs, so we have to do
164
+ # this manually for now. In the future we should (1) relax the restriction
165
+ # on custom module input/output dtypes, and (2) expand support for complex
166
+ # input/output structures.
167
+ for node in cell.graph.nodes:
168
+ if node.target == torch.quantize_per_tensor:
169
+ arg = node.args[0]
170
+ # Remove quantize(x), quantize(hidden[0]), and quantize(hidden[1])
171
+ if arg.target == "x" or (arg.target == operator.getitem and arg.args[0].target == "hidden"):
172
+ with cell.graph.inserting_before(node):
173
+ node.replace_all_uses_with(arg)
174
+ cell.graph.erase_node(node)
175
+ if node.target == "output":
176
+ # Remove all dequantize nodes in the output tuple
177
+ for arg in node.args[0]:
178
+ with cell.graph.inserting_before(node):
179
+ node.replace_input_with(arg, arg.args[0])
180
+ cell.graph.eliminate_dead_code()
181
+ cell.recompile()
182
+ layer.layer_fw.cell = cell
183
+ return quantized_lstm
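A rough sketch of how these two helpers might be chained in the custom-module flow, with illustrative fixed-qparams observers for the sigmoid/tanh ranges (the model sizes, observer values, and calibration step below are assumptions, not taken from the diff):

import torch
from torch.ao.quantization import default_qconfig, FixedQParamsObserver
from torch.ao.quantization.fx.lstm_utils import (
    _get_lstm_with_individually_observed_parts,
    _get_reference_quantized_lstm_module,
)

float_lstm = torch.nn.LSTM(input_size=4, hidden_size=4, num_layers=1)
float_lstm.qconfig = default_qconfig
example_inputs = (torch.randn(5, 1, 4),)

# fixed ranges: sigmoid outputs live in [0, 1], tanh outputs in [-1, 1]
sigmoid_obs = FixedQParamsObserver.with_args(scale=1.0 / 256.0, zero_point=0)
tanh_obs = FixedQParamsObserver.with_args(scale=2.0 / 256.0, zero_point=128)

observed = _get_lstm_with_individually_observed_parts(
    float_lstm, example_inputs,
    sigmoid_obs_ctr=sigmoid_obs, tanh_obs_ctr=tanh_obs)
observed(*example_inputs)  # calibration pass
quantized = _get_reference_quantized_lstm_module(observed)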
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py ADDED
@@ -0,0 +1,237 @@
1
+ import sys
2
+ import torch
3
+ from torch.fx.graph import (
4
+ Graph,
5
+ Node,
6
+ )
7
+ from torch.ao.quantization.utils import Pattern
8
+ from .quantize_handler import (
9
+ QuantizeHandler,
10
+ )
11
+ from ..qconfig import (
12
+ QConfigAny,
13
+ )
14
+ from ..utils import (
15
+ MatchAllNode
16
+ )
17
+ from .graph_module import (
18
+ _is_observed_standalone_module,
19
+ )
20
+ from torch.nn.utils.parametrize import type_before_parametrizations
21
+ from typing import Any, Dict, List, Callable, Optional, Tuple, Type, Set, Iterable
22
+
23
+
24
+ __all__: List[str] = []
25
+
26
+ # TODO(future PR): the 1st argument is typed as `List[Node]`, but a better type
27
+ # would be a recursive `List[Union[Node, Tuple[Union[Node, ...]]]]`
28
+ _MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler]
29
+
30
+ _MatchResultWithQConfig = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
31
+ QConfigAny]
32
+
33
+ # Note: The order of patterns is important! match function will take whatever is matched first, so we'll
34
+ # need to put the fusion patterns before single patterns. For example, add_relu should be registered before relu.
35
+ # decorators are applied in the reverse order we see. Also when we match the nodes in the graph with these patterns,
36
+ # we'll start from the last node of the graph and traverse back.
37
+ def _is_match(modules, node, pattern, max_uses=sys.maxsize):
38
+ """ Matches a node in fx against a pattern
39
+ """
40
+ if isinstance(pattern, tuple):
41
+ self_match, *arg_matches = pattern
42
+ if self_match is getattr:
43
+ assert len(pattern) == 2, 'Expecting getattr pattern to have two elements'
44
+ arg_matches = []
45
+ else:
46
+ self_match = pattern
47
+ arg_matches = []
48
+
49
+ if isinstance(self_match, type) and issubclass(self_match, MatchAllNode):
50
+ return True
51
+
52
+ if node == pattern:
53
+ return True
54
+
55
+ if not isinstance(node, Node) or len(node.users) > max_uses:
56
+ return False
57
+
58
+ if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module):
59
+ if node.op != 'call_module':
60
+ return False
61
+ if not type_before_parametrizations(modules[node.target]) == self_match:
62
+ return False
63
+ elif callable(self_match):
64
+ if node.op != 'call_function' or node.target is not self_match:
65
+ return False
66
+ elif node.target is getattr:
67
+ if node.args[1] != pattern[1]:
68
+ return False
69
+ elif isinstance(self_match, str):
70
+ if node.op != 'call_method' or node.target != self_match:
71
+ return False
72
+ elif node.target != self_match:
73
+ return False
74
+
75
+ if not arg_matches:
76
+ return True
77
+
78
+ if len(arg_matches) != len(node.args):
79
+ return False
80
+
81
+ return all(_is_match(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches))
82
+
83
+ def _find_matches(
84
+ graph: Graph,
85
+ modules: Dict[str, torch.nn.Module],
86
+ patterns: Dict[Pattern, QuantizeHandler],
87
+ root_node_getter_mapping: Dict[Pattern, Callable],
88
+ standalone_module_names: Optional[List[str]] = None,
89
+ standalone_module_classes: Optional[List[Type]] = None,
90
+ custom_module_classes: Optional[List[Any]] = None) -> Dict[str, _MatchResult]:
91
+ """
92
+ Matches the nodes in the input graph to quantization patterns, and
93
+ outputs the information needed to quantize them in future steps.
94
+
95
+ Inputs:
96
+ - graph: an fx.Graph object
97
+ - modules: a mapping of fully qualified module name to instance,
98
+ for example, {'foo': ModuleFoo, ...}
99
+ - patterns: a mapping from a tuple of nodes in reverse order to
100
+ uninitialized QuantizeHandler subclass.
101
+
102
+ Outputs a map of
103
+ node_name ->
104
+ (node, matched_values, matched_pattern, QuantizeHandler instance,
105
+ qconfig)
106
+
107
+ For example, {
108
+ 'relu_1': (relu_1, [relu_1], torch.nn.functional.relu,
109
+ <CopyNodeQuantizeHandler instance>, QConfig(...)),
110
+ ...
111
+ }
112
+ """
113
+ if custom_module_classes is None:
114
+ custom_module_classes = []
115
+
116
+ if standalone_module_classes is None:
117
+ standalone_module_classes = []
118
+
119
+ if standalone_module_names is None:
120
+ standalone_module_names = []
121
+
122
+ match_map: Dict[str, _MatchResult] = {}
123
+ all_matched : Set[str] = set()
124
+
125
+ def _recursive_record_node_in_match_map(
126
+ last_node,
127
+ match_map,
128
+ node_pattern,
129
+ matched_node_pattern,
130
+ pattern,
131
+ match_value):
132
+ if isinstance(node_pattern, Node):
133
+ match_map[node_pattern.name] = (
134
+ last_node, matched_node_pattern, pattern, match_value)
135
+ elif not isinstance(node_pattern, Iterable):
136
+ return
137
+ else:
138
+ for n in node_pattern:
139
+ _recursive_record_node_in_match_map(last_node, match_map, n, matched_node_pattern, pattern, match_value)
140
+
141
+ # TODO: 1. merge with fuse matcher 2. document the code
142
+ def record_match(
143
+ pattern,
144
+ node,
145
+ last_node,
146
+ matched_node_pattern,
147
+ match_map):
148
+ if isinstance(pattern, tuple):
149
+ s, *args = pattern
150
+ is_single_arg = len(args) == 1
151
+ current_node_pattern: List[Node] = []
152
+ record_match(
153
+ s,
154
+ node,
155
+ last_node,
156
+ matched_node_pattern,
157
+ match_map)
158
+ if pattern[0] is not getattr:
159
+ for subpattern, arg in zip(args, node.args):
160
+ record_match(
161
+ subpattern,
162
+ arg,
163
+ node,
164
+ current_node_pattern,
165
+ match_map)
166
+ if len(current_node_pattern) > 1:
167
+ # current_node_pattern is the node pattern we get from matching
168
+ # the subpattern with arguments of the node
169
+ # we use is_single_arg to recover the original structure of the pattern
170
+ # if the original pattern has a single argument, we will have
171
+ # (original_op, (original_arg, ...))
172
+ # otherwise, we'll have a list of arguments
173
+ # (original_op, arg0, arg1, arg2, ...)
174
+ if is_single_arg:
175
+ matched_node_pattern.append(tuple(current_node_pattern))
176
+ else:
177
+ matched_node_pattern.extend(list(current_node_pattern))
178
+ else:
179
+ matched_node_pattern.append(current_node_pattern[0])
180
+ else:
181
+ matched_node_pattern.append(node)
182
+
183
+ for node in reversed(graph.nodes):
184
+ if node.name not in match_map and node.name not in all_matched:
185
+ for pattern, quantize_handler_cls in patterns.items():
186
+ root_node_getter = root_node_getter_mapping.get(pattern, None)
187
+ if _is_match(modules, node, pattern) and node.name not in match_map:
188
+ matched_node_pattern: List[Node] = []
189
+ record_match(
190
+ pattern,
191
+ node,
192
+ node,
193
+ matched_node_pattern,
194
+ match_map)
195
+ quantize_handler = quantize_handler_cls( # type: ignore[operator]
196
+ matched_node_pattern,
197
+ modules,
198
+ root_node_getter)
199
+ last_node = node
200
+ # record the match for all nodes in the pattern
201
+ _recursive_record_node_in_match_map(
202
+ last_node,
203
+ match_map,
204
+ # we need to record all nodes in the matched pattern in the match_map
205
+ matched_node_pattern,
206
+ # this is a part of the value corresponding to the node
207
+ matched_node_pattern,
208
+ pattern,
209
+ quantize_handler)
210
+ break
211
+
212
+ # add custom module instances to the match result
213
+ assert modules is not None
214
+ for node in graph.nodes:
215
+ if node.op == 'call_module' and \
216
+ type(modules[node.target]) in custom_module_classes:
217
+ match_map[node.name] = (
218
+ node, node, None, QuantizeHandler(node, modules, is_custom_module=True))
219
+
220
+ def is_standalone_module(node_target: str, modules: Dict[str, torch.nn.Module]):
221
+ assert modules is not None
222
+ return (
223
+ node_target in standalone_module_names or # type: ignore[operator]
224
+ type(modules[node_target]) in standalone_module_classes # type: ignore[operator]
225
+ )
226
+
227
+ # add standalone modules to the match
228
+ for node in graph.nodes:
229
+ if node.op == 'call_module' and \
230
+ (is_standalone_module(node.target, modules) or
231
+ _is_observed_standalone_module(modules[node.target])):
232
+ # add node to matched nodes
233
+ match_map[node.name] = (
234
+ node, node, None,
235
+ QuantizeHandler(node, modules, is_standalone_module=True))
236
+
237
+ return match_map
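A quick sketch of the reverse-order matching convention used by _is_match (patterns are written output-first), checked against a tiny traced model; the model itself is just an example:

import torch
from torch.fx import symbolic_trace
from torch.ao.quantization.fx.match_utils import _is_match

m = symbolic_trace(torch.nn.Sequential(torch.nn.Conv2d(3, 3, 3), torch.nn.ReLU()))
modules = dict(m.named_modules())
# last call_module node is the ReLU; the pattern lists ReLU first, then its Conv2d input
relu_node = [n for n in m.graph.nodes if n.op == "call_module"][-1]
print(_is_match(modules, relu_node, (torch.nn.ReLU, torch.nn.Conv2d)))  # True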
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py ADDED
@@ -0,0 +1,87 @@
1
+ from collections import OrderedDict
2
+ from typing import Dict, Any
3
+ from torch.ao.quantization.utils import Pattern
4
+ from ..fake_quantize import FixedQParamsFakeQuantize
5
+ from ..observer import ObserverBase
6
+ import copy
7
+
8
+ __all__ = [
9
+ "get_default_fusion_patterns",
10
+ "get_default_quant_patterns",
11
+ "get_default_output_activation_post_process_map",
12
+ ]
13
+
14
+ # TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)
15
+ QuantizeHandler = Any
16
+
17
+ # pattern for conv bn fusion
18
+ _DEFAULT_FUSION_PATTERNS: Dict[Pattern, QuantizeHandler] = OrderedDict()
19
+ def _register_fusion_pattern(pattern):
20
+ def insert(fn):
21
+ _DEFAULT_FUSION_PATTERNS[pattern] = fn
22
+ return fn
23
+ return insert
24
+
25
+ def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]:
26
+ return copy.copy(_DEFAULT_FUSION_PATTERNS)
27
+
28
+ _DEFAULT_QUANTIZATION_PATTERNS: Dict[Pattern, QuantizeHandler] = OrderedDict()
29
+
30
+ # Mapping from pattern to activation_post_process(observer/fake_quant) constructor for output activation
31
+ # e.g. pattern: torch.sigmoid,
32
+ # output_activation_post_process: default_fixed_qparams_range_0to1_fake_quant
33
+ _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP: Dict[Pattern, QuantizeHandler] = {}
34
+ _DEFAULT_OUTPUT_OBSERVER_MAP: Dict[Pattern, QuantizeHandler] = {}
35
+
36
+ # Register pattern for both static quantization and qat
37
+ def _register_quant_pattern(pattern, fixed_qparams_observer=None):
38
+ def insert(fn):
39
+ _DEFAULT_QUANTIZATION_PATTERNS[pattern] = fn
40
+ if fixed_qparams_observer is not None:
41
+ _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP[pattern] = FixedQParamsFakeQuantize.with_args(observer=fixed_qparams_observer)
42
+ _DEFAULT_OUTPUT_OBSERVER_MAP[pattern] = fixed_qparams_observer
43
+ return fn
44
+ return insert
45
+
46
+ # Get patterns for both static quantization and qat
47
+ def get_default_quant_patterns() -> Dict[Pattern, QuantizeHandler]:
48
+ return copy.copy(_DEFAULT_QUANTIZATION_PATTERNS)
49
+
50
+ # a map from pattern to output activation post process constructor
51
+ # e.g. torch.sigmoid -> default_affine_fixed_qparam_fake_quant
52
+ def get_default_output_activation_post_process_map(is_training) -> Dict[Pattern, ObserverBase]:
53
+ if is_training:
54
+ return copy.copy(_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP)
55
+ else:
56
+ return copy.copy(_DEFAULT_OUTPUT_OBSERVER_MAP)
57
+
58
+ # Example use of register pattern function:
59
+ # @_register_fusion_pattern(torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
60
+ # class ConvOrLinearBNReLUFusion():
61
+ # def __init__(...):
62
+ # ...
63
+ #
64
+
65
+ def _sorted_patterns_dict(patterns_dict: Dict[Pattern, QuantizeHandler]) -> Dict[Pattern, QuantizeHandler]:
66
+ """
67
+ Return a sorted version of the patterns dictionary such that longer patterns are matched first,
68
+ e.g. match (F.relu, F.linear) before F.relu.
69
+ This works for current use cases, but we may need to have a more clever way to sort
70
+ things to address more complex patterns
71
+ """
72
+
73
+ def get_len(pattern):
74
+ """ this will calculate the length of the pattern by counting all the entries
75
+ in the pattern.
76
+ this will make sure (nn.ReLU, (nn.BatchNorm, nn.Conv2d)) comes before
77
+ (nn.BatchNorm, nn.Conv2d) so that we can match the former first
78
+ """
79
+ len = 0
80
+ if isinstance(pattern, tuple):
81
+ for item in pattern:
82
+ len += get_len(item)
83
+ else:
84
+ len += 1
85
+ return len
86
+
87
+ return OrderedDict(sorted(patterns_dict.items(), key=lambda kv: -get_len(kv[0]) if isinstance(kv[0], tuple) else 1))
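For illustration, how the sorting behaves on a toy dictionary (the handler values here are placeholder strings rather than real QuantizeHandler classes):

import torch.nn as nn
from torch.ao.quantization.fx.pattern_utils import _sorted_patterns_dict

patterns = {
    nn.ReLU: "relu_handler",
    (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)): "conv_bn_relu_handler",
    (nn.BatchNorm2d, nn.Conv2d): "conv_bn_handler",
}
# longer (fused) patterns come first, single-op patterns last
print(list(_sorted_patterns_dict(patterns)))
# [(ReLU, (BatchNorm2d, Conv2d)), (BatchNorm2d, Conv2d), ReLU]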
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/prepare.py ADDED
@@ -0,0 +1,1880 @@
1
+ import copy
2
+ import torch
3
+ import warnings
4
+ from torch.fx import (
5
+ GraphModule,
6
+ )
7
+ from torch.fx.graph import (
8
+ Graph,
9
+ Node,
10
+ )
11
+ from torch.fx.node import Argument
12
+
13
+ from ..quantize import (
14
+ propagate_qconfig_,
15
+ )
16
+ from ..observer import (
17
+ _is_activation_post_process,
18
+ _PartialWrapper,
19
+ )
20
+ from ..qconfig import (
21
+ _is_reuse_input_qconfig,
22
+ QConfigAny,
23
+ )
24
+ from ..qconfig_mapping import (
25
+ QConfigMapping,
26
+ )
27
+ from .qconfig_mapping_utils import (
28
+ _generate_node_name_to_qconfig,
29
+ _update_qconfig_for_fusion,
30
+ _get_flattened_qconfig_dict,
31
+ _update_qconfig_for_qat,
32
+ )
33
+
34
+ from .quantize_handler import (
35
+ _default_root_node_getter,
36
+ _get_pattern_to_quantize_handlers,
37
+ QuantizeHandler,
38
+ )
39
+
40
+ from torch.ao.quantization import (
41
+ ObserverBase,
42
+ FixedQParamsObserver,
43
+ FixedQParamsFakeQuantize,
44
+ _DerivedObserverOrFakeQuantize,
45
+ )
46
+
47
+ from torch.ao.quantization.utils import (
48
+ Pattern,
49
+ NodePattern,
50
+ )
51
+
52
+ from ._equalize import (
53
+ is_equalization_observer,
54
+ node_supports_equalization,
55
+ )
56
+
57
+ from .pattern_utils import (
58
+ _sorted_patterns_dict,
59
+ )
60
+
61
+ from .match_utils import (
62
+ _MatchResultWithQConfig,
63
+ _find_matches,
64
+ )
65
+
66
+ from .utils import (
67
+ _insert_dequant_stubs_for_custom_module_lstm_output,
68
+ _is_custom_module_lstm,
69
+ _maybe_get_custom_module_lstm_from_node_arg,
70
+ _qconfig_satisfies_dtype_config_constraints,
71
+ get_custom_module_class_keys,
72
+ all_node_args_have_no_tensors,
73
+ assert_and_get_unique_device,
74
+ get_non_observable_arg_indexes_and_types,
75
+ get_new_attr_name_with_prefix,
76
+ node_arg_is_weight,
77
+ node_arg_is_bias,
78
+ NON_QUANTIZABLE_WEIGHT_OPS,
79
+ ObservedGraphModuleAttrs,
80
+ )
81
+
82
+ from torch.ao.quantization import (
83
+ PlaceholderObserver
84
+ )
85
+ from torch.ao.quantization.quantize import (
86
+ convert
87
+ )
88
+
89
+ from ..utils import (
90
+ _parent_name,
91
+ get_qconfig_dtypes,
92
+ get_swapped_custom_module_class,
93
+ )
94
+
95
+ from ..backend_config.utils import (
96
+ get_pattern_to_dtype_configs,
97
+ get_module_to_qat_module,
98
+ get_fusion_pattern_to_root_node_getter,
99
+ )
100
+ from ..backend_config import (
101
+ BackendConfig,
102
+ DTypeConfig,
103
+ get_native_backend_config,
104
+ )
105
+ from .custom_config import (
106
+ PrepareCustomConfig,
107
+ StandaloneModuleConfigEntry,
108
+ )
109
+ from torch.ao.quantization.quantizer import (
110
+ EdgeOrNode,
111
+ QuantizationSpec,
112
+ QuantizationSpecBase,
113
+ FixedQParamsQuantizationSpec,
114
+ SharedQuantizationSpec,
115
+ DerivedQuantizationSpec,
116
+ )
117
+ from torch.ao.quantization import ObserverOrFakeQuantize
118
+
119
+ from torch._subclasses import FakeTensor
120
+
121
+ from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
122
+ from dataclasses import asdict
123
+
124
+ __all__ = [
125
+ "insert_observers_for_model",
126
+ "prepare",
127
+ "propagate_dtypes_for_known_nodes",
128
+ ]
129
+
130
+
131
+ # list of dtypes to not add observers to
132
+ _DO_NOT_OBS_DTYPE_LIST = [int, float, torch.bool, None]
133
+ _OBS_DTYPE_LIST = [
134
+ torch.quint8,
135
+ torch.qint8,
136
+ torch.qint32,
137
+ torch.float16,
138
+ torch.uint8,
139
+ torch.int8,
140
+ torch.int16,
141
+ torch.int32
142
+ ]
143
+
144
+ _DEFAULT_FP32_OBS_OR_FQ_CTR = PlaceholderObserver.with_args(dtype=torch.float)
145
+
146
+ # note: the following default target dtype info dicts are temporary,
147
+ # should be moved to the new programmable API class soon
148
+ _DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO = {
149
+ "input_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig.activation,
150
+ "output_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig.activation
151
+ }
152
+
153
+ _DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO = {
154
+ "input_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_quint8_placeholder_qconfig.activation,
155
+ "output_act_obs_or_fq_ctr": torch.ao.quantization.qconfig._default_quint8_placeholder_qconfig.activation
156
+ }
157
+
158
+
159
+ def _get_observer_kwargs(quant_spec: Union[QuantizationSpec, FixedQParamsQuantizationSpec]):
160
+ kwargs_dict = asdict(quant_spec)
161
+ return copy.deepcopy(kwargs_dict)
162
+
163
+ def _get_qspec_for_arg(
164
+ arg: Node,
165
+ input_qspec_map: Dict[Node, QuantizationSpecBase],
166
+ named_modules: Dict[str, torch.nn.Module]
167
+ ) -> Optional[QuantizationSpecBase]:
168
+ while _is_activation_post_process_node(arg, named_modules):
169
+ arg = arg.args[0] # type: ignore[assignment]
170
+ return input_qspec_map.get(arg, None)
171
+
172
+ def _create_obs_or_fq_from_qspec(
173
+ quantization_spec: Optional[QuantizationSpecBase],
174
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
175
+ is_qat: bool,
176
+ ):
177
+ """ Create observer or fake quantize objects based on quantization spec
178
+
179
+ Args:
180
+ quantization_spec: used to store parameters to create the observer or fake quantizer
181
+ obs_or_fq_map: this is a map from edge/output to the corresponding observer/fake_quant
182
+ instance, it may be reused for different edge/output depending on configuration
183
+ """
184
+ if quantization_spec is None:
185
+ return None
186
+ if isinstance(quantization_spec, SharedQuantizationSpec):
187
+ edge_or_node = quantization_spec.edge_or_node
188
+ assert edge_or_node in obs_or_fq_map, \
189
+ "please make sure only refer to edge or node that has " \
190
+ f"observer/fake_quant inserted: '{edge_or_node}' not in\n{obs_or_fq_map.keys()}"
191
+ return obs_or_fq_map[edge_or_node]
192
+ elif isinstance(quantization_spec, DerivedQuantizationSpec):
193
+ # can't use asdict, so not calling get_observer_kwargs here
194
+ kwargs = {
195
+ "dtype": quantization_spec.dtype,
196
+ "derive_qparams_fn": quantization_spec.derive_qparams_fn,
197
+ "quant_min": quantization_spec.quant_min,
198
+ "quant_max": quantization_spec.quant_max,
199
+ "qscheme": quantization_spec.qscheme,
200
+ "ch_axis": quantization_spec.ch_axis,
201
+ }
202
+ edge_or_nodes = quantization_spec.derived_from
203
+ obs_or_fqs = [obs_or_fq_map[k] for k in edge_or_nodes]
204
+ kwargs["obs_or_fqs"] = obs_or_fqs
205
+ return _DerivedObserverOrFakeQuantize.with_args(**kwargs)()
206
+ elif isinstance(quantization_spec, FixedQParamsQuantizationSpec):
207
+ kwargs = _get_observer_kwargs(quantization_spec)
208
+ observer_ctr = FixedQParamsObserver.with_args(**kwargs)
209
+ if is_qat:
210
+ return FixedQParamsFakeQuantize.with_args(observer=observer_ctr)
211
+ else:
212
+ return observer_ctr()
213
+
214
+ assert isinstance(quantization_spec, QuantizationSpec)
215
+ observer_or_fake_quant_ctr = quantization_spec.observer_or_fake_quant_ctr
216
+ kwargs = _get_observer_kwargs(quantization_spec)
217
+ kwargs.pop("observer_or_fake_quant_ctr")
218
+ # we will remove is_dynamic from QuantizationSpec because
219
+ # it seems that dynamic range quantization
220
+ obs_or_fq_class = observer_or_fake_quant_ctr
221
+ if isinstance(observer_or_fake_quant_ctr, _PartialWrapper):
222
+ obs_or_fq_class = observer_or_fake_quant_ctr.p.func # type: ignore[union-attr, assignment]
223
+ if "PerChannel" not in obs_or_fq_class.__name__: # type: ignore[operator, union-attr]
224
+ kwargs.pop("ch_axis")
225
+ return observer_or_fake_quant_ctr.with_args(**kwargs)()
226
+
227
+ def _needs_obs_or_fq(
228
+ prev_output_dtype: Any,
229
+ prev_output_is_dynamic: bool,
230
+ cur_target_dtype: Any,
231
+ cur_target_is_dynamic: bool,
232
+ reuse_input_obs_or_fq: bool,
233
+ is_zeroth_arg: bool = False) -> bool:
234
+ """
235
+ note: we will treat "not specified" as torch.float for now
236
+ utility function that checks if we should insert an observer or fake quant node
237
+ base on the requested dtype for the nodes from user
238
+
239
+ is_zeroth_arg: we only dynamically quantize the first arg of the node right now
240
+ this should be removed when we enable configuring dynamic quantization
241
+ for a specific argument, this can be removed if we deprecate fx graph mode
242
+ quantization
243
+
244
+ """
245
+
246
+ # need to insert placeholder observer for dynamic quantization so that it can
247
+ # be converted to choose_qparams -> q -> dq in convert step
248
+ if cur_target_is_dynamic:
249
+ assert cur_target_dtype in _OBS_DTYPE_LIST, \
250
+ f"Expected cur_target_dtype to be torch.float, but got: {cur_target_dtype}"
251
+ assert prev_output_dtype not in _DO_NOT_OBS_DTYPE_LIST
252
+ return is_zeroth_arg
253
+ if reuse_input_obs_or_fq:
254
+ return False
255
+ # non dynamic quantization
256
+ if cur_target_dtype in _OBS_DTYPE_LIST:
257
+ return prev_output_dtype in _OBS_DTYPE_LIST + [torch.float] and cur_target_dtype != prev_output_dtype
258
+
259
+ # lots of error checking are skipped here for now
260
+ return False
261
+
262
+ def _is_activation_post_process_node(node: Node, named_modules: Dict[str, torch.nn.Module]) -> bool:
263
+ return isinstance(node, torch.fx.Node) and node.op == "call_module" and \
264
+ _is_activation_post_process(named_modules[str(node.target)])
265
+
266
+ def _get_dtype_and_is_dynamic(obs_or_fq: Optional[ObserverOrFakeQuantize]) -> Tuple[Optional[torch.dtype], bool]:
267
+ """ Given a constructor for observer or fake quant module, returns
268
+ a Tuple of dtype and is_dynamic
269
+ """
270
+ # TODO: instead of instantiating the instance, we can use inspect to get the default args
271
+ if obs_or_fq is None:
272
+ return None, False
273
+ else:
274
+ return obs_or_fq.dtype, getattr(obs_or_fq, "is_dynamic", False) # type: ignore[return-value]
275
+
276
+ def _is_input_arg_dtype_supported_by_backend(
277
+ arg: Argument,
278
+ node: Node,
279
+ qconfig: QConfigAny,
280
+ dtype_config: DTypeConfig,
281
+ backend_config: BackendConfig,
282
+ ) -> bool:
283
+ """ Check if the configured qconfig for the argument
284
+ is supported by the backend or not
285
+ """
286
+ if isinstance(arg, (list, tuple)):
287
+ return all(_is_input_arg_dtype_supported_by_backend(
288
+ a, node, qconfig,
289
+ dtype_config, backend_config) for a in arg)
290
+ if not isinstance(arg, Node):
291
+ return True
292
+ # TODO: support check for standalone module
293
+ is_weight = node_arg_is_weight(node, arg)
294
+ is_bias = node_arg_is_bias(node, arg)
295
+ is_activation = not is_weight and not is_bias
296
+ if is_activation:
297
+ input_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr")
298
+ input_act_obs_or_fq = input_act_obs_or_fq_ctr() if input_act_obs_or_fq_ctr else None
299
+ qconfig_dtype, qconfig_is_dynamic = _get_dtype_and_is_dynamic(input_act_obs_or_fq)
300
+ # TODO(future PR): remove the cast to bool below after figuring
301
+ # out why backend_config has is_dynamic set to None in some cases.
302
+ return (dtype_config.input_dtype is None) or (
303
+ dtype_config.input_dtype == qconfig_dtype and
304
+ bool(dtype_config.is_dynamic) == bool(qconfig_is_dynamic) and
305
+ _qconfig_satisfies_dtype_config_constraints(qconfig, dtype_config.input_dtype_with_constraints)
306
+ )
307
+ elif is_weight:
308
+ # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well
309
+ weight_obs_or_fq_ctr = node.meta["target_dtype_info"].get("weight_obs_or_fq_ctr", None)
310
+ weight_obs_or_fq = weight_obs_or_fq_ctr() if weight_obs_or_fq_ctr else None
311
+ qconfig_weight_dtype, _ = _get_dtype_and_is_dynamic(weight_obs_or_fq)
312
+ backend_config_weight_dtype = dtype_config.weight_dtype
313
+ dtype_matches = qconfig_weight_dtype == backend_config_weight_dtype
314
+ qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints(
315
+ qconfig, dtype_config.weight_dtype_with_constraints, is_activation=False)
316
+ return backend_config_weight_dtype is None or (dtype_matches and qconfig_satisfies_constraints)
317
+ else: # bias
318
+ # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well
319
+ bias_obs_or_fq_ctr = node.meta["target_dtype_info"].get("bias_obs_or_fq_ctr", None)
320
+ bias_obs_or_fq = bias_obs_or_fq_ctr() if bias_obs_or_fq_ctr else None
321
+ qconfig_bias_dtype, _ = _get_dtype_and_is_dynamic(bias_obs_or_fq)
322
+ backend_config_bias_dtype = dtype_config.bias_dtype
323
+ return backend_config_bias_dtype is None or qconfig_bias_dtype == backend_config_bias_dtype
324
+
325
+ def _is_output_dtype_supported_by_backend(
326
+ node: Node,
327
+ qconfig: QConfigAny,
328
+ dtype_config: DTypeConfig,
329
+ ) -> bool:
330
+ """ Check if the configured qconfig for the output
331
+ is supported by the backend or not
332
+ """
333
+ # TODO: move dtype check into `_qconfig_satisfies_dtype_config_constraints` as well
334
+ backend_config_output_dtype = dtype_config.output_dtype
335
+ # TODO: we should check is_dynamic here as well, the code from _is_input_arg_dtype_supported_by_backend
336
+ # from input activation check can be reused here
337
+ qconfig_output_dtype = None
338
+ output_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("output_act_obs_or_fq_ctr", _DEFAULT_FP32_OBS_OR_FQ_CTR)
339
+ output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None
340
+ qconfig_output_dtype, qconfig_output_is_dynamic = _get_dtype_and_is_dynamic(output_act_obs_or_fq)
341
+ # TODO: this is a hack because we can only specify one activation_obs_or_fq for
342
+ # qconfig (qconfig.activation), and we are only supporting dynamically quantized
343
+ # linear op which has fp32 output dtype, this should be removed if we generalize
344
+ # the structure of qconfig in the future
345
+ if qconfig_output_is_dynamic:
346
+ qconfig_output_dtype = torch.float32
347
+ dtype_matches = qconfig_output_dtype == backend_config_output_dtype
348
+ qconfig_satisfies_constraints = _qconfig_satisfies_dtype_config_constraints(
349
+ qconfig, dtype_config.output_dtype_with_constraints)
350
+ return backend_config_output_dtype is None or (dtype_matches and qconfig_satisfies_constraints)
351
+
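+ # Sketch (assumption, illustration only): the kind of DTypeConfig the two checks above
+ # compare a qconfig against. DTypeConfig is public API from
+ # torch.ao.quantization.backend_config, which this module already depends on.
+ def _example_static_int8_dtype_config():
+     from torch.ao.quantization.backend_config import DTypeConfig
+     # a typical static int8 configuration: quint8 activations, qint8 weights, fp32 bias
+     return DTypeConfig(
+         input_dtype=torch.quint8,
+         output_dtype=torch.quint8,
+         weight_dtype=torch.qint8,
+         bias_dtype=torch.float,
+     )
+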
352
+ def _is_observer_in_same_graph(
353
+ node: Node,
354
+ named_modules: Dict[str, torch.nn.Module],
355
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
356
+ is_qat,
357
+ ):
358
+ """ Check if observer in same graph
359
+ when the node output is not fp32 and input is 'placeholder'
360
+ the input is assumed to be quantized, so it is observed
361
+ in a different place rather than not observed.
362
+ """
363
+ node_output_dtype = _get_arg_target_dtype_as_output(node, named_modules, obs_or_fq_map, is_qat)
364
+ if len(node.args) > 0 and isinstance(node.args[0], Node):
365
+ if node_output_dtype in [torch.quint8, torch.uint8] and node.args[0].op == 'placeholder':
366
+ return False
367
+ return True
368
+
369
+ def _is_pattern_dtype_config_and_qconfig_supported_by_backend(
370
+ pattern: Optional[Pattern],
371
+ matched_node_pattern: Optional[List[Node]],
372
+ qconfig: QConfigAny,
373
+ backend_config: BackendConfig,
374
+ ) -> bool:
375
+ """ Check if the dtype configuration of a pattern is supported by
376
+ the backend or not, and whether the qconfig satisfies constraints
377
+ specified in the corresponding dtype config.
378
+ """
379
+ if backend_config is None or pattern is None:
380
+ return True
381
+ assert matched_node_pattern is not None and len(matched_node_pattern) >= 1
382
+ pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
383
+ dtype_configs: List[DTypeConfig] = pattern_to_dtype_configs.get(pattern, [])
384
+ pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
385
+
386
+ root_node_getter = pattern_to_root_node_getter.get(pattern, _default_root_node_getter)
387
+ root_node = root_node_getter(matched_node_pattern)
388
+ input_node = root_node
389
+ output_node = matched_node_pattern[0]
390
+ for dtype_config in dtype_configs:
391
+ # check if arg dtype are supported
392
+ supported = True
393
+ for arg in list(input_node.args) + list(input_node.kwargs.values()):
394
+ supported = supported and _is_input_arg_dtype_supported_by_backend(
395
+ arg, input_node, qconfig, dtype_config, backend_config)
396
+ # check if output dtype is supported
397
+ supported = supported and _is_output_dtype_supported_by_backend(
398
+ output_node, qconfig, dtype_config)
399
+ if supported:
400
+ return True
401
+ return False
402
+
403
+ def _get_standalone_module_configs(
404
+ node: Node,
405
+ named_modules: Dict[str, torch.nn.Module],
406
+ prepare_custom_config: PrepareCustomConfig,
407
+ parent_qconfig: QConfigAny,
408
+ parent_backend_config: Optional[BackendConfig],
409
+ ) -> Tuple[QConfigMapping, Tuple[Any, ...], PrepareCustomConfig, Optional[BackendConfig]]:
410
+ """
411
+ Returns the standalone module QConfigMapping, example inputs,
412
+ PrepareCustomConfig and BackendConfig for `node`, assuming that
413
+ the module pointed to by `node` is a standalone module.
414
+ """
415
+ module_name = str(node.target)
416
+ module_type = type(named_modules[module_name]) # type: ignore[index]
417
+ # name config has precedence over type config
418
+ config_entry = StandaloneModuleConfigEntry(None, (), None, None)
419
+ config_entry = prepare_custom_config.standalone_module_classes.get(module_type, config_entry)
420
+ config_entry = prepare_custom_config.standalone_module_names.get(module_name, config_entry)
421
+ # fallback to use parent module's qconfig if user didn't specify qconfig dict
422
+ qconfig_mapping = config_entry.qconfig_mapping or QConfigMapping().set_global(parent_qconfig)
423
+ example_inputs = config_entry.example_inputs
424
+ prepare_custom_config = config_entry.prepare_custom_config or PrepareCustomConfig()
425
+ backend_config = config_entry.backend_config or parent_backend_config
426
+ return (qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
427
+
428
+ def _qat_swap_modules(
429
+ root: torch.nn.Module,
430
+ module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]]) -> None:
431
+ convert(root, mapping=module_to_qat_module, inplace=True, remove_qconfig=False)
432
+
433
+ def _add_matched_node_name_to_set(matched_node_pattern: NodePattern, s: Set[str]):
434
+ if isinstance(matched_node_pattern, Node):
435
+ s.add(matched_node_pattern.name)
436
+ elif isinstance(matched_node_pattern, (list, tuple)):
437
+ for maybe_node in matched_node_pattern:
438
+ _add_matched_node_name_to_set(maybe_node, s)
439
+
440
+ def _insert_obs_or_fq(
441
+ node: Node,
442
+ obs_or_fq: ObserverOrFakeQuantize,
443
+ model: torch.nn.Module,
444
+ named_modules: Dict[str, torch.nn.Module],
445
+ graph: Graph,
446
+ ) -> Node:
447
+ """
448
+ Attaches `obs_or_fq` to `model`, and creates a node which calls
449
+ `obs_or_fq` on the output of `node`.
450
+
451
+ obs_or_fq: an instance of Observer or FakeQuantize module
452
+ """
453
+ model_device = assert_and_get_unique_device(model)
454
+ if model_device:
455
+ obs_or_fq.to(model_device)
456
+ # add obs_or_fq module as attribute
457
+ if is_equalization_observer(obs_or_fq):
458
+ prefix = node.name + '_equalization_process_'
459
+ else:
460
+ prefix = 'activation_post_process_'
461
+ get_new_obs_or_fq_name = get_new_attr_name_with_prefix(prefix)
462
+ obs_or_fq_name = get_new_obs_or_fq_name(model)
463
+ setattr(model, obs_or_fq_name, obs_or_fq)
464
+ named_modules[obs_or_fq_name] = obs_or_fq
465
+ with graph.inserting_after(node):
466
+ new_obs = graph.create_node(
467
+ 'call_module', obs_or_fq_name, (node,), {})
468
+ return new_obs
469
+
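+ # Minimal usage sketch (assumption, not part of the original file): attach an observer
+ # after the only call_function node of a tiny traced module, using the helper above and
+ # only public torch.fx / observer APIs. The toy module and names here are illustrative.
+ def _example_insert_obs_or_fq():
+     import torch.fx
+     from torch.ao.quantization.observer import MinMaxObserver
+
+     class _Toy(torch.nn.Module):
+         def forward(self, x):
+             return x + 1.0
+
+     gm = torch.fx.symbolic_trace(_Toy())
+     named_modules = dict(gm.named_modules(remove_duplicate=False))
+     add_node = next(n for n in gm.graph.nodes if n.op == "call_function")
+     # registers an 'activation_post_process_*' submodule on gm and creates a
+     # call_module node right after the add
+     obs_node = _insert_obs_or_fq(add_node, MinMaxObserver(), gm, named_modules, gm.graph)
+     gm.recompile()
+     return gm, obs_node
+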
470
+ def _set_target_dtype_info_for_matched_node_pattern(
471
+ matched_node_pattern: NodePattern,
472
+ last_node: Node,
473
+ qconfig: QConfigAny,
474
+ qhandler: Optional[QuantizeHandler],
475
+ backend_config: BackendConfig,
476
+ named_modules: Dict[str, torch.nn.Module],
477
+ cache_for_no_tensor_check: Dict[Node, bool],
478
+ processed_nodes: Set[Node],
479
+ ) -> None:
480
+ """ Sets the target_dtype_info for each node in matched_node_pattern
481
+ Note: processed_nodes is used to ensure we only process each node once
482
+ """
483
+ if isinstance(matched_node_pattern, (list, tuple)):
484
+ for node_pattern in matched_node_pattern:
485
+ _set_target_dtype_info_for_matched_node_pattern(
486
+ node_pattern,
487
+ last_node,
488
+ qconfig,
489
+ qhandler,
490
+ backend_config,
491
+ named_modules,
492
+ cache_for_no_tensor_check,
493
+ processed_nodes
494
+ )
495
+
496
+ # set target_dtype_info if matched_node_pattern is a Node
497
+ # other types of matched object, e.g. int, float literals, are ignored
498
+ elif isinstance(matched_node_pattern, Node):
499
+ # for pyre
500
+ assert isinstance(matched_node_pattern, Node)
501
+ node = matched_node_pattern
502
+ if node in processed_nodes:
503
+ return
504
+ processed_nodes.add(node)
505
+
506
+ if qconfig is None:
507
+ return
508
+ # TODO: refactor the following code in terms of apply a qconfig to a pattern
509
+ # e.g. for a pattern with op1 -> op2 -> op3, and qconfig = QConfig(input_act=obs0, output_act=obs1)
510
+ # we set the input_obs_or_fq_ctr for the arguments of op1 based on qconfig.input_act,
511
+ # and set output_obs_or_fq_ctr based on qconfig.output_act
512
+ # this also requires we extend the structure of QConfig to support more fine
513
+ # grained configurations
514
+ target_dtype_info: Dict[str, Any] = (
515
+ _get_target_activation_dtype_for_node(
516
+ node,
517
+ qconfig,
518
+ qhandler,
519
+ named_modules,
520
+ backend_config,
521
+ cache_for_no_tensor_check,
522
+ )
523
+ )
524
+ node.meta["target_dtype_info"] = target_dtype_info
525
+
526
+ def _get_target_activation_dtype_for_node(
527
+ node: Node,
528
+ qconfig: QConfigAny,
529
+ qhandler: Optional[QuantizeHandler],
530
+ named_modules: Dict[str, torch.nn.Module],
531
+ backend_config: BackendConfig,
532
+ cache_for_no_tensor_check: Dict[Node, bool],
533
+ ) -> Dict[str, Any]:
534
+ """
535
+ For each op attribute in the op's input activation, output activation,
536
+ weight, bias - returns the settings of dtype and is_dynamic we expect
537
+ for the `quantize` call in the reference model representation, or None
538
+ if there is no `quantize` call needed.
539
+
540
+ For example, if we have a node corresponding to `op0` in
541
+
542
+ x0 -> op0 -> x1
543
+
544
+ And we want a reference quantized representation to be
545
+
546
+ x0 -> quant_static -> dequant -> op0 -> quant_dynamic -> dequant -> x1
547
+
548
+ Then this function will return
549
+
550
+ {
551
+ "input_act_obs_or_fq_ctr": MinMaxObserver.with_args(dtype=torch.quint8, is_dynamic=False),
552
+ "output_act_obs_or_fq_ctr": MinMaxObserver.with_args(dtype=torch.quint8, is_dynamic=False),
553
+ }
554
+
555
+ TODO(future PR, if needed): explicitly spell out the non-Tensor
556
+ dtypes.
557
+ """
558
+ args_have_no_tensors = \
559
+ all_node_args_have_no_tensors(
560
+ node, named_modules, cache_for_no_tensor_check)
561
+ if args_have_no_tensors:
562
+ return {
563
+ "input_act_obs_or_fq_ctr": None,
564
+ "output_act_obs_or_fq_ctr": None,
565
+ }
566
+ # get qconfig to determine the eventual dtype of this node
567
+ if qconfig is not None:
568
+ act_dtype, weight_dtype, input_act_is_dynamic = \
569
+ get_qconfig_dtypes(qconfig)
570
+
571
+ # Currently `QConfig` only has one `activation` field.
572
+ # For static quantization, it is reused for both input
573
+ # and output activation. For dynamic quantization, this
574
+ # field is currently only used for the input activation,
575
+ # with the output activation being in fp32.
576
+ # In the future this may change as we add more fields
577
+ # to the `QConfig` object.
578
+ output_act_dtype = act_dtype \
579
+ if (not input_act_is_dynamic) else torch.float
580
+
581
+ bias_dtype = torch.float16 \
582
+ if (
583
+ act_dtype == torch.float16
584
+ and weight_dtype == torch.float16
585
+ and (not input_act_is_dynamic)
586
+ ) else torch.float
587
+
588
+ is_general_tensor_value_op = \
589
+ (qhandler is not None and qhandler.is_general_tensor_value_op())
590
+
591
+ _is_standalone_module = (
592
+ qhandler is not None and qhandler.is_standalone_module()
593
+ )
594
+
595
+ weight_index = None
596
+ if isinstance(node, Node) and node.op == "call_function" and \
597
+ node.target in backend_config._pattern_complex_format_to_config:
598
+ weight_index = backend_config._pattern_complex_format_to_config[node.target]._input_type_to_index.get("weight")
599
+
600
+ bias_index = None
601
+ if isinstance(node, Node) and node.op == "call_function" and \
602
+ node.target in backend_config._pattern_complex_format_to_config:
603
+ bias_index = backend_config._pattern_complex_format_to_config[node.target]._input_type_to_index.get("bias")
604
+
605
+ return {
606
+ "input_act_obs_or_fq_ctr": qconfig.activation,
607
+ "weight_obs_or_fq_ctr": qconfig.weight,
608
+ "bias_obs_or_fq_ctr": PlaceholderObserver.with_args(dtype=bias_dtype),
609
+ "weight_index": weight_index,
610
+ "bias_index": bias_index,
611
+ "output_act_obs_or_fq_ctr": qconfig.activation,
612
+ "reuse_input_obs_or_fq": _is_reuse_input_qconfig(qconfig),
613
+ "input_output_share_observers": is_general_tensor_value_op,
614
+ "_is_standalone_module": _is_standalone_module,
615
+ }
616
+ return copy.copy(_DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO)
617
+
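+ # Sketch (assumption, illustration only): the shape of the dict built above for a plain
+ # static qconfig, written out directly instead of going through the pattern matcher.
+ # get_default_qconfig is public API; PlaceholderObserver is already used in this module.
+ def _example_static_target_dtype_info():
+     from torch.ao.quantization import get_default_qconfig
+     qconfig = get_default_qconfig("fbgemm")
+     return {
+         "input_act_obs_or_fq_ctr": qconfig.activation,
+         "weight_obs_or_fq_ctr": qconfig.weight,
+         # static case: bias stays in fp32 (see the bias_dtype logic above)
+         "bias_obs_or_fq_ctr": PlaceholderObserver.with_args(dtype=torch.float),
+         "output_act_obs_or_fq_ctr": qconfig.activation,
+     }
+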
618
+ def _get_output_act_obs_or_fq(
619
+ arg: Node,
620
+ named_modules: Dict[str, torch.nn.Module],
621
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
622
+ is_qat: bool,
623
+ ) -> ObserverOrFakeQuantize:
624
+ """ Get the constructor for observer or fake quant object for
625
+ the argument in the original graph as the output of previous node,
626
+ skipping inserted observers
627
+
628
+ We are assuming that the observers are inserted correctly, and the dtype for
629
+ the argument in the quantized graph will match what is specified by the qconfig
630
+ """
631
+ assert isinstance(arg, Node)
632
+ if "quantization_annotation" in arg.meta:
633
+ return _create_obs_or_fq_from_qspec(arg.meta["quantization_annotation"].output_qspec, obs_or_fq_map, is_qat)
634
+
635
+ # Custom module LSTM output is a tuple that we broke down into the internal nodes in order
636
+ # to insert DeQuantStubs (see `_insert_dequant_stubs_for_custom_module_lstm_output`).
637
+ # Since we modified the graph in this case, we must trace back from the args through
638
+ # the specific nodes we added in order to reach the original LSTM node. Otherwise, we would
639
+ # not be able to accurately detect whether this node is a consumer of custom module LSTM.
640
+ custom_module_lstm_node = _maybe_get_custom_module_lstm_from_node_arg(arg, named_modules)
641
+ output_act_obs_or_fq_ctr = None
642
+ if custom_module_lstm_node is not None:
643
+ output_act_obs_or_fq_ctr = custom_module_lstm_node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
644
+ output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None
645
+ elif _is_activation_post_process_node(arg, named_modules):
646
+ observed_arg = arg.args[0]
647
+ assert isinstance(observed_arg, Node), "Currently we only support observing Node"
648
+ if "quantization_annotation" in observed_arg.meta:
649
+ output_act_obs_or_fq = \
650
+ _create_obs_or_fq_from_qspec(
651
+ observed_arg.meta["quantization_annotation"].output_qspec, obs_or_fq_map, is_qat)
652
+ else:
653
+ assert "target_dtype_info" in observed_arg.meta
654
+ output_act_obs_or_fq_ctr = observed_arg.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
655
+ output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None
656
+ else:
657
+ if "target_dtype_info" in arg.meta:
658
+ output_act_obs_or_fq_ctr = \
659
+ arg.meta["target_dtype_info"].get("output_act_obs_or_fq_ctr", _DEFAULT_FP32_OBS_OR_FQ_CTR)
660
+ else:
661
+ output_act_obs_or_fq_ctr = _DEFAULT_FP32_OBS_OR_FQ_CTR
662
+ output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None
663
+
664
+ return output_act_obs_or_fq
665
+
666
+ def _get_arg_target_dtype_as_output(
667
+ arg: Node,
668
+ named_modules: Dict[str, torch.nn.Module],
669
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
670
+ is_qat: bool,
671
+ ) -> Optional[torch.dtype]:
672
+ arg_as_output_act_obs_or_fq = _get_output_act_obs_or_fq(arg, named_modules, obs_or_fq_map, is_qat)
673
+ arg_as_output_target_dtype, _ = _get_dtype_and_is_dynamic(arg_as_output_act_obs_or_fq)
674
+ return arg_as_output_target_dtype
675
+
676
+ def _get_arg_as_input_act_obs_or_fq(
677
+ arg: Node,
678
+ node: Node,
679
+ named_modules: Dict[str, torch.nn.Module],
680
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
681
+ is_qat: bool,
682
+ ) -> Optional[ObserverOrFakeQuantize]:
683
+ """ Get the observer or fake quant constructor for the Argument `arg`, as input
684
+ to Node `node`
685
+ """
686
+ assert isinstance(arg, Node)
687
+ # "input_qspec_map" is the more general design we'll use for pt2e path
688
+ # it is a map from input argument node to observer or fake quant constructor, for example
689
+ # for the following graph:
690
+ # x -> conv -> output
691
+ #
692
+ # we may annotate conv node like the following:
693
+ # conv.meta[...] = QuantizationAnnotation("input_qspec_map": {x: MinMaxObserver.with_args(dtype=torch.qint8)}, ...)
694
+ #
695
+ if "quantization_annotation" in node.meta:
696
+ input_qspec_map = node.meta["quantization_annotation"].input_qspec_map
697
+ input_arg_qspec = _get_qspec_for_arg(arg, input_qspec_map, named_modules)
698
+ if input_arg_qspec is None:
699
+ input_arg_obs_or_fq = _DEFAULT_FP32_OBS_OR_FQ_CTR()
700
+ else:
701
+ input_arg_obs_or_fq = _create_obs_or_fq_from_qspec(input_arg_qspec, obs_or_fq_map, is_qat)
702
+ return input_arg_obs_or_fq
703
+
704
+ # we can remove the following path in the future if fx graph mode quantization is
705
+ # no longer used
706
+ is_weight = node_arg_is_weight(node, arg)
707
+ is_bias = node_arg_is_bias(node, arg)
708
+ is_activation = not is_weight and not is_bias
709
+ obs_or_fq_ctr = None
710
+ if is_activation:
711
+ obs_or_fq_ctr = node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr", _DEFAULT_FP32_OBS_OR_FQ_CTR)
712
+ elif is_weight:
713
+ if node.target not in NON_QUANTIZABLE_WEIGHT_OPS:
714
+ obs_or_fq_ctr = node.meta["target_dtype_info"].get("weight_obs_or_fq_ctr", _DEFAULT_FP32_OBS_OR_FQ_CTR)
715
+ else:
716
+ obs_or_fq_ctr = node.meta["target_dtype_info"].get("bias_obs_or_fq_ctr", _DEFAULT_FP32_OBS_OR_FQ_CTR)
717
+ return obs_or_fq_ctr() if obs_or_fq_ctr else None
718
+
719
+ def _maybe_insert_input_observer_for_arg_or_kwarg(
720
+ node: Union[Node, Any],
721
+ arg: Argument,
722
+ qconfig: QConfigAny,
723
+ model: torch.nn.Module,
724
+ named_modules: Dict[str, torch.nn.Module],
725
+ graph: Graph,
726
+ qhandler: Optional[QuantizeHandler],
727
+ prepare_custom_config: PrepareCustomConfig,
728
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
729
+ is_qat: bool,
730
+ backend_config: Optional[BackendConfig] = None,
731
+ ) -> Argument:
732
+ """
733
+ Given a `node` and an `arg`, inserts an input observer between
734
+ `node` and `arg` if necessary.
735
+ """
736
+ # for ops such as torch.cat([x0, x1]),
737
+ # traverse through the list
738
+ if isinstance(arg, (list, tuple)):
739
+ new_arg_to_return = []
740
+ for inner_arg in arg:
741
+ new_inner_arg = _maybe_insert_input_observer_for_arg_or_kwarg(
742
+ node, inner_arg, qconfig, model, named_modules,
743
+ graph,
744
+ qhandler,
745
+ prepare_custom_config,
746
+ obs_or_fq_map,
747
+ is_qat,
748
+ backend_config)
749
+ new_arg_to_return.append(new_inner_arg)
750
+ return type(arg)(new_arg_to_return)
751
+
752
+ if not isinstance(arg, Node):
753
+ return arg
754
+ assert isinstance(arg, Node)
755
+ # default (no observer)
756
+ new_arg = arg
757
+
758
+ is_standalone_module = qhandler is not None and qhandler.is_standalone_module()
759
+ # TODO: move this to a separate function
760
+ if not is_standalone_module:
761
+ # Note: qconfig can be None in this branch since we are getting act/fq from
762
+ # node.meta now
763
+ # regular flow for most nodes, except standalone modules
764
+
765
+ if "quantization_annotation" in node.meta:
766
+ reuse_input_obs_or_fq = node.meta["quantization_annotation"]._reuse_input_obs_or_fq
767
+ else:
768
+ assert "target_dtype_info" in node.meta
769
+ # TODO: we are assuming "target_dtype_info" exists here, maybe
770
+ # a default value also needs to be provided here
771
+ target_dtype_info = node.meta["target_dtype_info"]
772
+ # for nodes that don't have `reuse_input_obs_or_fq` configured,
773
+ # we'll default to False, this makes configuring this field optional for users
774
+ reuse_input_obs_or_fq = target_dtype_info.get("reuse_input_obs_or_fq", False)
775
+ arg_as_input_act_obs_or_fq = _get_arg_as_input_act_obs_or_fq(arg, node, named_modules, obs_or_fq_map, is_qat)
776
+ arg_as_input_target_dtype, arg_as_input_target_is_dynamic = _get_dtype_and_is_dynamic(arg_as_input_act_obs_or_fq)
777
+
778
+ arg_as_output_act_obs_or_fq = _get_output_act_obs_or_fq(arg, named_modules, obs_or_fq_map, is_qat)
779
+ arg_as_output_target_dtype, arg_as_output_target_is_dynamic = _get_dtype_and_is_dynamic(arg_as_output_act_obs_or_fq)
780
+
781
+
782
+ needs_obs_or_fq = _needs_obs_or_fq(
783
+ arg_as_output_target_dtype,
784
+ arg_as_output_target_is_dynamic,
785
+ arg_as_input_target_dtype,
786
+ arg_as_input_target_is_dynamic,
787
+ reuse_input_obs_or_fq,
788
+ is_zeroth_arg=len(node.args) > 0 and arg is node.args[0],
789
+ )
790
+
791
+ else:
792
+ assert qconfig is not None
793
+ # custom flow for standalone modules
794
+ _, _, sm_prepare_custom_config, _ = \
795
+ _get_standalone_module_configs(
796
+ node, named_modules, prepare_custom_config, qconfig, backend_config)
797
+ sm_input_quantized_idxs = sm_prepare_custom_config.input_quantized_indexes
798
+
799
+ # for args, this is set to the index of the current arg
800
+ # for kwargs, this is left at None
801
+ cur_input_idx = None
802
+ for arg_idx, arg_to_check in enumerate(node.args):
803
+ if arg_to_check is arg:
804
+ cur_input_idx = arg_idx
805
+ break
806
+
807
+ if cur_input_idx is None:
808
+ needs_obs_or_fq = False
809
+ else:
810
+ arg_as_output_target_dtype = _get_arg_target_dtype_as_output(arg, named_modules, obs_or_fq_map, is_qat)
811
+ arg_as_input_target_dtype = torch.quint8 if cur_input_idx in sm_input_quantized_idxs \
812
+ else torch.float
813
+ needs_obs_or_fq = (
814
+ (arg_as_output_target_dtype != arg_as_input_target_dtype) and
815
+ (arg_as_input_target_dtype != torch.float)
816
+ )
817
+
818
+ act_post_process_ctr = qconfig.activation
819
+ arg_as_input_act_obs_or_fq = act_post_process_ctr() if act_post_process_ctr else None
820
+
821
+ if needs_obs_or_fq:
822
+
823
+ existing_obs_node = None
824
+
825
+ # Before using the new observer, check if an observer
826
+ # of the correct type already exists. If it does, use it.
827
+ # This prevents duplicate observer insertions if a node is
828
+ # used by multiple nodes.
829
+ # TODO: this is looking into how the value is used in the future
830
+ # we should remove this
831
+ # removing this means we insert one observer for each use, even if they
832
+ # have the same dtype, we can have an extra pass that removes the extra observers
833
+ for maybe_obs_node in arg.users.keys():
834
+ if maybe_obs_node.op == 'call_module':
835
+ maybe_obs_mod = named_modules[maybe_obs_node.target] # type: ignore[index]
836
+ if (
837
+ type(maybe_obs_mod) == type(arg_as_input_act_obs_or_fq) and
838
+ maybe_obs_mod.dtype == arg_as_input_target_dtype # type: ignore[possibly-undefined]
839
+ ):
840
+ arg_as_input_act_obs_or_fq = maybe_obs_mod # type: ignore[assignment]
841
+ existing_obs_node = maybe_obs_node
842
+ break
843
+
844
+ assert arg_as_input_act_obs_or_fq is not None
845
+ obs_or_fq_map[(arg, node)] = arg_as_input_act_obs_or_fq
846
+ if existing_obs_node is None:
847
+ new_obs_node = _insert_obs_or_fq(
848
+ arg, arg_as_input_act_obs_or_fq, model, named_modules, graph)
849
+ # override this arg to be the observed arg
850
+ new_arg = new_obs_node
851
+ else:
852
+ new_arg = existing_obs_node
853
+
854
+ return new_arg
855
+
856
+
857
+ def _maybe_insert_input_observers_for_node(
858
+ node: Node,
859
+ qconfig: QConfigAny,
860
+ model: torch.nn.Module,
861
+ named_modules: Dict[str, torch.nn.Module],
862
+ graph: Graph,
863
+ qhandler: Optional[QuantizeHandler],
864
+ prepare_custom_config: PrepareCustomConfig,
865
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
866
+ is_qat: bool,
867
+ backend_config: Optional[BackendConfig] = None
868
+ ) -> None:
869
+ """
870
+ If needed, inserts observers to the input args and kwargs of `node`.
871
+ Note: modifies `node` inplace.
872
+
873
+ For example, if cur_node needs an observer after prev_node, we change from
874
+
875
+ prev_node -> cur_node
876
+
877
+ To
878
+
879
+ prev_node -> obs -> cur_node
880
+
881
+ Note: backend_config only needed for standalone_module node
882
+ """
883
+ # Look through every input arg. If that arg's target dtype does not
884
+ # match the current node's target dtype, insert an observer.
885
+ new_args = []
886
+ for arg in node.args:
887
+ new_arg = _maybe_insert_input_observer_for_arg_or_kwarg(
888
+ node, arg, qconfig, model, named_modules, graph,
889
+ qhandler,
890
+ prepare_custom_config,
891
+ obs_or_fq_map,
892
+ is_qat,
893
+ backend_config)
894
+ new_args.append(new_arg)
895
+
896
+ new_kwargs = {}
897
+ for k, kwarg in node.kwargs.items():
898
+ new_kwarg = _maybe_insert_input_observer_for_arg_or_kwarg(
899
+ node, kwarg, qconfig, model, named_modules, graph,
900
+ qhandler,
901
+ prepare_custom_config,
902
+ obs_or_fq_map,
903
+ is_qat,
904
+ backend_config)
905
+ new_kwargs[k] = new_kwarg
906
+
907
+ # assign the new args and kwargs to the node, inplace
908
+ node.args = tuple(new_args)
909
+ node.kwargs = new_kwargs
910
+
911
+ def _maybe_insert_input_equalization_observers_for_node(
912
+ node: Node,
913
+ equalization_qconfig: Any,
914
+ model: torch.nn.Module,
915
+ named_modules: Dict[str, torch.nn.Module],
916
+ graph: Graph,
917
+ is_branch: bool,
918
+ ) -> None:
919
+ """
920
+ If `node` needs to be equalized, find the input/weight observers it needs in
921
+ `equalization_qconfig`, creates them, and inserts it into `graph`.
922
+
923
+ If `node` does not need an equalization observer, returns None.
924
+ """
925
+ if equalization_qconfig is None or not node_supports_equalization(node, named_modules):
926
+ return
927
+
928
+ if is_branch:
929
+ warnings.warn(
930
+ f"Cannot equalize {node} because it is part of a branch."
931
+ )
932
+ return
933
+
934
+ new_args = []
935
+ for arg in node.args:
936
+ if not isinstance(arg, Node) or node_arg_is_bias(node, arg):
937
+ new_args.append(arg)
938
+ continue
939
+
940
+ is_weight = node_arg_is_weight(node, arg)
941
+
942
+ act_eq_process_ctr = equalization_qconfig.weight if is_weight else \
943
+ equalization_qconfig.input_activation
944
+
945
+ new_eq_obs_mod = act_eq_process_ctr()
946
+ new_eq_obs_node = _insert_obs_or_fq(
947
+ arg, new_eq_obs_mod, model, named_modules, graph)
948
+
949
+ new_args.append(new_eq_obs_node)
950
+
951
+ # assign the new args and kwargs to the node, inplace
952
+ node.args = tuple(new_args)
953
+
954
+ def _maybe_insert_output_observer_for_node(
955
+ node: Node,
956
+ model: torch.nn.Module,
957
+ named_modules: Dict[str, torch.nn.Module],
958
+ graph: Graph,
959
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
960
+ is_qat: bool,
961
+ ) -> Optional[Node]:
962
+ """
963
+ If `node` needs an output observer, creates it, inserts it into `graph`
964
+ and returns it.
965
+
966
+ If `node` does not need an output observer, returns None.
967
+
968
+ Note: inserting dynamic quantization ops for output is not supported in fx graph mode
969
+ quantization code path right now
970
+ """
971
+ assert node.op != 'output', 'observer insertion for outputs is handled elsewhere'
972
+
973
+ is_standalone_module = False
974
+ if "quantization_annotation" in node.meta:
975
+ output_act_obs_or_fq = _create_obs_or_fq_from_qspec(
976
+ node.meta["quantization_annotation"].output_qspec, obs_or_fq_map, is_qat
977
+ )
978
+ else:
979
+ assert "target_dtype_info" in node.meta
980
+ is_standalone_module = node.meta["target_dtype_info"].get("_is_standalone_module", False)
981
+ output_act_obs_or_fq_ctr = node.meta["target_dtype_info"].get("output_act_obs_or_fq_ctr")
982
+ output_act_obs_or_fq = output_act_obs_or_fq_ctr() if output_act_obs_or_fq_ctr else None
983
+ target_dtype, target_is_dynamic = _get_dtype_and_is_dynamic(output_act_obs_or_fq)
984
+ # uncomment after we support reuse_input_obs_or_fq properly by having separate
985
+ # implementations for this key instead of reusing the input_output_share_observers
986
+ # code
987
+ # reuse_input_obs_or_fq = node.meta["target_dtype_info"].get("reuse_input_obs_or_fq", False)
988
+ # for now we set this to False since reuse_input_obs_or_fq for
989
+ # the output of a node is implemented in the same code path as observer sharing,
990
+ # we should refactor this part to make it clearer in the future
991
+ # and we would be able to read this from config directly
992
+ reuse_input_obs_or_fq = False
993
+
994
+ # Note: prev_output_dtype = torch.float and prev_output_is_dynamic=False
995
+ # because the prev_output is the output of an fp32 op, although technically
996
+ # we should get the dtype of the output from node.meta["val"] in the future
997
+ # if we deprecate fx graph mode quantization
998
+ needs_obs_or_fq = _needs_obs_or_fq(torch.float, False, target_dtype, target_is_dynamic, reuse_input_obs_or_fq)
999
+ # currently the activation in QConfig(activation=...,) is for both input
1000
+ # and output, and when the activation is configured to be dynamic quantization
1001
+ # e.g. PlaceholderObserver(dtype=torch.quint8, is_dynamic=True, ...), it means
1002
+ # the input should be dynamically quantized, but the output should not be quantized
1003
+ #
1004
+ # there is no way we can specify different observer/fq for input and output
1005
+ # activation through QConfig today, this limitation is lifted in the
1006
+ # quantizer/annotation API in pytorch 2.0 export quantization code path,
1007
+ # but since this code is reused, annotating output to be dynamically quantized
1008
+ # would not work either for that.
1009
+ # we can change QConfig to support input/output activation if we want
1010
+ # to remove the following check, or if we can deprecate fx graph mode quantization
1011
+ if target_is_dynamic:
1012
+ needs_obs_or_fq = False
1013
+
1014
+ # we never insert observers to output of standalone module, we assume
1015
+ # if needed, they are inserted inside the standalone module
1016
+ needs_obs_or_fq = needs_obs_or_fq and \
1017
+ (not is_standalone_module)
1018
+
1019
+ if needs_obs_or_fq:
1020
+ obs_or_fq_map[node] = output_act_obs_or_fq
1021
+ return _insert_obs_or_fq(node, output_act_obs_or_fq, model, named_modules, graph)
1022
+ else:
1023
+ return None
1024
+
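+ # Sketch (assumption, illustration only): the intuition behind the _needs_obs_or_fq call
+ # above. A float producer feeding an int8 consumer needs an observer; a float producer
+ # feeding a float consumer does not. Positional arguments mirror the call sites above.
+ def _example_needs_obs_or_fq_intuition():
+     needs_static_quant = _needs_obs_or_fq(torch.float, False, torch.quint8, False, False)
+     stays_fp32 = _needs_obs_or_fq(torch.float, False, torch.float, False, False)
+     # expected: (True, False)
+     return needs_static_quant, stays_fp32
+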
1025
+ def _maybe_insert_observers_before_graph_output(
1026
+ graph_output_node: Node,
1027
+ model: torch.nn.Module,
1028
+ named_modules: Dict[str, torch.nn.Module],
1029
+ graph: Graph,
1030
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
1031
+ is_qat: bool,
1032
+ ) -> None:
1033
+ """
1034
+ If the output needs to be quantized and there are any nodes
1035
+ in the output which are not already observed, inserts observers
1036
+ for those nodes.
1037
+ """
1038
+
1039
+ def _recursive_maybe_replace_node_with_obs(
1040
+ maybe_node: Argument,
1041
+ model: torch.nn.Module,
1042
+ named_modules: Dict[str, torch.nn.Module],
1043
+ graph: Graph,
1044
+ ) -> Argument:
1045
+ """
1046
+ Navigate an arbitrary data structure of lists, tuples, dicts.
1047
+ For each container type, recurse on all inputs. Once any Node
1048
+ is found, insert an observer if needed and do not recurse further.
1049
+
1050
+ For example, given a structure of
1051
+
1052
+ {'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}}
1053
+
1054
+ we recurse down to bar1 and bar3, observe them if necessary,
1055
+ and if we inserted an observer then replace the original node
1056
+ with its observer.
1057
+
1058
+ Returns the data structure with all nodes needing observation being
1059
+ replaced by their observers.
1060
+ """
1061
+ if isinstance(maybe_node, Node):
1062
+ # check dtype of this node
1063
+ arg_as_output_target_dtype = _get_arg_target_dtype_as_output(maybe_node, named_modules, obs_or_fq_map, is_qat)
1064
+ observer_mod = None
1065
+ arg_as_input_target_dtype = torch.float
1066
+ if "target_dtype_info" in maybe_node.meta:
1067
+ observer_cls = maybe_node.meta["target_dtype_info"].get("input_act_obs_or_fq_ctr", None)
1068
+ if observer_cls is not None:
1069
+ observer_mod = observer_cls()
1070
+ arg_as_input_target_dtype = observer_mod.dtype
1071
+ # TODO: this does not handle dynamic quantization yet
1072
+ need_obs = (
1073
+ arg_as_output_target_dtype != arg_as_input_target_dtype and
1074
+ arg_as_input_target_dtype != torch.float
1075
+ )
1076
+ if need_obs:
1077
+ assert observer_mod is not None
1078
+ # insert observer
1079
+ observer_node = _insert_obs_or_fq(
1080
+ maybe_node, observer_mod, model, named_modules, graph)
1081
+ return observer_node
1082
+ else:
1083
+ return maybe_node
1084
+ elif isinstance(maybe_node, (list, tuple)):
1085
+ results = []
1086
+ for inner_node in maybe_node:
1087
+ results.append(_recursive_maybe_replace_node_with_obs(
1088
+ inner_node, model, named_modules, graph))
1089
+ if isinstance(maybe_node, list):
1090
+ return results
1091
+ else:
1092
+ return tuple(results)
1093
+ elif isinstance(maybe_node, dict):
1094
+ results_dict = {}
1095
+ for k, inner_v in maybe_node.items():
1096
+ results_dict[k] = _recursive_maybe_replace_node_with_obs(
1097
+ inner_v, model, named_modules, graph)
1098
+ return results_dict
1099
+ elif maybe_node is None:
1100
+ return None
1101
+ else:
1102
+ raise Exception("Unhandled type for returned node:", maybe_node)
1103
+
1104
+ new_args = []
1105
+ for old_arg in graph_output_node.args:
1106
+ new_args.append(
1107
+ _recursive_maybe_replace_node_with_obs(
1108
+ old_arg, model, named_modules, graph))
1109
+
1110
+ graph_output_node.args = tuple(new_args) # type: ignore[assignment]
1111
+
1112
+
1113
+ def _maybe_propagate_dtype_for_node(
1114
+ node: Node,
1115
+ target_dtype: Union[torch.dtype, type],
1116
+ node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig],
1117
+ ) -> None:
1118
+ """
1119
+ Assigns `target_dtype` to `node`, setting `is_dynamic` to False. If `node`
1120
+ is a general tensor shape op, also call this function recursively on
1121
+ the first argument, to propagate the dtype to the caller.
1122
+ """
1123
+ node.meta["target_dtype_info"]["input_act_obs_or_fq_ctr"] = None
1124
+ node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"] = None
1125
+ # if this is a copy node, propagate to first arg
1126
+ root_node, _, pattern, qhandler, qconfig = node_name_to_match_result_with_qconfig.get(
1127
+ node.name, (None, None, None, None, None))
1128
+ # TODO: probably need to remove `is_general_tensor_value_op`
1129
+ if qhandler is not None and qhandler.is_general_tensor_value_op():
1130
+ prev_node = node.args[0]
1131
+ if isinstance(prev_node, Node):
1132
+ _maybe_propagate_dtype_for_node(
1133
+ prev_node, target_dtype, node_name_to_match_result_with_qconfig)
1134
+
1135
+ def propagate_dtypes_for_known_nodes(
1136
+ graph: Graph,
1137
+ node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig],
1138
+ ) -> None:
1139
+ """
1140
+ Currently we assume that inputs to the graph are either `torch.float` or
1141
+ `torch.quint8`, which is not always correct. For ops such as
1142
+ `x.masked_fill(mask, value)`, we know that the dtype of `mask` is a
1143
+ `BoolTensor`. Propagate this information throughout the graph.
1144
+
1145
+ Note: not all dtypes in the graph will be correct after this pass, but a
1146
+ higher percentage of them will be correct. Hopefully in the future we can
1147
+ replace this with a better way to reason about dtypes of tensors.
1148
+ """
1149
+ for node in graph.nodes:
1150
+ non_observable_arg_dict = get_non_observable_arg_indexes_and_types(node)
1151
+
1152
+ for arg_type in non_observable_arg_dict:
1153
+ non_observable_indices = non_observable_arg_dict[arg_type](node)
1154
+
1155
+ for index in non_observable_indices:
1156
+ arg = node.args[index]
1157
+
1158
+ # when an argument is a tuple, it does not show up as another node so we need to go through
1159
+ # all elements of the tuple manually
1160
+ if isinstance(arg, (tuple, list)):
1161
+ arg_list = list(arg)
1162
+ else:
1163
+ arg_list = [arg]
1164
+
1165
+ for cur_arg in arg_list:
1166
+ # hard coded arguments show up but aren't `Node` typed and do not need dtype propagated
1167
+ if isinstance(cur_arg, torch.fx.node.Node):
1168
+ _maybe_propagate_dtype_for_node(
1169
+ cur_arg, arg_type, node_name_to_match_result_with_qconfig)
1170
+
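+ # Sketch (assumption, not part of the upstream file): what the propagation above relies
+ # on. For `x.masked_fill(mask, value)` the `mask` argument is a BoolTensor, so its
+ # producer should be tagged with torch.bool rather than a quantized dtype.
+ def _example_masked_fill_non_observable_args():
+     import torch.fx
+
+     class _Toy(torch.nn.Module):
+         def forward(self, x, mask):
+             return x.masked_fill(mask, 0.0)
+
+     gm = torch.fx.symbolic_trace(_Toy())
+     node = next(n for n in gm.graph.nodes
+                 if n.op == "call_method" and n.target == "masked_fill")
+     # maps an expected arg type (e.g. torch.bool) to a function that, given the node,
+     # returns the indices of node.args holding that type -- the same shape consumed above
+     return get_non_observable_arg_indexes_and_types(node)
+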
1171
+ def _maybe_make_input_output_share_observers(
1172
+ node: Node,
1173
+ model: torch.nn.Module,
1174
+ named_modules: Dict[str, torch.nn.Module],
1175
+ ) -> bool:
1176
+ """
1177
+ Ensures that we share an observer
1178
+ for all input arguments as well as the output argument. In detail, given
1179
+ a graph of
1180
+
1181
+ x0 -> obs0 -> op -> x2
1182
+ /
1183
+ x1 -> obs1 /
1184
+
1185
+ where node obs0 points to observer instance observer0 and
1186
+ obs1 points to observer1, we make node obs1 (and the output
1187
+ observer of the op, if any) point to observer0.
1188
+ Returns: whether the operation succeeded or not
1189
+ """
1190
+ first_arg = None
1191
+ # find the first arg that is a Node (or a list/tuple of Nodes)
1192
+ for i in range(len(node.args)):
1193
+ if isinstance(node.args[i], (Node, list, tuple)):
1194
+ first_arg = node.args[i]
1195
+ break
1196
+
1197
+ # if no such arg was found, return directly
1198
+ if first_arg is None:
1199
+ return False
1200
+
1201
+ if isinstance(first_arg, (list, tuple)):
1202
+ first_arg_arg = first_arg[0]
1203
+ elif isinstance(first_arg, Node):
1204
+ first_arg_arg = first_arg
1205
+ else:
1206
+ return False
1207
+
1208
+ # if we have a graph such as
1209
+ # observed_node -> non_observed_node -> cat
1210
+ # we need to navigate up to the first observer
1211
+ iteration_guard = 0
1212
+ while not _is_activation_post_process_node(first_arg_arg, named_modules):
1213
+ if not isinstance(first_arg_arg, Node):
1214
+ return False
1215
+ # did not find an activation_post_process for the op
1216
+ if first_arg_arg.op == "placeholder":
1217
+ return False
1218
+ # trace back through the args until we find the first Tensor/Node
1219
+ trace_back_node = None
1220
+ for i in range(len(first_arg_arg.args)):
1221
+ trace_back_node = first_arg_arg.args[i]
1222
+ if isinstance(trace_back_node, Node):
1223
+ break
1224
+ if trace_back_node is None:
1225
+ return False
1226
+ first_arg_arg = trace_back_node
1227
+
1228
+ iteration_guard += 1
1229
+ if iteration_guard > 10000:
1230
+ raise AssertionError('Unable to find observer of previous node')
1231
+
1232
+ assert isinstance(first_arg_arg, Node)
1233
+ target_to_use = first_arg_arg.target
1234
+ assert isinstance(target_to_use, str)
1235
+ obs_mod_to_use = named_modules[target_to_use]
1236
+
1237
+ if isinstance(first_arg, (list, tuple)):
1238
+ # set all other input observer nodes to use that module
1239
+ for input_idx, input_arg in enumerate(first_arg):
1240
+ if input_idx == 0:
1241
+ continue
1242
+ iteration_guard = 0
1243
+ while not _is_activation_post_process_node(input_arg, named_modules):
1244
+ # failed to trace back since no input arg for the current node
1245
+ if len(input_arg.args) < 1:
1246
+ return False
1247
+ input_arg = input_arg.args[0]
1248
+ iteration_guard += 1
1249
+ if iteration_guard > 10000:
1250
+ raise AssertionError('Unable to find observer of previous node')
1251
+
1252
+ parent_name, name = _parent_name(input_arg.target)
1253
+ setattr(named_modules[parent_name], name, obs_mod_to_use)
1254
+
1255
+ # set the output observer node to use that module
1256
+ for output_obs_node in node.users.keys():
1257
+ assert _is_activation_post_process_node(output_obs_node, named_modules)
1258
+ parent_name, name = _parent_name(output_obs_node.target)
1259
+ setattr(named_modules[parent_name], name, obs_mod_to_use)
1260
+
1261
+ # TODO(future PR): delete the orphaned observer modules
1262
+ return True
1263
+
1264
+ def _remove_output_observer(
1265
+ node: Node,
1266
+ model: torch.nn.Module,
1267
+ named_modules: Dict[str, torch.nn.Module]):
1268
+ items = list(node.users.items())
1269
+ for output_obs_node, _ in items:
1270
+ assert _is_activation_post_process_node(output_obs_node, named_modules)
1271
+ output_obs_node.replace_all_uses_with(node)
1272
+ model.graph.erase_node(output_obs_node) # type: ignore[union-attr, operator]
1273
+
1274
+ def _swap_custom_module_to_observed(
1275
+ node: Node,
1276
+ qconfig: QConfigAny,
1277
+ named_modules: Dict[str, torch.nn.Module],
1278
+ prepare_custom_config: PrepareCustomConfig):
1279
+ custom_module = named_modules[node.target] # type: ignore[index]
1280
+ custom_module_class_mapping = prepare_custom_config.float_to_observed_mapping
1281
+ observed_custom_module_class = \
1282
+ get_swapped_custom_module_class(
1283
+ custom_module, custom_module_class_mapping, qconfig)
1284
+ observed_custom_module = \
1285
+ observed_custom_module_class.from_float(custom_module)
1286
+ parent_name, name = _parent_name(node.target)
1287
+ setattr(named_modules[parent_name], name, observed_custom_module)
1288
+
1289
+ def insert_observers_for_model(
1290
+ model: GraphModule,
1291
+ node_name_to_match_result_with_qconfig: Dict[str, _MatchResultWithQConfig],
1292
+ node_name_to_qconfig: Dict[str, QConfigAny],
1293
+ prepare_custom_config: PrepareCustomConfig,
1294
+ equalization_config_map: Dict[str, Any],
1295
+ backend_config: BackendConfig,
1296
+ observed_node_names: Set[str],
1297
+ is_qat: bool,
1298
+ ) -> Optional[Node]:
1299
+ """
1300
+ Inserts observers, using the following high level algorithm:
1301
+
1302
+ For each node in the graph:
1303
+ 1. determine the target dtype of this node in the quantized graph, and save
1304
+ it for future steps
1305
+ 2. determine the target dtype of all args and kwargs of this node
1306
+ 3. if any arg or kwarg's target dtype does not match the current node's
1307
+ dtype, insert an observer
1308
+ 4. if the current node needs an output observer, insert it
1309
+
1310
+ For example:
1311
+
1312
+ - starting graph:
1313
+ x0 -> linear -> x1
1314
+
1315
+ - observed graph after processing x0:
1316
+ x0(fp32)
1317
+
1318
+ - observed graph after processing linear:
1319
+ x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8)
1320
+
1321
+ - observed graph after processing x1:
1322
+ x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8) -> x1
1323
+
1324
+ After a node is processed, the naive observer placement is guaranteed to be
1325
+ complete for that node and all of its predecessors. There can be future
1326
+ passes which optimize the graph by deduplicating observers, etc.
1327
+ """
1328
+
1329
+ # node.meta["target_dtype_info"] stores the target dtype information
1330
+ # that's derived from qconfig for the Node, for example, if we have
1331
+ # a conv2d node that has a qconfig
1332
+ # qconfig = QConfig(activation=..., weight=...)
1333
+ # # information for input and bias node omitted
1334
+ # # for getattr node
1335
+ # # weight = getattr(self, 'weight')
1336
+ # weight.meta["target_dtype_info"] = {
1337
+ # 'output_act_obs_or_fq_ctr': qconfig.weight,
1338
+ # }
1339
+ # # for conv2d node
1340
+ # # conv2d = call_function[target=torch.nn.functional.conv2d](
1341
+ # # args=(input, weight, bias))
1342
+ # conv2d.meta["target_dtype_info"] = {
1343
+ # 'input_act_obs_or_fq_ctr': qconfig.activation
1344
+ # 'weight_obs_or_fq_ctr': qconfig.weight,
1345
+ # 'bias_obs_or_fq_ctr': PlaceholderObserver.with_args(dtype=torch.float32),
1346
+ # 'output_act_obs_or_fq_ctr': qconfig.activation,
1347
+ # }
1348
+ #
1349
+ cache_for_no_tensor_check: Dict[Node, bool] = {}
1350
+
1351
+ # first, populate the dtype map based only on qconfig and qhandler
1352
+ # this assumes:
1353
+ # graph inputs are fp32 by default, and int8 where overridden
1354
+ # other nodes output dtype is specified by the qconfig
1355
+ named_modules = dict(model.named_modules(remove_duplicate=False))
1356
+
1357
+ input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
1358
+ output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
1359
+ processed_nodes: Set[Node] = set()
1360
+ # initialize target_dtype_info
1361
+ for node in model.graph.nodes:
1362
+ node.meta["target_dtype_info"] = copy.copy(_DEFAULT_FP32_QCONFIG_FOR_TARGET_DTYPE_INFO)
1363
+
1364
+ inputs_seen_counter = 0
1365
+ outputs_seen_counter = 0
1366
+ placeholder_node_to_input_index: Dict[Node, int] = {}
1367
+ # TODO: we probably don't need this counter since each graph will only have
1368
+ # one output node?
1369
+ output_node_to_output_index: Dict[Node, int] = {}
1370
+ for node in model.graph.nodes:
1371
+ if node.op == "placeholder":
1372
+ placeholder_node_to_input_index[node] = inputs_seen_counter
1373
+ inputs_seen_counter += 1
1374
+ if node.op == "output":
1375
+ output_node_to_output_index[node] = outputs_seen_counter
1376
+ outputs_seen_counter += 1
1377
+
1378
+ # Step 1, set the observer or fake quantize module constructor for each node in the
1379
+ # matched_node_pattern
1380
+
1381
+ for match_res_with_qconfig in node_name_to_match_result_with_qconfig.values():
1382
+ last_node, matched_node_pattern, pattern, qhandler, qconfig = match_res_with_qconfig
1383
+ assert qhandler is not None
1384
+ _set_target_dtype_info_for_matched_node_pattern(
1385
+ matched_node_pattern,
1386
+ last_node,
1387
+ qconfig,
1388
+ qhandler,
1389
+ backend_config,
1390
+ named_modules,
1391
+ cache_for_no_tensor_check,
1392
+ processed_nodes
1393
+ )
1394
+
1395
+ # Step 2. Special cases for some operators, we might be able to remove them
1396
+ # in the future if we know dtype information of each node better
1397
+
1398
+ # Step 2.1. some settings are not based on patterns, we need to process each node
1399
+ # instead
1400
+ for node in model.graph.nodes:
1401
+ if node.op == "placeholder" and placeholder_node_to_input_index[node] in input_quantized_idxs:
1402
+ # users are not supposed to call calculate_qparams on PlaceholderObserver, and
1403
+ # this is OK because we are using this as a way to encode the dtypes of input
1404
+ # tensor, we won't actually insert these observers in the graph and won't
1405
+ # actually call calculate_qparams
1406
+ node.meta["target_dtype_info"] = copy.copy(_DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO)
1407
+ elif node.op in ("call_module", "call_method", "call_function"):
1408
+ args_have_no_tensors = \
1409
+ all_node_args_have_no_tensors(
1410
+ node, named_modules, cache_for_no_tensor_check)
1411
+ if args_have_no_tensors:
1412
+ node.meta["target_dtype_info"] = {
1413
+ "input_act_obs_or_fq_ctr": None,
1414
+ "output_act_obs_or_fq_ctr": None,
1415
+ }
1416
+ elif node.op == "output" and output_node_to_output_index[node] in output_quantized_idxs:
1417
+ # TODO(future PR): update the output_quantized_idxs API to match
1418
+ # arbitrary data structures. There is always a single output, and
1419
+ # that output can have arbitrary nesting of values. List[int] is
1420
+ # not the right data type for this.
1421
+
1422
+ # TODO(future PR): support more dtypes in model outputs, if necessary
1423
+ node.meta["target_dtype_info"] = copy.copy(_DEFAULT_QUINT8_QCONFIG_FOR_TARGET_DTYPE_INFO)
1424
+
1425
+ # Step 2.2, for nodes with known input dtypes, propagate them throughout the
1426
+ # graph. For example, if there is a call such as
1427
+ # x1 = x0.masked_fill(mask, 1)
1428
+ # we propagate the type of mask to be torch.bool
1429
+ propagate_dtypes_for_known_nodes(model.graph, node_name_to_match_result_with_qconfig)
1430
+
1431
+ # Step 3, check if the requested target_dtype_info is supported by backend or not
1432
+ # if not, we'll reset the target_dtype_info to use the default (float Tensor)
1433
+
1434
+ # reset the counters and set of processed_nodes
1435
+ processed_nodes: Set[Node] = set()
1436
+ for match_res_with_qconfig in node_name_to_match_result_with_qconfig.values():
1437
+ last_node, matched_node_pattern, pattern, qhandler, qconfig = match_res_with_qconfig
1438
+ is_supported_by_backend = _is_pattern_dtype_config_and_qconfig_supported_by_backend(
1439
+ pattern, matched_node_pattern, qconfig, backend_config)
1440
+ assert qhandler is not None
1441
+
1442
+ # get output_act_dtype so that we don't also reset the special typed nodes
1443
+ # TODO: we might want to handle these more uniformly with the default path
1444
+ # this can be improved if we can use node.meta["val"]
1445
+ output_act_or_fq_ctr = node.meta["target_dtype_info"]["output_act_obs_or_fq_ctr"]
1446
+ output_act_or_fq = output_act_or_fq_ctr() if output_act_or_fq_ctr else None
1447
+ output_act_dtype, _ = _get_dtype_and_is_dynamic(output_act_or_fq)
1448
+ if not is_supported_by_backend and output_act_dtype not in [None, int, float, torch.bool]:
1449
+ # restore target_dtype_info to default if it is not supported by backend
1450
+ _set_target_dtype_info_for_matched_node_pattern(
1451
+ matched_node_pattern,
1452
+ last_node,
1453
+ torch.ao.quantization.qconfig._default_fp32_placeholder_qconfig,
1454
+ None,
1455
+ backend_config,
1456
+ named_modules,
1457
+ cache_for_no_tensor_check,
1458
+ processed_nodes
1459
+ )
1460
+
1461
+ # After this point, the current node and all of its arguments
1462
+ # have a target_dtype_info assigned. Now, we insert observers for inputs
1463
+ # of this node (if needed for this node), and the output of this node
1464
+ # (if needed for this node).
1465
+
1466
+ # Since we are mutating the graph as we go, we iterate over the original
1467
+ # nodes before observer insertion, instead of model.graph.nodes.
1468
+ nodes_before_observation = list(model.graph.nodes)
1469
+
1470
+ # Avoid duplicate custom module swaps for multiple nodes with the same target.
1471
+ custom_module_names_already_swapped: Set[str] = set()
1472
+
1473
+ # TODO: reuse placeholder_node_to_input_index and output_node_to_output_index
1474
+ # reset inputs/outputs counters
1475
+ inputs_seen_counter = 0
1476
+ outputs_seen_counter = 0
1477
+ results_node = None
1478
+ obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize] = {}
1479
+
1480
+ # TODO: change this to insert obs/fq by pattern instead of by node
1481
+ for node in nodes_before_observation:
1482
+
1483
+ if node.op == 'placeholder':
1484
+ # if a graph input is in fp32, it does not need observation
1485
+ # if a graph input is in int8, we assume the observation happens
1486
+ # outside of the graph, and no additional observation is needed
1487
+ pass
1488
+
1489
+ elif node.op in ('call_module', 'call_method', 'call_function', 'output'):
1490
+ # check for matches
1491
+ last_node, matched_node_pattern, pattern, qhandler, qconfig = (
1492
+ node_name_to_match_result_with_qconfig.get(node.name, (None, None, None, None, None)) # type: ignore[assignment]
1493
+ )
1494
+ equalization_qconfig = equalization_config_map.get(node.name, None)
1495
+
1496
+ this_node_dtype_info = node.meta["target_dtype_info"]
1497
+ if "val" in node.meta:
1498
+ output_is_a_tensor = (
1499
+ this_node_dtype_info is not None and
1500
+ isinstance(node.meta["val"], FakeTensor)
1501
+ )
1502
+ else:
1503
+ output_is_a_tensor = this_node_dtype_info is not None
1504
+
1505
+ skip_inserting_observers = (
1506
+ (qconfig is None) or
1507
+ not output_is_a_tensor
1508
+ ) and (
1509
+ not node.op == 'output'
1510
+ )
1511
+
1512
+ # TODO: take a closer look to see if we can remove this check
1513
+ # right now it is here because of `observed_node_names`, we are using
1514
+ # it as an indicator for swapping the modules to reference modules in
1515
+ # convert
1516
+ is_supported_by_backend = _is_pattern_dtype_config_and_qconfig_supported_by_backend(
1517
+ pattern, matched_node_pattern, qconfig, backend_config)
1518
+
1519
+ if not skip_inserting_observers and is_supported_by_backend:
1520
+ named_modules = dict(model.named_modules(remove_duplicate=False))
1521
+ if node.op != 'output':
1522
+ assert matched_node_pattern is not None
1523
+ # add matched nodes to the observed node name set
1524
+ _add_matched_node_name_to_set(matched_node_pattern, observed_node_names)
1525
+
1526
+ # This is currently only used for equalization.
1527
+ # Checks if the current node is in a branch in which the first
1528
+ # two layers are both being quantized.
1529
+ #
1530
+ # ex. conv2
1531
+ # /
1532
+ # x -> conv1
1533
+ #
1534
+ # If this is the case, we will not apply equalization to the
1535
+ # initial two layers.
1536
+ is_quantized_branch = False
1537
+ if (
1538
+ len(node.args) > 0 and
1539
+ isinstance(node.args[0], Node) and
1540
+ len(node.args[0].users) > 1
1541
+ ):
1542
+ for user in node.args[0].users:
1543
+ # Checks if there exists another user being quantized
1544
+ is_user_quantized = (
1545
+ node_name_to_qconfig.get(user.name, None) is not None or
1546
+ (user.op == 'call_module' and isinstance(named_modules[str(user.target)], ObserverBase))
1547
+ )
1548
+ if user != node and is_user_quantized:
1549
+ is_quantized_branch = True
1550
+
1551
+ pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
1552
+ root_node_getter = pattern_to_root_node_getter.get(pattern, _default_root_node_getter)
1553
+ root_node = root_node_getter(matched_node_pattern)
1554
+ is_input_node_of_the_pattern = node is root_node
1555
+ if is_input_node_of_the_pattern:
1556
+ # this modifies node inplace
1557
+ _maybe_insert_input_observers_for_node(
1558
+ node, qconfig, model, named_modules, model.graph,
1559
+ qhandler,
1560
+ prepare_custom_config,
1561
+ obs_or_fq_map,
1562
+ is_qat,
1563
+ backend_config)
1564
+
1565
+ # insert equalization input observers if needed
1566
+ _maybe_insert_input_equalization_observers_for_node(
1567
+ node, equalization_qconfig, model, named_modules, model.graph,
1568
+ is_quantized_branch)
1569
+
1570
+ is_last_node_of_pattern = node is last_node
1571
+ input_output_share_observers = node.meta["target_dtype_info"].get("input_output_share_observers", False)
1572
+ reuse_input_obs_or_fq = node.meta["target_dtype_info"].get("reuse_input_obs_or_fq", False)
1573
+
1574
+ if is_last_node_of_pattern:
1575
+ if _is_custom_module_lstm(node, named_modules, qconfig, qhandler):
1576
+ # Currently custom module outputs are assumed to be already quantized,
1577
+ # so we need to insert a DeQuantStub after the output. For custom module
1578
+ # LSTM specifically, the outputs are also a nested tuple, so we must first
1579
+ # break down the tuple to insert DeQuantStubs after the internal nodes.
1580
+
1581
+ # TODO: This currently diverges from how custom modules are handled today,
1582
+ # where we insert observers after the output instead of DeQuantStubs, and
1583
+ # replace these observers with "dequantize" nodes during convert. Conceptually,
1584
+ # these output observers are the same as DeQuantStubs. In the future, we
1585
+ # should resolve this inconsistency by inserting DeQuantStubs for all custom
1586
+ # modules, not just for LSTM.
1587
+ _insert_dequant_stubs_for_custom_module_lstm_output(node, model, named_modules, model.graph)
1588
+ if node.target not in custom_module_names_already_swapped:
1589
+ custom_module_names_already_swapped.add(node.target)
1590
+ _swap_custom_module_to_observed(node, qconfig, named_modules, prepare_custom_config)
1591
+ else:
1592
+ # this returns the new observer node if it was needed
1593
+ maybe_output_obs_node = _maybe_insert_output_observer_for_node(
1594
+ node, model, named_modules, model.graph, obs_or_fq_map, is_qat)
1595
+
1596
+ if maybe_output_obs_node is not None:
1597
+ # Update users of original node to use the output observer
1598
+ # instead. For example, change
1599
+ #
1600
+ # next_node
1601
+ # /
1602
+ # cur_node -> obs
1603
+ #
1604
+ # to
1605
+ #
1606
+ # cur_node -> obs -> next_node
1607
+ #
1608
+ # (i.e. next_node now reads from the output observer)
1609
+ #
1610
+ # We need to save orig users before updating uses because
1611
+ # the list of users will change as we update uses
1612
+ orig_users = list(node.users.keys())
1613
+ for user_node in orig_users:
1614
+ if user_node is maybe_output_obs_node:
1615
+ continue
1616
+ user_node.replace_input_with(node, maybe_output_obs_node)
1617
+
1618
+ _is_observer_in_same_graph_ = _is_observer_in_same_graph(
1619
+ node, named_modules, obs_or_fq_map, is_qat)
1620
+
1621
+ # for ops whose inputs and outputs share observer/fqs, we modify the graph
1622
+ # to make all inputs and outputs use the first input's
1623
+ # observer/fq
1624
+ if (input_output_share_observers and _is_observer_in_same_graph_) or \
1625
+ reuse_input_obs_or_fq:
1626
+ if not _maybe_make_input_output_share_observers(node, model, named_modules):
1627
+ _remove_output_observer(node, model, named_modules)
1628
+
1629
+ if qhandler is not None and qhandler.is_custom_module():
1630
+ if node.target not in custom_module_names_already_swapped:
1631
+ custom_module_names_already_swapped.add(node.target)
1632
+ _swap_custom_module_to_observed(node, qconfig, named_modules, prepare_custom_config)
1633
+
1634
+ else: # output
1635
+ _maybe_insert_observers_before_graph_output(node, model, named_modules, model.graph, obs_or_fq_map, is_qat)
1636
+
1637
+ #
1638
+ # After this point, the current node has input and output observers
1639
+ # that it needs for itself inserted.
1640
+ #
1641
+
1642
+ # increment the counters, so future inputs and outputs are assigned
1643
+ # correct dtypes
1644
+ if node.op == 'placeholder':
1645
+ inputs_seen_counter += 1
1646
+ elif node.op == 'output':
1647
+ outputs_seen_counter += 1
1648
+ results_node = node
1649
+
1650
+ return results_node
1651
+
1652
+ def _run_prepare_fx_on_standalone_modules(
1653
+ model: torch.nn.Module,
1654
+ is_qat: bool,
1655
+ named_modules: Dict[str, torch.nn.Module],
1656
+ node_name_to_match_result_with_qconfig: Any,
1657
+ prepare_custom_config: PrepareCustomConfig,
1658
+ backend_config: BackendConfig,
1659
+ ) -> None:
1660
+ """
1661
+ Runs prepare_fx on each standalone module. Note: this does
1662
+ not modify the graph, it just replaces the unobserved modules with
1663
+ their observed versions.
1664
+ """
1665
+ for (root_node, _, pattern, qhandler, qconfig) in node_name_to_match_result_with_qconfig.values():
1666
+ if qhandler is None:
1667
+ continue
1668
+ elif not qhandler.is_standalone_module():
1669
+ continue
1670
+
1671
+ sm_qconfig_mapping, sm_example_inputs, sm_prepare_custom_config, \
1672
+ sm_backend_config = _get_standalone_module_configs(
1673
+ root_node, named_modules, prepare_custom_config, qconfig, backend_config)
1674
+
1675
+ standalone_module = named_modules[root_node.target]
1676
+ prepare = \
1677
+ torch.ao.quantization.quantize_fx._prepare_standalone_module_fx # type: ignore[attr-defined]
1678
+ observed_standalone_module = \
1679
+ prepare(
1680
+ standalone_module,
1681
+ sm_qconfig_mapping,
1682
+ is_qat,
1683
+ example_inputs=sm_example_inputs,
1684
+ prepare_custom_config=sm_prepare_custom_config,
1685
+ backend_config=sm_backend_config)
1686
+ parent_name, name = _parent_name(root_node.target)
1687
+ setattr(named_modules[parent_name], name, observed_standalone_module)
1688
+ named_modules[root_node.target] = observed_standalone_module
1689
+
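For reference, a brief sketch (not part of the diff) of how a standalone module gets registered so that the helper above picks it up and prepares it as its own unit; the submodule name "submod" and the configs are illustrative assumptions, not values from this file:

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.fx.custom_config import PrepareCustomConfig

# "submod" is a hypothetical child module name; it will be traced and observed separately
prepare_custom_config = PrepareCustomConfig().set_standalone_module_name(
    "submod",                               # fully qualified name of the child module
    get_default_qconfig_mapping("fbgemm"),  # qconfig mapping used inside the child
    (torch.randn(1, 4),),                   # example inputs for the child
    None,                                   # nested PrepareCustomConfig (defaults)
    None,                                   # BackendConfig (defaults)
)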
1690
+ def _save_state(
1691
+ observed: GraphModule,
1692
+ node_name_to_qconfig: Dict[str, QConfigAny],
1693
+ node_name_to_scope: Dict[str, Tuple[str, type]],
1694
+ prepare_custom_config: PrepareCustomConfig,
1695
+ equalization_node_name_to_qconfig: Dict[str, Any],
1696
+ qconfig_mapping: QConfigMapping,
1697
+ is_qat: bool,
1698
+ observed_node_names: Set[str],
1699
+ ) -> None:
1700
+ observed.meta["_observed_graph_module_attrs"] = (
1701
+ ObservedGraphModuleAttrs(
1702
+ node_name_to_qconfig=node_name_to_qconfig,
1703
+ node_name_to_scope=node_name_to_scope,
1704
+ prepare_custom_config=prepare_custom_config,
1705
+ equalization_node_name_to_qconfig=equalization_node_name_to_qconfig,
1706
+ qconfig_mapping=qconfig_mapping,
1707
+ is_qat=is_qat,
1708
+ observed_node_names=observed_node_names,
1709
+ )
1710
+ )
1711
+
1712
+ def prepare(
1713
+ model: GraphModule,
1714
+ qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
1715
+ is_qat: bool,
1716
+ node_name_to_scope: Dict[str, Tuple[str, type]],
1717
+ example_inputs: Tuple[Any, ...],
1718
+ prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
1719
+ _equalization_config: Union[QConfigMapping, Dict[str, Any], None] = None,
1720
+ backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
1721
+ is_standalone_module: bool = False) -> GraphModule:
1722
+ """ standalone_module means it a submodule that is not inlined in
1723
+ parent module, and will be quantized separately as one unit.
1724
+
1725
+ How the standalone module is observed is specified by `input_quantized_idxs` and
1726
+ `output_quantized_idxs` in the prepare_custom_config for the standalone module
1727
+ Args:
1728
+ node_name_to_scope: mapping from node name to the scope of the module which contains the node.
1729
+ The scope is a tuple of the fully qualified path of the module and the type of the module
1730
+ Returns:
1731
+ model(GraphModule): prepared standalone module
1732
+ attributes related to standalone module
1733
+ in model.meta["_observed_graph_module_attrs"]:
1734
+ is_observed_standalone_module (bool): boolean value that shows whether the
1735
+ current model is an observed standalone module or not
1736
+ standalone_module_input_quantized_idxs(List[Int]): a list of
1737
+ indexes for the graph input that is expected to be quantized,
1738
+ same as input_quantized_idxs configuration provided
1739
+ for the standalone module
1740
+ standalone_module_output_quantized_idxs(List[Int]): a list of
1741
+ indexes for the graph output that is quantized,
1742
+ same as output_quantized_idxs configuration provided
1743
+ for the standalone module
1744
+ """
1745
+ if prepare_custom_config is None:
1746
+ prepare_custom_config = PrepareCustomConfig()
1747
+ if _equalization_config is None:
1748
+ _equalization_config = QConfigMapping()
1749
+
1750
+ if isinstance(qconfig_mapping, Dict):
1751
+ warnings.warn(
1752
+ "Passing a QConfig dictionary to prepare is deprecated and will not be supported "
1753
+ "in a future version. Please pass in a QConfigMapping instead.")
1754
+ qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping)
1755
+
1756
+ if isinstance(_equalization_config, Dict):
1757
+ warnings.warn(
1758
+ "Passing a QConfig dictionary to prepare for equalization is deprecated and will not "
1759
+ "be supported in a future version. Please pass in a QConfigMapping instead.")
1760
+ _equalization_config = QConfigMapping.from_dict(_equalization_config)
1761
+
1762
+ if isinstance(prepare_custom_config, Dict):
1763
+ warnings.warn(
1764
+ "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
1765
+ "in a future version. Please pass in a PrepareCustomConfig instead.")
1766
+ prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)
1767
+
1768
+ if isinstance(backend_config, Dict):
1769
+ warnings.warn(
1770
+ "Passing a backend_config_dict to prepare is deprecated and will not be supported "
1771
+ "in a future version. Please pass in a BackendConfig instead.")
1772
+ backend_config = BackendConfig.from_dict(backend_config)
1773
+
1774
+ assert isinstance(qconfig_mapping, QConfigMapping)
1775
+ assert isinstance(_equalization_config, QConfigMapping)
1776
+ qconfig_mapping = copy.deepcopy(qconfig_mapping)
1777
+ _equalization_config = copy.deepcopy(_equalization_config)
1778
+
1779
+ # mapping from a tuple of nodes in reverse order to uninitialized
1780
+ # QuantizeHandler subclass. For example,
1781
+ # {
1782
+ # # match a single node
1783
+ # (<class 'torch.nn.modules.conv.Conv3d'>:
1784
+ # <class 'torch.ao.quantization.fx.quantize.ConvRelu'>),
1785
+ # # match multiple nodes in reverse order
1786
+ # ((<function relu at 0x7f766a7360d0>, <built-in function add>):
1787
+ # <class 'torch.ao.quantization.fx.quantize.Add'>),
1788
+ # }
1789
+
1790
+ pattern_to_quantize_handler: Dict[Pattern, QuantizeHandler] = {}
1791
+ if backend_config is None:
1792
+ backend_config = get_native_backend_config()
1793
+ pattern_to_quantize_handler = _get_pattern_to_quantize_handlers(backend_config)
1794
+ pattern_to_quantize_handler = _sorted_patterns_dict(pattern_to_quantize_handler)
1795
+
1796
+ root_node_getter_mapping = \
1797
+ get_fusion_pattern_to_root_node_getter(backend_config)
1798
+
1799
+ _update_qconfig_for_fusion(model, qconfig_mapping)
1800
+ _update_qconfig_for_fusion(model, _equalization_config)
1801
+ flattened_qconfig_dict = _get_flattened_qconfig_dict(qconfig_mapping)
1802
+ # TODO: support regex as well
1803
+ propagate_qconfig_(model, flattened_qconfig_dict, prepare_custom_config.to_dict())
1804
+
1805
+ if is_qat:
1806
+ module_to_qat_module = get_module_to_qat_module(backend_config)
1807
+ _qat_swap_modules(model, module_to_qat_module)
1808
+ _update_qconfig_for_qat(qconfig_mapping, backend_config)
1809
+
1810
+ # mapping from fully qualified module name to module instance
1811
+ # for example,
1812
+ # {
1813
+ # '': Model(...),
1814
+ # 'linear': Linear(...),
1815
+ # 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
1816
+ # }
1817
+ named_modules = dict(model.named_modules(remove_duplicate=False))
1818
+
1819
+ # fill node_name_to_qconfig, a map from node name to qconfig, used in _find_matches
1820
+ equalization_node_name_to_qconfig = _generate_node_name_to_qconfig(
1821
+ model, named_modules, model.graph, _equalization_config, node_name_to_scope)
1822
+ node_name_to_qconfig = _generate_node_name_to_qconfig(model, named_modules, model.graph, qconfig_mapping, node_name_to_scope)
1823
+
1824
+ # match the patterns that will get quantized
1825
+ standalone_module_names = list(prepare_custom_config.standalone_module_names.keys())
1826
+ standalone_module_classes = list(prepare_custom_config.standalone_module_classes.keys())
1827
+
1828
+ custom_module_classes = get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping)
1829
+ matches_without_qconfig = _find_matches(
1830
+ model.graph, named_modules, pattern_to_quantize_handler, root_node_getter_mapping,
1831
+ standalone_module_names, standalone_module_classes, custom_module_classes)
1832
+
1833
+ # map qconfig instances to matches
1834
+ node_name_to_match_result_with_qconfig = {}
1835
+ for node_name, match_without_qconfig in matches_without_qconfig.items():
1836
+ match_with_qconfig = (*match_without_qconfig, node_name_to_qconfig[node_name])
1837
+ node_name_to_match_result_with_qconfig[node_name] = match_with_qconfig
1838
+
1839
+ _run_prepare_fx_on_standalone_modules(
1840
+ model, is_qat, named_modules, node_name_to_match_result_with_qconfig, prepare_custom_config, backend_config)
1841
+
1842
+ # record names for the set of observed node, so that in convert step
1843
+ # we know whether we need to convert a floating point module to reference
1844
+ # quantized module or not
1845
+ observed_node_names: Set[str] = set()
1846
+
1847
+ result_node = insert_observers_for_model(
1848
+ model,
1849
+ node_name_to_match_result_with_qconfig,
1850
+ node_name_to_qconfig,
1851
+ prepare_custom_config,
1852
+ equalization_node_name_to_qconfig,
1853
+ backend_config,
1854
+ observed_node_names,
1855
+ is_qat,
1856
+ )
1857
+ model = GraphModule(model, model.graph)
1858
+
1859
+ _save_state(model, node_name_to_qconfig, node_name_to_scope,
1860
+ prepare_custom_config, equalization_node_name_to_qconfig,
1861
+ qconfig_mapping, is_qat, observed_node_names)
1862
+
1863
+ if is_standalone_module:
1864
+ assert result_node is not None
1865
+ assert isinstance(result_node.args[0], Node), \
1866
+ "standalone module only supports returning simple value currently"\
1867
+ "(not tuple, dict etc.)"
1868
+ # these inputs are observed in parent
1869
+ # converting List[int] to Tensor since module attribute is
1870
+ # Union[Tensor, Module]
1871
+ input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
1872
+ output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
1873
+ observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"]
1874
+ # inplace modification
1875
+ observed_graph_module_attrs.is_observed_standalone_module = True
1876
+ observed_graph_module_attrs.standalone_module_input_quantized_idxs = \
1877
+ input_quantized_idxs
1878
+ observed_graph_module_attrs.standalone_module_output_quantized_idxs = \
1879
+ output_quantized_idxs
1880
+ return model
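For orientation, a minimal usage sketch (not part of the diff) of how the internal `prepare` above is normally reached through the public `prepare_fx` entry point; the toy model and the "fbgemm" qconfig mapping are illustrative assumptions:

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.linear(x))

# prepare_fx symbolically traces the model and then calls the `prepare` function
# above to insert observers according to the qconfig mapping.
model = TinyModel().eval()
example_inputs = (torch.randn(1, 4),)
prepared = prepare_fx(model, get_default_qconfig_mapping("fbgemm"), example_inputs)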
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py ADDED
@@ -0,0 +1,343 @@
1
+ import torch
2
+ import re
3
+ from collections import defaultdict, OrderedDict
4
+ from typing import Callable, Any, Dict, Tuple, Set, List, Union
5
+ from torch.ao.quantization import QConfig
6
+ from torch.ao.quantization.qconfig import _add_module_to_qconfig_obs_ctr, QConfigAny, qconfig_equals
7
+ from torch.ao.quantization.observer import (
8
+ _is_activation_post_process,
9
+ )
10
+ from torch.ao.quantization.backend_config import (
11
+ BackendConfig,
12
+ DTypeConfig,
13
+ )
14
+ from torch.ao.quantization.backend_config.utils import (
15
+ get_module_to_qat_module,
16
+ )
17
+
18
+ from torch.fx import (
19
+ GraphModule,
20
+ )
21
+ from torch.fx.graph import (
22
+ Graph,
23
+ )
24
+ from torch.ao.nn.intrinsic import _FusedModule
25
+
26
+ from ..utils import (
27
+ _parent_name,
28
+ get_qconfig_dtypes,
29
+ )
30
+ from ..qconfig_mapping import (
31
+ _OBJECT_TYPE_DICT_KEY,
32
+ _MODULE_NAME_DICT_KEY,
33
+ _MODULE_NAME_REGEX_DICT_KEY,
34
+ QConfigMapping,
35
+ )
36
+
37
+ __all__: List[str] = []
38
+
39
+
40
+
41
+ def _maybe_adjust_qconfig_for_module_name_object_type_order(
42
+ qconfig_mapping: QConfigMapping,
43
+ cur_module_path: str,
44
+ cur_object_type: Callable,
45
+ cur_object_type_idx: int,
46
+ fallback_qconfig: QConfigAny,
47
+ ) -> QConfigAny:
48
+ for (module_name, object_type, index), qconfig in qconfig_mapping.module_name_object_type_order_qconfigs.items():
49
+ if (
50
+ (module_name == cur_module_path) and
51
+ (object_type == cur_object_type) and
52
+ (index == cur_object_type_idx)
53
+ ):
54
+ return qconfig
55
+ return fallback_qconfig
56
+
57
+
58
+ def _update_qconfig_for_fusion(model: GraphModule, qconfig_mapping: QConfigMapping):
59
+ """
60
+ Update the QConfigMapping to account for fused modules such as LinearReLU.
61
+ This assumes the QConfigMapping's attributes have already been converted to OrderedDicts.
62
+ """
63
+ object_type_dict = qconfig_mapping.object_type_qconfigs
64
+ if len(object_type_dict) == 0:
65
+ return qconfig_mapping
66
+
67
+ modules = dict(model.named_modules())
68
+
69
+ for node in model.graph.nodes:
70
+ if node.op == 'call_module' and node.target in modules:
71
+ maybe_fused_module = modules[str(node.target)]
72
+ if not isinstance(maybe_fused_module, _FusedModule):
73
+ continue
74
+
75
+ ops = list(maybe_fused_module._modules.values())
76
+ fused_qconfig = object_type_dict.get(type(ops[0]), None)
77
+
78
+ # Raise an error if the modules in the fused module have
79
+ # different qconfigs specified in the qconfig_dict
80
+ # TODO: currently it only works for modules,
81
+ # need to make this work for torch.nn.functional.relu
82
+ # TODO: currently it only works for object_type configurations,
83
+ # ideally it should work for different types of configurations,
84
+ # maybe we want to redesign this part
85
+ for op in ops[1:]:
86
+ if not qconfig_equals(object_type_dict.get(type(op), None), fused_qconfig):
87
+ raise LookupError(
88
+ "During fusion, we need to specify the same " +
89
+ f"qconfigs for all module types in {type(maybe_fused_module)} " +
90
+ f"offending type: {type(op)}")
91
+
92
+ if fused_qconfig is not None:
93
+ object_type_dict[type(maybe_fused_module)] = fused_qconfig
94
+
95
+ def _generate_node_name_to_qconfig(
96
+ root: torch.nn.Module,
97
+ modules: Dict[str, torch.nn.Module],
98
+ input_graph: Graph,
99
+ qconfig_mapping: QConfigMapping,
100
+ node_name_to_scope: Dict[str, Tuple[str, type]]) -> Dict[str, QConfigAny]:
101
+ global_qconfig = qconfig_mapping.global_qconfig
102
+ node_name_to_qconfig = {}
103
+
104
+ # example:
105
+ #
106
+ # {'foo.bar': {F.linear: 0, F.conv2d: 1, ...}, ...}
107
+ #
108
+ # meaning in submodule 'foo.bar', we have seen 0 F.linear and
109
+ # 1 F.conv2d invocations so far.
110
+ submodule_to_object_type_to_cur_idx: Dict[str, Dict[Callable, int]] = \
111
+ defaultdict(lambda: defaultdict(int))
112
+ for node in input_graph.nodes:
113
+ qconfig = None
114
+ if node.op == "get_attr":
115
+ module_name, _ = _parent_name(node.target)
116
+ qconfig = _maybe_adjust_qconfig_for_module_type_or_name(
117
+ qconfig_mapping, type(modules[module_name]), module_name, global_qconfig)
118
+ qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
119
+ elif node.op == "call_function":
120
+ # precedence: module_name_qconfig
121
+ # > function_qconfig > global_qconfig
122
+ # module_name takes precedence over function qconfig
123
+ function_qconfig = _get_object_type_qconfig(
124
+ qconfig_mapping, node.target, global_qconfig)
125
+ module_path, module_type = node_name_to_scope[node.name]
126
+ qconfig = _maybe_adjust_qconfig_for_module_type_or_name(
127
+ qconfig_mapping, module_type, module_path, function_qconfig)
128
+
129
+ cur_object_type_idx = \
130
+ submodule_to_object_type_to_cur_idx[module_path][node.target]
131
+ submodule_to_object_type_to_cur_idx[module_path][node.target] += 1
132
+ qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order(
133
+ qconfig_mapping, module_path, node.target, cur_object_type_idx, qconfig)
134
+ qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
135
+
136
+ elif node.op == "call_method":
137
+ module_path, module_type = node_name_to_scope[node.name]
138
+ # first use node.target (string) to get the qconfig
139
+ # this is to support configs like
140
+ # "object_type": [("reshape", qconfig)]
141
+ qconfig = _maybe_adjust_qconfig_for_module_type_or_name(
142
+ qconfig_mapping, node.target, module_path, global_qconfig)
143
+ # if there is no special config for the method, we'll fall back to the
144
+ # config for the module that contains the call_method node
145
+ qconfig = _maybe_adjust_qconfig_for_module_type_or_name(
146
+ qconfig_mapping, module_type, module_path, qconfig)
147
+ # currently call_method does not support modifying qconfig
148
+ # by order, we can add this later if it is needed.
149
+ qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
150
+
151
+ elif node.op == 'call_module':
152
+ # if the node is an observer, just continue - don't add it to the qconfig_map
153
+ if _is_activation_post_process(modules[node.target]):
154
+ continue
155
+ qconfig = _maybe_adjust_qconfig_for_module_type_or_name(
156
+ qconfig_mapping, type(modules[node.target]), node.target, global_qconfig)
157
+
158
+ module_path, module_type = node_name_to_scope[node.name]
159
+ # Note: for call_module, the module_path is the current module's name.
160
+ # to meaningfully count invocations, we need to count them in the parent
161
+ # module.
162
+ parent_name, _ = _parent_name(module_path)
163
+ cur_object_type_idx = \
164
+ submodule_to_object_type_to_cur_idx[parent_name][module_type]
165
+ submodule_to_object_type_to_cur_idx[parent_name][module_type] += 1
166
+ qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order(
167
+ qconfig_mapping, parent_name, module_type, cur_object_type_idx,
168
+ qconfig)
169
+ qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
170
+
171
+ # regex is not supported in eager mode propagate_qconfig_, so we'll
172
+ # need to set the qconfig explicitly here in case regex
173
+ # is used
174
+ modules[node.target].qconfig = qconfig_with_device_check
175
+ else:
176
+ qconfig_with_device_check = None
177
+
178
+ node_name_to_qconfig[node.name] = qconfig_with_device_check
179
+ return node_name_to_qconfig
180
+
181
+
182
+ def _check_is_valid_config_dict(config_dict: Any, allowed_keys: Set[str], dict_name: str) -> None:
183
+ r""" Checks if the given config_dict has the correct keys
184
+
185
+ Args:
186
+ `config_dict`: dictionary whose keys we want to check
187
+ """
188
+
189
+ for k in config_dict.keys():
190
+ if k not in allowed_keys:
191
+ raise ValueError(
192
+ 'Expected ' + dict_name + ' to have the following keys: ' +
193
+ str(allowed_keys) + '. But found \'' + k +
194
+ '\' instead.')
195
+
196
+
197
+ def _compare_prepare_convert_qconfig_mappings(
198
+ prepare_qconfig_mapping: QConfigMapping,
199
+ convert_qconfig_mapping: QConfigMapping):
200
+ r""" Compare the qconfig_mapping passed in convert to the one from prepare and check the values
201
+
202
+ Args:
203
+ `prepare_qconfig_mapping`: configuration for prepare quantization step
204
+ `convert_qconfig_mapping`: configuration for convert quantization step
205
+ """
206
+ assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), \
207
+ "Expected global qconfigs to be the same in the prepare and convert quantization configs"
208
+ prepare_dicts: List[OrderedDict] = [
209
+ prepare_qconfig_mapping.object_type_qconfigs,
210
+ prepare_qconfig_mapping.module_name_qconfigs,
211
+ prepare_qconfig_mapping.module_name_regex_qconfigs,
212
+ ]
213
+ convert_dicts: List[OrderedDict] = [
214
+ convert_qconfig_mapping.object_type_qconfigs,
215
+ convert_qconfig_mapping.module_name_qconfigs,
216
+ convert_qconfig_mapping.module_name_regex_qconfigs,
217
+ ]
218
+ dict_names = [_OBJECT_TYPE_DICT_KEY, _MODULE_NAME_DICT_KEY, _MODULE_NAME_REGEX_DICT_KEY]
219
+ for i in range(len(prepare_dicts)):
220
+ for name in prepare_dicts[i].keys():
221
+ assert name in convert_dicts[i], f"Missing key {dict_names[i]} {name} in convert QConfigMapping \
222
+ when it was present in prepare"
223
+ assert convert_dicts[i][name] is None \
224
+ or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), \
225
+ f"Expected convert QConfigMapping to have the same qconfig as prepare for key {dict_names[i]} {name}; \
226
+ prepare: {prepare_dicts[i][name]}; convert: {convert_dicts[i][name]}"
227
+
228
+ def _is_qconfig_supported_by_dtype_configs(qconfig: QConfig, dtype_configs: List[DTypeConfig]):
229
+ for dtype_config in dtype_configs:
230
+ is_dynamic = dtype_config.is_dynamic
231
+ if is_dynamic is None:
232
+ is_dynamic = False
233
+ input_dtype = dtype_config.input_dtype or torch.float
234
+ weight_dtype = dtype_config.weight_dtype or torch.float
235
+ bias_dtype = dtype_config.bias_dtype or torch.float
236
+ output_dtype = dtype_config.output_dtype or torch.float
237
+ qconfig_activation_dtype, qconfig_weight_dtype, qconfig_input_act_is_dynamic = \
238
+ get_qconfig_dtypes(qconfig)
239
+ qconfig_bias_dtype = torch.float16 \
240
+ if (
241
+ qconfig_activation_dtype == torch.float16
242
+ and qconfig_weight_dtype == torch.float16
243
+ and not is_dynamic
244
+ ) else torch.float
245
+
246
+ if is_dynamic:
247
+ is_match = qconfig_input_act_is_dynamic and \
248
+ input_dtype == qconfig_activation_dtype and \
249
+ output_dtype == torch.float and \
250
+ weight_dtype == qconfig_weight_dtype
251
+ else:
252
+ is_match = input_dtype == qconfig_activation_dtype and \
253
+ output_dtype == qconfig_activation_dtype and \
254
+ weight_dtype == qconfig_weight_dtype and \
255
+ bias_dtype == qconfig_bias_dtype
256
+ if is_match:
257
+ return True
258
+ return False
259
+
260
+ def _get_object_type_qconfig(
261
+ qconfig_mapping: QConfigMapping,
262
+ object_type: Union[Callable, str],
263
+ fallback_qconfig: QConfigAny) -> QConfigAny:
264
+ return qconfig_mapping.object_type_qconfigs.get(object_type, fallback_qconfig)
265
+
266
+
267
+ def _get_module_name_regex_qconfig(qconfig_mapping, module_name, fallback_qconfig):
268
+ for regex_pattern, qconfig in qconfig_mapping.module_name_regex_qconfigs.items():
269
+ if re.match(regex_pattern, module_name):
270
+ # first match wins
271
+ return qconfig
272
+ return fallback_qconfig
273
+
274
+
275
+ def _get_module_name_qconfig(qconfig_mapping, module_name, fallback_qconfig):
276
+ if module_name == '':
277
+ # module name qconfig not found
278
+ return fallback_qconfig
279
+ if module_name in qconfig_mapping.module_name_qconfigs:
280
+ return qconfig_mapping.module_name_qconfigs[module_name]
281
+ else:
282
+ parent, _ = _parent_name(module_name)
283
+ return _get_module_name_qconfig(qconfig_mapping, parent, fallback_qconfig)
284
+
285
+
286
+ def _maybe_adjust_qconfig_for_module_type_or_name(qconfig_mapping, module_type, module_name, global_qconfig):
287
+ # get qconfig for module_name,
288
+ # fallback to module_name_regex_qconfig, module_type_qconfig,
289
+ # global_qconfig if necessary
290
+ module_type_qconfig = _get_object_type_qconfig(
291
+ qconfig_mapping, module_type, global_qconfig)
292
+ module_name_regex_qconfig = _get_module_name_regex_qconfig(
293
+ qconfig_mapping, module_name, module_type_qconfig)
294
+ module_name_qconfig = _get_module_name_qconfig(
295
+ qconfig_mapping, module_name, module_name_regex_qconfig)
296
+ return module_name_qconfig
297
+
298
+
299
+ def _get_flattened_qconfig_dict(qconfig_mapping: QConfigMapping) -> Dict[Union[Callable, str], QConfigAny]:
300
+ """ flatten the global, object_type and module_name qconfig
301
+ to the same qconfig_dict so that it can be used by
302
+ propagate_qconfig_ function.
303
+ "module_name_regex" is ignored for now since it's not supported
304
+ in propagate_qconfig_, but it can be fixed later.
305
+
306
+ For example:
307
+ Input: {
308
+ "": qconfig,
309
+ "object_type": [
310
+ (torch.add, qconfig)
311
+ ],
312
+ "module_name": [
313
+ ("conv", qconfig)
314
+ ]
315
+ }
316
+
317
+ Output: {
318
+ "": qconfig,
319
+ torch.add: qconfig,
320
+ "conv": qconfig
321
+ }
322
+ """
323
+ flattened: Dict[Union[Callable, str], QConfigAny] = {"": qconfig_mapping.global_qconfig}
324
+ for obj, qconfig in qconfig_mapping.object_type_qconfigs.items():
325
+ flattened[obj] = qconfig
326
+ for obj, qconfig in qconfig_mapping.module_name_qconfigs.items():
327
+ flattened[obj] = qconfig
328
+ return flattened
329
+
330
+
331
+ def _update_qconfig_for_qat(
332
+ qconfig_mapping: QConfigMapping,
333
+ backend_config: BackendConfig):
334
+ """
335
+ Update the qconfig_mapping to account for module swaps during QAT.
336
+ During QAT we perform a module swap on the nn.Module types to the corresponding nn.qat.modules types.
337
+ """
338
+ module_to_qat_module_class = get_module_to_qat_module(backend_config)
339
+ object_type_dict = qconfig_mapping.object_type_qconfigs
340
+ new_object_type_dict = object_type_dict.copy()
341
+ for k, v in new_object_type_dict.items():
342
+ if k in module_to_qat_module_class:
343
+ object_type_dict[module_to_qat_module_class[k]] = v
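A minimal sketch (not part of the diff) of the precedence these helpers implement when resolving a qconfig for a node: `module_name` overrides `module_name_regex`, which overrides `object_type`, which overrides the global default. The module names below are illustrative assumptions:

import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.qconfig_mapping import QConfigMapping

qconfig = get_default_qconfig("fbgemm")
qconfig_mapping = (
    QConfigMapping()
    .set_global(qconfig)                           # lowest precedence
    .set_object_type(torch.nn.Linear, qconfig)     # applies to every nn.Linear
    .set_module_name_regex("block.*", qconfig)     # overrides object_type for matching names
    .set_module_name("block0.linear", None)        # highest precedence: skip quantization here
)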
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py ADDED
@@ -0,0 +1,45 @@
1
+ import torch
2
+ from torch.fx._symbolic_trace import Tracer
3
+ from torch.fx.proxy import Scope
4
+ from torch.ao.nn.intrinsic import _FusedModule
5
+ from typing import List, Callable
6
+
7
+ __all__ = [
8
+ "QuantizationTracer",
9
+ ]
10
+
11
+ class ScopeContextManager(torch.fx.proxy.ScopeContextManager):
12
+ def __init__(
13
+ self,
14
+ scope: Scope,
15
+ current_module: torch.nn.Module,
16
+ current_module_path: str
17
+ ):
18
+ super().__init__(scope, Scope(current_module_path, type(current_module)))
19
+
20
+
21
+ class QuantizationTracer(Tracer):
22
+ def __init__(
23
+ self, skipped_module_names: List[str], skipped_module_classes: List[Callable]
24
+ ):
25
+ super().__init__()
26
+ self.skipped_module_names = skipped_module_names
27
+ self.skipped_module_classes = skipped_module_classes
28
+ # NB: initialize the module_type of the top level module to None;
29
+ # we are assuming people won't configure the model with the type of top level
30
+ # module here, since people can use "" for global config
31
+ # We can change this if there is a use case that configures
32
+ # qconfig using top level module type
33
+ self.scope = Scope("", None)
34
+ self.record_stack_traces = True
35
+
36
+ def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
37
+ return (
38
+ (
39
+ (m.__module__.startswith("torch.nn") or m.__module__.startswith("torch.ao.nn"))
40
+ and not isinstance(m, torch.nn.Sequential)
41
+ )
42
+ or module_qualified_name in self.skipped_module_names
43
+ or type(m) in self.skipped_module_classes
44
+ or isinstance(m, _FusedModule)
45
+ )
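A minimal sketch (not part of the diff) of how `QuantizationTracer` is driven internally; the toy model is an illustrative assumption:

import torch
from torch.fx import GraphModule

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
tracer = QuantizationTracer(skipped_module_names=[], skipped_module_classes=[])
graph = tracer.trace(model)
gm = GraphModule(tracer.root, graph)
# Linear and ReLU stay call_module nodes because is_leaf_module treats
# torch.nn / torch.ao.nn modules (other than nn.Sequential) as leaves.
print(gm.graph)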
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py ADDED
@@ -0,0 +1,885 @@
1
+ import copy
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.ao.quantization import (
5
+ QConfigAny,
6
+ QuantType,
7
+ )
8
+ from torch.ao.quantization.backend_config import (
9
+ DTypeWithConstraints,
10
+ )
11
+ from torch.ao.quantization.fake_quantize import (
12
+ FakeQuantizeBase,
13
+ FixedQParamsFakeQuantize,
14
+ )
15
+ from torch.ao.quantization.observer import (
16
+ FixedQParamsObserver,
17
+ ObserverBase,
18
+ )
19
+ from torch.ao.quantization.qconfig import (
20
+ float16_static_qconfig,
21
+ float16_dynamic_qconfig,
22
+ qconfig_equals,
23
+ )
24
+ from torch.ao.quantization.stubs import DeQuantStub
25
+ from torch.ao.quantization.utils import (
26
+ activation_is_statically_quantized,
27
+ )
28
+ from torch.ao.quantization.observer import _is_activation_post_process
29
+ from torch.ao.quantization.qconfig_mapping import QConfigMapping
30
+
31
+ from torch.fx import GraphModule, map_arg
32
+
33
+ from torch.fx.graph import (
34
+ Graph,
35
+ Node,
36
+ )
37
+ from .custom_config import PrepareCustomConfig
38
+ # importing the lib so that the quantized_decomposed ops are registered
39
+ from ._decomposed import quantized_decomposed_lib # noqa: F401
40
+
41
+ from typing import Callable, Optional, List, Dict, Any, Set, Tuple, Union, Type
42
+ from dataclasses import dataclass
43
+ from collections import namedtuple
44
+ import operator
45
+ import warnings
46
+
47
+ # TODO: revisit this list. Many helper methods shouldn't be public
48
+ __all__ = [
49
+ "all_node_args_except_first",
50
+ "all_node_args_have_no_tensors",
51
+ "assert_and_get_unique_device",
52
+ "collect_producer_nodes",
53
+ "create_getattr_from_value",
54
+ "create_node_from_old_node_preserve_meta",
55
+ "EMPTY_ARG_DICT",
56
+ "get_custom_module_class_keys",
57
+ "get_linear_prepack_op_for_dtype",
58
+ "get_new_attr_name_with_prefix",
59
+ "get_non_observable_arg_indexes_and_types",
60
+ "get_qconv_prepack_op",
61
+ "get_skipped_module_name_and_classes",
62
+ "graph_module_from_producer_nodes",
63
+ "maybe_get_next_module",
64
+ "NodeInfo",
65
+ "node_arg_is_bias",
66
+ "node_arg_is_weight",
67
+ "NON_OBSERVABLE_ARG_DICT",
68
+ "NON_QUANTIZABLE_WEIGHT_OPS",
69
+ "return_arg_list",
70
+ "ObservedGraphModuleAttrs",
71
+ ]
72
+
73
+ NON_QUANTIZABLE_WEIGHT_OPS = {torch.nn.functional.layer_norm, torch.nn.functional.group_norm, torch.nn.functional.instance_norm}
74
+
75
+ @dataclass
76
+ class ObservedGraphModuleAttrs:
77
+ node_name_to_qconfig: Dict[str, QConfigAny]
78
+ node_name_to_scope: Dict[str, Tuple[str, type]]
79
+ prepare_custom_config: PrepareCustomConfig
80
+ equalization_node_name_to_qconfig: Dict[str, Any]
81
+ qconfig_mapping: QConfigMapping
82
+ is_qat: bool
83
+ observed_node_names: Set[str]
84
+ is_observed_standalone_module: bool = False
85
+ standalone_module_input_quantized_idxs: Optional[List[int]] = None
86
+ standalone_module_output_quantized_idxs: Optional[List[int]] = None
87
+
88
+ def node_arg_is_weight(node: Node, arg: Any) -> bool:
89
+ """Returns if node arg is weight"""
90
+ weight_index = None
91
+ if "target_dtype_info" in node.meta:
92
+ weight_index = node.meta["target_dtype_info"].get("weight_index", None)
93
+ if weight_index is not None and weight_index < len(node.args) and node.args[weight_index] is arg:
94
+ return True
95
+ return node.kwargs.get("weight") is arg
96
+
97
+ def node_arg_is_bias(node: Node, arg: Any) -> bool:
98
+ """Returns if node arg is bias"""
99
+ bias_index = None
100
+ if "target_dtype_info" in node.meta:
101
+ bias_index = node.meta["target_dtype_info"].get("bias_index", None)
102
+ if bias_index is not None and bias_index < len(node.args) and node.args[bias_index] is arg:
103
+ return True
104
+ return node.kwargs.get("bias") is arg
105
+
106
+ def get_custom_module_class_keys(custom_module_mapping: Dict[QuantType, Dict[Type, Type]]) -> List[Any]:
107
+ r""" Get all the unique custom module keys in the custom config dict
108
+ e.g.
109
+ Input:
110
+ {
111
+ QuantType.STATIC: {
112
+ CustomModule1: ObservedCustomModule
113
+ },
114
+ QuantType.DYNAMIC: {
115
+ CustomModule2: DynamicObservedCustomModule
116
+ },
117
+ QuantType.WEIGHT_ONLY: {
118
+ CustomModule3: WeightOnlyObservedCustomModule
119
+ },
120
+ }
121
+
122
+ Output:
123
+ # extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts
124
+ [CustomModule1, CustomModule2, CustomModule3]
125
+ """
126
+ # using set to dedup
127
+ float_custom_module_classes : Set[Any] = set()
128
+ for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]:
129
+ quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {})
130
+ quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys())
131
+ float_custom_module_classes |= quant_mode_custom_module_classes
132
+ return list(float_custom_module_classes)
133
+
134
+ def get_linear_prepack_op_for_dtype(dtype):
135
+ if dtype == torch.float16:
136
+ return torch.ops.quantized.linear_prepack_fp16
137
+ elif dtype == torch.qint8:
138
+ return torch.ops.quantized.linear_prepack
139
+ else:
140
+ raise Exception("can't get linear prepack op for dtype:", dtype)
141
+
142
+ def get_qconv_prepack_op(conv_op: Callable) -> Callable:
143
+ prepack_ops = {
144
+ torch.nn.functional.conv1d: torch.ops.quantized.conv1d_prepack,
145
+ torch.nn.functional.conv2d: torch.ops.quantized.conv2d_prepack,
146
+ torch.nn.functional.conv3d: torch.ops.quantized.conv3d_prepack,
147
+ torch.nn.functional.conv_transpose1d: torch.ops.quantized.conv_transpose1d_prepack,
148
+ torch.nn.functional.conv_transpose2d: torch.ops.quantized.conv_transpose2d_prepack,
149
+ torch.nn.functional.conv_transpose3d: torch.ops.quantized.conv_transpose3d_prepack,
150
+ }
151
+ prepack_op = prepack_ops.get(conv_op, None)
152
+ assert prepack_op, f"Didn't find prepack op for {conv_op}"
153
+ return prepack_op
154
+
155
+ # Returns a function that can get a new attribute name for module with given
156
+ # prefix, for example,
157
+ # >> get_new_observer_name = get_new_attr_name_with_prefix('_observer')
158
+ # >> new_name = get_new_observer_name(module)
159
+ # new_name will be an unused attribute name on module, e.g. `_observer_1`
160
+ def get_new_attr_name_with_prefix(prefix: str) -> Callable:
161
+ prefix = prefix.replace(".", "_")
162
+
163
+ def get_new_attr_name(module: torch.nn.Module):
164
+ def get_attr_name(i: int):
165
+ return prefix + str(i)
166
+ i = 0
167
+ attr_name = get_attr_name(i)
168
+ while hasattr(module, attr_name):
169
+ i += 1
170
+ attr_name = get_attr_name(i)
171
+ return attr_name
172
+ return get_new_attr_name
173
+
174
+ def collect_producer_nodes(node: Node) -> Optional[List[Node]]:
175
+ r''' Starting from a target node, trace back until we hit input or
176
+ getattr node. This is used to extract the chain of operators
177
+ starting from getattr to the target node, for example
178
+ def forward(self, x):
179
+ observed = self.observer(self.weight)
180
+ return F.linear(x, observed)
181
+ collect_producer_nodes(observed) will either return a list of nodes that
182
+ produces the observed node or None if we can't extract a self-contained
183
+ graph without free variables (inputs of the forward function).
184
+ '''
185
+ nodes = [node]
186
+ frontier = [node]
187
+ while frontier:
188
+ node = frontier.pop()
189
+ all_args = list(node.args) + list(node.kwargs.values())
190
+ for arg in all_args:
191
+ if not isinstance(arg, Node):
192
+ continue
193
+ if arg.op == 'placeholder':
194
+ # hit input, can't fold in this case
195
+ return None
196
+ nodes.append(arg)
197
+ if not (arg.op == 'call_function' and arg.target == getattr):
198
+ frontier.append(arg)
199
+ return nodes
200
+
201
+ def graph_module_from_producer_nodes(
202
+ root: GraphModule, producer_nodes: List[Node]) -> GraphModule:
203
+ r''' Construct a graph module from extracted producer nodes
204
+ from `collect_producer_nodes` function
205
+ Args:
206
+ root: the root module for the original graph
207
+ producer_nodes: a list of nodes we use to construct the graph
208
+ Return:
209
+ A graph module constructed from the producer nodes
210
+ '''
211
+ assert len(producer_nodes) > 0, 'list of producer nodes can not be empty'
212
+ # since we traced back from node to getattr
213
+ producer_nodes.reverse()
214
+ graph = Graph()
215
+ env: Dict[Any, Any] = {}
216
+
217
+ def load_arg(a):
218
+ return map_arg(a, lambda node: env[node])
219
+ for producer_node in producer_nodes:
220
+ env[producer_node] = graph.node_copy(producer_node, load_arg)
221
+ graph.output(load_arg(producer_nodes[-1]))
222
+ graph_module = GraphModule(root, graph)
223
+ return graph_module
224
+
225
+ def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
226
+ """
227
+ Returns the unique device for a module, or None if no device is found.
228
+ Throws an error if multiple devices are detected.
229
+ """
230
+ devices = {p.device for p in module.parameters()} | \
231
+ {p.device for p in module.buffers()}
232
+ """
233
+ As a temporary workaround for the AIMP HHC publish we added a CPU check; remove it later. T163614564
234
+ """
235
+ if {torch.device("cpu"), torch.device("meta")} == devices:
236
+ warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
237
+ devices = {torch.device("cpu")}
238
+ ""
239
+ assert len(devices) <= 1, (
240
+ "prepare only works with cpu or single-device CUDA modules, "
241
+ f"but got devices {devices}"
242
+ )
243
+ device = next(iter(devices)) if len(devices) > 0 else None
244
+ return device
245
+
246
+ def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:
247
+ """
248
+ Given a value of any type, creates a getattr node corresponding to the value and
249
+ registers the value as a buffer to the module.
250
+ """
251
+ get_new_attr_name = get_new_attr_name_with_prefix(prefix)
252
+ attr_name = get_new_attr_name(module)
253
+ device = assert_and_get_unique_device(module)
254
+ new_value = value.clone().detach() if isinstance(value, torch.Tensor) \
255
+ else torch.tensor(value, device=device)
256
+ module.register_buffer(attr_name, new_value)
257
+ # Create get_attr with value
258
+ attr_node = graph.create_node("get_attr", attr_name)
259
+ return attr_node
260
+
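A small sketch (not part of the diff) of what `create_getattr_from_value` does to a module; the "_scale_" prefix and the traced Linear are illustrative assumptions:

import torch
from torch.fx import symbolic_trace

gm = symbolic_trace(torch.nn.Linear(2, 2))
# Registers a buffer such as "_scale_0" on gm and returns a get_attr node pointing at it.
scale_node = create_getattr_from_value(gm, gm.graph, "_scale_", torch.tensor(0.5))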
261
+ def all_node_args_have_no_tensors(node: Node, modules: Dict[str, torch.nn.Module], cache: Dict[Node, bool]) -> bool:
262
+ """
263
+ If we know for sure that all of this node's args have no
264
+ tensors (are primitives), return True. If we either
265
+ find a tensor or are not sure, return False. Note: this
266
+ function is not exact.
267
+ """
268
+ if cache and node in cache:
269
+ return cache[node]
270
+
271
+ result = False # will be overwritten
272
+ if not isinstance(node, Node):
273
+ result = True
274
+ elif node.op == 'placeholder':
275
+ result = False
276
+ elif node.op == 'call_module':
277
+ assert isinstance(node.target, str)
278
+ if _is_activation_post_process(modules[node.target]):
279
+ result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
280
+ elif node.op == 'call_module':
281
+ result = False
282
+ elif node.op == 'call_function' and node.target is operator.getitem:
283
+ result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
284
+ elif node.op == 'get_attr':
285
+ result = False
286
+ elif node.target is getattr and node.args[1] in ['ndim', 'shape']:
287
+ # x1 = x0.ndim
288
+ result = True
289
+ elif node.op == 'call_method' and node.target == 'size':
290
+ # x1 = x0.size(0)
291
+ result = True
292
+ else:
293
+ found_one_tensor = False
294
+ for arg in node.args:
295
+ if isinstance(arg, list):
296
+ for list_el in arg:
297
+ if isinstance(list_el, Node):
298
+ this_list_el_args_have_no_tensors = \
299
+ all_node_args_have_no_tensors(list_el, modules, cache)
300
+ found_one_tensor = found_one_tensor or \
301
+ (not this_list_el_args_have_no_tensors)
302
+ # If found_one_tensor is True, there is no point in
303
+ # recursing further as the end result will always
304
+ # be True.
305
+ # TODO(future PR): remove this entire function and
306
+ # change to dtype inference without recursion.
307
+ if found_one_tensor:
308
+ result = not found_one_tensor
309
+ if cache:
310
+ cache[node] = result
311
+ return result
312
+ elif isinstance(arg, int):
313
+ pass
314
+ else:
315
+ if isinstance(arg, Node):
316
+ this_arg_args_have_no_tensors = all_node_args_have_no_tensors(arg, modules, cache)
317
+ found_one_tensor = found_one_tensor or \
318
+ (not this_arg_args_have_no_tensors)
319
+ # If found_one_tensor is True, there is no point in
320
+ # recursing further as the end result will always
321
+ # be True.
322
+ # TODO(future PR): remove this entire function and
323
+ # change to dtype inference without recursion.
324
+ if found_one_tensor:
325
+ result = not found_one_tensor
326
+ if cache:
327
+ cache[node] = result
328
+ return result
329
+ else:
330
+ found_one_tensor = True
331
+ result = not found_one_tensor
332
+ if cache:
333
+ cache[node] = result
334
+ return result
335
+
336
+ def all_node_args_except_first(node: Node) -> List[int]:
337
+ """
338
+ Returns all node arg indices after first
339
+ """
340
+ return list(range(1, len(node.args)))
341
+
342
+ def return_arg_list(arg_indices: List[int]) -> Callable[[Node], List[int]]:
343
+ """
344
+ Constructs a function that takes a node as arg and returns the arg_indices
345
+ that are valid for node.args
346
+ """
347
+ def arg_indices_func(node: Node) -> List[int]:
348
+ return [i for i in arg_indices if i < len(node.args)]
349
+ return arg_indices_func
350
+
351
+ NodeInfo = namedtuple("NodeInfo", "op target")
352
+
353
+ # this dict identifies which indices of a node are non tensors
354
+ # so that they can be propagated correctly since inserting observers
355
+ # for them would cause errors
356
+
357
+ NON_OBSERVABLE_ARG_DICT: Dict[NodeInfo, Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]] = {
358
+ NodeInfo("call_method", "masked_fill") : {
359
+ torch.bool: return_arg_list([1]),
360
+ float: return_arg_list([2])
361
+ },
362
+ NodeInfo("call_method", "permute") : {
363
+ int: all_node_args_except_first
364
+ },
365
+ NodeInfo("call_method", "repeat") : {
366
+ int: all_node_args_except_first
367
+ },
368
+ NodeInfo("call_method", "reshape") : {
369
+ int: all_node_args_except_first
370
+ },
371
+ NodeInfo("call_method", "size") : {
372
+ int: return_arg_list([1])
373
+ },
374
+ NodeInfo("call_method", "transpose") : {
375
+ int: all_node_args_except_first
376
+ },
377
+ NodeInfo("call_method", torch.transpose) : {
378
+ int: all_node_args_except_first
379
+ },
380
+ NodeInfo("call_method", "unsqueeze") : {
381
+ int: return_arg_list([1])
382
+ },
383
+ NodeInfo("call_method", "unsqueeze_") : {
384
+ int: return_arg_list([1])
385
+ },
386
+ NodeInfo("call_method", torch.unsqueeze) : {
387
+ int: return_arg_list([1])
388
+ },
389
+ NodeInfo("call_method", "view") : {
390
+ int: all_node_args_except_first
391
+ },
392
+ }
393
+
394
+ EMPTY_ARG_DICT: Dict[Union[type, torch.dtype], Callable[[Node], List[int]]] = {}
395
+
396
+ def get_non_observable_arg_indexes_and_types(node: Node) -> Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]:
397
+ """
398
+ Returns a dict with of non float tensor types as keys and values which correspond to a
399
+ function to retrieve the list (which takes the node as an argument)
400
+ """
401
+ info = NodeInfo(node.op, node.target)
402
+
403
+ return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT)
404
+
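A small sketch (not part of the diff) showing how the lookup above classifies the non-observable integer args of a `reshape` call; the toy module is an illustrative assumption:

import torch
from torch.fx import symbolic_trace

class Reshaper(torch.nn.Module):
    def forward(self, x):
        return x.reshape(1, -1)

gm = symbolic_trace(Reshaper())
node = next(n for n in gm.graph.nodes if n.op == "call_method" and n.target == "reshape")
arg_map = get_non_observable_arg_indexes_and_types(node)
# For reshape, every arg after the first is a non-observable int: indices [1, 2] here.
print({ty: fn(node) for ty, fn in arg_map.items()})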
405
+ def maybe_get_next_module(
406
+ node: Node,
407
+ modules: Dict[str, nn.Module],
408
+ target_module_type: Optional[Type[nn.Module]] = None,
409
+ target_functional_type: Any = None,
410
+ ) -> Optional[Node]:
411
+ """ Gets the next module that matches what is needed in
412
+ is_target_module_type if it exists
413
+
414
+ Args:
415
+ node: The node whose users we want to look at
416
+ target_module_type: Module type that we want to check
417
+ target_functional_type: Functional type that we want to check
418
+ """
419
+
420
+ for user in node.users.keys():
421
+ if user.op == 'call_module' and target_module_type is not None and \
422
+ isinstance(modules[str(user.target)], target_module_type):
423
+ return user
424
+ elif (user.op == 'call_function' and target_functional_type is not None and
425
+ user.target == target_functional_type):
426
+ return user
427
+
428
+ return None
429
+
430
+ def create_node_from_old_node_preserve_meta(
431
+ quantized_graph: Graph,
432
+ create_node_args: Tuple[Any, ...],
433
+ old_node: Node,
434
+ ) -> Node:
435
+ """
436
+ Creates `new_node` and copies the necessary metadata to it from `old_node`.
437
+ """
438
+ new_node = quantized_graph.create_node(*create_node_args)
439
+ new_node.stack_trace = old_node.stack_trace
440
+ return new_node
441
+
442
+ def get_skipped_module_name_and_classes(
443
+ prepare_custom_config: PrepareCustomConfig,
444
+ is_standalone_module: bool) -> Tuple[List[str], List[Type[Any]]]:
445
+ skipped_module_names = copy.copy(prepare_custom_config.non_traceable_module_names)
446
+ skipped_module_classes = copy.copy(prepare_custom_config.non_traceable_module_classes)
447
+ if not is_standalone_module:
448
+ # standalone module and custom module config are applied in top level module
449
+ skipped_module_names += list(prepare_custom_config.standalone_module_names.keys())
450
+ skipped_module_classes += list(prepare_custom_config.standalone_module_classes.keys())
451
+ skipped_module_classes += get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping)
452
+
453
+ return skipped_module_names, skipped_module_classes
454
+
455
+ def _is_custom_module_lstm(
456
+ node: Node,
457
+ named_modules: Dict[str, torch.nn.Module],
458
+ qconfig: QConfigAny = None,
459
+ # QuantizeHandler, but we cannot include the type here due to circular imports
460
+ qhandler: Optional[Any] = None,
461
+ ) -> bool:
462
+ """
463
+ Return whether this refers to the custom module LSTM flow.
464
+ """
465
+ mod = _get_module(node, named_modules)
466
+ if qconfig is not None and qhandler is not None:
467
+ assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler) # type: ignore[attr-defined]
468
+ return isinstance(mod, torch.nn.LSTM) and \
469
+ activation_is_statically_quantized(qconfig) and \
470
+ qhandler.is_custom_module()
471
+ else:
472
+ return isinstance(mod, torch.ao.nn.quantizable.LSTM)
473
+
474
+ def _is_custom_module_mha(
475
+ node: Node,
476
+ named_modules: Dict[str, torch.nn.Module],
477
+ qconfig: QConfigAny = None,
478
+ # QuantizeHandler, but we cannot include the type here due to circular imports
479
+ qhandler: Optional[Any] = None,
480
+ ) -> bool:
481
+ """
482
+ Return whether this refers to the custom module MultiheadAttention flow.
483
+ """
484
+ mod = _get_module(node, named_modules)
485
+ if qconfig is not None and qhandler is not None:
486
+ assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler) # type: ignore[attr-defined]
487
+ return isinstance(mod, torch.nn.MultiheadAttention) and \
488
+ activation_is_statically_quantized(qconfig) and \
489
+ qhandler.is_custom_module()
490
+ else:
491
+ return isinstance(mod, torch.ao.nn.quantizable.MultiheadAttention)
492
+
493
+ def _get_module(node: Node, named_modules: Dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]:
494
+ """
495
+ If `node` refers to a call_module node, return the module, else None.
496
+ """
497
+ if node.op == "call_module" and str(node.target) in named_modules:
498
+ return named_modules[str(node.target)]
499
+ else:
500
+ return None
501
+
502
+ def _insert_dequant_stub(
503
+ node: Node,
504
+ model: torch.nn.Module,
505
+ named_modules: Dict[str, torch.nn.Module],
506
+ graph: Graph,
507
+ ) -> Node:
508
+ """
509
+ Attach a `DeQuantStub` to the model and create a node that calls this
510
+ `DeQuantStub` on the output of `node`, similar to how observers are inserted.
511
+ """
512
+ prefix = "dequant_stub_"
513
+ get_new_dequant_stub_name = get_new_attr_name_with_prefix(prefix)
514
+ dequant_stub_name = get_new_dequant_stub_name(model)
515
+ dequant_stub = DeQuantStub()
516
+ setattr(model, dequant_stub_name, dequant_stub)
517
+ named_modules[dequant_stub_name] = dequant_stub
518
+ with graph.inserting_after(node):
519
+ return graph.call_module(dequant_stub_name, (node,))
520
+
521
+ def _insert_dequant_stubs_for_custom_module_lstm_output(
522
+ node: Node,
523
+ model: torch.nn.Module,
524
+ named_modules: Dict[str, torch.nn.Module],
525
+ graph: Graph,
526
+ ) -> Node:
527
+ """
528
+ Insert DeQuantStubs after each internal output node of custom module LSTM.
529
+
530
+ Custom module LSTM outputs are nested tuples of the structure (output, (hidden0, hidden1)),
531
+ Since we cannot dequantize a tuple as a whole, we must first break down the tuple into its
532
+ components through `getitem`. This function transforms the graph as follows:
533
+
534
+ (1) Split the LSTM node into (output, (hidden0, hidden1))
535
+ (2) Insert a DeQuantStub after each internal node
536
+ (3) Recombine the DeQuantStubs into the same structure as before
537
+ (4) Reroute all consumers of the original LSTM node and its sub-nodes
538
+ (e.g. lstm[0])
539
+
540
+ Before:
541
+ lstm_output
542
+ |
543
+ v
544
+ original_user(s)
545
+ After:
546
+ lstm_output
547
+ / \\
548
+ / (getitem) \\
549
+ / \\
550
+ v v
551
+ output hidden
552
+ | / \\
553
+ (DeQuantStub) (getitem)
554
+ | / \\
555
+ v v v
556
+ output_dq hidden0 hidden1
557
+ | | |
558
+ | (DeQuantStub) (DeQuantStub)
559
+ | | |
560
+ | v v
561
+ | hidden0_dq hidden1_dq
562
+ | \\ /
563
+ | (tuple)
564
+ | \\ /
565
+ | v v
566
+ | hidden_dq
567
+ \\ /
568
+ \\ (tuple) /
569
+ v v
570
+ lstm_output_dq
571
+ |
572
+ v
573
+ original_user(s)
574
+
575
+ For step (4), reroute all users of the original LSTM node(s) as follows:
576
+ lstm_output -> lstm_output_dq
577
+ lstm_output[0] -> output_dq
578
+ lstm_output[1] -> hidden_dq
579
+ lstm_output[1][0] -> hidden0_dq
580
+ lstm_output[1][1] -> hidden1_dq
581
+
582
+ Return the node `lstm_output_dq`.
583
+ """
584
+ # (1) Split the LSTM node into (output, (hidden0, hidden1))
585
+ # (2) Insert a DeQuantStub after each internal node
586
+ with graph.inserting_after(node):
587
+ output = graph.call_function(operator.getitem, (node, 0))
588
+ output_dq = _insert_dequant_stub(output, model, named_modules, graph)
589
+ with graph.inserting_after(output_dq):
590
+ hidden = graph.call_function(operator.getitem, (node, 1))
591
+ with graph.inserting_after(hidden):
592
+ hidden0 = graph.call_function(operator.getitem, (hidden, 0))
593
+ hidden0_dq = _insert_dequant_stub(hidden0, model, named_modules, graph)
594
+ with graph.inserting_after(hidden0_dq):
595
+ hidden1 = graph.call_function(operator.getitem, (hidden, 1))
596
+ hidden1_dq = _insert_dequant_stub(hidden1, model, named_modules, graph)
597
+
598
+ # (3) Recombine the DeQuantStubs into the same structure as before
599
+ with graph.inserting_after(hidden1_dq):
600
+ hidden_dq = graph.call_function(tuple, ([hidden0_dq, hidden1_dq],))
601
+ with graph.inserting_after(hidden_dq):
602
+ lstm_output_dq = graph.call_function(tuple, ([output_dq, hidden_dq],))
603
+
604
+ # (4) Reroute all consumers of the original LSTM node and its sub-nodes
605
+ for user in list(node.users.keys()):
606
+ if user != output and user != hidden:
607
+ user.replace_input_with(node, lstm_output_dq)
608
+ # The getitem and tuple nodes we added here may interfere with reference quantized
609
+ # pattern matching, so we need to redirect the consumers of internal nodes to the
610
+ # corresponding nodes with DeQuantStubs (e.g. lstm_output_dq[0] -> output_dq) attached,
611
+ # in order to preserve reference patterns like "dequantize - consumer - quantize".
612
+ _reroute_tuple_getitem_pattern(graph)
613
+ return lstm_output_dq
614
+
615
+ def _maybe_get_custom_module_lstm_from_node_arg(
+     arg: Node,
+     named_modules: Dict[str, torch.nn.Module],
+ ) -> Optional[Node]:
+     """
+     Given an argument of a node, return the custom module LSTM node if the argument
+     is the path through which the node consumes the output of a custom module LSTM,
+     or None otherwise.
+
+     This is used to determine whether a node is a consumer of custom module LSTM, and,
+     if so, skip inserting input observers for this node. This is because custom module
+     LSTM produces quantized outputs, so inserting an input observer for the consumer of
+     custom module LSTM would unnecessarily quantize the outputs again.
+
+         lstm -> consumer
+
+     In practice, however, custom module LSTM outputs a tuple (output, (hidden0, hidden1)) with
+     DeQuantStubs attached to each internal node (see `_insert_dequant_stubs_for_custom_module_lstm_output`).
+     This tuple can be consumed in one of four ways:
+
+         lstm -> getitem -> DeQuantStub -> consumer                       # consume lstm[0]
+         lstm -> getitem -> getitem -> DeQuantStub -> tuple -> consumer   # consume lstm[1]
+         lstm -> getitem -> getitem -> DeQuantStub -> consumer            # consume lstm[1][0] or lstm[1][1]
+         lstm -> getitem -> DeQuantStub -> tuple -> consumer              # consume lstm
+
+     Thus, we must match against the above patterns instead of simply checking the parent node
+     to determine whether this node is a consumer of a custom module LSTM.
+     """
+     def match_dq(a):
+         return isinstance(_get_module(a, named_modules), DeQuantStub)
+
+     def match_lstm(a):
+         return _is_custom_module_lstm(a, named_modules)
+
+     def match_getitem(a):
+         return a.op == "call_function" and a.target == operator.getitem
+
+     def match_tuple(a):
+         return a.op == "call_function" and a.target == tuple
+
+     def _match_pattern(match_pattern: List[Callable]) -> Optional[Node]:
+         """
+         Traverse up the graph and match the args one by one.
+         If there is a match, return the last matched node, or None otherwise.
+         """
+         a = arg
+         for i, match in enumerate(match_pattern):
+             if not match(a):
+                 return None
+             # Match the next arg; for tuple, the arg is a tuple of a list, e.g. ([dq_1, other_node],)
+             if i < len(match_pattern) - 1:
+                 if match == match_tuple:
+                     a = a.args[0][0]  # type: ignore[assignment,index]
+                 else:
+                     a = a.args[0]  # type: ignore[assignment]
+         return a
+
+     all_match_patterns = [
+         [match_dq, match_getitem, match_lstm],
+         [match_tuple, match_dq, match_getitem, match_getitem, match_lstm],
+         [match_dq, match_getitem, match_getitem, match_lstm],
+         [match_tuple, match_dq, match_getitem, match_lstm],
+     ]
+
+     for p in all_match_patterns:
+         matched_node = _match_pattern(p)
+         if matched_node is not None:
+             return matched_node
+     return None
+
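The matchers above depend on module lookups (`_get_module`, `_is_custom_module_lstm`) defined elsewhere in this file, so here is a self-contained sketch of the same "walk up the args, one predicate per hop" technique, with simplified stand-in predicates and a placeholder standing in for the LSTM node (all names below are illustrative):

import operator

import torch.fx

graph = torch.fx.Graph()
lstm = graph.placeholder("lstm_out")  # stand-in for the custom module LSTM node
item = graph.call_function(operator.getitem, (lstm, 0))
consumer = graph.call_function(operator.neg, (item,))

def match_getitem(n):
    return n.op == "call_function" and n.target == operator.getitem

def match_placeholder(n):
    return n.op == "placeholder"

def match_chain(start, predicates):
    # Apply one predicate per hop, stepping to args[0] between hops,
    # mirroring _match_pattern above (minus the special tuple handling).
    node = start
    for i, pred in enumerate(predicates):
        if not pred(node):
            return None
        if i < len(predicates) - 1:
            node = node.args[0]
    return node

# Starting from the consumer's input, the chain getitem -> placeholder matches
# and the stand-in "LSTM" node is returned.
assert match_chain(consumer.args[0], [match_getitem, match_placeholder]) is lstm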
+ def _reroute_tuple_getitem_pattern(graph: Graph):
+     """
+     Search for patterns where N consecutive `tuple` call_function nodes are followed by
+     N consecutive `getitem` call_function nodes that are "reverses" of the `tuple` nodes.
+     If we find this pattern, reroute the consumers of the last `getitem` to skip these
+     N `tuple` and `getitem` nodes.
+
+     Before:
+
+         a   b     c
+         |   \\   /
+         \\   tuple
+          \\   /
+           tuple
+             |
+         getitem(1)
+             |
+         getitem(0)
+             |
+             d
+
+     After:
+
+         b
+         |
+         d
+     """
+     def find_patterns(
+             node: Node,
+             index_stack: List[int],
+             current_pattern: List[Node],
+             matched_patterns: List[List[Node]],
+             seen: Set[Tuple[Node, Tuple[int, ...]]]):
+         """
+         Traverse the graph recursively to match for the N-tuple - N-getitem patterns,
+         starting at the given node.
+
+         We use a stack to keep track of the expected `getitem` indices, since these are
+         reversed from the `tuple` indices. In the above example, the stack after
+         (b -> tuple -> tuple) will be [0, 1], which will be popped by getitem(1) first
+         and then by getitem(0).
+
+         TODO: traverse upwards from the output and handle the case when tuple is not a
+         separate node, e.g. graph.call_function(operator.getitem, args=(a, (b, c)))
+         """
+         if len(index_stack) == 0 and len(current_pattern) > 0:
+             matched_patterns.append(copy.copy(current_pattern))
+             current_pattern.clear()
+
+         # Avoid duplicating work
+         state = (node, tuple(index_stack))
+         if state in seen:
+             return
+         seen.add(state)
+
+         # Iterate through users of this node to find tuple/getitem nodes to match
+         for user in node.users:
+             if user.op == "call_function" and user.target == tuple:
+                 for i, user_arg in enumerate(user.args[0]):  # type: ignore[arg-type]
+                     if user_arg == node:
+                         index_stack.append(i)
+                         current_pattern.append(user)
+                         find_patterns(user, index_stack, current_pattern, matched_patterns, seen)
+             elif user.op == "call_function" and user.target == operator.getitem:
+                 if len(index_stack) > 0:
+                     if user.args[1] == index_stack[-1]:
+                         index_stack.pop()
+                         current_pattern.append(user)
+                         find_patterns(user, index_stack, current_pattern, matched_patterns, seen)
+         return matched_patterns
+
+     # Collect all matched patterns
+     matched_patterns: List[List[Node]] = []
+     seen: Set[Tuple[Node, Tuple[int, ...]]] = set()  # (node, index_stack)
+     for node in graph.nodes:
+         find_patterns(node, [], [], matched_patterns, seen)
+
+     # For each pattern, redirect all consumers of the last getitem node to the correct
+     # input of the first tuple node
+     for pattern in matched_patterns:
+         first_tuple = pattern[0]
+         last_getitem = pattern[-1]
+         assert first_tuple.op == "call_function" and first_tuple.target == tuple
+         assert last_getitem.op == "call_function" and last_getitem.target == operator.getitem
+         last_getitem_index = last_getitem.args[1]
+         new_input = first_tuple.args[0][last_getitem_index]  # type: ignore[index]
+         for user in list(last_getitem.users.keys()):
+             user.replace_input_with(last_getitem, new_input)
+
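To see the pass act on the exact Before/After shape from the docstring, one can build that graph by hand. The import below assumes these helpers live in torch.ao.quantization.fx.utils, which is where they sit in upstream PyTorch; the rest is a toy graph, not code from this file:

import operator

import torch.fx
from torch.ao.quantization.fx.utils import _reroute_tuple_getitem_pattern

graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
c = graph.placeholder("c")
inner = graph.call_function(tuple, ([b, c],))           # (b, c)
outer = graph.call_function(tuple, ([a, inner],))       # (a, (b, c))
g1 = graph.call_function(operator.getitem, (outer, 1))  # -> (b, c)
g0 = graph.call_function(operator.getitem, (g1, 0))     # -> b
d = graph.call_function(operator.neg, (g0,))
graph.output(d)

_reroute_tuple_getitem_pattern(graph)
print(graph)  # the neg node ("d") now reads directly from placeholder b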
+ def _get_observer_from_activation_post_process(
+     activation_post_process: Union[ObserverBase, FakeQuantizeBase],
+ ) -> ObserverBase:
+     """
+     If `activation_post_process` is an observer, return the observer.
+     If `activation_post_process` is a fake quantize, return the internal observer.
+     """
+     if isinstance(activation_post_process, ObserverBase):
+         return activation_post_process
+     else:
+         assert isinstance(activation_post_process, FakeQuantizeBase)
+         return activation_post_process.activation_post_process  # type: ignore[return-value]
+
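A small usage sketch of the fake-quantize branch (the helper returns the wrapped observer); the specific observer classes are chosen here only for illustration:

from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.observer import MovingAverageMinMaxObserver

# An observer instance would be returned unchanged; a fake quantize wraps its
# observer in the `activation_post_process` attribute, which is what the helper unwraps.
fq = FakeQuantize(observer=MovingAverageMinMaxObserver)
assert isinstance(fq.activation_post_process, MovingAverageMinMaxObserver)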
+ def _qconfig_satisfies_dtype_config_constraints(
+         qconfig: QConfigAny,
+         dtype_with_constraints: DTypeWithConstraints,
+         is_activation: bool = True) -> bool:
+     """
+     Return whether `qconfig` satisfies the following constraints from the backend,
+     specified through the activation and weight DTypeWithConstraints:
+
+         1. QConfig specifies a quantization range that falls within the backend's, if any
+         2. QConfig specifies a min scale value that is >= the backend's, if any
+         3. QConfig specifies a FixedQParamsObserver or FixedQParamsFakeQuantize whose
+            scale and zero point match the backend's, if any
+
+     If `is_activation` is True, we check `qconfig.activation`, else we check `qconfig.weight`.
+     If `qconfig` or `dtype_with_constraints.dtype` is None, or the dtypes do not match, return True.
+     """
+     # TODO: log warnings only when the user enabled a debug flag
+     def _activation_post_process_satisfies_dtype_config_constraints(
+             activation_post_process: Union[ObserverBase, FakeQuantizeBase],
+             dtype_with_constraints: DTypeWithConstraints,
+             debug_string: str) -> bool:
+         observer = _get_observer_from_activation_post_process(activation_post_process)
+         app_quant_min = getattr(observer, "quant_min", None)
+         app_quant_max = getattr(observer, "quant_max", None)
+         # TODO: for now, just use the existing eps value as scale_min. In the future, we should
+         # resolve the differences between the two, either by renaming eps or some other way
+         app_scale_min = getattr(observer, "eps", None)
+         backend_quant_min = dtype_with_constraints.quant_min_lower_bound
+         backend_quant_max = dtype_with_constraints.quant_max_upper_bound
+         backend_scale_min = dtype_with_constraints.scale_min_lower_bound
+         backend_scale_exact_match = dtype_with_constraints.scale_exact_match
+         backend_zero_point_exact_match = dtype_with_constraints.zero_point_exact_match
+         # check quantization ranges
+         if backend_quant_min is not None and backend_quant_max is not None:
+             if app_quant_min is None or app_quant_max is None:
+                 warnings.warn(f"QConfig {debug_string} must specify 'quant_min' and 'quant_max', ignoring {qconfig}")
+                 return False
+             elif app_quant_min < backend_quant_min or app_quant_max > backend_quant_max:
+                 warnings.warn(
+                     f"QConfig {debug_string} quantization range must fall within the backend's:\n"
+                     f"QConfig range = ({app_quant_min}, {app_quant_max}), "
+                     f"BackendConfig range = ({backend_quant_min}, {backend_quant_max}), "
+                     f"ignoring {qconfig}"
+                 )
+                 return False
+         # check scale min
+         if backend_scale_min is not None:
+             if app_scale_min is None:
+                 warnings.warn(f"QConfig {debug_string} must specify 'eps', ignoring {qconfig}")
+                 return False
+             if app_scale_min < backend_scale_min:
+                 warnings.warn(
+                     f"QConfig {debug_string} eps ({app_scale_min}) must be greater than or equal to "
+                     f"the backend's min scale value ({backend_scale_min}), ignoring {qconfig}"
+                 )
+                 return False
+         # check fixed scale and zero point
+         if backend_scale_exact_match is not None and backend_zero_point_exact_match is not None:
+             # For tests only, accept the following qconfigs for now
+             # TODO: handle fp16 qconfigs properly
+             for accepted_qconfig in [float16_static_qconfig, float16_dynamic_qconfig]:
+                 if qconfig_equals(qconfig, accepted_qconfig):
+                     return True
+             suggestion_str = (
+                 "Please use torch.ao.quantization.get_default_qconfig_mapping or "
+                 "torch.ao.quantization.get_default_qat_qconfig_mapping. Example:\n"
+                 "    qconfig_mapping = get_default_qconfig_mapping(\"fbgemm\")\n"
+                 "    model = prepare_fx(model, qconfig_mapping, example_inputs)"
+             )
+             if not isinstance(activation_post_process, FixedQParamsObserver) and \
+                     not isinstance(activation_post_process, FixedQParamsFakeQuantize):
+                 warnings.warn(
+                     f"QConfig must specify a FixedQParamsObserver or a FixedQParamsFakeQuantize "
+                     f"for fixed qparams ops, ignoring {qconfig}.\n{suggestion_str}"
+                 )
+                 return False
+             if observer.scale != backend_scale_exact_match or observer.zero_point != backend_zero_point_exact_match:
+                 warnings.warn(
+                     f"QConfig fixed scale ({observer.scale}) and zero point ({observer.zero_point}) "
+                     f"do not match the backend's ({backend_scale_exact_match} and {backend_zero_point_exact_match}), "
+                     f"ignoring {qconfig}.\n{suggestion_str}"
+                 )
+                 return False
+         return True
+
+     if qconfig is None or dtype_with_constraints.dtype is None:
+         return True
+
+     activation_post_process_ctr = qconfig.activation if is_activation else qconfig.weight
+     debug_string = "activation" if is_activation else "weight"
+     satisfies_constraints = True
+     if activation_post_process_ctr is not None:
+         activation_post_process = activation_post_process_ctr()
+         assert _is_activation_post_process(activation_post_process)
+         # If dtypes don't match, don't check the activation_post_process and return True early
+         if activation_post_process.dtype != dtype_with_constraints.dtype:
+             return True
+         satisfies_constraints = _activation_post_process_satisfies_dtype_config_constraints(
+             activation_post_process, dtype_with_constraints, debug_string)
+     return satisfies_constraints
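A usage sketch for the range check (constraint 1). The import path again assumes the upstream location torch.ao.quantization.fx.utils, and the constraint values below are made up for illustration:

import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization.backend_config import DTypeWithConstraints
from torch.ao.quantization.fx.utils import _qconfig_satisfies_dtype_config_constraints

constraints = DTypeWithConstraints(
    dtype=torch.quint8,
    quant_min_lower_bound=0,
    quant_max_upper_bound=255,
)
qconfig = get_default_qconfig("fbgemm")
# The default fbgemm activation observer targets quint8 with a range inside [0, 255],
# so the range check should pass; no scale constraints are set in this sketch.
print(_qconfig_satisfies_dtype_config_constraints(qconfig, constraints))  # expected: True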
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/export_utils.cpython-310.pyc ADDED
Binary file (6.42 kB). View file
 
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc ADDED
Binary file (698 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
venv/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc ADDED
Binary file (7.22 kB). View file