Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py +526 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py +1025 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py +460 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py +950 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py +761 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py +1312 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py +64 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py +200 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py +243 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py +533 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py +275 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py +23 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py +637 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py +160 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py +659 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py +494 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py +116 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py +204 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py +542 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py +160 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py +81 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py +279 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py +113 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/eval_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (479 Bytes).
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__init__.py
ADDED
Empty file.
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py
ADDED
@@ -0,0 +1,526 @@
import torch
import torch.nn as nn
import torch.ao.nn.quantized as nnq
import torch.ao.nn.quantized.dynamic as nnqd
from torch.ao.quantization import prepare
from typing import Dict, List, Optional, Any, Union, Callable, Set

from torch.ao.quantization.quantization_mappings import (
    get_default_compare_output_module_list,
)

NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST = {
    nnqd.Linear,
    nnq.Linear,
    nnqd.LSTM,
    nn.LSTM,
}


def _find_match(
    str_list: Union[Dict[str, Any], List[str]], key_str: str,
    postfix: str,
) -> Optional[str]:
    split_str = key_str.split(".")
    if split_str[-1] == postfix:
        match_string = "".join(key_str.split(".")[0:-1])
        for s2 in str_list:
            pattern1 = "".join(s2.split(".")[0:-1])
            pattern2 = "".join(s2.split(".")[0:-2])
            if match_string == pattern1:
                return s2
            if match_string == pattern2:
                return s2

        # For matching "fc.weight" and "fc._packed_params._packed_params"
        if postfix == "_packed_params":
            match_string = "".join(key_str.split(".")[0:-2])
            if len(match_string) == 0:
                return None
            for s2 in str_list:
                pattern1 = "".join(s2.split(".")[0:-1])
                pattern2 = "".join(s2.split(".")[0:-2])
                if match_string == pattern1:
                    return s2
                if match_string == pattern2:
                    return s2
        return None
    else:
        return None


def compare_weights(
    float_dict: Dict[str, Any], quantized_dict: Dict[str, Any]
) -> Dict[str, Dict[str, torch.Tensor]]:
    r"""Compare the weights of the float module with its corresponding quantized
    module. Return a dict with key corresponding to module names and each entry being
    a dictionary with two keys 'float' and 'quantized', containing the float and
    quantized weights. This dict can be used to compare and compute the quantization
    error of the weights of float and quantized models.

    Example usage::

        wt_compare_dict = compare_weights(
            float_model.state_dict(), qmodel.state_dict())
        for key in wt_compare_dict:
            print(
                key,
                compute_error(
                    wt_compare_dict[key]['float'],
                    wt_compare_dict[key]['quantized'].dequantize()
                )
            )

    Args:
        float_dict: state dict of the float model
        quantized_dict: state dict of the quantized model

    Return:
        weight_dict: dict with key corresponding to module names and each entry being
        a dictionary with two keys 'float' and 'quantized', containing the float and
        quantized weights
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_weights")
    weight_dict: Dict[str, Dict] = {}
    for key in quantized_dict:
        match_key = _find_match(float_dict, key, "weight")
        if match_key is not None:
            weight_dict[key] = {}
            weight_dict[key]["float"] = float_dict[match_key]
            weight_dict[key]["quantized"] = quantized_dict[key]
            continue

        # For matching "fc.weight" and "fc._packed_params._packed_params"
        match_key = _find_match(float_dict, key, "_packed_params")
        if match_key is not None:
            weight_dict[key] = {}
            weight_dict[key]["float"] = float_dict[match_key]
            weight_dict[key]["quantized"] = quantized_dict[key][0]

        # For LSTM
        split_str = key.split(".")
        if split_str[-1] == "param" and split_str[-3] == "_all_weight_values":
            layer = split_str[-2]
            module_name = ".".join(split_str[:-3])
            float_weight_ih_key = module_name + ".weight_ih_l" + layer
            float_weight_hh_key = module_name + ".weight_hh_l" + layer
            if float_weight_ih_key in float_dict and float_weight_hh_key in float_dict:
                weight_dict[key] = {}
                weight_dict[key]["float"] = float_dict[float_weight_ih_key]
                weight_dict[key]["quantized"] = (
                    quantized_dict[key].__getstate__()[0][4][0].__getstate__()[0][0]
                )
                weight_dict[key]["float"] = float_dict[float_weight_hh_key]
                weight_dict[key]["quantized"] = (
                    quantized_dict[key].__getstate__()[0][4][1].__getstate__()[0][0]
                )

    return weight_dict


def _get_logger_dict_helper(
    mod: nn.Module, target_dict: Dict[str, Any],
    prefix: str = "",
) -> None:
    r"""This is the helper function for get_logger_dict

    Args:
        mod: module we want to save all logger stats
        prefix: prefix for the current module
        target_dict: the dictionary used to save all logger stats
    """

    def get_prefix(prefix):
        return prefix if prefix == "" else prefix + "."

    for name, child in mod.named_children():
        if isinstance(child, Logger):
            target_dict[get_prefix(prefix) + "stats"] = child.stats
            break

    for name, child in mod.named_children():
        module_prefix = get_prefix(prefix) + name if prefix else name
        _get_logger_dict_helper(child, target_dict, module_prefix)


def get_logger_dict(mod: nn.Module, prefix: str = "") -> Dict[str, Dict]:
    r"""Traverse the modules and save all logger stats into target dict.
    This is mainly used for quantization accuracy debug.

    Type of loggers supported:
        ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module,
        OutputLogger: used to log the outputs of the modules

    Args:
        mod: module we want to save all logger stats
        prefix: prefix for the current module

    Return:
        target_dict: the dictionary used to save all logger stats

    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.get_logger_dict")

    target_dict: Dict[str, Dict] = {}
    _get_logger_dict_helper(mod, target_dict, prefix)
    return target_dict


class Logger(nn.Module):
    r"""Base class for stats logging
    """

    def __init__(self):
        super().__init__()
        self.stats = {}
        # We only insert observer if the op is quantized with static quantization,
        # which is identified by activation_observer.dtype == quint8. This is needed
        # when attaching Logger as observer for FX mode
        self.dtype = torch.quint8

    def forward(self, x):
        """
        """  # blank docblock to make autodoc happy
        pass


class ShadowLogger(Logger):
    r"""Class used in Shadow module to record the outputs of the original and
    shadow modules.
    """

    def __init__(self):
        super().__init__()
        self.stats["float"] = []
        self.stats["quantized"] = []

    def forward(self, x, y):
        """
        """  # blank docblock to make autodoc happy
        if len(x) > 1:
            x = x[0]
        if len(y) > 1:
            y = y[0]
        self.stats["quantized"].append(x.detach())
        self.stats["float"].append(y.detach())


class OutputLogger(Logger):
    r"""Class used to log the outputs of the module
    """

    def __init__(self):
        super().__init__()
        self.stats["tensor_val"] = []


    def forward(self, x):
        """
        """  # blank docblock to make autodoc happy
        self.stats["tensor_val"].append(x)
        return x


def _convert_tuple_to_list(t: Any) -> Any:
    return [_convert_tuple_to_list(x) for x in t] if type(t) is tuple else t


def _dequantize_tensor_list(t: Any) -> Any:
    return (
        [_dequantize_tensor_list(x) for x in t]
        if type(t) is list
        else t.dequantize()
        if t.is_quantized
        else t
    )


class Shadow(nn.Module):
    r"""Shadow module attaches the float module to its matching quantized module
    as the shadow. Then it uses Logger module to process the outputs of both
    modules.

    Args:
        q_module: module quantized from float_module that we want to shadow
        float_module: float module used to shadow q_module
        logger_cls: type of logger used to process the outputs of q_module and
            float_module. ShadowLogger or custom loggers can be used.
    """

    def __init__(self, q_module, float_module, logger_cls):
        super().__init__()
        self.orig_module = q_module
        self.shadow_module = float_module
        self.dequant = nnq.DeQuantize()
        self.logger = logger_cls()

    def forward(self, *x) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        xl = _convert_tuple_to_list(x)
        output = self.orig_module(*xl)
        xl_float = _dequantize_tensor_list(xl)
        shadow_output = self.shadow_module(*xl_float)
        self.logger(output, shadow_output)
        return output

    def add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.add(x, y)
        x = x.dequantize()
        y = y.dequantize()
        shadow_output = self.shadow_module.add(x, y)
        self.logger(output, shadow_output)
        return output

    def add_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.add_scalar(x, y)
        x = x.dequantize()
        shadow_output = self.shadow_module.add_scalar(x, y)
        self.logger(output, shadow_output)
        return output

    def mul(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.mul(x, y)
        x = x.dequantize()
        y = y.dequantize()
        shadow_output = self.shadow_module.mul(x, y)
        self.logger(output, shadow_output)
        return output

    def mul_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.mul_scalar(x, y)
        x = x.dequantize()
        shadow_output = self.shadow_module.mul_scalar(x, y)
        self.logger(output, shadow_output)
        return output

    def cat(self, x: List[torch.Tensor], dim: int = 0) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.cat(x, dim)
        x = [y.dequantize() for y in x]
        shadow_output = self.shadow_module.cat(x, dim)
        self.logger(output, shadow_output)
        return output

    def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        """  # blank docblock to make autodoc happy
        output = self.orig_module.add_relu(x, y)
        x = x.dequantize()
        y = y.dequantize()
        shadow_output = self.shadow_module.add_relu(x, y)
        self.logger(output, shadow_output)
        return output


def prepare_model_with_stubs(
    float_module: nn.Module, q_module: nn.Module,
    module_swap_list: Set[type], logger_cls: Callable,
) -> None:
    r"""Prepare the model by attaching the float module to its matching quantized
    module as the shadow if the float module type is in module_swap_list.

    Example usage::

        prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger)
        q_model(data)
        ob_dict = get_logger_dict(q_model)

    Args:
        float_module: float module used to generate the q_module
        q_module: module quantized from float_module
        module_swap_list: list of float module types to attach the shadow
        logger_cls: type of logger to be used in shadow module to process the outputs of
            quantized module and its float shadow module
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_with_stubs")

    float_module_children = {}
    for name, mod in float_module.named_children():
        float_module_children[name] = mod

    reassign = {}
    for name, mod in q_module.named_children():

        if name not in float_module_children:
            continue

        float_mod = float_module_children[name]

        if type(float_mod) not in module_swap_list:
            prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls)

        # Insert shadow module only if the module is not of the same type as
        # the floating point module
        if type(float_mod) in module_swap_list and not _is_identical_module_type(mod, float_mod):
            reassign[name] = Shadow(mod, float_mod, logger_cls)

    for key, value in reassign.items():
        q_module._modules[key] = value

def _is_identical_module_type(mod1, mod2):
    # Compare if two modules have the same dtype
    mod1_module_types = [type(mod) for mod in mod1.modules()]
    mod2_module_types = [type(mod) for mod in mod2.modules()]
    return mod1_module_types == mod2_module_types



def compare_model_stub(
    float_model: nn.Module, q_model: nn.Module, module_swap_list: Set[type],
    *data, logger_cls=ShadowLogger
) -> Dict[str, Dict]:
    r"""Compare quantized module in a model with its floating point counterpart,
    feeding both of them the same input. Return a dict with key corresponding to
    module names and each entry being a dictionary with two keys 'float' and
    'quantized', containing the output tensors of quantized and its matching
    float shadow module. This dict can be used to compare and compute the module
    level quantization error.

    This function first call prepare_model_with_stubs() to swap the quantized
    module that we want to compare with the Shadow module, which takes quantized
    module, corresponding float module and logger as input, and creates a forward
    path inside to make the float module to shadow quantized module sharing the
    same input. The logger can be customizable, default logger is ShadowLogger
    and it will save the outputs of the quantized module and float module that
    can be used to compute the module level quantization error.

    Example usage::

        module_swap_list = [torchvision.models.quantization.resnet.QuantizableBasicBlock]
        ob_dict = compare_model_stub(float_model,qmodel,module_swap_list, data)
        for key in ob_dict:
            print(key, compute_error(ob_dict[key]['float'], ob_dict[key]['quantized'].dequantize()))

    Args:
        float_model: float model used to generate the q_model
        q_model: model quantized from float_model
        module_swap_list: list of float module types at which shadow modules will
            be attached.
        data: input data used to run the prepared q_model
        logger_cls: type of logger to be used in shadow module to process the outputs of
            quantized module and its float shadow module
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_stub")
    prepare_model_with_stubs(float_model, q_model, module_swap_list, logger_cls)
    q_model(*data)
    ob_dict = get_logger_dict(q_model)
    return ob_dict


def get_matching_activations(
    float_module: nn.Module, q_module: nn.Module,
) -> Dict[str, Dict[str, torch.Tensor]]:
    r"""Find the matching activation between float and quantized modules.

    Args:
        float_module: float module used to generate the q_module
        q_module: module quantized from float_module

    Return:
        act_dict: dict with key corresponding to quantized module names and each
        entry being a dictionary with two keys 'float' and 'quantized', containing
        the matching float and quantized activations
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.get_matching_activations")
    float_dict = get_logger_dict(float_module)
    quantized_dict = get_logger_dict(q_module)
    act_dict: Dict[str, Dict] = {}
    for key in quantized_dict:
        if len(quantized_dict[key]["tensor_val"]) == 0:
            continue
        match_key = _find_match(sorted(float_dict, reverse=True), key, "stats")
        if match_key is not None:
            act_dict[key] = {}
            act_dict[key]["float"] = float_dict[match_key]["tensor_val"]
            act_dict[key]["quantized"] = quantized_dict[key]["tensor_val"]
    return act_dict


def prepare_model_outputs(
    float_module: nn.Module,
    q_module: nn.Module,
    logger_cls=OutputLogger,
    allow_list=None
) -> None:
    r"""Prepare the model by attaching the logger to both float module
    and quantized module if they are in the allow_list.

    Args:
        float_module: float module used to generate the q_module
        q_module: module quantized from float_module
        logger_cls: type of logger to be attached to float_module and q_module
        allow_list: list of module types to attach logger
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_outputs")
    if allow_list is None:
        allow_list = get_default_compare_output_module_list()

    qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None)
    float_module.qconfig = qconfig_debug  # type: ignore[assignment]
    prepare(float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={})
    q_module.qconfig = qconfig_debug  # type: ignore[assignment]
    prepare(
        q_module,
        inplace=True,
        allow_list=allow_list,
        observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
        prepare_custom_config_dict={}
    )


def compare_model_outputs(
    float_model: nn.Module,
    q_model: nn.Module,
    *data,
    logger_cls=OutputLogger,
    allow_list=None
) -> Dict[str, Dict[str, torch.Tensor]]:
    r"""Compare output activations between float and quantized models at
    corresponding locations for the same input. Return a dict with key corresponding
    to quantized module names and each entry being a dictionary with two keys
    'float' and 'quantized', containing the activations of quantized model and
    float model at matching locations. This dict can be used to compare and
    compute the propagation quantization error.

    Example usage::

        act_compare_dict = compare_model_outputs(float_model, qmodel, data)
        for key in act_compare_dict:
            print(
                key,
                compute_error(
                    act_compare_dict[key]['float'],
                    act_compare_dict[key]['quantized'].dequantize()
                )
            )

    Args:
        float_model: float model used to generate the q_model
        q_model: model quantized from float_model
        data: input data used to run the prepared float_model and q_model
        logger_cls: type of logger to be attached to float_module and q_module
        allow_list: list of module types to attach logger

    Return:
        act_compare_dict: dict with key corresponding to quantized module names
        and each entry being a dictionary with two keys 'float' and 'quantized',
        containing the matching float and quantized activations
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_outputs")
    if allow_list is None:
        allow_list = get_default_compare_output_module_list()
    prepare_model_outputs(float_model, q_model, logger_cls, allow_list)
    float_model(*data)
    q_model(*data)
    act_compare_dict = get_matching_activations(float_model, q_model)
    return act_compare_dict
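
Note: the docstrings above reference a `compute_error` helper that this file never defines. As a hedged, end-to-end sketch of how the eager-mode APIs added in this file fit together, the snippet below quantizes a toy model and runs the weight and output comparisons. The toy module `M`, the SQNR-based `compute_error` stand-in, and the random calibration data are illustrative assumptions for demonstration, not part of the diff above.

    import copy

    import torch
    import torch.ao.quantization as tq
    import torch.ao.ns._numeric_suite as ns

    # A toy float model with quant/dequant stubs for eager-mode static quantization.
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = tq.QuantStub()
            self.fc = torch.nn.Linear(4, 4)
            self.dequant = tq.DeQuantStub()

        def forward(self, x):
            return self.dequant(self.fc(self.quant(x)))

    float_model = M().eval()
    float_model.qconfig = tq.default_qconfig
    prepared = tq.prepare(copy.deepcopy(float_model))
    prepared(torch.randn(8, 4))  # calibrate observers on random data
    qmodel = tq.convert(prepared)

    # Stand-in for the `compute_error` used in the docstrings: SQNR in dB.
    def compute_error(x, y):
        Ps = torch.norm(x)
        Pn = torch.norm(x - y)
        return 20 * torch.log10(Ps / Pn)

    # Weight comparison: keys are quantized-model param names, values hold both copies.
    wt_compare_dict = ns.compare_weights(float_model.state_dict(), qmodel.state_dict())
    for key in wt_compare_dict:
        print(key, compute_error(
            wt_compare_dict[key]['float'],
            wt_compare_dict[key]['quantized'].dequantize()))

    # Output comparison: attaches OutputLogger to both models (inplace), runs the
    # same data through each, and returns matching activations per module.
    data = torch.randn(8, 4)
    act_compare_dict = ns.compare_model_outputs(float_model, qmodel, data)
    for key in act_compare_dict:
        print(key, len(act_compare_dict[key]['float']), 'logged tensors')
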
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py
ADDED
@@ -0,0 +1,1025 @@
1 |
+
"""
|
2 |
+
This module contains tooling to compare weights and activations
|
3 |
+
across models. Example usage::
|
4 |
+
|
5 |
+
import copy
|
6 |
+
import torch
|
7 |
+
import torch.ao.quantization.quantize_fx as quantize_fx
|
8 |
+
import torch.ao.ns._numeric_suite_fx as ns
|
9 |
+
|
10 |
+
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)).eval()
|
11 |
+
mp = quantize_fx.prepare_fx(m, {'': torch.ao.quantization.default_qconfig})
|
12 |
+
# We convert a copy because we need the original prepared model
|
13 |
+
# to be available for comparisons, and `quantize_fx.convert_fx` is inplace.
|
14 |
+
mq = quantize_fx.convert_fx(copy.deepcopy(mp))
|
15 |
+
|
16 |
+
#
|
17 |
+
# Comparing weights
|
18 |
+
#
|
19 |
+
|
20 |
+
# extract weight pairs
|
21 |
+
weight_comparison = ns.extract_weights('a', mp, 'b', mq)
|
22 |
+
|
23 |
+
# add SQNR for each comparison, inplace
|
24 |
+
ns.extend_logger_results_with_comparison(
|
25 |
+
weight_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
|
26 |
+
'sqnr')
|
27 |
+
|
28 |
+
# weight_comparison contains the weights from `mp` and `mq` stored
|
29 |
+
# in pairs, and can be used for further analysis.
|
30 |
+
|
31 |
+
|
32 |
+
#
|
33 |
+
# Comparing activations, with error propagation
|
34 |
+
#
|
35 |
+
|
36 |
+
# add loggers
|
37 |
+
mp_ns, mq_ns = ns.add_loggers(
|
38 |
+
'a', copy.deepcopy(mp),
|
39 |
+
'b', copy.deepcopy(mq),
|
40 |
+
ns.OutputLogger)
|
41 |
+
|
42 |
+
# send an example datum to capture intermediate activations
|
43 |
+
datum = torch.randn(1, 1, 1, 1)
|
44 |
+
mp_ns(datum)
|
45 |
+
mq_ns(datum)
|
46 |
+
|
47 |
+
# extract intermediate activations
|
48 |
+
act_comparison = ns.extract_logger_info(
|
49 |
+
mp_ns, mq_ns, ns.OutputLogger, 'b')
|
50 |
+
|
51 |
+
# add SQNR for each comparison, inplace
|
52 |
+
ns.extend_logger_results_with_comparison(
|
53 |
+
act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
|
54 |
+
'sqnr')
|
55 |
+
|
56 |
+
# act_comparison contains the activations from `mp_ns` and `mq_ns` stored
|
57 |
+
# in pairs, and can be used for further analysis.
|
58 |
+
|
59 |
+
#
|
60 |
+
# Comparing activations, without error propagation
|
61 |
+
#
|
62 |
+
|
63 |
+
# create shadow model
|
64 |
+
mp_shadows_mq = ns.add_shadow_loggers(
|
65 |
+
'a', copy.deepcopy(mp),
|
66 |
+
'b', copy.deepcopy(mq),
|
67 |
+
ns.OutputLogger)
|
68 |
+
|
69 |
+
# send an example datum to capture intermediate activations
|
70 |
+
datum = torch.randn(1, 1, 1, 1)
|
71 |
+
mp_shadows_mq(datum)
|
72 |
+
|
73 |
+
# extract intermediate activations
|
74 |
+
shadow_act_comparison = ns.extract_shadow_logger_info(
|
75 |
+
mp_shadows_mq, ns.OutputLogger, 'b')
|
76 |
+
|
77 |
+
# add SQNR for each comparison, inplace
|
78 |
+
ns.extend_logger_results_with_comparison(
|
79 |
+
shadow_act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
|
80 |
+
'sqnr')
|
81 |
+
|
82 |
+
# shadow_act_comparison contains the activations from `mp_ns` and `mq_ns` stored
|
83 |
+
# in pairs, and can be used for further analysis.
|
84 |
+
|
85 |
+
"""
|
86 |
+
|
87 |
+
import collections
|
88 |
+
|
89 |
+
import torch
|
90 |
+
import torch.nn as nn
|
91 |
+
import torch.ao.quantization.quantize_fx as quantize_fx
|
92 |
+
from torch.fx import GraphModule
|
93 |
+
from torch.fx.graph import Node
|
94 |
+
from torch.ao.ns.fx.mappings import (
|
95 |
+
get_base_name_to_sets_of_related_ops,
|
96 |
+
)
|
97 |
+
from torch.ao.ns.fx.graph_matcher import (
|
98 |
+
get_matching_subgraph_pairs,
|
99 |
+
get_type_a_related_to_b,
|
100 |
+
)
|
101 |
+
|
102 |
+
from .fx.weight_utils import (
|
103 |
+
extract_weight_from_node,
|
104 |
+
)
|
105 |
+
|
106 |
+
from .fx.graph_passes import (
|
107 |
+
add_loggers_to_model,
|
108 |
+
create_a_shadows_b,
|
109 |
+
)
|
110 |
+
|
111 |
+
from .fx.utils import (
|
112 |
+
rekey_logger_info_on_node_name_of_model,
|
113 |
+
maybe_add_missing_fqns,
|
114 |
+
get_target_type_str,
|
115 |
+
)
|
116 |
+
|
117 |
+
from .fx.ns_types import (
|
118 |
+
NSSingleResultValuesType,
|
119 |
+
NSResultsType,
|
120 |
+
NSNodeTargetType,
|
121 |
+
)
|
122 |
+
from torch.ao.quantization.backend_config.utils import get_fusion_pattern_to_root_node_getter
|
123 |
+
from torch.ao.quantization.backend_config import BackendConfig
|
124 |
+
from torch.ao.quantization.fx.match_utils import _find_matches
|
125 |
+
from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr
|
126 |
+
from torch.ao.quantization.fx.qconfig_mapping_utils import _generate_node_name_to_qconfig
|
127 |
+
from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers
|
128 |
+
from torch.ao.quantization.qconfig import QConfigAny
|
129 |
+
from torch.ao.quantization import QConfigMapping
|
130 |
+
from torch.ao.ns.fx.n_shadows_utils import (
|
131 |
+
OutputProp,
|
132 |
+
_get_dedup_subgraphs,
|
133 |
+
SHADOW_WRAPPER_NODE_NAME_PREFIX,
|
134 |
+
group_results_by_subgraph,
|
135 |
+
create_results_comparison,
|
136 |
+
print_n_shadows_summary,
|
137 |
+
create_n_transformed_and_logged_copies_of_subgraph,
|
138 |
+
create_add_loggers_graph,
|
139 |
+
extract_weight_comparison,
|
140 |
+
)
|
141 |
+
from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping
|
142 |
+
|
143 |
+
from typing import Dict, Tuple, Callable, List, Optional, Set, Any, Type
|
144 |
+
|
145 |
+
RNNReturnType = Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
|
146 |
+
|
147 |
+
class OutputLogger(nn.Module):
|
148 |
+
"""
|
149 |
+
Base class for capturing intermediate values.
|
150 |
+
"""
|
151 |
+
stats: List[torch.Tensor]
|
152 |
+
stats_rnn: List[RNNReturnType]
|
153 |
+
|
154 |
+
# Mark as impure so that calls to it will not be removed during DCE.
|
155 |
+
_is_impure = True
|
156 |
+
|
157 |
+
def __init__(
|
158 |
+
self,
|
159 |
+
ref_node_name: str,
|
160 |
+
prev_node_name: str,
|
161 |
+
model_name: str,
|
162 |
+
ref_name: str,
|
163 |
+
prev_node_target_type: str,
|
164 |
+
ref_node_target_type: str,
|
165 |
+
results_type: str,
|
166 |
+
index_within_arg: int,
|
167 |
+
index_of_arg: int,
|
168 |
+
fqn: Optional[str],
|
169 |
+
qconfig_str: Optional[str] = '',
|
170 |
+
):
|
171 |
+
super().__init__()
|
172 |
+
self.stats: List[torch.Tensor] = []
|
173 |
+
self.stats_rnn: List[RNNReturnType] = []
|
174 |
+
|
175 |
+
# name of the node which was responsible for adding this logger
|
176 |
+
# Note:
|
177 |
+
# - if we are logging node outputs, this is the same as prev_node_name
|
178 |
+
# - if we are logging node inputs, this is the name of the node
|
179 |
+
# whose input this logger is logging.
|
180 |
+
#
|
181 |
+
# example, where logger1 is logging input of op1 and logger2 is logging
|
182 |
+
# the output of op1:
|
183 |
+
#
|
184 |
+
# x1 -> logger1 -> op1 -> logger2 -> x2
|
185 |
+
#
|
186 |
+
# in this example,
|
187 |
+
# - logger1's prev_node_name is x1 and ref_node_name is op1
|
188 |
+
# - logger2's prev_node_name is op1 and ref_node_name is op1
|
189 |
+
self.ref_node_name = ref_node_name
|
190 |
+
# name of the node whose output this Logger is capturing
|
191 |
+
self.prev_node_name = prev_node_name
|
192 |
+
|
193 |
+
# name of the model from which the node originated from
|
194 |
+
self.model_name = model_name
|
195 |
+
# reference name, used to match loggers from separate models
|
196 |
+
# to each other
|
197 |
+
self.ref_name = ref_name
|
198 |
+
# type of the target of the node whose output this logger is logging
|
199 |
+
self.prev_node_target_type = prev_node_target_type
|
200 |
+
# type of the target of the node which was responsible for adding this
|
201 |
+
# logger
|
202 |
+
self.ref_node_target_type = ref_node_target_type
|
203 |
+
# what kind of values are inside of stats
|
204 |
+
self.results_type = results_type
|
205 |
+
# index of this node within the arg of the input/output node
|
206 |
+
# for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
|
207 |
+
self.index_within_arg = index_within_arg
|
208 |
+
# index of this node within the args of the input/output node
|
209 |
+
# for example, in add(x1, x2), x2 would have index_of_arg == 1
|
210 |
+
self.index_of_arg = index_of_arg
|
211 |
+
# fully qualified name
|
212 |
+
self.fqn = fqn
|
213 |
+
# if loggers are added before prepare_fx, but we do not want
|
214 |
+
# collect results of calibration, only results after convert_fx
|
215 |
+
# so, we add a flag to control whether this logger collects data
|
216 |
+
self.enabled = True
|
217 |
+
# string representation of qconfig
|
218 |
+
self.qconfig_str = qconfig_str
|
219 |
+
# this can be turned off to reduce memory usage during calibration
|
220 |
+
self.save_activations = True
|
221 |
+
|
222 |
+
# Note: cannot annotate the type of x because TorchScript does not support
|
223 |
+
# the Union type.
|
224 |
+
def forward(self, x):
|
225 |
+
"""
|
226 |
+
""" # blank docblock to make autodoc happy
|
227 |
+
# TODO(future PR): consider designing this better, as the difference
|
228 |
+
# between these two flags is subtle and not obvious.
|
229 |
+
if not self.enabled:
|
230 |
+
return x
|
231 |
+
if not self.save_activations:
|
232 |
+
return x
|
233 |
+
# TODO(future PR): consider refactoring this to better reuse the parent
|
234 |
+
# class
|
235 |
+
if isinstance(x, torch.Tensor):
|
236 |
+
self.stats.append(x.detach())
|
237 |
+
elif isinstance(x, tuple) and len(x) == 2 and len(x[1]) == 2:
|
238 |
+
new_res = (x[0].detach(), (x[1][0].detach(), x[1][1].detach()))
|
239 |
+
self.stats_rnn.append(new_res)
|
240 |
+
return x
|
241 |
+
|
242 |
+
def __repr__(self):
|
243 |
+
clean_dict = {
|
244 |
+
k: v
|
245 |
+
for k, v in self.__dict__.items()
|
246 |
+
# skip nn.Module keys
|
247 |
+
if (k != 'training') and not k.startswith('_')
|
248 |
+
}
|
249 |
+
return f"OutputLogger({clean_dict})"
|
250 |
+
|
251 |
+
|
252 |
+
class OutputComparisonLogger(OutputLogger):
|
253 |
+
"""
|
254 |
+
Same as OutputLogger, but also requires the original activation
|
255 |
+
in order to calculate the comparison at calibration time
|
256 |
+
"""
|
257 |
+
|
258 |
+
def __init__(self, *args, **kwargs):
|
259 |
+
super().__init__(*args, **kwargs)
|
260 |
+
# TODO(future PR): make the comparison function configurable
|
261 |
+
self.comparison_fn = torch.ao.ns.fx.utils.compute_sqnr
|
262 |
+
self.comparison_fn_name = 'sqnr'
|
263 |
+
# precalculated comparisons of logger output versus reference
|
264 |
+
self.comparisons = []
|
265 |
+
# precalculated comparisons function
|
266 |
+
|
267 |
+
def forward(self, x, x_ref):
|
268 |
+
"""
|
269 |
+
""" # blank docblock to make autodoc happy
|
270 |
+
if not self.enabled:
|
271 |
+
return x
|
272 |
+
assert isinstance(x, torch.Tensor), 'non-tensor inputs not yet supported'
|
273 |
+
if self.save_activations:
|
274 |
+
# save the activation, for debugging
|
275 |
+
self.stats.append(x.detach())
|
276 |
+
# save the comparison
|
277 |
+
self.comparisons.append(self.comparison_fn(x, x_ref))
|
278 |
+
return x
|
279 |
+
|
280 |
+
def __repr__(self):
|
281 |
+
clean_dict = {
|
282 |
+
k: v
|
283 |
+
for k, v in self.__dict__.items()
|
284 |
+
# skip nn.Module keys
|
285 |
+
if (k != 'training') and not k.startswith('_')
|
286 |
+
}
|
287 |
+
return f"OutputComparisonLogger({clean_dict})"
|
288 |
+
|
289 |
+
|
290 |
+
class NSTracer(quantize_fx.QuantizationTracer):
|
291 |
+
"""
|
292 |
+
Just like a regular FX quantization tracer, but treats observers and fake_quantize
|
293 |
+
modules as leaf modules.
|
294 |
+
"""
|
295 |
+
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
|
296 |
+
"""
|
297 |
+
""" # blank docblock to make autodoc happy
|
298 |
+
if isinstance(m, torch.ao.quantization.ObserverBase):
|
299 |
+
return True
|
300 |
+
elif isinstance(m, torch.ao.quantization.FakeQuantizeBase):
|
301 |
+
return True
|
302 |
+
return super().is_leaf_module(m, module_qualified_name)
|
303 |
+
|
304 |
+
|
305 |
+
def _extract_weights_one_model(
|
306 |
+
model_name: str,
|
307 |
+
model: GraphModule,
|
308 |
+
nodes_and_names_to_instrument: List[Tuple[Node, str]],
|
309 |
+
results: NSResultsType,
|
310 |
+
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
|
311 |
+
) -> None:
|
312 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
|
313 |
+
for node, ref_name in nodes_and_names_to_instrument:
|
314 |
+
res_type = NSSingleResultValuesType.WEIGHT.value
|
315 |
+
extracted_weight = extract_weight_from_node(
|
316 |
+
node, model, op_to_type_to_weight_extraction_fn)
|
317 |
+
if extracted_weight:
|
318 |
+
if ref_name not in results:
|
319 |
+
results[ref_name] = {res_type: {}}
|
320 |
+
results[ref_name][res_type][model_name] = [extracted_weight]
|
321 |
+
|
322 |
+
|
323 |
+
def _extract_weights_impl(
|
324 |
+
model_name_a: str,
|
325 |
+
gm_a: GraphModule,
|
326 |
+
model_name_b: str,
|
327 |
+
gm_b: GraphModule,
|
328 |
+
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
329 |
+
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
330 |
+
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
|
331 |
+
) -> NSResultsType:
|
332 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl")
|
333 |
+
matched_subgraph_pairs = get_matching_subgraph_pairs(
|
334 |
+
gm_a, gm_b, base_name_to_sets_of_related_ops,
|
335 |
+
unmatchable_types_map)
|
336 |
+
|
337 |
+
# split the subgraph pairs into one data structure for each model
|
338 |
+
nodes_and_names_to_instrument_a: List[Tuple[Node, str]] = []
|
339 |
+
nodes_and_names_to_instrument_b: List[Tuple[Node, str]] = []
|
340 |
+
for match_name, match in matched_subgraph_pairs.items():
|
341 |
+
subgraph_a, subgraph_b = match
|
342 |
+
nodes_and_names_to_instrument_a.append((subgraph_a.base_op_node, match_name))
|
343 |
+
nodes_and_names_to_instrument_b.append((subgraph_b.base_op_node, match_name))
|
344 |
+
|
345 |
+
# populate the results, one model at a time
|
346 |
+
results: NSResultsType = {}
|
347 |
+
_extract_weights_one_model(
|
348 |
+
model_name_a, gm_a, nodes_and_names_to_instrument_a, results,
|
349 |
+
op_to_type_to_weight_extraction_fn)
|
350 |
+
_extract_weights_one_model(
|
351 |
+
model_name_b, gm_b, nodes_and_names_to_instrument_b, results,
|
352 |
+
op_to_type_to_weight_extraction_fn)
|
353 |
+
|
354 |
+
# fill in missing fqn entries
|
355 |
+
maybe_add_missing_fqns(results)
|
356 |
+
|
357 |
+
# rekey on names of nodes in gm_b
|
358 |
+
results = rekey_logger_info_on_node_name_of_model(results, model_name_b)
|
359 |
+
|
360 |
+
return results
|
361 |
+
|
362 |
+
|
363 |
+
def extract_weights(
|
364 |
+
model_name_a: str,
|
365 |
+
model_a: nn.Module,
|
366 |
+
model_name_b: str,
|
367 |
+
model_b: nn.Module,
|
368 |
+
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
369 |
+
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
370 |
+
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
|
371 |
+
) -> NSResultsType:
|
372 |
+
"""
|
373 |
+
Extract weights from model A and model B, and return a comparison.
|
374 |
+
|
375 |
+
Args:
|
376 |
+
model_name_a: string name of model A to use in results
|
377 |
+
model_a: model A
|
378 |
+
model_name_b: string name of model B to use in results
|
379 |
+
model_b: model B
|
380 |
+
base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
|
381 |
+
unmatchable_types_map: optional override of unmatchable types, subject to change
|
382 |
+
op_to_type_to_weight_extraction_fn: optional override of function which extracts weight
|
383 |
+
from a type, subject to change
|
384 |
+
|
385 |
+
Return:
|
386 |
+
NSResultsType, containing the weight comparisons
|
387 |
+
"""
|
388 |
+
|
389 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights")
|
390 |
+
if base_name_to_sets_of_related_ops is None:
|
391 |
+
base_name_to_sets_of_related_ops = \
|
392 |
+
get_base_name_to_sets_of_related_ops()
|
393 |
+
type_a_related_to_b = \
|
394 |
+
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
|
395 |
+
|
396 |
+
# TODO(future PR): expose these
|
397 |
+
skipped_module_names: List[str] = []
|
398 |
+
skipped_module_classes: List[Callable] = []
|
399 |
+
tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
|
400 |
+
tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
|
401 |
+
gm_a = GraphModule(model_a, tracer_a.trace(model_a))
|
402 |
+
maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
|
403 |
+
if maybe_model_a_node_name_to_scope is not None:
|
404 |
+
gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
|
405 |
+
gm_b = GraphModule(model_b, tracer_b.trace(model_b))
|
406 |
+
maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
|
407 |
+
if maybe_model_b_node_name_to_scope is not None:
|
408 |
+
gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
|
409 |
+
return _extract_weights_impl(
|
410 |
+
model_name_a, gm_a, model_name_b, gm_b, base_name_to_sets_of_related_ops,
|
411 |
+
unmatchable_types_map, op_to_type_to_weight_extraction_fn)
|
412 |
+
|
413 |
+
|
414 |
+
def _add_loggers_one_model(
|
415 |
+
model_name: str,
|
416 |
+
model: GraphModule,
|
417 |
+
nodes_and_names_to_instrument_inputs: List[Tuple[Node, str, str]],
|
418 |
+
nodes_and_names_to_instrument_outputs: List[Tuple[Node, str, str]],
|
419 |
+
logger_cls: Callable,
|
420 |
+
) -> nn.Module:
|
421 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_one_model")
|
422 |
+
|
423 |
+
# TODO(future PR): do not observe nodes we do not care
|
424 |
+
# about (both fp32, denylist, etc)
|
425 |
+
node_to_instrument_inputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
|
426 |
+
node_to_instrument_outputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
|
427 |
+
for node, ref_name, ref_node_type in nodes_and_names_to_instrument_inputs:
|
428 |
+
node_to_instrument_inputs_to_ref_name[node] = (ref_name, ref_node_type)
|
429 |
+
for node, ref_name, ref_node_type in nodes_and_names_to_instrument_outputs:
|
430 |
+
node_to_instrument_outputs_to_ref_name[node] = (ref_name, ref_node_type)
|
431 |
+
|
432 |
+
model = add_loggers_to_model(
|
433 |
+
model, node_to_instrument_inputs_to_ref_name,
|
434 |
+
node_to_instrument_outputs_to_ref_name, logger_cls, model_name)
|
435 |
+
return model
|
436 |
+
|
437 |
+
|
438 |
+
def _add_loggers_impl(
|
439 |
+
name_a: str,
|
440 |
+
gm_a: GraphModule,
|
441 |
+
name_b: str,
|
442 |
+
gm_b: GraphModule,
|
443 |
+
logger_cls: Callable,
|
444 |
+
should_log_inputs: bool,
|
445 |
+
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
446 |
+
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
447 |
+
) -> Tuple[nn.Module, nn.Module]:
|
448 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_impl")
|
449 |
+
matched_subgraph_pairs = get_matching_subgraph_pairs(
|
450 |
+
gm_a, gm_b,
|
451 |
+
base_name_to_sets_of_related_ops, unmatchable_types_map)
|
452 |
+
nodes_and_names_to_instrument_inputs_a = []
|
453 |
+
nodes_and_names_to_instrument_inputs_b = []
|
454 |
+
nodes_and_names_to_instrument_outputs_a = []
|
455 |
+
nodes_and_names_to_instrument_outputs_b = []
|
456 |
+
for match_name, (subgraph_a, subgraph_b) in matched_subgraph_pairs.items():
|
457 |
+
ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
|
458 |
+
ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
|
459 |
+
# Note: for matching inputs we use start_node, such as observing
|
460 |
+
# the input of linear in linear-relu
|
461 |
+
if should_log_inputs:
|
462 |
+
nodes_and_names_to_instrument_inputs_a.append(
|
463 |
+
(subgraph_a.start_node, match_name, ref_node_type_a))
|
464 |
+
nodes_and_names_to_instrument_inputs_b.append(
|
465 |
+
(subgraph_b.start_node, match_name, ref_node_type_b))
|
466 |
+
# Note: for matching activations we always use end_node,
|
467 |
+
# such as observing the output of relu in linear-relu
|
468 |
+
nodes_and_names_to_instrument_outputs_a.append(
|
469 |
+
(subgraph_a.end_node, match_name, ref_node_type_a))
|
470 |
+
nodes_and_names_to_instrument_outputs_b.append(
|
471 |
+
(subgraph_b.end_node, match_name, ref_node_type_b))
|
472 |
+
|
473 |
+
new_model_a = _add_loggers_one_model(
|
474 |
+
name_a, gm_a, nodes_and_names_to_instrument_inputs_a,
|
475 |
+
nodes_and_names_to_instrument_outputs_a, logger_cls)
|
476 |
+
new_model_b = _add_loggers_one_model(
|
477 |
+
name_b, gm_b, nodes_and_names_to_instrument_inputs_b,
|
478 |
+
nodes_and_names_to_instrument_outputs_b, logger_cls)
|
479 |
+
return (new_model_a, new_model_b)
|
480 |
+
|
481 |
+
|
482 |
+
def add_loggers(
|
483 |
+
name_a: str,
|
484 |
+
model_a: nn.Module,
|
485 |
+
name_b: str,
|
486 |
+
model_b: nn.Module,
|
487 |
+
logger_cls: Callable,
|
488 |
+
should_log_inputs : bool = False,
|
489 |
+
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
490 |
+
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
|
491 |
+
) -> Tuple[nn.Module, nn.Module]:
|
492 |
+
"""
|
493 |
+
Instrument model A and model B with loggers.
|
494 |
+
|
495 |
+
Args:
|
496 |
+
name_a: string name of model A to use in results
|
497 |
+
model_a: model A
|
498 |
+
name_b: string name of model B to use in results
|
499 |
+
model_b: model B
|
500 |
+
logger_cls: class of Logger to use
|
501 |
+
base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
|
502 |
+
unmatchable_types_map: optional override of unmatchable types, subject to change
|
503 |
+
|
504 |
+
Return:
|
505 |
+
Returns a tuple of (model_a_with_loggers, model_b_with_loggers). Modifies both models inplace.
|
506 |
+
"""
|
507 |
+
|
508 |
+
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_loggers")
|
509 |
+
# TODO(future PR): expose these
|
510 |
+
skipped_module_names: List[str] = []
|
511 |
+
skipped_module_classes: List[Callable] = []
|
512 |
+
tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
|
513 |
+
tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
|
514 |
+
gm_a = GraphModule(model_a, tracer_a.trace(model_a))
|
515 |
+
maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
|
516 |
+
if maybe_model_a_node_name_to_scope is not None:
|
517 |
+
gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
|
518 |
+
gm_b = GraphModule(model_b, tracer_b.trace(model_b))
|
519 |
+
maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
|
520 |
+
if maybe_model_b_node_name_to_scope is not None:
|
521 |
+
gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
|
522 |
+
return _add_loggers_impl(
|
523 |
+
name_a, gm_a, name_b, gm_b, logger_cls,
|
524 |
+
should_log_inputs=should_log_inputs,
|
525 |
+
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
|
526 |
+
unmatchable_types_map=unmatchable_types_map)
|
527 |
+
|
528 |
+
|
529 |
+
def _extract_logger_info_one_model(
    model: nn.Module,
    results: NSResultsType,
    logger_cls: Callable,
) -> None:
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_logger_info_one_model")
    for gm_name, mod in model.named_modules():
        # TODO(future PR): better check when scripted
        is_logger = (
            isinstance(mod, logger_cls)  # type: ignore[arg-type]
            or (
                isinstance(mod, torch.jit.RecursiveScriptModule)
                and mod.original_name == 'OutputLogger'
            )
        )
        if is_logger:
            key = mod.ref_name
            if key not in results:
                results[key] = {}
            assert mod.model_name not in results[key], \
                f"{mod.model_name} is already present in results"
            if mod.results_type not in results[key]:
                results[key][mod.results_type] = {}
            if mod.model_name not in results[key][mod.results_type]:
                results[key][mod.results_type][mod.model_name] = []
            stats_to_use = mod.stats
            if len(mod.stats_rnn) > 0:
                stats_to_use = mod.stats_rnn
            data = {
                'type': mod.results_type,
                'values': stats_to_use,
                'ref_node_name': mod.ref_node_name,
                'ref_node_target_type': mod.ref_node_target_type,
                'prev_node_name': mod.prev_node_name,
                'prev_node_target_type': mod.prev_node_target_type,
                'index_within_arg': mod.index_within_arg,
                'index_of_arg': mod.index_of_arg,
                'fqn': mod.fqn,
                'qconfig_str': mod.qconfig_str,
            }
            if hasattr(mod, 'comparisons'):
                data['comparisons'] = mod.comparisons
                data['comparison_fn_name'] = mod.comparison_fn_name
            else:
                data['comparisons'] = []
                data['comparison_fn_name'] = ''
            results[key][mod.results_type][mod.model_name].append(data)
            # ensure the list stays sorted
            results[key][mod.results_type][mod.model_name].sort(
                key=lambda res:
                f"{res['index_of_arg']}:{res['index_within_arg']}"
            )

# TODO(future PR): align on naming
# this is equivalent of just the comparison extraction part of `ns.compare_model_outputs`
def extract_logger_info(
    model_a: nn.Module,
    model_b: nn.Module,
    logger_cls: Callable,
    model_name_to_use_for_layer_names: str,
) -> NSResultsType:
    """
    Traverse all loggers in `model_a` and `model_b`, and extract the logged
    information.

    Args:
        model_a: model A
        model_b: model B
        logger_cls: class of Logger to use
        model_name_to_use_for_layer_names: string name of model to use for
          layer names in the output

    Return:
        NSResultsType, containing the logged comparisons
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_logger_info")
    results: NSResultsType = {}
    for model in (model_a, model_b):
        _extract_logger_info_one_model(model, results, logger_cls)
    # fill in missing fqn entries
    maybe_add_missing_fqns(results)
    # rekey on the name of model b
    results = rekey_logger_info_on_node_name_of_model(
        results, model_name_to_use_for_layer_names)
    return results

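# Editor's note: an illustrative continuation sketch, not part of the original
# file. It assumes `m_fp32_logged` and `m_int8_logged` are models instrumented
# by add_loggers (e.g. from the sketch above) which have already seen data.
def _example_extract_logger_info(m_fp32_logged, m_int8_logged):
    results = extract_logger_info(
        m_fp32_logged, m_int8_logged, OutputLogger, 'int8')
    # results maps layer name -> results type -> model name -> list of stats
    for layer_name, results_type_to_results in results.items():
        for results_type, model_name_to_results in results_type_to_results.items():
            for model_name, stats in model_name_to_results.items():
                print(layer_name, results_type, model_name, len(stats))
    return results
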
def _add_shadow_loggers_impl(
    name_a: str,
    gm_a: GraphModule,
    name_b: str,
    gm_b: GraphModule,
    logger_cls: Callable,
    should_log_inputs: bool,
    base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
    node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
    unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> nn.Module:
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_shadow_loggers_impl")
    matched_subgraph_pairs = get_matching_subgraph_pairs(
        gm_a, gm_b, base_name_to_sets_of_related_ops,
        unmatchable_types_map)
    gm_a_shadows_b = create_a_shadows_b(
        name_a, gm_a, name_b, gm_b, matched_subgraph_pairs, logger_cls,
        should_log_inputs=should_log_inputs,
        node_type_to_io_type_map=node_type_to_io_type_map)
    return gm_a_shadows_b

def add_shadow_loggers(
    name_a: str,
    model_a: nn.Module,
    name_b: str,
    model_b: nn.Module,
    logger_cls: Callable,
    should_log_inputs: bool = False,
    base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
    node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
    unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> nn.Module:
    """
    Instrument model A and model B with shadow loggers.

    Args:
        name_a: string name of model A to use in results
        model_a: model A
        name_b: string name of model B to use in results
        model_b: model B
        logger_cls: class of Logger to use
        should_log_inputs: whether to log inputs
        base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
        unmatchable_types_map: optional override of unmatchable types, subject to change
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_shadow_loggers")
    # TODO(future PR): expose these
    skipped_module_names: List[str] = []
    skipped_module_classes: List[Callable] = []
    tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
    tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
    gm_a = GraphModule(model_a, tracer_a.trace(model_a))
    maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope')
    if maybe_model_a_node_name_to_scope is not None:
        gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope
    gm_b = GraphModule(model_b, tracer_b.trace(model_b))
    maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope')
    if maybe_model_b_node_name_to_scope is not None:
        gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope
    return _add_shadow_loggers_impl(
        name_a, gm_a, name_b, gm_b, logger_cls,
        should_log_inputs=should_log_inputs,
        base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
        node_type_to_io_type_map=node_type_to_io_type_map,
        unmatchable_types_map=unmatchable_types_map)

def extract_shadow_logger_info(
    model_a_shadows_b: nn.Module,
    logger_cls: Callable,
    model_name_to_use_for_layer_names: str,
) -> NSResultsType:
    """
    Traverse all loggers in a shadow model, and extract the logged
    information.

    Args:
        model_a_shadows_b: shadow model
        logger_cls: class of Logger to use
        model_name_to_use_for_layer_names: string name of model to use for
          layer names in the output

    Return:
        NSResultsType, containing the logged comparisons
    """
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_shadow_logger_info")
    results: NSResultsType = collections.defaultdict(dict)
    _extract_logger_info_one_model(model_a_shadows_b, results, logger_cls)
    # fill in missing fqn entries
    maybe_add_missing_fqns(results)
    # rekey on the name of model b
    results = rekey_logger_info_on_node_name_of_model(
        results, model_name_to_use_for_layer_names)
    return dict(results)

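# Editor's note: a hedged end-to-end sketch of the shadow-logger APIs above,
# not part of the original file. `m_fp32`, `m_int8` and `example_input` are
# hypothetical, mirroring the add_loggers sketch earlier in this file.
def _example_shadow_workflow(m_fp32, m_int8, example_input):
    # a single shadow model runs both quantization flavors side by side
    m_shadow = add_shadow_loggers('fp32', m_fp32, 'int8', m_int8, OutputLogger)
    m_shadow(*example_input)  # one pass populates loggers for both models
    shadow_results = extract_shadow_logger_info(m_shadow, OutputLogger, 'int8')
    return shadow_results
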
def extend_logger_results_with_comparison(
    results: NSResultsType,
    model_name_1: str,
    model_name_2: str,
    comparison_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    comparison_name: str,
) -> None:
    """
    Compares the logged values from `model_name_2` against the corresponding
    values in `model_name_1`, using `comparison_fn`. Records the result
    in `model_name_2`'s results under `comparison_name`. Modifies `results` inplace.

    Args:
        results: the result data structure from `extract_logger_info` or
          `extract_shadow_logger_info`.
        model_name_1: string name of model 1
        model_name_2: string name of model 2
        comparison_fn: function to compare two Tensors
        comparison_name: string name under which to store the comparison
          results in the output
    """
    for results_type_to_results in results.values():
        for model_name_to_results in results_type_to_results.values():
            assert model_name_1 in model_name_to_results, \
                f"{model_name_1} not found in results"
            assert model_name_2 in model_name_to_results, \
                f"{model_name_2} not found in results"

            results_1 = model_name_to_results[model_name_1]
            results_2 = model_name_to_results[model_name_2]

            for result_2 in results_2:
                index_within_arg_2 = result_2['index_within_arg']
                index_of_arg_2 = result_2['index_of_arg']
                # find corresponding result_1
                result_1 = None
                for cur_result_1 in results_1:
                    index_within_arg_1 = cur_result_1['index_within_arg']
                    index_of_arg_1 = cur_result_1['index_of_arg']
                    if (
                        (index_within_arg_1 == index_within_arg_2) and
                        (index_of_arg_1 == index_of_arg_2)
                    ):
                        result_1 = cur_result_1
                        break
                assert result_1 is not None

                values_1 = result_1['values']
                values_2 = result_2['values']
                result_2[comparison_name] = []
                for value_1, value_2 in zip(values_1, values_2):
                    comparison_result = comparison_fn(value_1, value_2)
                    result_2[comparison_name].append(comparison_result)

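# Editor's note: illustrative sketch, not part of the original file. Assuming
# `results` came from extract_logger_info or extract_shadow_logger_info with
# model names 'fp32' and 'int8', this appends an SQNR comparison column using
# the helper from torch.ao.ns.fx.utils.
def _example_append_sqnr(results: NSResultsType) -> None:
    from torch.ao.ns.fx.utils import compute_sqnr
    extend_logger_results_with_comparison(
        results, 'fp32', 'int8', compute_sqnr, 'sqnr_int8_vs_fp32')
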
def prepare_n_shadows_model(
    model: torch.nn.Module,
    example_inputs: Any,
    qconfig_multi_mapping: QConfigMultiMapping,
    backend_config: BackendConfig,
    custom_prepare_fn: Optional[Callable] = None,
    custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
    custom_tracer: Any = None,
) -> GraphModule:
    """
    Given a model with a graph with M ops such as

      args_kwargs_m -> op_m -> output_m

    And a set of N qconfigs for each op, creates a new model, with
    each of the subgraph of `op_m` transformed into

    .. code::

           |---------> op_m_n -> log_m_n
           |          /
      args_kwargs_m ---------> op_m -> log_m_0

    Where op_m_n is op_m wrapped in a submodule and transformed with
    qconfig_n, and its inner graph looks like

    .. code::

      args_m -------- op_m_prepared_with_qconfig_n -> out_m_n
                    /
      kwargs_m ---

    This is useful for testing different quantization of multiple layers in
    a single pass through the model.

    High level TODOs for future PRs:
    * figure out a better way to name the output structure
    * return a results data structure instead of printing it out
    * add examples to docblocks
    """

    if custom_tracer is None:
        tracer = quantize_fx.QuantizationTracer([], [])
    else:
        tracer = custom_tracer
    mt = torch.fx.GraphModule(model, tracer.trace(model))
    # this is necessary to ensure logger FQNs get populated
    mt._node_name_to_scope = tracer.node_name_to_scope

    # run example input propagation, we need this to call prepare_fx on
    # individual subgraphs
    output_prop = OutputProp(mt)
    output_prop.propagate(*example_inputs)

    # Find the set of subgraphs in the original graph which we need to
    # consider.
    modules = dict(mt.named_modules(remove_duplicate=False))
    patterns = _get_pattern_to_quantize_handlers(backend_config)
    root_node_getter_mapping = \
        get_fusion_pattern_to_root_node_getter(backend_config)
    standalone_module_names: List[str] = []
    standalone_module_classes: List[Type] = []
    custom_module_classes: List[Type] = []
    matches = _find_matches(
        mt.graph, modules, patterns, root_node_getter_mapping,
        standalone_module_names, standalone_module_classes, custom_module_classes)
    subgraphs_dedup: Dict[str, List[Node]] = \
        _get_dedup_subgraphs(matches)

    # generate node to qconfig for each subgraph
    # TODO(future PR): deduplicate repeating entries
    list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]] = []
    for qconfig_mapping in qconfig_multi_mapping.qconfig_mappings_list:
        node_name_to_qconfig = _generate_node_name_to_qconfig(
            mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope)
        list_of_node_name_to_qconfig.append(node_name_to_qconfig)

    # For each region in the model, do the following:
    #   For each qconfig for that region, do the following:
    #     1. create a copy of the region wrapped in a module
    #     2. pass original args, original kwargs, and expected output to module
    #     3. add an output comparison logger and hook it up to compare
    #        actual output to expected output
    #     4. run `prepare_fx` on the module
    for (subgraph_idx, (match_name, nodes_in_this_subgraph)) in \
            enumerate(subgraphs_dedup.items()):
        create_n_transformed_and_logged_copies_of_subgraph(
            mt, subgraph_idx, match_name, nodes_in_this_subgraph,
            qconfig_multi_mapping.qconfig_mappings_list, list_of_node_name_to_qconfig,
            custom_prepare_fn, custom_prepare_kwargs  # type: ignore[arg-type]
        )

    return mt

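# Editor's note: a minimal sketch, not part of the original file, of building
# the QConfigMultiMapping consumed by prepare_n_shadows_model; the two backend
# qconfig mappings chosen here are arbitrary examples.
def _example_build_qconfig_multi_mapping() -> QConfigMultiMapping:
    from torch.ao.quantization import get_default_qconfig_mapping
    return QConfigMultiMapping.from_list_qconfig_mapping([
        get_default_qconfig_mapping('fbgemm'),
        get_default_qconfig_mapping('qnnpack'),
    ])
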
# TODO(future PR): we should rethink the names of all the PNP APIs
def _prepare_n_shadows_add_loggers_model(
    model: torch.nn.Module,
    example_inputs: Any,
    qconfig_mapping: QConfigMapping,
    backend_config: BackendConfig,
) -> torch.nn.Module:
    r"""
    Note: this API is not recommended for wide usage, it is only
    provided for customers who need to migrate from the `add_loggers`
    API.

    This creates a model which provides logging for the following
    problem: if we quantize `model` with `qconfig_mapping` and feed
    the same input through both models, log the comparisons of
    corresponding intermediate layers.

    The problem is solved with a single model.  Specifically, we
    partition `model` into N subgraphs, create a copy of each relevant
    subgraph, wrap it in a module, apply the quantization API to that
    module, and hook up loggers to measure the comparisons.

    Example starting graph:

      x0 -> op0 -> x1 -> op1 -> x2

    Example config: quantize op0 to int8, do nothing to op1.
    The following graph will be created:

    .. code::

      x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log
       \                        \                           \       # noqa: W605
        ---> op0_1 -> x1_1 ----> clog -> op1_0 -> x2_1 ----> clog

    Where op0_0 is op0, op0_1 is op0 wrapped in a submodule and quantized
    to int8, op1_0 is op1 (appearing in the graph twice), log is a logger,
    and clog is a comparison logger.
    """

    tracer = quantize_fx.QuantizationTracer([], [])
    mt = torch.fx.GraphModule(model, tracer.trace(model))
    # this is necessary to ensure logger FQNs get populated
    mt._node_name_to_scope = tracer.node_name_to_scope

    # run example input propagation, we need this to call prepare_fx on
    # individual subgraphs
    output_prop = OutputProp(mt)
    output_prop.propagate(*example_inputs)

    # Find the set of subgraphs in the original graph which we need to
    # consider.
    modules = dict(mt.named_modules(remove_duplicate=False))
    patterns = _get_pattern_to_quantize_handlers(backend_config)
    root_node_getter_mapping = \
        get_fusion_pattern_to_root_node_getter(backend_config)
    standalone_module_names: List[str] = []
    standalone_module_classes: List[Type] = []
    custom_module_classes: List[Type] = []
    matches = _find_matches(
        mt.graph, modules, patterns, root_node_getter_mapping,
        standalone_module_names, standalone_module_classes, custom_module_classes)
    subgraphs_dedup: Dict[str, List[Node]] = \
        _get_dedup_subgraphs(matches)

    # generate node to qconfig for each subgraph
    node_name_to_qconfig = _generate_node_name_to_qconfig(
        mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope)

    # Now, mutate the graph to be the add_loggers graph with propagation
    # error.
    create_add_loggers_graph(
        mt, subgraphs_dedup, qconfig_mapping, node_name_to_qconfig)

    return mt

# TODO(future PR): we should rethink the names of all the PNP APIs
def _n_shadows_compare_weights(
    model: torch.nn.Module,
    example_inputs: Any,
    qconfig_mapping: QConfigMapping,
    backend_config: BackendConfig,
) -> NSResultsType:
    """
    Note: this API is not recommended for wide usage, it is only
    provided for customers who need to migrate from the `add_loggers`
    API.
    """
    qconfig_multi_mapping = \
        QConfigMultiMapping.from_list_qconfig_mapping([qconfig_mapping])
    mp = prepare_n_shadows_model(
        model, example_inputs, qconfig_multi_mapping, backend_config)
    # passing inputs through the model is necessary to populate
    # observers which observe weights with real values
    mp(*example_inputs)
    mq = convert_n_shadows_model(mp)
    weight_comparison = extract_weight_comparison(mq)
    return weight_comparison

# TODO(future PR): consider aligning API signature with other similar quantization
# functions (enable_fake_quant, etc)
def loggers_set_enabled(model: torch.nn.Module, enabled: bool) -> None:
    """
    Sets the `enabled` setting on a `model`'s loggers
    """
    for name, child in model.named_modules():
        if isinstance(child, OutputLogger):
            child.enabled = enabled

# TODO(future PR): consider aligning API signature with other similar quantization
# functions (enable_fake_quant, etc)
def loggers_set_save_activations(
    model: torch.nn.Module,
    save_activations: bool,
) -> None:
    """
    Sets the `save_activations` setting on a `model`'s loggers
    """
    for name, child in model.named_modules():
        if isinstance(child, OutputLogger):
            child.save_activations = save_activations

def convert_n_shadows_model(
    model: GraphModule,
    custom_convert_fn: Optional[Callable] = None,
    custom_convert_kwargs: Optional[Dict[str, Any]] = None
) -> GraphModule:
    """
    Given a model from `prepare_n_shadows_model`, runs `convert_fx`
    on each shadow submodule.
    """
    for node in model.graph.nodes:
        # TODO(future PR): consider matching in a safer way than
        # node name string match
        if node.name.startswith(SHADOW_WRAPPER_NODE_NAME_PREFIX):
            orig_mod = getattr(model, node.name)
            if custom_convert_fn is None:
                converted_mod = torch.ao.quantization.quantize_fx.convert_fx(
                    orig_mod)
            else:
                if custom_convert_kwargs is None:
                    custom_convert_kwargs = {}
                converted_mod = custom_convert_fn(orig_mod, **custom_convert_kwargs)
            setattr(model, node.name, converted_mod)

    return model

def extract_results_n_shadows_model(model: torch.nn.Module) -> NSResultsType:
    """
    Extracts logger results from `model`.
    """
    results: NSResultsType = {}
    _extract_logger_info_one_model(model, results, OutputLogger)
    return results

def print_comparisons_n_shadows_model(results: NSResultsType) -> None:
    """
    Prints a summary of extracted `results`.
    """
    results_grouped = group_results_by_subgraph(results)
    results_comparison = create_results_comparison(results_grouped)
    print_n_shadows_summary(results_comparison)

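# Editor's note: a hedged end-to-end sketch tying together the n-shadows APIs
# defined above; it is not part of the original file. `model` and
# `example_inputs` are hypothetical, and the qconfig choices are arbitrary.
def _example_n_shadows_workflow(model, example_inputs):
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.backend_config import get_native_backend_config

    qconfig_multi_mapping = QConfigMultiMapping.from_list_qconfig_mapping([
        get_default_qconfig_mapping('fbgemm'),
        get_default_qconfig_mapping('qnnpack'),
    ])
    mp = prepare_n_shadows_model(
        model, example_inputs, qconfig_multi_mapping,
        get_native_backend_config())
    mp(*example_inputs)   # calibrate observers in each shadow copy
    mq = convert_n_shadows_model(mp)
    mq(*example_inputs)   # populate the comparison loggers
    results = extract_results_n_shadows_model(mq)
    print_comparisons_n_shadows_model(results)
    return results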
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc
ADDED
Binary file (12.1 kB)

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc
ADDED
Binary file (17.7 kB)

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc
ADDED
Binary file (9.97 kB)

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc
ADDED
Binary file (4.04 kB)

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc
ADDED
Binary file (6.85 kB)

env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py
ADDED
@@ -0,0 +1,460 @@
import collections
import enum

import torch
toq = torch.ops.quantized

from torch.fx import GraphModule
from torch.fx.graph import Graph, Node

from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
    get_base_name_to_sets_of_related_ops,
    get_unmatchable_types_map,
)
from .pattern_utils import (
    get_type_a_related_to_b,
    get_reversed_fusions,
    end_node_matches_reversed_fusion,
)
from torch.ao.quantization import (
    ObserverBase,
    FakeQuantizeBase,
)

from typing import Dict, Tuple, List, Optional, Set, Any

def _get_output_nodes(g: Graph) -> List[Node]:
    return [n for n in g.nodes if n.op == 'output']

class _NSGraphMatchableSubgraphsIterator:
    """
    Iterates through the graph of gm, starting with the output nodes
    and continuing backwards.
    1. Returns matchable subgraphs, in order. A subgraph is defined by
       (start_node, end_node).
    2. Skips over non-matchable subgraphs
    """
    def __init__(
        self,
        gm: GraphModule,
        non_matchable_functions: Set[NSNodeTargetType],
        non_matchable_modules: Set[NSNodeTargetType],
        non_matchable_methods: Set[NSNodeTargetType],
    ):
        self.gm: GraphModule = gm
        self.non_matchable_functions: Set[NSNodeTargetType] = non_matchable_functions
        self.non_matchable_modules: Set[NSNodeTargetType] = non_matchable_modules
        self.non_matchable_methods: Set[NSNodeTargetType] = non_matchable_methods
        self.seen_nodes: Set[Node] = set()
        self.stack: List[Node] = []
        for start_node in _get_output_nodes(self.gm.graph):
            self.stack.append(start_node)

    def __iter__(self):
        return self

    def __next__(self) -> NSSubgraph:
        """
        Returns the next matchable subgraph.
        """
        while len(self.stack) > 0:
            cur_end_node = self.stack.pop()
            if cur_end_node in self.seen_nodes:
                continue

            # for subgraphs which are single nodes, start_node == end_node
            # for subgraphs with more than one node, start node != end_node
            cur_start_node = cur_end_node
            # Subgraphs like linear-relu have the base node as the start node.
            # Subgraphs like dequantize-linear-relu-to(torch.float16) have the
            #   base node as the second node.
            # The cur_base_op_node var will move to the actual node during
            #   the fusion matching later in this code block.
            cur_base_op_node = cur_end_node

            # Check for potential fusions. For now, we are greedy
            # and always skip all non-base nodes of a fusion.  For example,
            # if we match linear-relu backwards, we will always skip the
            # relu node and attempt to match the linear node.  This can
            # be made configurable later if needed.
            for _reverse_fusion_ops, base_op_idx in get_reversed_fusions():
                is_match = end_node_matches_reversed_fusion(
                    cur_end_node, _reverse_fusion_ops, self.gm, self.seen_nodes)
                if is_match:
                    # navigate to the base node
                    for rev_fusion_idx in range(len(_reverse_fusion_ops) - 1):
                        self.seen_nodes.add(cur_start_node)
                        # for now, assume that there are no other nodes
                        # which need to be added to the stack
                        cur_start_node = cur_start_node.args[0]  # type: ignore[assignment]
                        # if the base op index matches the current node, set it
                        rev_base_op_idx = \
                            len(_reverse_fusion_ops) - 2 - base_op_idx
                        if rev_fusion_idx == rev_base_op_idx:
                            cur_base_op_node = cur_start_node
                    break

            self.seen_nodes.add(cur_start_node)
            # add args of previous nodes to stack
            for arg in cur_start_node.all_input_nodes:
                self._recursively_add_node_arg_to_stack(arg)

            # skip unmatchable nodes
            # note: this check is done on the start_node, i.e.
            # if we are matching linear-relu in reverse, this would do the matchable
            # check on the linear
            if not self._is_matchable(cur_base_op_node):
                continue

            # If an observer or a fake_quant was not matched as a part of
            # a pattern of multiple nodes, ignore it. One case where this is
            # relevant is an observer on a graph input, which was added because
            # it is necessary for the next node.
            if cur_end_node.op == 'call_module' and cur_start_node is cur_end_node:
                maybe_obs = getattr_from_fqn(self.gm, cur_end_node.target)  # type: ignore[arg-type]
                if isinstance(maybe_obs, (ObserverBase, FakeQuantizeBase)):
                    continue

            return NSSubgraph(
                start_node=cur_start_node, end_node=cur_end_node,
                base_op_node=cur_base_op_node)

        raise StopIteration

    def _recursively_add_node_arg_to_stack(self, arg: Any) -> None:
        """
        Adds all of the nodes in this arg to the stack, properly navigating
        through list, dicts and tuples.
        """
        if isinstance(arg, Node):
            self.stack.append(arg)
        elif isinstance(arg, torch.fx.immutable_collections.immutable_list) or type(arg) is tuple:
            for inner_arg in arg:
                self._recursively_add_node_arg_to_stack(inner_arg)
        elif isinstance(arg, torch.fx.immutable_collections.immutable_dict):
            for value in arg.values():
                self._recursively_add_node_arg_to_stack(value)

    def _is_matchable(self, node: Node) -> bool:
        if node.op == 'call_function':
            return node.target not in self.non_matchable_functions
        elif node.op == 'call_module':
            assert isinstance(node.target, str)
            target_mod = getattr_from_fqn(self.gm, node.target)
            return not \
                any(isinstance(target_mod, t)  # type: ignore[arg-type]
                    for t in self.non_matchable_modules)
        elif node.op == 'call_method':
            return node.target not in self.non_matchable_methods
        else:
            return False

class GraphMatchingException(Exception):
    """
    Exception raised when two graphs cannot be matched.
    """
    pass

class SubgraphTypeRelationship(enum.Enum):
    # same type, known
    # example: F.linear and F.linear, or nn.Conv2d and nn.Conv2d
    EQUAL = enum.auto()
    # same type, but the type is not known to Numerical Suite
    # (user defined type, etc).
    EQUAL_BUT_UKNOWN = enum.auto()
    # known, same subgraph_relationship set, but not the same type
    # example: F.linear and toq.linear
    RELATED_BUT_NOT_EQUAL = enum.auto()
    # not related
    NOT_RELATED = enum.auto()

def _get_subgraph_relationship_type(
    subgraph_a: NSSubgraph,
    subgraph_b: NSSubgraph,
    gm_a: GraphModule,
    gm_b: GraphModule,
    type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]],
) -> SubgraphTypeRelationship:
    node_a = subgraph_a.base_op_node
    node_b = subgraph_b.base_op_node

    # TODO(next): make this code handle matching by what is before the base op
    if node_a.op != node_b.op:
        if not (
            node_a.op in ('call_function', 'call_method') and
            node_b.op in ('call_function', 'call_method')
        ):
            return SubgraphTypeRelationship.NOT_RELATED

    if node_a.op in ('call_function', 'call_method'):
        key = (node_a.target, node_b.target)

        if key not in type_a_related_to_b:
            if node_a.target == node_b.target:
                return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN
            else:
                return SubgraphTypeRelationship.NOT_RELATED
        # after this point, we are dealing with known types

        if node_a.target == node_b.target:
            node_a_has_prev = subgraph_a.base_op_node == subgraph_a.start_node
            node_b_has_prev = subgraph_b.base_op_node == subgraph_b.start_node
            if node_a_has_prev and (not node_b_has_prev):
                return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
            elif (not node_a_has_prev) and node_b_has_prev:
                return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
            elif (not node_a_has_prev) and (not node_b_has_prev):
                return SubgraphTypeRelationship.EQUAL
            else:
                # TODO(future PR): check for matches start_op_node and base_op_node
                return SubgraphTypeRelationship.EQUAL

        if key in type_a_related_to_b:
            return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
        else:
            return SubgraphTypeRelationship.NOT_RELATED
    elif node_a.op == 'call_module':
        assert (subgraph_a.base_op_node == subgraph_a.start_node and
                subgraph_b.base_op_node == subgraph_b.start_node), \
            "Matching call_module patterns where base_op_node != start_node is not supported yet"
        # for call_module, we need to look up the modules to do the type check
        assert isinstance(node_a.target, str)
        mod_a = getattr_from_fqn(gm_a, node_a.target)
        assert isinstance(node_b.target, str)
        mod_b = getattr_from_fqn(gm_b, node_b.target)

        key = (type(mod_a), type(mod_b))

        if key not in type_a_related_to_b:
            if type(mod_a) == type(mod_b):
                return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN
            else:
                return SubgraphTypeRelationship.NOT_RELATED
        elif type(mod_a) == type(mod_b):
            return SubgraphTypeRelationship.EQUAL
        else:
            return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL

    return SubgraphTypeRelationship.NOT_RELATED

def _get_name_for_subgraph(
    subgraph_a: NSSubgraph,
    gm_a: GraphModule,
    base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
    existing_names: Set[str],
) -> str:
    """
    Returns a unique name for a subgraph. This name is based on two things:
    1. the name of the set containing the underlying type of the base op in the
       subgraph (i.e. 'torch.nn.functional.linear' if this is related to a linear op)
    2. the number of previous subgraphs with related underlying type of the base op

    For example, in the graph

    linear0 -> relu0 -> linear1 -> relu1

    The subgraphs are (linear0, relu0) and (linear1, relu1).  If we iterate
    from the output node backwards, the name given to (linear1, relu1) will be
    `base_op_torch.nn.functional.linear_0`, and the name given to (linear0, relu0)
    will be `base_op_torch.nn.functional.linear_1`.

    Why are we not just using the node name? Answer: because of two requirements:
    A. fusions must be supported
    B. some Numeric Suite APIs can be called without having all of the models in memory

    For example, let's say we need to match nodes of

    (1) ... -> linear0 -> relu0 -> ...

    And

    (2) ... -> linear_relu0 -> ...

    Without being able to inspect them together. With the current naming scheme, if
    we iterate through both of these graphs in the same order, and assuming the rest
    of the graphs match, both of these subgraphs will get the same name without
    (1) and (2) knowing anything about each other.
    """
    target_type = _get_node_target_type(subgraph_a.base_op_node, gm_a)
    target_base_type = None
    for base_name, sets_of_related_ops in base_name_to_sets_of_related_ops.items():
        if target_type in sets_of_related_ops:
            target_base_type = base_name
    target_base_name = 'base_op_' + str(target_base_type)
    counter = 0
    proposed_name = target_base_name + '_' + str(counter)
    while proposed_name in existing_names:
        counter += 1
        proposed_name = target_base_name + '_' + str(counter)
    existing_names.add(proposed_name)
    return proposed_name

def _get_node_target_type(node: Node, gm: GraphModule) -> Optional[NSNodeTargetType]:
    if node.op in ('call_function', 'call_method'):
        return node.target
    elif node.op == 'call_module':
        assert isinstance(node.target, str)
        mod = getattr_from_fqn(gm, node.target)
        return type(mod)
    return None

def get_matching_subgraph_pairs(
    gm_a: GraphModule,
    gm_b: GraphModule,
    base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
    unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> Dict[str, Tuple[NSSubgraph, NSSubgraph]]:
    """
    Matches matchable subgraphs of graph_a to graph_b.

    For a node, "matchable" is defined as a node which is not an observer,
    fake_quants, quant or dequant.

    A subgraph can contain one or more nodes.  A subgraph is matchable if
    at least one node inside of it is matchable.  Currently, all nodes in
    a subgraph must be matchable (because we assume no observers will be
    inserted in the middle of a fusion).

    A subgraph is defined by (start_node, end_node).  We assume that only
    start_node and end_node are linked with the surrounding graph, all other
    nodes in a subgraph are self-contained.

    A pair of nodes is "related" if both nodes represent the same mathematical
    operation across different quantization flavors. For example,
    `F.linear` and `torch.ops.quantized.linear` are related, and
    `F.linear` and `torch.nn.Conv` are not related.

    For each matchable pair of nodes node_a and node_b, they will match
    if node_a and node_b are related.

    For graphs A and B, they will match iff:
    1. the number of matchable subgraphs in A and B is equivalent
    2. when iterating through the matchable subgraphs of A and B in the same order, each
       corresponding pair of base nodes is related.

    This enables us to find the corresponding subgraphs between
    graphs of related models.  For example, if we had two graphs such as:

    graph_a: x0 -> conv_0 (type: nn.Conv2d) -> obs_0 -> x1
             w -/
             b -/

    graph_b: x0 -> quant_0 -> qconv_0 (type: nnq.Conv2d) -> dequant_0 -> x1
             packed_params_0 -/

    This function will return the following result:
    {
        'conv_0': (  # the name of the node in graph_b
          (conv_0, conv_0),  # (start_node_a, end_node_a)
          (qconv_0, qconv_0),  # (start_node_b, end_node_b)
        ),
    }

    Or, if we have a fusion pattern,

    graph_a: x0 -> linear_0 -> relu_0 -> obs_0 -> x1
             w -/
             b -/

    graph_b: x0 -> quant_0 -> linear_relu_0 -> dequant_0 -> x1
             packed_params_0 -/

    This function will return the following result:
    {
        'linear_relu_0': (  # the name of the node in graph_b
          (linear_0, relu_0),  # (start_node_a, end_node_a)
          (linear_relu_0, linear_relu_0),  # (start_node_b, end_node_b)
        ),
    }
    """
    if unmatchable_types_map is None:
        unmatchable_types_map = get_unmatchable_types_map()
    non_matchable_functions = unmatchable_types_map['funs_unmatchable']
    non_matchable_modules = unmatchable_types_map['mods_unmatchable']
    non_matchable_methods = unmatchable_types_map['meths_unmatchable']

    graph_a_iterator = _NSGraphMatchableSubgraphsIterator(
        gm_a, non_matchable_functions, non_matchable_modules,
        non_matchable_methods)
    graph_b_iterator = _NSGraphMatchableSubgraphsIterator(
        gm_b, non_matchable_functions, non_matchable_modules,
        non_matchable_methods)
    results = collections.OrderedDict()
    if base_name_to_sets_of_related_ops is None:
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    existing_names_a: Set[str] = set()
    existing_names_b: Set[str] = set()

    while True:
        # fetch the next subgraphs from a and b
        cur_subgraph_a, cur_subgraph_b = None, None
        try:
            cur_subgraph_a = next(graph_a_iterator)
        except StopIteration:
            pass
        try:
            cur_subgraph_b = next(graph_b_iterator)
        except StopIteration:
            pass

        # look up types of a and b for useful error messages
        type_start_a, type_start_b = None, None
        if cur_subgraph_a is not None:
            type_start_a = _get_node_target_type(cur_subgraph_a.start_node, gm_a)
        if cur_subgraph_b is not None:
            type_start_b = _get_node_target_type(cur_subgraph_b.start_node, gm_b)

        # check for results and determine what to do next
        if cur_subgraph_a is not None and cur_subgraph_b is not None:
            # both nodes were fetched, check for subgraph_relationship
            # note: subgraph_relationship is checked on the start node, i.e.
            # if a linear-relu pattern is checked, we would check for subgraph_relationship
            # of the linear
            subgraph_relationship = _get_subgraph_relationship_type(
                cur_subgraph_a, cur_subgraph_b,
                gm_a, gm_b, type_a_related_to_b)
            if subgraph_relationship == SubgraphTypeRelationship.NOT_RELATED:
                msg = f"""
The subgraphs
({cur_subgraph_a}, {type_start_a}) and
({cur_subgraph_b}, {type_start_b})
are not related. Please ensure that the two models you pass in have the same number
of subgraphs, and each pair of subgraphs is related to each other."""
                raise GraphMatchingException(msg)
            elif subgraph_relationship == SubgraphTypeRelationship.EQUAL_BUT_UKNOWN:
                # skip matching but unknown types
                continue
            key_name_a = _get_name_for_subgraph(
                cur_subgraph_a, gm_a, base_name_to_sets_of_related_ops,
                existing_names_a)
            key_name_b = _get_name_for_subgraph(
                cur_subgraph_b, gm_b, base_name_to_sets_of_related_ops,
                existing_names_b)
            assert key_name_a == key_name_b, \
                f"Subgraph names {key_name_a} and {key_name_b} do not match"
            results[key_name_a] = (cur_subgraph_a, cur_subgraph_b)
            continue
        elif cur_subgraph_a is None and cur_subgraph_b is None:
            # we reached the end of both graphs
            break
        else:
            # only one node was fetched, no match possible, throw error
            msg = f"""
Attempting to match
({cur_subgraph_a}, {type_start_a}) and
({cur_subgraph_b}, {type_start_b}),
one of which is empty. Please ensure that the two models you pass in have the same number
of subgraphs."""
            raise GraphMatchingException(msg)

    # The subgraph pairs are originally created by traversing the two graphs
    # from the outputs to the inputs. Reverse the results to return the
    # subgraphs in their order of execution.
    results = collections.OrderedDict(reversed(list(results.items())))

    return results
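# Editor's note: a minimal sketch, not part of the original file, of calling
# get_matching_subgraph_pairs directly; `m_a` and `m_b` are hypothetical
# models assumed to be symbolically traceable by torch.fx.
def _example_match_subgraphs(m_a, m_b):
    import torch.fx
    gm_a = torch.fx.symbolic_trace(m_a)
    gm_b = torch.fx.symbolic_trace(m_b)
    matched = get_matching_subgraph_pairs(gm_a, gm_b)
    for name, (subgraph_a, subgraph_b) in matched.items():
        print(name, subgraph_a.start_node.name, '<->', subgraph_b.start_node.name)
    return matched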
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py
ADDED
@@ -0,0 +1,950 @@
import torch
from torch.fx import GraphModule, map_arg
from torch.fx.graph import Graph, Node
from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix

from .utils import (
    get_node_first_input_and_output_type,
    getattr_from_fqn,
    NodeInputOrOutputType,
    return_first_non_observer_node,
    get_number_of_non_param_args,
    get_target_type_str,
    get_arg_indices_of_inputs_to_log,
    get_node_input_qparams,
    op_type_supports_shadowing,
    get_normalized_nth_input,
)

from .ns_types import (
    NSSingleResultValuesType,
    NSSubgraph,
    NSNodeTargetType,
)
from torch.ao.ns.fx.mappings import (
    get_node_type_to_io_type_map,
)
from torch.ao.quantization.observer import _is_activation_post_process

from typing import Dict, Tuple, Callable, List, Any, Union, Optional, Set

def _maybe_get_fqn(node: Node, gm: GraphModule) -> Optional[str]:
    fqn = None
    if hasattr(gm, '_node_name_to_scope'):
        # fqn on observers is not present, because they do not
        # exist when the fqns are created during tracing. If this is
        # an observer, get the fqn of the node being observed.
        node_to_use_for_fqn = node
        if node.op == 'call_module':
            assert isinstance(node.target, str)
            module = getattr_from_fqn(gm, node.target)
            if _is_activation_post_process(module):
                node_to_use_for_fqn = get_normalized_nth_input(node, gm, 0)
        fqn = gm._node_name_to_scope[node_to_use_for_fqn.name][0]  # type: ignore[index]
    return fqn  # type: ignore[return-value]

def _insert_logger_after_node(
    node: Node,
    gm: GraphModule,
    logger_cls: Callable,
    logger_node_name_suffix: str,
    ref_node_name: str,
    model_name: str,
    ref_name: str,
    ref_node_target_type: str,
    results_type: str,
    index_within_arg: int,
    index_of_arg: int,
    fqn: Optional[str],
) -> Node:
    """
    Given a starting graph of

    prev_node -> node -> next_node

    This function creates a new logger_cls obj and adds it
    after node, resulting in

    prev_node -> node -> logger_obj -> next_node
    """
    # create new name
    logger_node_name = \
        get_new_attr_name_with_prefix(node.name + logger_node_name_suffix)(gm)
    target_type = get_target_type_str(node, gm)
    # create the logger object
    logger_obj = logger_cls(
        ref_node_name, node.name, model_name, ref_name, target_type,
        ref_node_target_type,
        results_type, index_within_arg, index_of_arg, fqn)
    # attach the logger object to the parent module
    setattr(gm, logger_node_name, logger_obj)
    logger_node = node.graph.create_node(
        'call_module', logger_node_name, (node,), {})
    return logger_node

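# Editor's note: an illustrative sketch, not part of the original file, of the
# bare FX mechanics used by _insert_logger_after_node: register a module on
# the GraphModule, then create a call_module node consuming `node`. Unlike the
# helper above, it mutates gm's own graph via an inserting_after context.
def _example_insert_identity_after_node(gm: GraphModule, node: Node) -> Node:
    attr_name = get_new_attr_name_with_prefix(node.name + '_identity_')(gm)
    setattr(gm, attr_name, torch.nn.Identity())
    with gm.graph.inserting_after(node):
        new_node = gm.graph.create_node('call_module', attr_name, (node,), {})
    return new_node
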
def add_loggers_to_model(
|
86 |
+
gm: GraphModule,
|
87 |
+
node_to_instrument_inputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
|
88 |
+
node_to_instrument_outputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
|
89 |
+
logger_cls: Callable,
|
90 |
+
model_name: str,
|
91 |
+
) -> GraphModule:
|
92 |
+
"""
|
93 |
+
Takes the graph of gm, adds loggers to the output
|
94 |
+
of each node in nodes_to_instrument. Returns a GraphModule with the new
|
95 |
+
graph.
|
96 |
+
"""
|
97 |
+
|
98 |
+
new_graph = Graph()
|
99 |
+
env: Dict[str, Any] = {}
|
100 |
+
modules = dict(gm.named_modules())
|
101 |
+
|
102 |
+
def load_arg(a):
|
103 |
+
return map_arg(a, lambda node: env[node.name])
|
104 |
+
|
105 |
+
for node in gm.graph.nodes:
|
106 |
+
if node.op == 'output':
|
107 |
+
new_graph.output(map_arg(get_normalized_nth_input(node, gm, 0), load_arg))
|
108 |
+
continue
|
109 |
+
|
110 |
+
if (
|
111 |
+
(node in node_to_instrument_inputs_to_ref_node_name) or
|
112 |
+
(node in node_to_instrument_outputs_to_ref_node_name)
|
113 |
+
):
|
114 |
+
fqn = _maybe_get_fqn(node, gm)
|
115 |
+
|
116 |
+
if node in node_to_instrument_inputs_to_ref_node_name:
|
117 |
+
ref_name, ref_node_type = node_to_instrument_inputs_to_ref_node_name[node]
|
118 |
+
# Ops such add and mul are special because either
|
119 |
+
# one or two of the first two arguments can be tensors,
|
120 |
+
# and if one argument is a tensor it can be first or
|
121 |
+
# second (x + 1 versus 1 + x).
|
122 |
+
arg_indices_to_log = get_arg_indices_of_inputs_to_log(node)
|
123 |
+
for node_arg_idx in arg_indices_to_log:
|
124 |
+
node_arg = get_normalized_nth_input(node, gm, node_arg_idx)
|
125 |
+
if type(node_arg) == Node:
|
126 |
+
# create a single input logger
|
127 |
+
prev_node = env[node_arg.name]
|
128 |
+
env[node_arg.name] = _insert_logger_after_node(
|
129 |
+
prev_node, gm, logger_cls, '_ns_logger_', node.name,
|
130 |
+
model_name, ref_name, ref_node_type,
|
131 |
+
NSSingleResultValuesType.NODE_INPUT.value,
|
132 |
+
index_within_arg=0, index_of_arg=node_arg_idx,
|
133 |
+
fqn=fqn)
|
134 |
+
elif type(node_arg) == torch.fx.immutable_collections.immutable_list:
|
135 |
+
# create N input loggers, one for each node
|
136 |
+
for arg_idx, arg in enumerate(node_arg): # type: ignore[var-annotated, arg-type]
|
137 |
+
prev_node = env[arg.name]
|
138 |
+
env[prev_node.name] = _insert_logger_after_node(
|
139 |
+
prev_node, gm, logger_cls, '_ns_logger_', node.name,
|
140 |
+
model_name, ref_name, ref_node_type,
|
141 |
+
NSSingleResultValuesType.NODE_INPUT.value,
|
142 |
+
index_within_arg=arg_idx, index_of_arg=node_arg_idx,
|
143 |
+
fqn=fqn)
|
144 |
+
else:
|
145 |
+
pass
|
146 |
+
|
147 |
+
# ensure env is populated with base node
|
148 |
+
# Note: runs for both inputs and outputs
|
149 |
+
env[node.name] = new_graph.node_copy(node, load_arg)
|
150 |
+
|
151 |
+
if node in node_to_instrument_outputs_to_ref_node_name:
|
152 |
+
ref_name, ref_node_type = node_to_instrument_outputs_to_ref_node_name[node]
|
153 |
+
# add the logger after the base node
|
154 |
+
env[node.name] = _insert_logger_after_node(
|
155 |
+
env[node.name], gm, logger_cls, '_ns_logger_', node.name,
|
156 |
+
model_name, ref_name, ref_node_type,
|
157 |
+
NSSingleResultValuesType.NODE_OUTPUT.value,
|
158 |
+
index_within_arg=0, index_of_arg=0, fqn=fqn)
|
159 |
+
|
160 |
+
else:
|
161 |
+
env[node.name] = new_graph.node_copy(node, load_arg)
|
162 |
+
|
163 |
+
new_gm = GraphModule(gm, new_graph)
|
164 |
+
return new_gm
|
165 |
+
|
166 |
+
def _insert_quantize_per_tensor_node(
|
167 |
+
prev_node_c: Node,
|
168 |
+
node_a: Node,
|
169 |
+
gm_b: GraphModule,
|
170 |
+
graph_c: Graph,
|
171 |
+
scale: Union[torch.Tensor, float],
|
172 |
+
zero_point: Union[torch.Tensor, int],
|
173 |
+
dtype_cast_name: str,
|
174 |
+
) -> Node:
|
175 |
+
# copy scale
|
176 |
+
scale_node_name = \
|
177 |
+
get_new_attr_name_with_prefix(
|
178 |
+
node_a.name + '_input_scale_')(gm_b)
|
179 |
+
setattr(gm_b, scale_node_name, scale)
|
180 |
+
scale_node = graph_c.create_node(
|
181 |
+
'get_attr', scale_node_name, (), {}, scale_node_name)
|
182 |
+
# copy zero_point
|
183 |
+
zero_point_node_name = \
|
184 |
+
get_new_attr_name_with_prefix(
|
185 |
+
node_a.name + '_input_zero_point_')(gm_b)
|
186 |
+
setattr(gm_b, zero_point_node_name, zero_point)
|
187 |
+
zero_point_node = graph_c.create_node(
|
188 |
+
'get_attr', zero_point_node_name, (), {}, zero_point_node_name)
|
189 |
+
# create the quantize_per_tensor call
|
190 |
+
return graph_c.create_node(
|
191 |
+
'call_function', torch.quantize_per_tensor,
|
192 |
+
(prev_node_c, scale_node, zero_point_node, torch.quint8), {},
|
193 |
+
dtype_cast_name)
|
194 |
+
|
195 |
+
def _insert_dtype_cast_after_node(
    node_a: Node,
    node_c: Node,
    prev_node_c: Union[Node, List[Node]],
    gm_a: GraphModule,
    gm_b: GraphModule,
    graph_c: Graph,
    node_name_prefix: str,
    logger_cls: Callable,
    node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Union[Node, List[Node]]:
    """
    Given a starting graph C (derived from graph B) of

    ... -> prev_node_c -> node_c -> ...

    And a corresponding related node_a, inserts the correct dtype
    cast node after prev_node_c to cast into the dtype expected
    by node_a, resulting in:

                          dtype_cast
                        /
    ... -> prev_node_c -> node_c -> ...

    For example, if node_c is an int8 op and node_a is an fp32 op, this function
    will insert a dequant.
    """
    dtype_cast_op = None
    dtype_cast_mod_cls = None
    dtype_cast_method = None
    dtype_cast_method_dtype = None
    dtype_cast_scale = None
    dtype_cast_zero_point = None
    node_input_type_a, _node_output_type_a = \
        get_node_first_input_and_output_type(
            node_a, gm_a, logger_cls, node_type_to_io_type_map)
    node_input_type_c, _node_output_type_c = \
        get_node_first_input_and_output_type(
            node_c, gm_b, logger_cls, node_type_to_io_type_map)

    if (
        (node_input_type_a == NodeInputOrOutputType.FP32 and
         node_input_type_c == NodeInputOrOutputType.INT8) or
        (node_input_type_a == NodeInputOrOutputType.FP32 and
         node_input_type_c == NodeInputOrOutputType.FP16) or
        # TODO(future PR): determine the actual dtype of node_c,
        # the current code only works because dequantize works with
        # multiple input dtypes.
        (node_input_type_a == NodeInputOrOutputType.FP32 and
         node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8)
    ):
        dtype_cast_op = torch.dequantize
    elif (
        node_input_type_a == node_input_type_c and
        node_input_type_a != NodeInputOrOutputType.UNKNOWN
    ):
        dtype_cast_mod_cls = torch.nn.Identity
    elif (
        node_input_type_a == NodeInputOrOutputType.INT8 and
        node_input_type_c == NodeInputOrOutputType.FP32
    ):
        # int8 shadows fp32, the dtype cast needs to quantize to int8
        # with the right qparams.
        node_a_input_qparams = get_node_input_qparams(
            node_a, gm_a, node_type_to_io_type_map)
        if node_a_input_qparams is not None:
            dtype_cast_op = torch.quantize_per_tensor  # type: ignore[assignment]
            dtype_cast_scale, dtype_cast_zero_point = node_a_input_qparams
    elif (
        node_input_type_a == NodeInputOrOutputType.FP16 and
        node_input_type_c == NodeInputOrOutputType.FP32
    ):
        dtype_cast_method = 'to'
        dtype_cast_method_dtype = torch.float16
    else:
        raise AssertionError(
            f"dtype cast from {node_input_type_c} {node_c.format_node()} to " +
            f"{node_input_type_a} {node_a.format_node()} needs to be implemented")

    if isinstance(prev_node_c, Node):
        new_dtype_cast_name = \
            get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
        if dtype_cast_op:
            if dtype_cast_scale is not None and dtype_cast_zero_point is not None:
                return _insert_quantize_per_tensor_node(
                    prev_node_c, node_a, gm_b, graph_c, dtype_cast_scale,
                    dtype_cast_zero_point, new_dtype_cast_name)
            else:
                return graph_c.create_node(
                    'call_function', dtype_cast_op, (prev_node_c,), {},
                    new_dtype_cast_name)
        elif dtype_cast_method:
            return graph_c.create_node(
                'call_method', dtype_cast_method,
                (prev_node_c, dtype_cast_method_dtype), {}, new_dtype_cast_name)
        else:
            assert dtype_cast_mod_cls
            dtype_cast_mod = dtype_cast_mod_cls()
            setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
            return graph_c.create_node(
                'call_module', new_dtype_cast_name, (prev_node_c,), {},
                new_dtype_cast_name)
    elif isinstance(prev_node_c, list):
        results = []
        for prev_node_c_inner in prev_node_c:
            new_dtype_cast_name = \
                get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
            if dtype_cast_op:
                # TODO(future PR): add handling for quantize_per_tensor
                new_dtype_cast_node = graph_c.create_node(
                    'call_function', dtype_cast_op, (prev_node_c_inner,), {},
                    new_dtype_cast_name)
                results.append(new_dtype_cast_node)
            else:
                assert dtype_cast_mod_cls
                dtype_cast_mod = dtype_cast_mod_cls()
                setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
                new_dtype_cast_node = graph_c.create_node(
                    'call_module', new_dtype_cast_name, (prev_node_c_inner,), {},
                    new_dtype_cast_name)
                results.append(new_dtype_cast_node)
        return results
    else:
        raise AssertionError(f"type {type(prev_node_c)} is not handled")
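
# Summary of the cast selection above (derived from the branches; not part of
# the original file):
#
#   node_a input dtype | node_c input dtype       | inserted cast
#   -------------------+--------------------------+-------------------------------
#   fp32               | int8 / fp16 / fp32-or-8  | torch.dequantize
#   same as node_c     | same, known              | nn.Identity (no-op)
#   int8               | fp32                     | quantize_per_tensor (needs qparams)
#   fp16               | fp32                     | Tensor.to(torch.float16)
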
# TODO(future PR): look into using copy_node API instead
def _copy_node_from_a_to_c(
    node_a: Node,
    gm_a: GraphModule,
    gm_b: GraphModule,
    graph_c: Graph,
) -> Node:
    """
    Simple copy of node_a to graph_c.
    """
    if node_a.op == 'get_attr':
        node_a_copy_name = \
            get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
        node_a_obj = getattr_from_fqn(gm_a, node_a.target)  # type: ignore[arg-type]
        if torch.is_tensor(node_a_obj):
            node_a_obj = node_a_obj.detach()
        setattr(gm_b, node_a_copy_name, node_a_obj)
        node_a_copy = graph_c.create_node(
            node_a.op, node_a_copy_name, (), {}, node_a_copy_name)
        return node_a_copy
    elif node_a.op == 'call_method':
        assert node_a.target in ('dequantize', 'to'), \
            f"target {node_a.target} is not implemented"
        if node_a.target == 'dequantize':
            arg_copy = _copy_node_from_a_to_c(
                get_normalized_nth_input(node_a, gm_a, 0),
                gm_a, gm_b, graph_c)  # type: ignore[arg-type]
            node_a_copy_name = \
                get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
            node_a_copy = graph_c.create_node(
                node_a.op, node_a.target, (arg_copy,), {}, node_a_copy_name)
            return node_a_copy
        else:  # to
            arg_copy = _copy_node_from_a_to_c(
                get_normalized_nth_input(node_a, gm_a, 0), gm_a, gm_b, graph_c)  # type: ignore[arg-type]
            node_a_copy_name = \
                get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
            node_a_copy = graph_c.create_node(
                node_a.op, node_a.target,
                (arg_copy, get_normalized_nth_input(node_a, gm_a, 1)),
                {}, node_a_copy_name)
            return node_a_copy

    else:
        raise AssertionError(
            f"handling of node {node_a.format_node()} with op {node_a.op} is not implemented")
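
# Note (derived from the code above, not part of the original file): for
# 'dequantize' and 'to' the helper recurses into argument 0, so a chain such
# as get_attr(weight) -> dequantize is copied bottom-up into graph C.
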
def _can_insert_copy_of_subgraph_a(
    subgraph_a: NSSubgraph,
    gm_a: GraphModule,
    num_non_param_args_node_a: int,
) -> bool:
    """
    This function returns `False` if the input subgraph cannot be copied by
    `_insert_copy_of_subgraph_a_after_input_node_c`. This usually means
    that there is a corner case logic for which copy is not yet implemented.
    """
    # populate the list of nodes we need to check
    nodes = []
    cur_node = subgraph_a.end_node
    while cur_node != subgraph_a.start_node:
        nodes.append(cur_node)
        cur_node = get_normalized_nth_input(cur_node, gm_a, 0)  # type: ignore[assignment]
    nodes.append(cur_node)
    nodes.reverse()

    def _can_insert(node_a_arg, gm_a):
        if isinstance(node_a_arg, Node):
            arg_a = return_first_non_observer_node(node_a_arg, gm_a)
            if arg_a.op == 'call_method':
                return arg_a.target in ('dequantize', 'to')
            elif arg_a.op == 'get_attr':
                return True
            else:
                return False
        elif isinstance(node_a_arg, (list, tuple)):
            for el in node_a_arg:
                if not isinstance(el, Node):
                    return False
            return True

    # For each node, check if we handle the copy behavior. This follows the
    # logic in `_insert_copy_of_subgraph_a_after_input_node_c`.
    for node_a in nodes:

        local_num_non_param_args_node_a = num_non_param_args_node_a \
            if node_a is nodes[0] else 1

        norm_args_kwargs = node_a.normalized_arguments(
            gm_a, normalize_to_only_use_kwargs=True)
        if norm_args_kwargs is not None:
            norm_args, norm_kwargs = norm_args_kwargs
        else:
            norm_args, norm_kwargs = node_a.args, node_a.kwargs

        cur_idx = 0

        while cur_idx < len(norm_args):
            if cur_idx == 0:
                pass
            elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
                pass
            else:
                if not _can_insert(norm_args[cur_idx], gm_a):
                    return False
            cur_idx += 1

        for kwarg_val in norm_kwargs.values():
            # stitch the inputs from base graph
            if cur_idx == 0:
                pass
            elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
                pass
            else:
                if not _can_insert(kwarg_val, gm_a):
                    return False
            cur_idx += 1

    return True
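
# In short (derived from `_can_insert` above, not part of the original file):
# an extra argument is copyable only if it resolves to a get_attr node or to a
# 'dequantize'/'to' call_method, or is a list/tuple containing no Nodes;
# anything else makes the whole subgraph uncopyable.
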
def _insert_copy_of_subgraph_a_after_input_node_c(
    input_node_c: Union[Node, List[Node]],
    input_node_c_2: Optional[Union[Node, List[Node]]],
    subgraph_a: NSSubgraph,
    gm_a: GraphModule,
    gm_b: GraphModule,
    node_name_prefix: str,
) -> Node:
    """
    Inserts a copy of the nodes of `subgraph_a` (from `gm_a`) into the graph
    containing `input_node_c`, in start-to-end order, and returns the last
    inserted node. `input_node_c` (and `input_node_c_2`, if provided) are
    wired up as the non-param inputs of the first copied node.
    """
    if isinstance(input_node_c, Node):
        graph_c = input_node_c.graph
    else:
        assert isinstance(input_node_c, list)
        graph_c = input_node_c[0].graph

    # create a sequential list of the subgraphs' nodes from start to end,
    # because we need to add the nodes to graph C in non-reverse order
    nodes_of_a = [subgraph_a.end_node]
    cur_node = subgraph_a.end_node
    while cur_node != subgraph_a.start_node:
        cur_node = get_normalized_nth_input(cur_node, gm_a, 0)  # type: ignore[assignment]
        nodes_of_a.insert(0, cur_node)

    # go through nodes of a in order, and insert them into the graph of c
    # sequentially
    cur_node_a = nodes_of_a[0]
    cur_node_c = _insert_copy_of_node_a_after_input_node_c(
        input_node_c,
        input_node_c_2,
        cur_node_a,
        gm_a,
        gm_b,
        node_name_prefix)
    for cur_idx_a in range(1, len(nodes_of_a)):
        cur_node_a = nodes_of_a[cur_idx_a]
        prev_node_c = cur_node_c  # previous added node is the input to next node
        cur_node_c = _insert_copy_of_node_a_after_input_node_c(
            prev_node_c,
            # TODO(future PR): enable multiple inputs for nodes which are not at start of subgraph
            None,
            cur_node_a,
            gm_a,
            gm_b,
            node_name_prefix)
    # return the last inserted node
    return cur_node_c

def _insert_copy_of_node_a_after_input_node_c(
    input_node_c: Union[Node, List[Node]],
    input_node_c_2: Optional[Union[Node, List[Node]]],
    node_a: Node,
    gm_a: GraphModule,
    gm_b: GraphModule,
    node_name_prefix: str,
) -> Node:
    """
    Assume that node_a from graph_a has
      args (input, (input2)?, arg1, ...), and
      kwargs {kw0: kwarg0, ...}

    Note: input2 is optional. If it is None, we assume that the op
    has a single non-param input. If it is specified, we assume that the op
    has two non-param inputs.

    Copies the underlying values of arg1..argn and kwarg0..kwargn into gm_b,
    and creates the corresponding nodes in graph_c. Note: observers are ignored,
    so if an arg is an observer we navigate up until we find a non-observer parent.

    If node_a is a call_module, points the module pointed to by node_a to gm_b.

    Creates the copy of node_a in graph_c, with input as the first arg,
    and all other args and kwargs pointing to the copies of the objects
    in gm_b created above.

    An example in pictures:

    graph A:
    ========

    input -------------> node_a
                         / / /
    (input_2)?----------/ / /
                         / /
    weight -> weight_obs  /
                         /
    bias ----------------

    graph C (derived from B):
    =========================

    input_node_c --> node_a_copy
                     / / /
    (input_node_c_2)? / /
                     / /
    weight_copy ----/ /
                     /
    bias_copy ------/
    """
    if isinstance(input_node_c, Node):
        graph_c = input_node_c.graph
    else:
        assert isinstance(input_node_c, list)
        graph_c = input_node_c[0].graph

    norm_args_kwargs = node_a.normalized_arguments(
        gm_a, normalize_to_only_use_kwargs=True)
    if norm_args_kwargs is not None:
        norm_args, norm_kwargs = norm_args_kwargs
    else:
        norm_args, norm_kwargs = node_a.args, node_a.kwargs

    new_args = []
    new_kwargs = {}

    def _copy_arg(arg):
        # copy the other inputs from the other graph
        if isinstance(arg, Node):
            arg = return_first_non_observer_node(arg, gm_a)
            arg = _copy_node_from_a_to_c(arg, gm_a, gm_b, graph_c)
            return arg
        elif isinstance(arg, (int, float, torch.dtype)):
            return arg
        elif isinstance(arg, (list, tuple)):
            for el in arg:
                assert not isinstance(el, Node), \
                    "handling of Node inside list is not implemented"
            return arg
        else:
            raise AssertionError(
                f"handling for arg of type {type(arg)} is not implemented")

    cur_idx = 0

    while cur_idx < len(norm_args):
        if cur_idx == 0:
            new_arg = input_node_c
        elif cur_idx == 1 and input_node_c_2 is not None:
            new_arg = input_node_c_2
        else:
            new_arg = _copy_arg(norm_args[cur_idx])
        new_args.append(new_arg)
        cur_idx += 1

    for kwarg_name, kwarg_val in norm_kwargs.items():
        # stitch the inputs from base graph
        if cur_idx == 0:
            new_kwargs[kwarg_name] = input_node_c
        elif cur_idx == 1 and input_node_c_2 is not None:
            new_kwargs[kwarg_name] = input_node_c_2
        else:
            new_kwargs[kwarg_name] = _copy_arg(kwarg_val)
        cur_idx += 1

    new_args = tuple(new_args)  # type: ignore[assignment]

    node_a_shadows_c_name = \
        get_new_attr_name_with_prefix(node_name_prefix)(gm_b)

    if node_a.op == 'call_module':
        # if target is a module, we point to the module from gm_b
        new_mod_copy_name = \
            get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
        # fetch the corresponding module from gm_a
        assert isinstance(node_a.target, str)
        mod_a = getattr_from_fqn(gm_a, node_a.target)
        setattr(gm_b, new_mod_copy_name, mod_a)
        node_a_shadows_c = graph_c.create_node(
            node_a.op, new_mod_copy_name, new_args,
            new_kwargs, node_a_shadows_c_name)
        return node_a_shadows_c
    else:
        assert node_a.op in ('call_function', 'call_method')
        node_a_shadows_c = graph_c.create_node(
            node_a.op, node_a.target, new_args,
            new_kwargs, node_a_shadows_c_name)
        return node_a_shadows_c

def create_a_shadows_b(
    name_a: str,
    gm_a: GraphModule,
    name_b: str,
    gm_b: GraphModule,
    matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
    logger_cls: Callable,
    should_log_inputs: bool,
    node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> GraphModule:
    """
    Creates a new GraphModule consisting of the graph of C, with the meaningful
    nodes of A shadowing the corresponding nodes of B. For example,

    Graph A:
    a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2

    Graph B:
    b0 -> op0_int8 -> b1 -> op1_int8 -> b2

    matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)}

    Graph C (A shadows B):

        / dequant0 -> op0_fp32 -> logger_a_0  / dequant_1 -> op1_fp32 -> logger_a_1
       /                                     /
    b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1

    In a nutshell, this function does the following for each node pair:
    * copies the necessary attributes and modules from gm_a to gm_b,
      keeping names unique
    * adds a dtype cast op (dequant, quant, etc)
    * adds a copy of node_a in gm_b's graph
    * adds loggers to the outputs of node_a and node_b
    """

    if node_type_to_io_type_map is None:
        node_type_to_io_type_map = get_node_type_to_io_type_map()

    # graph_c is the graph created from copying the nodes of graph_b and inserting
    # the shadows with the nodes copied from graph_a
    graph_c = Graph()
    env_c: Dict[str, Any] = {}
    modules = dict(gm_b.named_modules())

    def load_arg(a):
        return map_arg(a, lambda node: env_c[node.name])

    start_node_b_to_matched_subgraph_a_and_name = {}
    end_node_b_to_matched_subgraph_a_and_name = {}
    for match_name, match in matched_subgraph_pairs.items():
        subgraph_a, subgraph_b = match
        ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
        ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
        start_node_b_to_matched_subgraph_a_and_name[subgraph_b.start_node] = \
            (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
        end_node_b_to_matched_subgraph_a_and_name[subgraph_b.end_node] = \
            (subgraph_a, match_name, ref_node_type_a, ref_node_type_b)

    for node_b in gm_b.graph.nodes:
        if node_b.op == 'output':
            graph_c.output(map_arg(node_b.args[0], load_arg))
            continue

        # calculate the flags to determine what to do with this node
        node_b_is_start_node = node_b in start_node_b_to_matched_subgraph_a_and_name
        node_b_is_end_node = node_b in end_node_b_to_matched_subgraph_a_and_name

        if (node_b_is_start_node or node_b_is_end_node):

            if node_b_is_start_node:
                subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
                    start_node_b_to_matched_subgraph_a_and_name[node_b]
            else:
                assert node_b_is_end_node
                subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
                    end_node_b_to_matched_subgraph_a_and_name[node_b]

            all_op_types_support_shadowing = (
                op_type_supports_shadowing(subgraph_a.start_node) and
                op_type_supports_shadowing(node_b)
            )
            if not all_op_types_support_shadowing:
                print(
                    f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                    f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                    ', unsupported')
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                continue

            # For both start_node and end_node verify that we know how to do
            # the dtype cast. If we do not, skip.
            node_input_type_a, node_output_type_a = \
                get_node_first_input_and_output_type(
                    subgraph_a.start_node, gm_a, logger_cls,
                    node_type_to_io_type_map)
            node_input_type_b, node_output_type_b = \
                get_node_first_input_and_output_type(
                    node_b, gm_b, logger_cls,
                    node_type_to_io_type_map)
            node_io_types_known_a_and_b = (
                node_input_type_a != NodeInputOrOutputType.UNKNOWN and
                node_output_type_a != NodeInputOrOutputType.UNKNOWN and
                node_input_type_b != NodeInputOrOutputType.UNKNOWN and
                node_output_type_b != NodeInputOrOutputType.UNKNOWN
            )
            if not node_io_types_known_a_and_b:
                print(
                    f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                    f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                    ', unknown dtype cast')
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                continue

            # If we are shadowing from fp32 to int8, we need to insert
            # quantize_per_tensor call with qparams from the previous node.
            # Only do this if we are able to infer these qparams from the graph.
            if (
                node_input_type_a == NodeInputOrOutputType.INT8 and
                node_input_type_b == NodeInputOrOutputType.FP32
            ):
                node_a_input_qparams = get_node_input_qparams(
                    subgraph_a.start_node, gm_a, node_type_to_io_type_map)
                if not node_a_input_qparams:
                    print(
                        f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                        f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                        ', unknown input qparams')
                    env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                    continue

            num_non_param_args_node_a = \
                get_number_of_non_param_args(subgraph_a.start_node, gm_a)
            if not _can_insert_copy_of_subgraph_a(subgraph_a, gm_a, num_non_param_args_node_a):
                print(
                    f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
                    f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
                    ', unhandled logic in subgraph copy')
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                continue

            fqn_base_a = _maybe_get_fqn(subgraph_a.base_op_node, gm_a)
            fqn_base_b = _maybe_get_fqn(subgraph_b.base_op_node, gm_b)

            if node_b_is_start_node:

                # if necessary, log the input of node_c
                if should_log_inputs:
                    prev_node_b = get_normalized_nth_input(node_b, gm_b, 0)
                    if isinstance(prev_node_b, Node):
                        prev_node_c = env_c[prev_node_b.name]
                        env_c[prev_node_c.name] = _insert_logger_after_node(
                            prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
                            node_b.name, name_b, ref_name, ref_node_type_b,
                            NSSingleResultValuesType.NODE_INPUT.value,
                            index_within_arg=0, index_of_arg=0,
                            fqn=fqn_base_b)
                    elif isinstance(prev_node_b, list):
                        # first, save the prev_node instances, because they
                        # will be overwritten in the env after the first logger
                        # is added
                        prev_node_c_list = [env_c[arg.name] for arg in prev_node_b]

                        for arg_idx, arg in enumerate(prev_node_b):
                            prev_node_c = prev_node_c_list[arg_idx]
                            env_c[prev_node_c.name] = _insert_logger_after_node(
                                prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
                                node_b.name, name_b, ref_name, ref_node_type_b,
                                NSSingleResultValuesType.NODE_INPUT.value,
                                index_within_arg=arg_idx, index_of_arg=0,
                                fqn=fqn_base_b)
                    else:
                        # logging of inputs which are neither Nodes nor lists
                        # is not supported yet
                        raise AssertionError(f"type {type(prev_node_b)} is not handled yet")
                # subgraph so far:
                #
                # (prev_node_c)+ -> (logger_c_input)?

            # Note: this if statement is always True, spelling it out to clarify code
            # intent.
            if node_b_is_start_node or node_b_is_end_node:
                # ensure env_c is populated with base node
                env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
                node_c = env_c[node_b.name]

                # after this point,
                #
                # node_a is the original node from graph_a, with parent module gm_a
                # node_b is the original node from graph_b, with parent module gm_b
                # node_c is the copy of node_b in graph_c
                #
                # subgraph so far:
                #
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c

            if node_b_is_start_node:

                # cast dtype from the dtype of node_c's input to the dtype of
                # node_a's input (dequant, etc)
                # prev_node_c = node_c.args[0]
                prev_node_c = get_normalized_nth_input(node_c, gm_b, 0)
                if should_log_inputs:
                    # skip the input logger when inserting a dtype cast
                    if isinstance(prev_node_c, Node):
                        prev_node_c = get_normalized_nth_input(node_c, gm_b, 0)
                    elif isinstance(prev_node_c, list):
                        prev_node_c = [get_normalized_nth_input(arg, gm_b, 0) for arg in prev_node_c]
                dtype_cast_node = _insert_dtype_cast_after_node(
                    subgraph_a.start_node, node_c, prev_node_c, gm_a, gm_b, graph_c,
                    node_b.name + '_dtype_cast_', logger_cls,
                    node_type_to_io_type_map)
                # note: not inserting to env_c because all nodes which use the dtype
                # casts are copied from graph_a
                #
                # subgraph so far:
                #
                #           (dtype_cast_node)+
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c

                # if input logging is enabled, log the input to the subgraph
                if should_log_inputs:
                    # TODO: explain this
                    ref_node_name = ''
                    if isinstance(dtype_cast_node, Node):
                        dtype_cast_node = _insert_logger_after_node(
                            dtype_cast_node, gm_b, logger_cls, '_ns_logger_a_inp_',
                            ref_node_name, name_a, ref_name, ref_node_type_a,
                            NSSingleResultValuesType.NODE_INPUT.value,
                            index_within_arg=0, index_of_arg=0,
                            fqn=fqn_base_a)
                        input_logger: Union[Node, List[Node]] = dtype_cast_node
                    else:
                        assert isinstance(dtype_cast_node, list)
                        new_loggers = []
                        for dtype_cast_idx, dtype_cast_node_inner in enumerate(dtype_cast_node):
                            dtype_cast_logger = _insert_logger_after_node(
                                dtype_cast_node_inner, gm_b, logger_cls, '_ns_logger_a_inp_',
                                ref_node_name, name_a, ref_name, ref_node_type_a,
                                NSSingleResultValuesType.NODE_INPUT.value,
                                index_within_arg=dtype_cast_idx,
                                index_of_arg=0,
                                fqn=fqn_base_a)
                            new_loggers.append(dtype_cast_logger)
                        dtype_cast_node = new_loggers
                        input_logger = dtype_cast_node
                    # subgraph so far:
                    #
                    #       (dtype_cast_node)+ -> (logger_a_input)?
                    #                  /
                    # prev_node_c -> (logger_c_input)? -> node_start_c

                # hook up the new mod_a copy to be in the graph, receiving the
                # same inputs as mod_b does, with dtype cast to match a
                # Some ops, such as LSTMs, have two non-param inputs. If we have
                # such an op, pass the second param as well. Note: dtype casting
                # for the second param is not implemented yet, it can be added
                # later if there is a use case.
                node_c_second_non_param_arg = None
                num_non_param_args_node_a = get_number_of_non_param_args(subgraph_a.start_node, gm_a)
                if num_non_param_args_node_a == 2:
                    # node_c_second_non_param_arg = node_c.args[1]
                    node_c_second_non_param_arg = get_normalized_nth_input(node_c, gm_b, 1)
                node_a_shadows_c = _insert_copy_of_subgraph_a_after_input_node_c(
                    dtype_cast_node, node_c_second_non_param_arg,
                    subgraph_a, gm_a, gm_b, node_c.name + '_shadow_copy_')
                env_c[node_a_shadows_c.name] = node_a_shadows_c
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy(args/kwargs not shown)
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c

                if should_log_inputs:
                    # When we created the input logger, we left the ref_node_name
                    # as an empty string, because the subgraph copy did not exist
                    # yet. Now that the subgraph copy exists, we modify this name
                    # to its true value.
                    # Note: the alternative to this is to create the input logger
                    # after creating the subgraph, which is slightly more
                    # complicated. This is the lesser of two evils.
                    # input_logger = env_c[dtype_cast_node.name]
                    # Find the first node in the subgraph
                    cur_node = node_a_shadows_c
                    while get_normalized_nth_input(cur_node, gm_b, 0) != input_logger:
                        cur_node = get_normalized_nth_input(cur_node, gm_b, 0)  # type: ignore[assignment]
                    if isinstance(input_logger, Node):
                        input_logger_mod = getattr(gm_b, input_logger.name)
                        input_logger_mod.ref_node_name = cur_node.name
                    else:
                        assert isinstance(input_logger, list)
                        for input_logger_inner in input_logger:
                            input_logger_mod = getattr(gm_b, input_logger_inner.name)
                            input_logger_mod.ref_node_name = cur_node.name

                # hook up a logger to the mod_a copy
                env_c[node_a_shadows_c.name] = _insert_logger_after_node(
                    env_c[node_a_shadows_c.name], gm_b, logger_cls, '_ns_logger_a_',
                    node_a_shadows_c.name, name_a, ref_name, ref_node_type_a,
                    NSSingleResultValuesType.NODE_OUTPUT.value,
                    index_within_arg=0, index_of_arg=0,
                    fqn=fqn_base_a)
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
                #                  /
                # (prev_node_c)+ -> (logger_c_input)? -> node_start_c

            if node_b_is_end_node:

                # hook up a logger to the mod_b copy
                env_c[node_b.name] = _insert_logger_after_node(
                    env_c[node_b.name], gm_b, logger_cls, '_ns_logger_b_',
                    node_b.name, name_b, ref_name, ref_node_type_b,
                    NSSingleResultValuesType.NODE_OUTPUT.value,
                    index_within_arg=0, index_of_arg=0,
                    fqn=fqn_base_b)
                # subgraph so far:
                #
                #       dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
                #                  /
                # (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c
                #
                # Note: node_start_c may be the same node as node_end_c, or they
                # may have nodes in between.

        else:
            env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)

    gm_c = GraphModule(gm_b, graph_c)
    return gm_c
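
# Illustrative usage sketch (not part of the original file): a typical call
# site for the pass above. `gm_fp32`/`gm_int8` are hypothetical traced models,
# `matched_pairs` would normally come from the subgraph matcher, and
# `logger_cls` is the numeric suite's logger module class.
def _example_create_a_shadows_b(gm_fp32, gm_int8, matched_pairs, logger_cls):
    gm_shadowed = create_a_shadows_b(
        'fp32', gm_fp32, 'int8', gm_int8, matched_pairs, logger_cls,
        should_log_inputs=False)
    # running calibration data through gm_shadowed populates both the
    # '_ns_logger_a_*' and '_ns_logger_b_*' loggers for later comparison
    return gm_shadowed
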
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py
ADDED
@@ -0,0 +1,761 @@
import operator

import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized

import torch.ao.nn.quantized as nnq
import torch.ao.nn.quantized.dynamic as nnqd
import torch.ao.nn.intrinsic.quantized as nniq
import torch.ao.nn.intrinsic.quantized.dynamic as nniqd
import torch.ao.nn.intrinsic.qat as nniqat
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.qat as nnqat
import torch.ao.nn.qat.dynamic as nnqatd
from torch.ao.quantization.backend_config import get_native_backend_config
import torch.ao.quantization.fx._lower_to_native_backend as \
    _lower_to_native_backend
import torch.ao.quantization.quantization_mappings as quantization_mappings

from .ns_types import NSNodeTargetType

from typing import Callable, Dict, List, Optional, Set, Tuple

def get_base_name_to_sets_of_related_ops() -> Dict[str, Set[NSNodeTargetType]]:
    # note: this set is modified below by items from backend_config
    sets_of_related_ops: List[Set[NSNodeTargetType]] = [
        # conv modules
        {
            nn.Conv1d,
        },
        {
            nn.Conv2d,
        },
        {
            nn.Conv3d,
        },
        # conv functionals
        {
            F.conv1d,
        },
        {
            F.conv2d,
        },
        {
            F.conv3d,
        },
        # linear modules
        {
            nn.Linear,
        },
        # linear functionals
        {
            F.linear,
        },
        # average pool
        {
            nn.AvgPool1d,
            torch.avg_pool1d,
        },
        {
            nn.AvgPool2d,
            torch._C._nn.avg_pool2d,
        },
        {
            nn.AvgPool3d,
            torch._C._nn.avg_pool3d,
        },
        # adaptive average pool
        {
            nn.AdaptiveAvgPool1d,
            F.adaptive_avg_pool1d,
        },
        {
            nn.AdaptiveAvgPool2d,
            F.adaptive_avg_pool2d,
        },
        {
            nn.AdaptiveAvgPool3d,
            F.adaptive_avg_pool3d,
        },
        # LSTM
        {
            nn.LSTM,
        },
        # add
        {
            torch.add,
            operator.add,  # x + y
        },
        # cat
        {
            torch.cat,
        },
        # mul
        {
            torch.mul,
            operator.mul,
        },
        # relu
        {
            F.relu,
            nn.ReLU,
            'relu',
            'relu_',
            torch.relu,
        },
        # maxpool
        {
            nn.MaxPool1d,
            F.max_pool1d,
        },
        {
            nn.MaxPool2d,
            F.max_pool2d,
        },
        {
            nn.MaxPool3d,
            F.max_pool3d,
        },
        # sigmoid
        {
            torch.sigmoid,
            'sigmoid',
            'sigmoid_',
            nn.Sigmoid,
            F.sigmoid,
        },
        # BatchNorm
        {
            nn.BatchNorm2d,
        },
        {
            nn.BatchNorm3d,
        },
        # ConvTranspose
        {
            nn.ConvTranspose1d,
        },
        {
            nn.ConvTranspose2d,
        },
        {
            nn.ConvTranspose3d,
        },
        # functional transposed conv
        {
            F.conv_transpose1d,
        },
        {
            F.conv_transpose2d,
        },
        {
            F.conv_transpose3d,
        },
        # ELU
        {
            nn.ELU,
        },
        # Embedding
        {
            nn.Embedding,
        },
        # EmbeddingBag
        {
            nn.EmbeddingBag,
        },
        # GroupNorm
        {
            nn.GroupNorm,
        },
        # Hardswish
        {
            nn.Hardswish,
        },
        # InstanceNorm
        {
            nn.InstanceNorm1d,
        },
        {
            nn.InstanceNorm2d,
        },
        {
            nn.InstanceNorm3d,
        },
        # LayerNorm
        {
            nn.LayerNorm,
        },
        # LeakyReLU
        {
            nn.LeakyReLU,
        },
        # ReLU6
        {
            nn.ReLU6,
            F.relu6,
        },
        # F.elu
        {
            F.elu,
        },
        # F.hardswish
        {
            F.hardswish,
        },
        # F.group_norm
        {
            F.group_norm,
        },
        # F.instance_norm
        {
            F.instance_norm,
        },
        # F.layer_norm
        {
            F.layer_norm,
        },
        # F.leaky_relu
        {
            F.leaky_relu,
        },
        # F.silu
        {
            nn.SiLU,
            F.silu,
        },
        # F.mish
        {
            nn.Mish,
            F.mish,
        },
        # F.tanh
        {
            nn.Tanh,
            F.tanh,
            torch.tanh,
            'tanh_',
            'tanh',
        },
        # F.hardsigmoid
        {
            'hardsigmoid_',
            'hardsigmoid',
            F.hardsigmoid,
            nn.Hardsigmoid,
        },
        # F.hardtanh
        {
            nn.Hardtanh,
            F.hardtanh,
            F.hardtanh_,
        },
        # floordiv
        {
            operator.floordiv,
        },
        # unsqueeze
        {
            torch.unsqueeze,
        },
        # stack
        {
            torch.stack,
        },
        # squeeze
        {
            torch.squeeze,
        },
        # sort
        {
            torch.sort,
        },
        # repeat_interleave
        {
            torch.repeat_interleave,
        },
        # min
        {
            torch.min,
        },
        # mean
        {
            torch.mean,
        },
        # max
        {
            torch.max,
        },
        # transpose
        {
            torch.transpose,
        },
        # flatten
        {
            torch.flatten,
        },
        # clamp
        {
            torch.clamp,
        },
        # chunk
        {
            torch.chunk,
        },
        # interpolate
        {
            torch.nn.functional.interpolate,
        },
        # dropout
        {
            nn.Dropout,
        },
        # F.dropout
        {
            F.dropout,
        },
        # matmul
        {
            torch.matmul,
        },
        # Softmax
        {
            nn.Softmax,
        },
        # PReLU
        {
            nn.PReLU,
            nnq.PReLU,
        },
        # F.prelu
        {
            F.prelu,
            toq.prelu,
        },
        # pixel shuffle
        {
            nn.PixelShuffle,
        },
        {
            F.pixel_shuffle,
        },
        # pixel unshuffle
        {
            nn.PixelUnshuffle,
        },
        {
            F.pixel_unshuffle,
        },
        # narrow
        {
            torch.narrow,
        },
    ]

    # for each floating point op, add versions of the op added by
    # backend_config
    backend_config = get_native_backend_config()

    new_connections: List[Tuple[Callable, Callable]] = [
        # technical debt edge case
        (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear),
    ]

    for pattern, config in backend_config._pattern_complex_format_to_config.items():

        # pattern format: (c, (b, a))
        first_element = pattern
        # look from the end, because pattern is in reverse order
        while isinstance(first_element, (list, tuple)):
            first_element = first_element[-1]

        if config.fused_module is not None:
            # case 1: pattern fuses a pattern of ops into an op
            # example: nn.Conv1d, nn.ReLU fused into nni.ConvReLU1d
            new_connections.append((first_element, config.fused_module))

        if config.qat_module is not None:
            # case 2: pattern swaps a module into a QAT module
            # example: nni.ConvReLU1d swapped into nniqat.ConvReLU1d
            new_connections.append((first_element, config.qat_module))

        if config.reference_quantized_module is not None:
            # case 3: reference version of floating point module, such as
            # nn.Conv2d and nnqr.Conv2d
            new_connections.append((first_element, config.reference_quantized_module))

    #
    # Add reference module swaps from default lowering path
    #

    for source_to_target in (
        _lower_to_native_backend.STATIC_LOWER_MODULE_MAP,
        _lower_to_native_backend.DYNAMIC_LOWER_MODULE_MAP,
        _lower_to_native_backend.WEIGHT_ONLY_LOWER_MODULE_MAP,
        _lower_to_native_backend.SPECIAL_PATTERN_LOWER_MODULE_MAP,
    ):
        for source, target in source_to_target.items():  # type: ignore[attr-defined]
            new_connections.append((source, target))

    for source_to_double_target in (
        _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_MAP,
        _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP,
        _lower_to_native_backend.DYNAMIC_LOWER_FUSED_MODULE_MAP,
    ):
        for source, (target1, target2) in source_to_double_target.items():  # type: ignore[attr-defined]
            new_connections.append((source, target1))
            new_connections.append((source, target2))

    #
    # Add function swaps from default lowering path
    #

    for source, (target1, target2) in \
            _lower_to_native_backend.STATIC_LOWER_FUNCTIONAL_MAP.items():
        new_connections.append((source, target1))
        new_connections.append((source, target2))

    for source_to_target in (
        _lower_to_native_backend.QBIN_OP_MAPPING,
        _lower_to_native_backend.QBIN_RELU_OP_MAPPING,
        quantization_mappings.DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
    ):
        for source, target in source_to_target.items():
            new_connections.append((source, target))

    #
    # Add other swaps, ideally in the future this could be removed
    # after the lowering code stops using these.
    #
    for source_to_target in (
        quantization_mappings.DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
    ):
        for source, target in source_to_target.items():
            new_connections.append((source, target))

    # add the new connections from backend_config
    for item1, item2 in new_connections:
        for set_of_related_ops in sets_of_related_ops:
            if item1 in set_of_related_ops or item2 in set_of_related_ops:
                set_of_related_ops.add(item1)
                set_of_related_ops.add(item2)
                break

    base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]] = {}

    counter = 0
    for set_of_related_ops in sets_of_related_ops:
        base_name = str(counter)
        counter += 1
        base_name_to_sets_of_related_ops[base_name] = set_of_related_ops

    return base_name_to_sets_of_related_ops

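# Illustrative sketch (not part of the original file): the mapping above
# groups interchangeable ops under opaque string keys, e.g. nn.Conv2d ends up
# in one set together with its fused/QAT/quantized counterparts.
def _example_related_ops_lookup() -> Set[NSNodeTargetType]:
    mapping = get_base_name_to_sets_of_related_ops()
    # find the set that nn.Conv2d belongs to
    return next(s for s in mapping.values() if nn.Conv2d in s)
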
def get_base_name_for_op(
    base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
    op: NSNodeTargetType,
) -> Optional[str]:
    for base_name, set_of_related_ops in base_name_to_sets_of_related_ops.items():
        if op in set_of_related_ops:
            return base_name
    return None


def add_op_to_sets_of_related_ops(
    base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
    op: NSNodeTargetType,
    related_op: Optional[NSNodeTargetType],
) -> None:
    if related_op is not None:
        for set_of_related_ops in base_name_to_sets_of_related_ops.values():
            if related_op in set_of_related_ops:
                set_of_related_ops.add(op)
                return
        # if we got here, related_op was not found
        raise AssertionError(f"{related_op} was not found")
    else:
        counter = 0
        while str(counter) in base_name_to_sets_of_related_ops:
            counter += 1
        base_name_to_sets_of_related_ops[str(counter)] = {op}

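# Illustrative usage sketch (not part of the original file): registering a
# hypothetical user-defined module `MyConv2d` as related to nn.Conv2d, so the
# matcher treats the two as the same logical op.
def _example_register_custom_op(MyConv2d: Callable) -> Dict[str, Set[NSNodeTargetType]]:
    mapping = get_base_name_to_sets_of_related_ops()
    add_op_to_sets_of_related_ops(mapping, MyConv2d, nn.Conv2d)
    return mapping

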
# TODO(future PR): clean this up
def get_node_type_to_io_type_map() -> Dict[str, Set[NSNodeTargetType]]:
    FUNS_IO_TYPE_FP32: Set[NSNodeTargetType] = {
        F.linear,
        F.conv1d,
        F.conv2d,
        F.conv3d,
        torch.cat,
        F.elu,
        F.hardswish,
        F.instance_norm,
        F.layer_norm,
        F.leaky_relu,
        F.dropout,
        F.silu,
        F.mish,
        operator.add,
        torch.add,
        operator.mul,
        torch.mul,
        torch.sum,
        F.prelu,
    }

    FUNS_IO_TYPE_FP16: Set[NSNodeTargetType] = set()

    FUNS_IO_TYPE_INT8: Set[NSNodeTargetType] = {
        toq.linear,
        toq.linear_relu,
        toq.conv1d,
        toq.conv1d_relu,
        toq.conv2d,
        toq.conv2d_relu,
        toq.conv3d,
        toq.conv3d_relu,
        toq.cat,
        toq.elu,
        toq.hardswish,
        toq.instance_norm,
        toq.layer_norm,
        toq.leaky_relu,
        toq.dropout,
        toq.prelu,
        # TODO(future PR): implement shadowing for binary ops and
        # uncomment below
        # toq.add,
        # toq.mul,
    }

    FUNS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
        F.relu,
        F.tanh,
        torch.tanh,
        F.sigmoid,
        torch.sigmoid,
        F.hardsigmoid,
        operator.floordiv,
        torch.adaptive_avg_pool1d,
        F.adaptive_avg_pool2d,
        F.adaptive_avg_pool3d,
        F.dropout,
        F.hardtanh,
        F.hardtanh_,
        F.interpolate,
        F.max_pool1d,
        F.max_pool2d,
        F.max_pool3d,
        F.relu6,
        F.pixel_shuffle,
        F.pixel_unshuffle,
        torch.avg_pool1d,
        torch._C._nn.avg_pool2d,
        torch._C._nn.avg_pool3d,
        torch.cat,
        torch.chunk,
        torch.clamp,
        torch.flatten,
        torch.transpose,
        torch.max,
        torch.mean,
        torch.min,
        torch.narrow,
        torch.repeat_interleave,
        torch.sort,
        torch.squeeze,
        torch.stack,
        torch.unsqueeze,
        operator.add,
    }

    MODS_IO_TYPE_FP32: Set[NSNodeTargetType] = {
        nn.Linear,
        nnqat.Linear,
        nnqatd.Linear,
        nnqd.Linear,
        torch.nn.modules.linear.NonDynamicallyQuantizableLinear,
        nn.Conv1d,
        nn.Conv2d,
        nn.Conv3d,
        nnqat.Conv1d,
        nnqat.Conv2d,
        nnqat.Conv3d,
        nnqat.Embedding,
        nnqat.EmbeddingBag,
        nn.LSTM,
        # note: nnqd.Linear is an instance of nnq.Linear, so this
        # check has to happen before the int8 module check
        nnqd.LSTM,
        nn.BatchNorm2d,
        nn.BatchNorm3d,
        nn.Dropout,
        nn.ConvTranspose1d,
        nn.ConvTranspose2d,
        nn.ConvTranspose3d,
        nn.ELU,
        nn.GroupNorm,
        nn.InstanceNorm1d,
        nn.InstanceNorm2d,
        nn.InstanceNorm3d,
        nn.LayerNorm,
        nn.Hardswish,
        nn.LeakyReLU,
        nn.ReLU6,
        nn.SiLU,
        nn.Mish,
        nn.Softmax,
        nn.PReLU,
        nni.BNReLU2d,
        nni.BNReLU3d,
        nni.ConvReLU1d,
        nni.ConvReLU2d,
        nni.ConvReLU3d,
        nni.LinearReLU,
        nni.LinearBn1d,
        nni.ConvBn1d,
        nni.ConvBn2d,
        nni.ConvBn3d,
        nniqat.ConvBn1d,
        nniqat.ConvBn2d,
        nniqat.ConvBn3d,
        nniqat.ConvBnReLU1d,
        nniqat.ConvBnReLU2d,
        nniqat.ConvBnReLU3d,
        nniqat.ConvReLU1d,
        nniqat.ConvReLU2d,
        nniqat.ConvReLU3d,
        nniqat.LinearReLU,
        nniqat.LinearBn1d,
        nniqd.LinearReLU,
        nni.LinearLeakyReLU,
        nni.LinearTanh,
        nni.ConvAdd2d,
        nni.ConvAddReLU2d,
    }

    MODS_IO_TYPE_INT8: Set[NSNodeTargetType] = {
        nnq.Linear,
        nnq.Conv1d,
        nnq.Conv2d,
        nnq.Conv3d,
        nnq.BatchNorm2d,
        nnq.BatchNorm3d,
        nnq.Dropout,
        nnq.ConvTranspose1d,
        nnq.ConvTranspose2d,
        nnq.ELU,
        nnq.InstanceNorm1d,
        nnq.InstanceNorm2d,
        nnq.InstanceNorm3d,
        nnq.LayerNorm,
        nnq.Hardswish,
        nnq.LeakyReLU,
        nnq.Embedding,
        nnq.EmbeddingBag,
        nnq.Softmax,
        nnq.PReLU,
        nniq.BNReLU2d,
        nniq.BNReLU3d,
        nniq.ConvReLU1d,
        nniq.ConvReLU2d,
        nniq.ConvReLU3d,
        nniq.LinearReLU,
        nniq.LinearLeakyReLU,
        nniq.LinearTanh,
        nniq.ConvAdd2d,
        nniq.ConvAddReLU2d,
    }

    MODS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
        nn.ReLU,
        nn.Tanh,
        nn.Sigmoid,
        nn.Hardsigmoid,
        nn.AdaptiveAvgPool1d,
        nn.AdaptiveAvgPool2d,
        nn.AdaptiveAvgPool3d,
        nn.AvgPool1d,
        nn.AvgPool2d,
        nn.AvgPool3d,
        nn.Dropout,
        nn.Hardtanh,
        nn.Identity,
        nn.MaxPool1d,
        nn.MaxPool2d,
        nn.MaxPool3d,
        nn.PixelShuffle,
        nn.PixelUnshuffle,
        nn.ReLU6,
    }

    METHS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = {
        'sigmoid_',
        'sigmoid',
        'tanh_',
        'tanh',
        'hardsigmoid_',
        'hardsigmoid',
        'relu_',
        'relu',
    }

    return {
        'funs_io_type_fp32': FUNS_IO_TYPE_FP32,
        'funs_io_type_fp16': FUNS_IO_TYPE_FP16,
        'funs_io_type_int8': FUNS_IO_TYPE_INT8,
        'funs_io_type_fp32_or_int8': FUNS_IO_TYPE_FP32_OR_INT8,
        'mods_io_type_fp32': MODS_IO_TYPE_FP32,
        'mods_io_type_int8': MODS_IO_TYPE_INT8,
        'mods_io_type_fp32_or_int8': MODS_IO_TYPE_FP32_OR_INT8,
        'meths_io_type_fp32_or_int8': METHS_IO_TYPE_FP32_OR_INT8,
    }

def get_unmatchable_types_map() -> Dict[str, Set[NSNodeTargetType]]:

    FUNS_UNMATCHABLE: Set[NSNodeTargetType] = {
        torch.quantize_per_tensor,
        operator.getitem,
    }

    MODS_UNMATCHABLE: Set[NSNodeTargetType] = {
        nn.Identity,
    }

    METHS_UNMATCHABLE: Set[NSNodeTargetType] = {
        'to',
        'dequantize',
        'reshape',
        'view',
        'unsqueeze_',
        'unsqueeze',
        'transpose',
        'squeeze_',
        'squeeze',
        'size',
        'shape',
        'resize_',
        'repeat_interleave',
        'repeat',
        'permute',
        'numel',
        'mean',
        'detach_',
        'detach',
        'contiguous',
        'clamp',
        'chunk',
    }

    return {
        'funs_unmatchable': FUNS_UNMATCHABLE,
        'mods_unmatchable': MODS_UNMATCHABLE,
        'meths_unmatchable': METHS_UNMATCHABLE,
    }
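
# Illustrative sketch (not part of the original file): both maps above are
# plain dicts of sets keyed by category name, consumed by the graph matcher
# and the shadow-logger passes.
def _example_io_and_unmatchable_types():
    io_map = get_node_type_to_io_type_map()
    unmatchable = get_unmatchable_types_map()
    assert F.linear in io_map['funs_io_type_fp32']
    assert 'dequantize' in unmatchable['meths_unmatchable']
    return io_map, unmatchable
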
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py
ADDED
@@ -0,0 +1,1312 @@
import torch
import torch.fx
from torch.fx import (
    Node,
    GraphModule,
    Graph,
)

from torch.ao.ns.fx.utils import (
    # TODO(future PR): make this work correctly for methods
    get_target_type_str,
    get_normalized_nth_input,
)
from torch.ao.ns.fx.ns_types import (
    NSSingleResultValuesType,
    NSResultsType,
)
from torch.ao.ns.fx.graph_passes import _maybe_get_fqn
from torch.ao.quantization import QConfigMapping
from torch.ao.quantization.qconfig import QConfigAny
from torch.ao.quantization.utils import getattr_from_fqn
from torch.ao.quantization.fx.match_utils import _MatchResult
from torch.utils._pytree import tree_map

import collections
import copy
from typing import List, Dict, Set, Tuple, Callable, Any, Optional
import operator

SHADOW_NODE_NAME_PREFIX = 'shadow'
SHADOW_WRAPPER_NODE_NAME_PREFIX = 'shadow_wrapper'

# TODO(future PR): reuse existing mapping instead of creating a new one
BINARY_FUNCTIONS = {
    torch.add,
    torch.Tensor.add,
    operator.add,
    torch.mul,
    torch.Tensor.mul,
    operator.mul,
}

def _get_attr_name(subgraph_idx, subgraph_candidate_idx):
    return f"{SHADOW_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}"

def _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx):
    return f"{SHADOW_WRAPPER_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}"

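# Illustrative sketch (not part of the original file): the helpers above
# produce the attribute names used throughout this module, e.g.
#
#     _get_attr_name(0, 1)          # 'shadow_0_1'
#     _get_attr_wrapper_name(0, 1)  # 'shadow_wrapper_0_1'
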
class OutputProp:
    """
    Output propagation (modeled from shape propagation).

    Given a GraphModule and an example input, saves the output flowing
    through each node on `node.traced_result`.

    Code based on the example from
    https://pytorch.org/docs/stable/fx.html#the-interpreter-pattern
    """
    def __init__(self, mod):
        self.mod = mod
        self.graph = mod.graph
        self.modules = dict(self.mod.named_modules())

    def propagate(self, *args):
        args_iter = iter(args)
        env : Dict[str, Node] = {}

        def load_arg(a):
            return torch.fx.graph.map_arg(a, lambda n: env[n.name])

        def fetch_attr(target : str):
            target_atoms = target.split('.')
            attr_itr = self.mod
            for i, atom in enumerate(target_atoms):
                if not hasattr(attr_itr, atom):
                    raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
                attr_itr = getattr(attr_itr, atom)
            return attr_itr

        for node in self.graph.nodes:
            if node.op == 'placeholder':
                result = next(args_iter)
            elif node.op == 'get_attr':
                result = fetch_attr(node.target)
            elif node.op == 'call_function':
                result = node.target(*load_arg(node.args), **load_arg(node.kwargs))
            elif node.op == 'call_method':
                self_obj, *args = load_arg(node.args)
                kwargs = load_arg(node.kwargs)
                result = getattr(self_obj, node.target)(*args, **kwargs)
            elif node.op == 'call_module':
                result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs))

            if isinstance(result, torch.Tensor):
                node.traced_result = result

            env[node.name] = result

        return None

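# Illustrative sketch (not part of the original file): minimal usage of
# OutputProp, assuming a hypothetical traced model `my_model` and example
# input `x`. After `propagate`, each node that produced a Tensor carries the
# value on `node.traced_result`, as described in the docstring above.
#
#     gm = torch.fx.symbolic_trace(my_model)
#     OutputProp(gm).propagate(x)
#     for node in gm.graph.nodes:
#         if hasattr(node, 'traced_result'):
#             print(node.name, node.traced_result.shape)
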
def _get_dedup_subgraphs(
    matches: Dict[str, _MatchResult]
) -> Dict[str, List[Node]]:
    # the original matches variable is unique by node, make it unique by subgraph
    # instead
    seen_nodes = set()
    subgraphs_dedup = {}

    # Dict items are not reversible until Python 3.8, so we hack it
    # to be compatible with previous Python versions
    # TODO(future PR): try reversed(list(matches.items()))
    matches_items_reversed: List[Tuple[str, _MatchResult]] = []
    for name, cur_match in matches.items():
        matches_items_reversed.insert(0, (name, cur_match))

    # Note: the order is important. `matches` currently provides the matches
    # in reverse order. We would like to process the matches in non-reverse
    # order, so that we can create an intuitive naming scheme, such as
    # naming the first op's submodules `shadow_0_0` through `shadow_0_(n-1)`
    for name, cur_match in matches_items_reversed:  # type: ignore[call-overload]
        was_seen = False
        for node_or_tuple in cur_match[1]:

            # Cur_match[1] has an unusual type. It says that it's a `List[Node]`,
            # but it is really not. Furthermore, the contents of this field
            # can change from match results of multiple nodes of the same pattern
            #
            # For example, for conv -> bn -> relu, we see
            # match_results = {
            #   'conv': (relu, [(bn, conv), relu], ...),
            #   'bn': (relu, [(bn, conv), relu], ...),
            #   'relu': (relu, [(bn, conv), relu], ...),
            # }
            #
            # Ideally we should clean up the `find_matches` function to make
            # this more intuitive. For the purposes of this prototype, we hack
            # around it.

            if isinstance(node_or_tuple, Node):
                if node_or_tuple in seen_nodes:
                    was_seen = True
                seen_nodes.add(node_or_tuple)

            else:
                assert isinstance(node_or_tuple, tuple)
                for node in node_or_tuple:
                    assert isinstance(node, Node)
                    if node in seen_nodes:
                        was_seen = True
                    seen_nodes.add(node)

        if was_seen:
            continue

        # Start with the unusual type, convert it to [op_0, ..., op_n]
        list_of_nodes = []

        if len(cur_match[1]) == 1:
            list_of_nodes = cur_match[1]
        else:
            assert len(cur_match[1]) == 2
            # either (a, b), or ((a, b), c) or (c, (a, b))
            # cannot make any assumptions on order, not clear what the
            # _find_matches function is doing to populate this
            # TODO(future PR): make this code less confusing, see discussion
            # in https://github.com/pytorch/pytorch/pull/80521/files#r975918836

            def _order_nodes(node_a, node_b, node_c) -> List[Node]:
                nodes = [node_a, node_b, node_c]
                first_node = None
                mid_node = None
                last_node = None
                for n in nodes:
                    prev_n = n.args[0]
                    next_n = next(iter(n.users))
                    if prev_n not in nodes:
                        first_node = n
                    elif next_n not in nodes:
                        last_node = n
                    else:
                        mid_node = n
                assert first_node is not None and mid_node is not None and \
                    last_node is not None
                assert mid_node.args[0] is first_node
                assert last_node.args[0] is mid_node
                return [last_node, mid_node, first_node]

            if isinstance(cur_match[1][0], Node) and isinstance(cur_match[1][1], Node):
                # (a, b)
                list_of_nodes = cur_match[1]
            elif isinstance(cur_match[1][0], tuple):
                # ((a, b), c)
                node_a, node_b = cur_match[1][0]
                node_c = cur_match[1][1]
                list_of_nodes = _order_nodes(node_a, node_b, node_c)
            elif isinstance(cur_match[1][1], tuple):
                # (a, (b, c))
                node_a, node_b = cur_match[1][1]
                node_c = cur_match[1][0]
                list_of_nodes = _order_nodes(node_a, node_b, node_c)

        # [node_n, ..., node_0], note that the order is reversed
        # to make it chronological for simple subgraphs
        list_of_nodes.reverse()
        subgraphs_dedup[name] = list_of_nodes

    return subgraphs_dedup

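# Illustrative worked example (not part of the original file): for the
# conv -> bn -> relu pattern shown in the comments above, all three per-node
# entries of `matches` share one subgraph, so _get_dedup_subgraphs keeps a
# single entry whose value is the nodes in chronological order, roughly
# {'conv': [conv, bn, relu]}.
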
def _get_logger_for_subgraph(
    model: GraphModule,
    first_node: Node,
    last_node: Node,
    subgraph_idx: int,
    subgraph_candidate_idx: int,
    qconfig_str: str,
    logger_cls: Callable,
    fqn: Optional[str],
) -> torch.nn.Module:
    """
    Given a model and a linear subgraph starting from `first_node` and
    ending with `last_node`, creates a logger for the end of this
    subgraph.
    """
    if fqn is None:
        fqn = ''
    logger_mod_orig = logger_cls(
        first_node.name,  # ref_node_name
        last_node.name,  # prev_node_name
        f'subgraph_{subgraph_idx}_{subgraph_candidate_idx}',  # model_name
        'model',  # ref_name
        get_target_type_str(last_node, model),  # prev_node_target_type
        get_target_type_str(first_node, model),  # ref_node_target_type
        NSSingleResultValuesType.NODE_OUTPUT.value,  # results_type
        0,  # index_within_arg
        0,  # index_of_arg
        fqn,  # fqn
        qconfig_str,
    )
    # Usually we expect the user to add loggers, then calibrate, then convert,
    # and then populate loggers. This is why the loggers start disabled.
    # TODO(future PR): reconsider the design to make this more intuitive.
    logger_mod_orig.enabled = False
    return logger_mod_orig

def create_submodule_from_subgraph(
    model: torch.nn.Module,
    first_node: Node,
    last_node: Node,
) -> GraphModule:
    """
    Input: a model, and a linear subgraph within the model from first_node to
    last_node.

    Output: a new submodule containing a copy of the subgraph, with the inputs
    to the first node becoming the inputs to the submodule, and all other
    nodes in the subgraph being copied.

    Example inputs:

    `model`: a module with graph

      x0 -> op1 -> x1 -> op2 -> x2
             |
            arg1

    `first_node`: op1
    `last_node`: op2

    Example output: a new module with graph

      input1 -> op1_copy -> x1 -> op2_copy -> output1
                   |
                  arg1
    """

    #
    # create a blank GraphModule with an empty graph
    #

    class M(torch.nn.Module):
        def forward(self, x):
            pass

    m = M()
    gm = torch.fx.symbolic_trace(m)
    g = gm.graph
    for node in reversed(gm.graph.nodes):
        g.erase_node(node)

    #
    # modify the graph to have a copy of our subgraph
    #

    cur_node_orig = first_node
    cur_args_orig = cur_node_orig.args
    cur_kwargs_orig = cur_node_orig.kwargs

    cur_name_idx = 0

    iteration_limit = 100
    cur_iteration = 0

    while True:
        if cur_node_orig is first_node:
            # we are at the first node, we need to set up graph inputs
            # TODO(future): some graphs could have placeholders which are unrelated
            # to the first node, need to handle this
            cur_args_copy = []
            cur_kwargs_copy = {}
            seen_names: Set[str] = set()
            old_name_to_new_node: Dict[str, Node] = {}

            def _add_placeholder(
                g: Graph, node: Node, seen_names, old_name_to_new_node
            ):
                # note: for graphs starting with patterns such as `y = x + x`, we
                # need to ensure we do not add multiple placeholders with the
                # same name
                counter = 0
                while node.name + '_' + str(counter) in seen_names:
                    counter += 1
                cur_name = node.name + '_' + str(counter)
                seen_names.add(cur_name)
                placeholder = g.placeholder(cur_name)
                old_name_to_new_node[node.name] = placeholder
                return placeholder

            for arg in cur_node_orig.args:
                if isinstance(arg, Node):
                    p = _add_placeholder(
                        g, arg, seen_names, old_name_to_new_node)
                    cur_args_copy.append(p)
                elif isinstance(arg, (list, tuple)):
                    new_arg = []
                    for inner_arg in arg:
                        if isinstance(inner_arg, Node):
                            new_arg.append(_add_placeholder(
                                g, inner_arg, seen_names, old_name_to_new_node))
                        else:
                            new_arg.append(inner_arg)
                    cur_args_copy.append(new_arg)
                else:
                    cur_args_copy.append(arg)

            # TODO(future PR): handle non-normalized kwargs
            for kwarg_name, kwarg in cur_node_orig.kwargs.items():
                if isinstance(kwarg, Node):
                    cur_kwargs_copy[kwarg_name] = _add_placeholder(
                        g, kwarg, seen_names, old_name_to_new_node)
                elif isinstance(kwarg, (list, tuple)):
                    new_kwarg = []
                    for inner_kwarg in kwarg:
                        p = _add_placeholder(
                            g, inner_kwarg, seen_names, old_name_to_new_node)
                        new_kwarg.append(p)
                    cur_kwargs_copy[kwarg_name] = new_kwarg
                else:
                    cur_kwargs_copy[kwarg_name] = kwarg

            cur_args_copy = tuple(cur_args_copy)  # type: ignore[assignment]
        else:
            # we are not at first node, first arg is from the previous node,
            # and all other args are copied

            # the current implementation is simplistic and cannot handle
            # ops with two or more arguments which need to be passed from
            # the previous op, so we assert them out
            assert cur_node_orig.target not in BINARY_FUNCTIONS

            # at this point in the code, cur_node_copy is pointing to the copy
            # of the previous node
            # TODO(future PR): this is not handling complicated graphs correctly, need to
            # look at actual relationships instead of assuming sequential graph
            # TODO(future PR): this is ignoring kwargs, will need to support kwargs
            # for any fusion pattern which has them for a node that is not the
            # first node.
            cur_args_copy = [cur_node_copy]  # type: ignore[has-type]

            if len(cur_node_orig.args) > 1:
                for arg in cur_node_orig.args[1:]:
                    if isinstance(arg, torch.nn.Parameter):
                        new_arg = arg.clone().detach()  # type: ignore[assignment]
                        mod_name = f"mod_{cur_name_idx}"
                        cur_name_idx += 1
                        setattr(gm, mod_name, new_arg)
                        new_arg_placeholder = gm.placeholder(mod_name)
                        cur_args_copy.append(new_arg_placeholder)
                    elif isinstance(arg, (float, int, torch.dtype)):
                        cur_args_copy.append(arg)
                    else:
                        raise AssertionError(f'arg of type {type(arg)} not handled yet')
            cur_args_copy = tuple(cur_args_copy)  # type: ignore[assignment]

        # copy the node
        if cur_node_orig.op == 'call_module':
            orig_mod = getattr_from_fqn(model, cur_node_orig.target)  # type: ignore[arg-type]
            orig_mod_copy = copy.deepcopy(orig_mod)
            mod_name = f"mod_{cur_name_idx}"
            setattr(gm, mod_name, orig_mod_copy)
            cur_name_idx += 1
            cur_node_copy = g.call_module(mod_name, cur_args_copy, cur_kwargs_copy)

        elif cur_node_orig.op == 'call_function':
            cur_node_copy = g.call_function(
                cur_node_orig.target, cur_args_copy, cur_kwargs_copy)

        elif cur_node_orig.op == 'call_method':
            cur_node_copy = g.call_method(
                cur_node_orig.target, cur_args_copy, cur_kwargs_copy)

        else:
            raise AssertionError(f'{cur_node_orig.op} not supported yet')

        if cur_node_orig is last_node:
            break

        # go to next node
        assert len(cur_node_orig.users.keys()) == 1, \
            f'{cur_node_orig} has more than 1 users, not supported yet'
        cur_node_orig = next(iter(cur_node_orig.users.keys()))
        cur_args_orig = cur_node_orig.args
        cur_kwargs_orig = cur_node_orig.kwargs

        cur_iteration += 1
        if cur_iteration > iteration_limit:
            raise AssertionError('iteration limit exceeded')

    # set up outputs
    g.output(cur_node_copy)

    gm.recompile()
    return gm

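# Illustrative sketch (not part of the original file): extracting a two-node
# chain as a standalone module, assuming hypothetical nodes `op1_node` and
# `op2_node` located in a traced model `gm` (e.g. by iterating gm.graph.nodes).
#
#     wrapper = create_submodule_from_subgraph(gm, op1_node, op2_node)
#     out = wrapper(example_input)  # placeholders mirror op1's inputs
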
def create_one_transformed_and_logged_copy_of_subgraph(
    mt: GraphModule,
    subgraph_idx: int,
    subgraph_candidate_idx: int,
    first_node: Node,
    last_node: Node,
    fqn: Optional[str],
    list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]],
    example_inputs: Any,
    last_added_shadow_node_list: List[Optional[Node]],
    custom_prepare_fn: Optional[Callable] = None,
    custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
    """
    Given a subgraph in `mt` and a subgraph candidate idx, inserts the
    subgraph candidate copy and instruments it with loggers.

    If subgraph_candidate_idx is 0, this is the baseline fp32 subgraph and we just
    add a logger to the end.

    If subgraph_candidate_idx is not 0, we create a copy of the subgraph and
    prepare it with `prepare_fx`.
    """

    # TODO(future PR): move logger classes to utils to remove circular dependency
    from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger

    if subgraph_candidate_idx == 0:
        # idx = 0 is the floating point (original) version of the subgraph
        # We keep the subgraph as is, and add a logger at the end

        qconfig_str = ''
        logger_mod_orig = _get_logger_for_subgraph(
            mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx,
            qconfig_str, OutputLogger, fqn)

        attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx)
        assert not hasattr(mt, attr_name)
        setattr(mt, attr_name, logger_mod_orig)
        with mt.graph.inserting_after(last_node):
            new_node = mt.graph.call_module(attr_name, args=(last_node,), kwargs={})
            last_added_shadow_node_list[0] = new_node

    else:
        # idx > 0 means we have a candidate qconfig to try, so we need
        # to make a copy of the subgraph, feed it with the right inputs,
        # and add a logger at the end

        # get the qconfig
        # subtract one because the first candidate is the floating point
        # version of the subgraph
        node_name_to_qconfig = \
            list_of_node_name_to_qconfig[subgraph_candidate_idx - 1]
        qconfig = node_name_to_qconfig[first_node.name]

        # if no quantization is requested, skip
        # TODO(future PR): deduplicate equivalent qconfigs that come from
        # different qconfig mapping objects
        if qconfig is None:
            return

        qconfig_mapping = QConfigMapping().set_global(qconfig)

        # create a copy of the submodule, wrapped in a separate module
        orig_mod_copy_wrapped = create_submodule_from_subgraph(
            mt, first_node, last_node)

        # add a call to prepare_fx on the wrapper module
        if custom_prepare_fn is None:
            orig_mod_copy_wrapped = torch.ao.quantization.quantize_fx.prepare_fx(
                orig_mod_copy_wrapped, qconfig_mapping, example_inputs=example_inputs)
        else:
            if custom_prepare_kwargs is None:
                custom_prepare_kwargs = {}
            for kwarg_name in ["example_inputs", "prepare_custom_config", "qconfig_mapping"]:
                assert kwarg_name not in custom_prepare_kwargs, f"cannot specify {kwarg_name} in custom_prepare_kwargs"
            prepare_kwargs: Dict[str, Any] = {
                "example_inputs": example_inputs,
                "qconfig_mapping": qconfig_mapping
            }
            prepare_kwargs.update(custom_prepare_kwargs)
            orig_mod_copy_wrapped = custom_prepare_fn(
                orig_mod_copy_wrapped,
                **prepare_kwargs)

        # attach the wrapper to the model
        attr_name = _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx)
        assert not hasattr(mt, attr_name)
        setattr(mt, attr_name, orig_mod_copy_wrapped)

        # add a call to the wrapper module from the parent graph
        insert_after_node = last_added_shadow_node_list[0]
        with mt.graph.inserting_after(insert_after_node):
            # TODO(future PR): handle fusion patterns where non-first nodes
            # need inputs

            # pass in all node args and kwargs

            new_args = []
            for arg in first_node.args:
                if isinstance(arg, Node):
                    new_args.append(arg)
                elif isinstance(arg, (list, tuple)) and len(arg) and isinstance(arg[0], Node):
                    for inner_arg in arg:
                        if isinstance(inner_arg, Node):
                            new_args.append(inner_arg)

            new_kwargs = {}
            for name, old_kwarg in first_node.kwargs.items():
                if isinstance(old_kwarg, Node):
                    new_kwargs[name] = old_kwarg
                elif isinstance(old_kwarg, (list, tuple)) and len(old_kwarg):
                    for inner_old_kwarg in old_kwarg:
                        # TODO(future PR): clarify why we are adding kwargs to args
                        new_args.append(inner_old_kwarg)

            new_args = tuple(new_args)  # type: ignore[assignment]

            new_node = mt.graph.call_module(
                attr_name, args=new_args, kwargs=new_kwargs)

        # add a logger to parent graph to observe the shadow wrapper
        logger_mod_orig = _get_logger_for_subgraph(
            mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx,
            str(qconfig), OutputComparisonLogger, fqn)

        attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx)
        assert not hasattr(mt, attr_name)
        setattr(mt, attr_name, logger_mod_orig)
        with mt.graph.inserting_after(new_node):
            logger = mt.graph.call_module(attr_name, args=(new_node, last_node), kwargs={})
            last_added_shadow_node_list[0] = logger

    mt.recompile()

def create_n_transformed_and_logged_copies_of_subgraph(
    mt: GraphModule,
    subgraph_idx: int,
    match_name: str,
    nodes_in_this_subgraph: List[Any],
    qconfig_mappings: List[QConfigMapping],
    list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]],
    custom_prepare_fn: Optional[Callable] = None,
    custom_prepare_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
    """
    Given a model `mt` and a subgraph_idx, creates the needed copies
    of the subgraph for all qconfigs, and instruments them with loggers.
    """
    # for now, assume that
    # 1. the first node has one input
    # 2. the last node has one output

    # for now, ignore all subgraphs that contain non-nodes (tuples, etc)
    # TODO(future PR): implement this
    if any(
        not isinstance(node, Node)
        for node in nodes_in_this_subgraph
    ):
        return

    first_node = nodes_in_this_subgraph[0]
    last_node = nodes_in_this_subgraph[-1]
    # We used output propagation to populate example values on each
    # node. Use the example values from the previous node as the input
    # to the current node.
    prev_node = get_normalized_nth_input(first_node, mt, 0)
    if isinstance(prev_node, list):
        example_inputs = [x.traced_result for x in prev_node]
    elif isinstance(prev_node, tuple):
        example_inputs = (x.traced_result for x in prev_node)  # type: ignore[assignment]
    else:
        # currently some customer models do not have a traced_result in
        # every node, so we have to guard for this case since we cannot
        # quantize without an example input
        # TODO(future PR): add a test case for this once we have an easy
        # repro, see https://github.com/pytorch/pytorch/pull/80521/files#r975940489
        # for additional context
        if hasattr(prev_node, 'traced_result'):
            example_inputs = (prev_node.traced_result,)  # type: ignore[attr-defined, assignment]
        else:
            print(
                'unable to get example input for node ' +
                f'{first_node.format_node()}, skipping')
            return

    # If there are no quantization configs for this subgraph, skip adding
    # loggers. This reduces memory usage for models where not all layers are
    # quantized.
    # TODO(future): consider making this configurable
    found_at_least_one_qconfig = False
    for subgraph_candidate_idx in range(len(qconfig_mappings) + 1):

        if subgraph_candidate_idx == 0:
            # fp32 baseline does not need a qconfig
            continue

        # a. we have N shadows, so len(qconfig_mappings) is N
        # b. we will have the fp32 layer + N shadows, so overall number of
        #    (original_op) + (*shadows) will be N+1
        # c. since `subgraph_candidate_idx` represents (b), we need
        #    to subtract 1 to query from (a)
        node_name_to_qconfig = \
            list_of_node_name_to_qconfig[subgraph_candidate_idx - 1]
        qconfig = node_name_to_qconfig[first_node.name]
        if qconfig is not None:
            found_at_least_one_qconfig = True
            break
    if not found_at_least_one_qconfig:
        print('unable to find at least one qconfig for node ' +
              f'{first_node.format_node()}, skipping')
        return

    fqn = _maybe_get_fqn(first_node, mt)

    # We want the results to contain the subgraphs in natural order,
    # and the graph to also contain shadow wrappers and shadow loggers
    # in natural order.
    # If we just iterate in reverse, the graph will be in natural
    # order but the eventual results will be in reverse order.
    # So, we keep track of the last shadow logger we added and
    # always insert after it.
    last_added_shadow_node_list: List[Optional[Node]] = [None]
    for subgraph_candidate_idx in range(len(qconfig_mappings) + 1):

        create_one_transformed_and_logged_copy_of_subgraph(
            mt, subgraph_idx, subgraph_candidate_idx, first_node,
            last_node, fqn, list_of_node_name_to_qconfig,
            example_inputs, last_added_shadow_node_list, custom_prepare_fn,
            custom_prepare_kwargs)

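# Illustrative note (not part of the original file): with N entries in
# `qconfig_mappings`, both loops above iterate subgraph_candidate_idx over
# 0..N, where 0 is the fp32 baseline and candidate k (k >= 1) reads its
# qconfig from list_of_node_name_to_qconfig[k - 1], hence the `- 1` in both
# lookups.
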
def create_add_loggers_graph(
    model: GraphModule,
    subgraphs_dedup: Dict[str, List[Node]],
    qconfig_mapping: QConfigMapping,
    node_name_to_qconfig: Dict[str, QConfigAny],
) -> None:
    r"""
    Given a model, a model graph partition (currently a set of matched
    subgraphs) and instructions how to transform each subgraph
    (currently quantizing it according to qconfig_mapping), modifies
    the model graph to create an alternate path through the original graph,
    with each of the subgraphs quantized. This is useful to compare
    propagation error of a transformation such as quantization.

    For example, given layer op0 and op1, there are four cases when handling op1:
    1. op0 and op1 quantized
    2. op0 and op1 unquantized
    3. op0 quantized, op1 unquantized
    4. op0 unquantized, op1 quantized

    Example input, case 1:

    .. code::

      x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log       # noqa: W605
        \                \                  \               \
          ---> op0_1 -> x1_1 ----> clog      op1_1 -> x2_1 ----> clog

    Example output, case 1:

    .. code::

      x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log       # noqa: W605
        \                \                                  \
          ---> op0_1 -> x1_1 ----> clog -> op1_1 -> x2_1 ----> clog

    """
    # TODO(future PR): move logger classes to utils to remove circular dependency
    from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger

    def _get_subgraph_containing_node(node, subgraphs_dedup):
        for subgraph in subgraphs_dedup.values():
            if node in subgraph:
                return subgraph
        return None

    # First, we need to create shadow branches, going from
    #
    #   x0 -> op0 -> x1 -> ...
    #
    # to
    #
    #   x0 -> op0_0 -> x1_0 -> log -> ...
    #    \                      \
    #      -> op0_1 -> x1_1 -> clog
    #
    # Later, the outputs of each shadow will be rerouted to calculate
    # propagation error.

    # Note: we cannot iterate over matched subgraphs because some nodes
    # may not be matched. So, we iterate over nodes in the graph, and
    # associate them to matched subgraphs if possible.

    nodes_to_skip = set()
    # for each subgraph, save a mapping from first node of subgraph
    # to first and last node of the shadow of this subgraph
    orig_first_node_to_shadow_in_node = {}
    orig_first_node_to_shadow_out_node = {}
    # need to record original list because we will mutate the graph as we go
    orig_nodes = list(model.graph.nodes)  # type: ignore[union-attr, arg-type]
    cur_subgraph_idx = 0
    for n in orig_nodes:
        if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip:
            continue

        maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup)
        insert_submodule_copy = False
        if maybe_subgraph is not None:
            first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1]
            for node_to_skip in maybe_subgraph:
                nodes_to_skip.add(node_to_skip)
            qconfig = node_name_to_qconfig[first_node.name]
            if qconfig is not None:
                insert_submodule_copy = True
        else:
            first_node, last_node = n, n

        if insert_submodule_copy:
            match_name = first_node.name
            create_n_transformed_and_logged_copies_of_subgraph(
                model, cur_subgraph_idx, match_name, maybe_subgraph,
                [qconfig_mapping], [node_name_to_qconfig],
                None, None  # type: ignore[arg-type]
            )
            # find the created shadow module and record it so we
            # can find it easily in step 2
            expected_shadow_target = f"shadow_wrapper_{cur_subgraph_idx}_1"
            new_shadow_mod = None
            for maybe_shadow_mod in model.graph.nodes:
                if maybe_shadow_mod.op == 'call_module' and \
                        maybe_shadow_mod.target == expected_shadow_target:
                    new_shadow_mod = maybe_shadow_mod
                    break
            assert new_shadow_mod is not None
            orig_first_node_to_shadow_in_node[first_node] = new_shadow_mod
            orig_first_node_to_shadow_out_node[first_node] = new_shadow_mod

        else:
            # create a copy of the subgraph by only copying FX nodes
            # but not copying any parameters, to minimize memory usage
            subgraph_to_use = maybe_subgraph if maybe_subgraph is not None \
                else [first_node]

            # add a regular logger after last_node
            qconfig_str = ''
            subgraph_candidate_idx = 0
            fqn = _maybe_get_fqn(first_node, model)
            logger_mod_orig = _get_logger_for_subgraph(
                model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx,
                qconfig_str, OutputLogger, fqn)
            attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx)
            assert not hasattr(model, attr_name)
            setattr(model, attr_name, logger_mod_orig)
            insertion_point = last_node
            with model.graph.inserting_after(insertion_point):
                logger = model.graph.call_module(
                    attr_name, args=(last_node,), kwargs={})
                insertion_point = logger

            # create a copy of the subgraph
            cur_node_orig = first_node
            cur_node_copy = None
            first_node_copy = None
            while cur_node_orig in subgraph_to_use:
                # TODO(future PR): make this support all possible args/kwargs
                if cur_node_orig is first_node:
                    new_args = cur_node_orig.args
                    new_kwargs = cur_node_orig.kwargs
                else:
                    first_arg_for_copy = cur_node_copy
                    new_args = tuple([first_arg_for_copy, *cur_node_orig.args[1:]])  # noqa: C409
                    new_kwargs = cur_node_orig.kwargs
                # make a copy of cur_node_orig
                with model.graph.inserting_after(insertion_point):
                    cur_node_copy = model.graph.create_node(
                        cur_node_orig.op,
                        cur_node_orig.target,
                        new_args,
                        new_kwargs,
                        # cur_node_orig.name,  # TODO(future PR): set name explicitly
                    )
                    if first_node_copy is None:
                        first_node_copy = cur_node_copy
                # since now only linear subgraphs are supported, all nodes
                # except the last one must have only one user
                if cur_node_orig != last_node:
                    assert len(cur_node_orig.users.keys()) == 1
                cur_node_orig = next(iter(cur_node_orig.users.keys()))
                assert not cur_node_orig.name.startswith(SHADOW_NODE_NAME_PREFIX)
                insertion_point = cur_node_copy

            # add a comparison logger after last_node's copy
            subgraph_candidate_idx = 1
            logger_mod_orig = _get_logger_for_subgraph(
                model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx,
                qconfig_str, OutputComparisonLogger, fqn)
            attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx)
            assert not hasattr(model, attr_name)
            setattr(model, attr_name, logger_mod_orig)
            with model.graph.inserting_after(insertion_point):
                logger = model.graph.call_module(
                    attr_name, args=(cur_node_copy, last_node), kwargs={})

            # save the final node so we can use it in step 2
            orig_first_node_to_shadow_in_node[first_node] = first_node_copy
            orig_first_node_to_shadow_out_node[first_node] = cur_node_copy

        cur_subgraph_idx += 1

    model.recompile()

    # Now, we go from
    #
    #   x0 -> op0_0 -> x1_0 -> log -> x1 -> op1_0 -> ...
    #    \                      \           \
    #      -> op0_1 -> x1_1 -> clog           op1_1 -> ...
    #
    # to
    #
    #   x0 -> op0_0 -> x1_0 -> log --> x1_0 -> op1_0 -> ...
    #    \                      \
    #      -> op0_1 -> x1_1 -> clog -> x1_1 -> op1_1 -> ...
    #
    # sample values of key internal variables for the example above:
    #
    #   orig_first_node_to_shadow_in_node = {op0_0: op0_1, op1_0: op1_1}
    #   orig_first_node_to_shadow_out_node = {op0_0: op0_1, op1_0: op1_1}
    #
    # note: for subgraphs with more than one node, in_node will be different
    # compared to out_node


    nodes_to_skip = set()
    for n in orig_nodes:
        if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip:
            continue

        maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup)
        if maybe_subgraph is not None:
            first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1]
            for node_to_skip in maybe_subgraph:
                nodes_to_skip.add(node_to_skip)
        else:
            first_node, last_node = n, n

        def maybe_remap_node_to_shadow(node):
            """
            If unshadowed `node` has a shadow version, return that. If not,
            return `node`.
            """
            if not isinstance(node, Node):
                # handle scalars
                return node

            if node.op in ('placeholder', 'get_attr'):
                return node

            # Find the shadowed version of this arg from the previous
            # subgraph. For this, we need to:
            # 1. navigate to the first node of the previous subgraph
            # 2. get the output of the shadow wrapper which has (1) as an input

            # For now, assume the arg is in matched subgraphs. In the
            # future we may have to handle the case where this is not true.
            prev_subgraph = _get_subgraph_containing_node(
                node, subgraphs_dedup)
            if prev_subgraph is None:
                prev_subgraph = [node]
            prev_first_node = prev_subgraph[0]
            prev_shadow_output = \
                orig_first_node_to_shadow_out_node[prev_first_node]
            return prev_shadow_output

        cur_shadow_input = \
            orig_first_node_to_shadow_in_node[first_node]
        assert cur_shadow_input is not None
        cur_shadow_input.args = tree_map(
            maybe_remap_node_to_shadow, cur_shadow_input.args)
        cur_shadow_input.kwargs = tree_map(
            maybe_remap_node_to_shadow, cur_shadow_input.kwargs)

    model.recompile()

920 |
+
def _get_weight_info_from_shadow_wrapper(shadow_wrapper: torch.nn.Module):
|
921 |
+
# input: shadow wrapper module
|
922 |
+
# output if shadow wrapper module has a weighted op:
|
923 |
+
# (quantize_fn, (quantize_fn_args))
|
924 |
+
# output if shadow wrapper module doesn't have a weighted op:
|
925 |
+
# None
|
926 |
+
|
927 |
+
# For now, assume that the weight is the second input
|
928 |
+
# to the shadow module. If that changes, we can fix it later.
|
929 |
+
placeholders_seen = 0
|
930 |
+
for shadow_n in shadow_wrapper.graph.nodes: # type: ignore[union-attr]
|
931 |
+
if shadow_n.op != 'placeholder':
|
932 |
+
continue
|
933 |
+
|
934 |
+
placeholders_seen += 1
|
935 |
+
if placeholders_seen != 2:
|
936 |
+
continue
|
937 |
+
|
938 |
+
# the subgraph looks like
|
939 |
+
#
|
940 |
+
# _input_scale_1 = self._input_scale_1
|
941 |
+
# _input_zero_point_1 = self._input_zero_point_1
|
942 |
+
# quantize_per_channel = torch.quantize_per_channel(
|
943 |
+
# w2_0, _input_scale_1, _input_zero_point_1,
|
944 |
+
# 0, torch.qint8)
|
945 |
+
#
|
946 |
+
# we have `w2_0`, and are navigating this subgraph
|
947 |
+
# to get `_input_scale_1` and `_input_zero_point_1`
|
948 |
+
|
949 |
+
assert len(shadow_n.users) == 1
|
950 |
+
quant_node = next(iter(shadow_n.users.keys()))
|
951 |
+
new_args: Any = None
|
952 |
+
if quant_node.target == torch.quantize_per_channel:
|
953 |
+
_weight, scale_node, zp_node, axis, dtype = quant_node.args
|
954 |
+
scale_val = getattr_from_fqn(
|
955 |
+
shadow_wrapper, scale_node.target)
|
956 |
+
zp_val = getattr_from_fqn(
|
957 |
+
shadow_wrapper, zp_node.target)
|
958 |
+
new_args = (scale_val, zp_val, axis, dtype)
|
959 |
+
else:
|
960 |
+
assert quant_node.target == torch.quantize_per_tensor
|
961 |
+
_weight, scale_node, zp_node, dtype = quant_node.args
|
962 |
+
scale_val = getattr_from_fqn(
|
963 |
+
shadow_wrapper, scale_node.target)
|
964 |
+
zp_val = getattr_from_fqn(
|
965 |
+
shadow_wrapper, zp_node.target)
|
966 |
+
new_args = (scale_val, zp_val, dtype)
|
967 |
+
return (quant_node.target, new_args)
|
968 |
+
|
969 |
+
return None
|
970 |
+
|
971 |
+
|
972 |
+
def extract_weight_comparison(m: GraphModule) -> NSResultsType:
|
973 |
+
|
974 |
+
# example graph:
|
975 |
+
#
|
976 |
+
# w1 = self.w1
|
977 |
+
# b1 = self.b1
|
978 |
+
# linear = torch._C._nn.linear(x, w1, b1)
|
979 |
+
# shadow_0_0 = self.shadow_0_0(linear)
|
980 |
+
# shadow_wrapper_0_1 = self.shadow_wrapper_0_1(x, w1, b1)
|
981 |
+
# shadow_0_1 = self.shadow_0_1(shadow_wrapper_0_1, linear)
|
982 |
+
#
|
983 |
+
# algorithm:
|
984 |
+
# 1. for each call_function node matching our allowlist:
|
985 |
+
# 2. if corresponding shadow wrapper exists, extract the weight pair
|
986 |
+
#
|
987 |
+
# Note: this is not super robust, but that's ok because this is
|
988 |
+
# just for legacy customers who depend on the previous two-model version
|
989 |
+
# of this API. TBD if we need to make this robust.
|
990 |
+
# Note: modules are not supported, since existing customers only
|
991 |
+
# use functions.
|
992 |
+
|
993 |
+
# TODO(future PR): move this to config
|
994 |
+
weighted_ops = {
|
995 |
+
torch.nn.functional.linear,
|
996 |
+
}
|
997 |
+
|
998 |
+
results: NSResultsType = {
|
999 |
+
'model': {NSSingleResultValuesType.WEIGHT.value: {}}
|
1000 |
+
}
|
1001 |
+
|
1002 |
+
for n in m.graph.nodes: # type: ignore[union-attr]
|
1003 |
+
if not (n.op == 'call_function' and n.target in weighted_ops):
|
1004 |
+
continue
|
1005 |
+
|
1006 |
+
# Check if we have a corresponding shadow wrapper
|
1007 |
+
# TODO(future PR, if needed): support kwargs
|
1008 |
+
# TODO(future PR, if needed): support multiple shadow users
|
1009 |
+
first_arg = n.args[0]
|
1010 |
+
shadow_wrapper_node = None
|
1011 |
+
for user in first_arg.users:
|
1012 |
+
# TODO(before land): fix string match
|
1013 |
+
if user.op == 'call_module' and \
|
1014 |
+
user.target.startswith('shadow_wrapper'):
|
1015 |
+
shadow_wrapper_node = user
|
1016 |
+
break
|
1017 |
+
|
1018 |
+
if shadow_wrapper_node is None:
|
1019 |
+
continue
|
1020 |
+
|
1021 |
+
shadow_wrapper = getattr_from_fqn(
|
1022 |
+
m, shadow_wrapper_node.target) # type: ignore[arg-type]
|
1023 |
+
weight_info = _get_weight_info_from_shadow_wrapper(
|
1024 |
+
shadow_wrapper)
|
1025 |
+
if weight_info is None:
|
1026 |
+
continue
|
1027 |
+
|
1028 |
+
# get weight
|
1029 |
+
w_node = n.args[1]
|
1030 |
+
w_obj = getattr_from_fqn(m, w_node.target).detach()
|
1031 |
+
|
1032 |
+
# get a quantized version of weight
|
1033 |
+
quant_fn, quant_fn_args_except_first = weight_info
|
1034 |
+
new_args = (w_obj, *quant_fn_args_except_first)
|
1035 |
+
w_obj_q = quant_fn(*new_args)
|
1036 |
+
|
1037 |
+
# add a comparison
|
1038 |
+
ref_node_name = n.name
|
1039 |
+
prev_node_name = n.name
|
1040 |
+
ref_node_type = get_target_type_str(n, m)
|
1041 |
+
prev_node_type = ref_node_type
|
1042 |
+
fqn = None
|
1043 |
+
if hasattr(m, '_node_name_to_scope'):
|
1044 |
+
fqn = m._node_name_to_scope[n.name][0] # type: ignore[index]
|
1045 |
+
comparison = torch.ao.ns.fx.utils.compute_sqnr(w_obj, w_obj_q)
|
1046 |
+
result_fp32 = {
|
1047 |
+
'res_type': NSSingleResultValuesType.WEIGHT.value,
|
1048 |
+
'values': [w_obj],
|
1049 |
+
'prev_node_name': prev_node_name,
|
1050 |
+
'prev_node_target_type': prev_node_type,
|
1051 |
+
'ref_node_name': ref_node_name,
|
1052 |
+
'ref_node_target_type': ref_node_type,
|
1053 |
+
'index_within_arg': 0,
|
1054 |
+
'index_of_arg': 0,
|
1055 |
+
'fqn': fqn,
|
1056 |
+
'qconfig_str': '',
|
1057 |
+
'comparisons': [comparison],
|
1058 |
+
'comparison_fn_name': 'sqnr',
|
1059 |
+
}
|
1060 |
+
result_q = {
|
1061 |
+
'res_type': NSSingleResultValuesType.WEIGHT.value,
|
1062 |
+
'values': [w_obj_q],
|
1063 |
+
'prev_node_name': prev_node_name,
|
1064 |
+
'prev_node_target_type': prev_node_type,
|
1065 |
+
'ref_node_name': ref_node_name,
|
1066 |
+
'ref_node_target_type': ref_node_type,
|
1067 |
+
'index_within_arg': 0,
|
1068 |
+
'index_of_arg': 0,
|
1069 |
+
'fqn': fqn,
|
1070 |
+
'qconfig_str': '',
|
1071 |
+
'comparisons': [comparison],
|
1072 |
+
'comparison_fn_name': 'sqnr',
|
1073 |
+
}
|
1074 |
+
|
1075 |
+
# go from subgraph_n_1 to subgraph_n_0
|
1076 |
+
_1, _2, node_idx, _3 = shadow_wrapper_node.target.split('_')
|
1077 |
+
name_fp32 = f"subgraph_{node_idx}_0"
|
1078 |
+
name_q = f"subgraph_{node_idx}_1"
|
1079 |
+
|
1080 |
+
results['model'][NSSingleResultValuesType.WEIGHT.value][name_fp32] = \
|
1081 |
+
[result_fp32]
|
1082 |
+
results['model'][NSSingleResultValuesType.WEIGHT.value][name_q] = \
|
1083 |
+
[result_q]
|
1084 |
+
|
1085 |
+
return results
|
1086 |
+
|
# TODO(future PR): redesign this to make it easier to consume outputs
def group_results_by_subgraph(results: NSResultsType) -> Any:
    """
    Creates a comparison of results

    Input:

    {
      'model': {
        'node_output': {
          'subgraph_0_0': [
            'values': [torch.tensor(...), ...], ...
            'ref_node_name': ...,
            'ref_node_target_type': ...,
            'qconfig_str': ...,
            'comparisons': [], ...
            'comparison_fn_name': '',
            'fqn': '...',
          ],
          'subgraph_0_1': [
            'values': [torch.tensor(...), ...], ...
            'ref_node_name': ...,
            'ref_node_target_type': ...,
            'qconfig_str': ...,
            'comparisons': [torch.tensor(...), ...], ...
            'comparison_fn_name': '...',
            'fqn': '...',
          ],
          ...
        },
      },
    }

    Output:
    {
      'subgraph_0': {
        '0': {
          'ref_node_name': '...',
          'ref_node_target_type': ...,
          'values': [torch.tensor(...), ...],
          'qconfig_str': None,
          'comparisons': [torch.tensor(...), ...], ...
          'comparison_fn_name': '...',
          'fqn': '...',
        },
        '1': {
          'ref_node_name': '...',
          'ref_node_target_type': ...,
          'values': [torch.tensor(...), ...],
          'qconfig_str': '...',
          'comparisons': [torch.tensor(...), ...], ...
          'comparison_fn_name': '...',
          'fqn': '...',
        },
      },
    }

    """
    subgraph_name_to_subgraph_results: Any = collections.defaultdict(dict)

    # node_output or weight
    key_to_use = next(iter(results['model'].keys()))

    for subgraph_name_with_idx, subgraph_candidate_results in \
            results['model'][key_to_use].items():

        # convert from `subgraph_m_n` to `subgraph_m` and `n`
        subgraph_str, subgraph_idx, subgraph_candidate_idx = \
            subgraph_name_with_idx.split('_')
        subgraph_name = f'{subgraph_str}_{subgraph_idx}'

        subgraph_results = {
            'ref_node_name': subgraph_candidate_results[0]['ref_node_name'],
            'ref_node_target_type': subgraph_candidate_results[0]['ref_node_target_type'],
            'fqn': subgraph_candidate_results[0]['fqn'],
            'values': subgraph_candidate_results[0]['values'],
            'qconfig_str': subgraph_candidate_results[0]['qconfig_str'],
            'comparisons': subgraph_candidate_results[0]['comparisons'],
            'comparison_fn_name': subgraph_candidate_results[0]['comparison_fn_name'],
        }

        subgraph_name_to_subgraph_results[subgraph_name][subgraph_candidate_idx] = \
            subgraph_results

    return dict(subgraph_name_to_subgraph_results)

# TODO(future PR): redesign this to make it easier to consume outputs
def create_results_comparison(
    results_grouped,
) -> Any:
    """
    Input:

    {
      'subgraph_0': {
        '0': {
          'ref_node_name': '...',
          'ref_node_target_type': ...,
          'values': [torch.tensor(...), ...],
          'qconfig_str': '',
          'comparisons': [],
          'comparison_fn_name': '',
          'fqn': '...',
        },
        '1': {
          'ref_node_name': '...',
          'ref_node_target_type': ...,
          'values': [torch.tensor(...), ...],
          'qconfig_str': '...',
          'comparisons': [torch.tensor(...), ...],
          'comparison_fn_name': 'sqnr',
          'fqn': '...',
        },
      },
    }

    Output:
    {
      'subgraph_0': {
        'ref_node_name': '...',
        'ref_node_target_type': '...',
        'fqn': '...',
        'candidates': {
          '1': {
            'qconfig_str': ...,
            'comparison_fn_name': 'sqnr',
            'cmp_raw': [..., ...],
            'cmp_mean': ...,
          },
          ...,
        },
      },
    }
    """

    results_comparison = {}

    for subgraph_name, subgraph_results in results_grouped.items():

        candidates = {}
        for subgraph_inner_name, subgraph_inner_result in subgraph_results.items():
            # skip comparing baseline to baseline
            if subgraph_inner_name == '0':
                continue

            # we expect the comparisons to be precalculated from
            # calibration, so we just fetch them here
            cmp_raw = subgraph_inner_result['comparisons']
            cmp_raw_tensor = torch.stack(cmp_raw)

            candidates[subgraph_inner_name] = {
                'qconfig_str': subgraph_inner_result['qconfig_str'],
                'comparison_fn_name': subgraph_inner_result['comparison_fn_name'],
                'cmp_raw': cmp_raw_tensor,
                'cmp_mean': torch.mean(cmp_raw_tensor),
            }

        results_comparison[subgraph_name] = {
            'ref_node_name': subgraph_results['0']['ref_node_name'],
            'ref_node_target_type': subgraph_results['0']['ref_node_target_type'],
            'fqn': subgraph_results['0']['fqn'],
            'candidates': candidates,
        }

    return results_comparison

# TODO(future PR): redesign this to make it easier to consume outputs
def print_n_shadows_summary(
    results_comparison,
) -> None:
    """
    Input:

    {
      'subgraph_0': {
        'ref_node_name': 'linear1',
        'ref_node_target_type': '...',
        'fqn': '...',
        'candidates': {
          '1': {
            'qconfig_str': ...,
            'comparison_fn_name': ...,
            'cmp_raw': [45.0, 55.0],
            'cmp_mean': 50.0,
          },
          ...,
        },
      },
    }

    Prints:

    node_name | node_type | fqn | 0    | 1    | ...
    linear1   | ...       | ... | 45.0 | 50.0 | ...
    """

    try:
        from tabulate import tabulate
    except ImportError:
        print("`print_tabular` relies on the library `tabulate`, "
              "which could not be found on this machine. Run `pip "
              "install tabulate` to install the library.")
        return

    results = []
    for subgraph_data in results_comparison.values():
        mean_all_candidates = [
            candidate['cmp_mean']
            for candidate_name, candidate in subgraph_data['candidates'].items()
        ]

        data_row = [
            subgraph_data['ref_node_name'],
            subgraph_data['ref_node_target_type'],
            subgraph_data['fqn'],
            *mean_all_candidates,
        ]
        results.append(data_row)

    max_candidate_idx_len = -1
    for data_row in results:
        max_candidate_idx_len = max(max_candidate_idx_len, len(data_row[1]))
    candidate_idx_headers = [str(x) for x in range(max_candidate_idx_len)]

    headers = ['node_name', 'node_type', 'fqn', *candidate_idx_headers]
    print(tabulate(results, headers=headers))
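# Illustrative sketch (not part of the original file): the three result
# helpers above compose into a reporting pipeline, given `results` of type
# NSResultsType collected from the model's loggers:
#
#     grouped = group_results_by_subgraph(results)
#     comparison = create_results_comparison(grouped)
#     print_n_shadows_summary(comparison)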
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py
ADDED
@@ -0,0 +1,64 @@
import enum
from typing import NamedTuple

from torch.fx.graph import Node

from typing import Dict, Any, List, Union, Callable

class NSSingleResultValuesType(str, enum.Enum):
    WEIGHT = 'weight'
    NODE_OUTPUT = 'node_output'
    NODE_INPUT = 'node_input'

class NSSubgraph(NamedTuple):
    start_node: Node
    end_node: Node
    base_op_node: Node

# TODO(future PR): see if we can use typing_extensions's TypedDict instead
# to properly type the various keys
# {
#   # one of NSSingleResultValuesType
#   'type': 'weight',
#   # the values of type specified above
#   'values': [torch.tensor(...), ...],
#   # name of the node directly before the logger
#   'prev_node_name': 'linear1',
#   # type of the underlying function or module
#   'prev_node_target_type': torch.nn.functional.linear  # or torch.nn.Linear, etc
#   # name of the node responsible for adding this logger
#   # Note: this may differ from prev_node_name if we are logging inputs
#   'ref_node_name': 'linear1',
#   # index of this node within the arg of the input/output node
#   # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
#   'index_within_arg': 0,
#   # index of this node within the args of the input/output node
#   # for example, in add(x1, x2), x2 would have index_of_arg == 1
#   'index_of_arg': 0,
#   # precomputed comparisons of logger values to reference values
#   'comparisons': [torch.tensor(...), ...]
#   # name of function used for precomputed comparisons
#   'comparison_fn_name': 'sqnr',
#   # string representation of qconfig responsible for creating this logger
#   'qconfig_str': 'QConfig(...)',
# }
NSSingleResultType = Dict[str, Any]

# {
#   'layer_name_1': {  # subgraph name
#     'node_output': {  # results type (node_output, node_input, weight)
#       'model_name_a':  # model name
#          [NSSingleResultType, ...],  # results, ordered by index_within_arg
#       'model_name_b':
#          [NSSingleResultType, ...],
#     },
#   },
# }
#
NSResultsType = Dict[str, Dict[str, Dict[str, List[NSSingleResultType]]]]

# Defines the underlying target type of a node, for example:
# `F.conv1d` for a `call_function` conv node
# `nn.Conv1d` for a `call_module` node calling the forward of a `nn.Conv1d` module
# `'sigmoid'` for a `call_method` node calling `x.sigmoid()`
NSNodeTargetType = Union[Callable, str]
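To make the commented schemas concrete, here is a minimal sketch (not part of the uploaded file) of an `NSResultsType` value; every name and number is a hypothetical placeholder:

import torch
from torch.ao.ns.fx.ns_types import NSResultsType

example: NSResultsType = {
    'layer_name_1': {            # subgraph name
        'node_output': {         # results type
            'model_name_a': [{   # one NSSingleResultType per logged value
                'type': 'node_output',
                'values': [torch.randn(4)],
                'prev_node_name': 'linear1',
                'prev_node_target_type': 'torch.nn.Linear',
                'ref_node_name': 'linear1',
                'index_within_arg': 0,
                'index_of_arg': 0,
            }],
        },
    },
}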
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py
ADDED
@@ -0,0 +1,200 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized

from torch.fx import GraphModule
from torch.fx.graph import Node

from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers
from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSNodeTargetType
from torch.ao.quantization import (
    ObserverBase,
    FakeQuantizeBase,
)

from typing import Dict, Tuple, Set, Callable, Any, Union, List


def get_type_a_related_to_b(
    base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
) -> Set[Tuple[NSNodeTargetType, NSNodeTargetType]]:
    # TODO(future PR): allow customizations
    # TODO(future PR): reuse existing quantization mappings
    # TODO(future PR): add the rest of modules and ops here
    type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]] = set()

    for s in base_name_to_sets_of_related_ops.values():
        s_list = list(s)
        # add every bidirectional pair
        for idx_0 in range(0, len(s_list)):
            for idx_1 in range(idx_0, len(s_list)):
                type_a_related_to_b.add((s_list[idx_0], s_list[idx_1]))
                type_a_related_to_b.add((s_list[idx_1], s_list[idx_0]))

    return type_a_related_to_b


NSFusionElType = Union[
    Callable,  # call_function or call_module type, example: F.linear or nn.Conv2d
    str,  # call_method name, example: "dequantize"
    Tuple[str, Any],  # call_method name and first argument, example: ("to", torch.float16)
]
NSFusionType = Union[
    Tuple[NSFusionElType, NSFusionElType],
    Tuple[NSFusionElType, NSFusionElType, NSFusionElType, NSFusionElType],
]

def get_reversed_fusions() -> List[Tuple[NSFusionType, int]]:
    """
    Set of potential fusions, in reverse order. The order is reversed
    to match how fusion patterns are defined in quantization code.

    Fusion format:
    ((fusion_op_0, fusion_op_1), base_op_idx)

    Where base_op_idx is the idx of the op we should use to match other related
    ops. Note: base_op_idx is specified in non-reverse order, i.e. a base_op_idx
    of 0 represents the first op in regular (non-reverse) order, 1 represents the
    second op, etc.
    """
    results: List[Tuple[NSFusionType, int]] = []

    # Possible syntaxes:
    # * single op: torch.nn.Conv2d
    # * multiple ops: (torch.nn.ReLU, torch.nn.Conv2d)
    # For fusions, we only care about patterns composed of multiple ops.
    # TODO(future PR): allow customizations from default patterns.
    all_quant_patterns = _get_pattern_to_quantize_handlers(get_native_backend_config())

    default_base_op_idx = 0
    for quant_pattern in all_quant_patterns.keys():
        # TODO: this is a temporary hack to flatten the patterns from quantization so
        # that it works with the ns matcher function, maybe we should use `_is_match`
        # in torch.ao.quantization.fx.match_utils to match the patterns
        if isinstance(quant_pattern, tuple) and len(quant_pattern) == 2 and \
                isinstance(quant_pattern[1], tuple) and len(quant_pattern[1]) == 2:
            # flatten the pattern with form (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
            quant_pattern = (quant_pattern[0], quant_pattern[1][0], quant_pattern[1][1])

        # Only patterns of multiple ops are fusions, ignore
        # patterns which contain a single op (they get matched
        # without caring about fusions).
        if isinstance(quant_pattern, tuple):
            results.append((quant_pattern, default_base_op_idx))  # type: ignore[arg-type]

        # For each pattern, add additional patterns with observers and
        # fake quants at the end.
        # TODO(future PR): if needed, implement matching for a node
        # having multiple output observers.
        for cls in (ObserverBase, FakeQuantizeBase):
            if isinstance(quant_pattern, tuple):
                new_pattern = (cls, *quant_pattern)
            else:
                new_pattern = (cls, quant_pattern)
            results.append((new_pattern, default_base_op_idx))  # type: ignore[arg-type]


    # After this point, results contains values such as
    # [..., ((torch.nn.ReLU, torch.nn.Conv2d), 0), ...]

    # Patterns for matching fp16 emulation are not specified in the quantization
    # fusion mappings. For now, define them here.
    fp16_em_base_op_idx = 1
    patterns_to_add = [
        # linear-relu fp16 emulation:
        # fp16_to_fp32 -> linear -> relu -> fp32_to_fp16
        ((("to", torch.float16), F.relu, F.linear, "dequantize"), fp16_em_base_op_idx,),
        # Conv-BN fusion (this happens outside of quantization patterns,
        # which is why it is defined separately here).
        ((nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
        ((nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
        ((nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
        ((nn.ReLU, nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
        ((nn.ReLU, nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
        ((nn.ReLU, nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
    ]
    for p in patterns_to_add:
        results.append(p)  # type: ignore[arg-type]
        results.append(((ObserverBase, *p[0]), p[1]))  # type: ignore[arg-type]
        results.append(((FakeQuantizeBase, *p[0]), p[1]))  # type: ignore[arg-type]

    return results


def end_node_matches_reversed_fusion(
    end_node: Node,
    reversed_fusion: NSFusionType,
    gm: GraphModule,
    seen_nodes: Set[Node],
) -> bool:
    """
    Returns true if a pattern ending with `end_node` matches
    the fusion pattern.
    """
    cur_node = end_node
    for fusion_idx in range(len(reversed_fusion)):
        # each node can only belong to one matched pattern
        if cur_node in seen_nodes:
            return False

        cur_fusion_el = reversed_fusion[fusion_idx]

        if cur_node.op == 'call_function':
            fusion_el_is_fun = (not isinstance(cur_fusion_el, str)) and \
                (not isinstance(cur_fusion_el, type))
            if fusion_el_is_fun:
                if cur_node.target != cur_fusion_el:
                    return False
                if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
                    cur_node = cur_node.args[0]
                else:
                    return False
            else:
                return False

        elif cur_node.op == 'call_module':
            fusion_el_is_mod = isinstance(cur_fusion_el, type)
            if fusion_el_is_mod:
                assert isinstance(cur_node.target, str)
                target_mod = getattr_from_fqn(gm, cur_node.target)
                if not isinstance(cur_fusion_el, type):
                    return False
                if not isinstance(target_mod, cur_fusion_el):
                    return False
                if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
                    cur_node = cur_node.args[0]
                else:
                    return False
            else:
                return False

        elif cur_node.op == 'call_method':
            fusion_el_is_meth_with_second_arg = \
                isinstance(cur_fusion_el, tuple) and len(cur_fusion_el) == 2
            fusion_el_is_meth_without_args = isinstance(cur_fusion_el, str)
            if fusion_el_is_meth_without_args or fusion_el_is_meth_with_second_arg:
                if fusion_el_is_meth_without_args:
                    if cur_node.target != cur_fusion_el:
                        return False
                else:
                    assert isinstance(cur_fusion_el, tuple)
                    if cur_node.target != cur_fusion_el[0]:
                        return False
                    elif len(cur_node.args) < 2:
                        return False
                    elif cur_node.args[1] != cur_fusion_el[1]:
                        return False

                if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
                    cur_node = cur_node.args[0]
                else:
                    return False
            else:
                return False
        else:
            return False

    return True
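As a minimal sketch (not part of the uploaded file) of how `get_type_a_related_to_b` expands each set of related ops into all ordered pairs, consider a hypothetical one-entry mapping:

import torch.nn as nn
import torch.nn.functional as F
from torch.ao.ns.fx.pattern_utils import get_type_a_related_to_b

# hypothetical mapping with a single set of related linear ops
related = {'linear': {nn.Linear, F.linear}}
pairs = get_type_a_related_to_b(related)
# every pair is added in both directions, plus self-pairs
assert (nn.Linear, F.linear) in pairs
assert (F.linear, nn.Linear) in pairs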
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py
ADDED
@@ -0,0 +1,243 @@
from __future__ import annotations

import copy
from typing import Any, Callable, Dict, List, Union

import torch
from torch.ao.quantization import QConfigMapping
from torch.ao.quantization.qconfig_mapping import _QCONFIG_STYLE_ORDER
from torch.ao.quantization.qconfig import QConfigAny

__all__ = ["QConfigMultiMapping"]

_QCONFIG_STYLE_TO_METHOD: Dict[str, str] = {
    "global_qconfig": "set_global",
    "object_type_qconfigs": "set_object_type",
    "module_name_regex_qconfigs": "set_module_name_regex",
    "module_name_qconfigs": "set_module_name",
    "module_name_object_type_order_qconfigs": "set_module_name_object_type_order",
}

def _remove_duplicates_and_none(qconfig_list: List[QConfigAny]) -> None:
    to_remove = []
    for index, cur_qconfig in enumerate(qconfig_list):
        if cur_qconfig is None:
            to_remove.append(index)
            break
        for checked_qconfig in qconfig_list[:index]:
            if torch.ao.quantization.qconfig_equals(cur_qconfig, checked_qconfig):
                to_remove.append(index)
                break
    for index in to_remove[::-1]:
        qconfig_list.pop(index)

class QConfigMultiMapping:
    """
    This class, used with the prepare_n_shadows_model API, stores a list of :class:`torch.ao.quantization.QConfigMapping`s
    so that multiple QConfigs can be specified for each QConfig matching style.

    The user can specify QConfigs using the following methods (in increasing match priority):

        ``set_global`` : sets the global (default) QConfigs

        ``set_object_type`` : sets the QConfigs for a given module type, function, or method name

        ``set_module_name_regex`` : sets the QConfigs for modules matching the given regex string

        ``set_module_name`` : sets the QConfigs for modules matching the given module name

        ``set_module_name_object_type_order`` : sets the QConfigs for modules matching a combination
        of the given module name, object type, and the index at which the module appears

    Note: Usage of set methods is the same as in QConfigMapping except with a passed in list of QConfigs rather than a
    single QConfig.

    Example usage::

        qconfig_mapping = QConfigMultiMapping()
            .set_global([qconfig1, qconfig2])
            .set_object_type(torch.nn.Linear, [qconfig2, qconfig3])
            .set_object_type(torch.nn.ReLU, [qconfig1])
            .set_module_name_regex("foo.*bar.*conv[0-9]+", [qconfig2])
            .set_module_name_regex("foo.*", [qconfig1, qconfig2, qconfig3])
            .set_module_name("module1", [None])
            .set_module_name("module2", [qconfig2])
            .set_module_name_object_type_order("foo.bar", torch.nn.functional.linear, 0, [qconfig3])

    """

    def __init__(self):
        # initialize this with 1 QConfigMapping to avoid corner cases
        self.qconfig_mappings_list: List[QConfigMapping] = [QConfigMapping()]

    def _handle_list_size_mismatch(
        self, qconfig_list: List[QConfigAny], style: str
    ) -> None:
        # this method handles cases where the size of qconfig_list does not match
        # the size of qconfig_mappings_list.
        # Issue: Consider a user inserting global_qconfig A and B first, then inserting
        # qconfig C as an object_type_qconfig for conv ops. If we internally store
        # 1 QConfigMapping with A and C and another with just B, then the
        # second QConfigMapping will match B to conv ops (which is not wanted), since B is global.

        # we avoid this by maintaining the invariant that if any QConfigMapping
        # has a qconfig style+key with a qconfig in it, all QConfigMappings must
        # have either a qconfig or None for that same style+key. In the above
        # example, a None qconfig would prevent the unwanted match in the
        # second QConfigMapping

        if len(qconfig_list) > len(self.qconfig_mappings_list):
            # Case: we have more qconfigs (in qconfig_list) than QConfigMappings

            # Add new QConfigMappings (initialized so we maintain the `invariant`)

            new_qconfig_mapping = QConfigMapping()
            # searches other QConfigMappings for qconfig style+keys
            # that need to be inserted as `None` into the new QConfigMapping
            for qconfig_mapping in self.qconfig_mappings_list:

                # global_qconfig has None by default
                for check_style in _QCONFIG_STYLE_ORDER[1:]:
                    qconfigs_dict = getattr(qconfig_mapping, check_style)
                    target_qconfigs_dict = getattr(new_qconfig_mapping, check_style)
                    for key in qconfigs_dict:
                        target_qconfigs_dict[key] = None
                break

            # insert copies of this new QConfigMapping until all entries
            # in qconfig_list can fit among the QConfigMappings
            while len(qconfig_list) > len(self.qconfig_mappings_list):
                self.qconfig_mappings_list.append(copy.deepcopy(new_qconfig_mapping))
        else:
            # Case: we have fewer qconfigs in qconfig_list than QConfigMappings

            # pad qconfig_list with `None` until length is same
            while len(qconfig_list) < len(self.qconfig_mappings_list):
                qconfig_list.append(None)

    # this function applies the insertion method across each QConfigMapping
    def _insert_qconfig_list(
        self,
        style: str,
        args: List[Union[str, int, Callable]],
        qconfig_list: List[QConfigAny],
    ) -> None:

        # we remove duplicates and None to make the ordering of qconfigs
        # deterministic upon insertion.
        _remove_duplicates_and_none(qconfig_list)

        self._handle_list_size_mismatch(qconfig_list, style)
        method_name = _QCONFIG_STYLE_TO_METHOD[style]
        for qconfig_mapping, qconfig in zip(self.qconfig_mappings_list, qconfig_list):
            # uses QConfigMapping set method to insert qconfig
            set_method = getattr(qconfig_mapping, method_name)
            set_method(*args, qconfig)

    def set_global(self, global_qconfig_list: List[QConfigAny]) -> QConfigMultiMapping:
        """
        Set global QConfigs
        see :func:`~torch.ao.quantization.QConfigMapping.set_global()` for more info
        """
        self._insert_qconfig_list("global_qconfig", [], global_qconfig_list)
        return self

    def set_object_type(
        self, object_type: Union[Callable, str], qconfig_list: List[QConfigAny]
    ) -> QConfigMultiMapping:
        """
        Set object type QConfigs
        see :func:`~torch.ao.quantization.QConfigMapping.set_object_type()` for more info
        """
        self._insert_qconfig_list("object_type_qconfigs", [object_type], qconfig_list)
        return self

    def set_module_name_regex(
        self, module_name_regex: str, qconfig_list: List[QConfigAny]
    ) -> QConfigMultiMapping:
        """
        Set module_name_regex QConfigs
        see :func:`~torch.ao.quantization.QConfigMapping.set_module_name_regex()` for more info
        """
        self._insert_qconfig_list(
            "module_name_regex_qconfigs", [module_name_regex], qconfig_list
        )
        return self

    def set_module_name(
        self, module_name: str, qconfig_list: List[QConfigAny]
    ) -> QConfigMultiMapping:
        """
        Set module_name QConfigs
        see :func:`~torch.ao.quantization.QConfigMapping.set_module_name()` for more info
        """
        self._insert_qconfig_list("module_name_qconfigs", [module_name], qconfig_list)
        return self

    def set_module_name_object_type_order(
        self,
        module_name: str,
        object_type: Callable,
        index: int,
        qconfig_list: List[QConfigAny],
    ) -> QConfigMultiMapping:
        """
        Set module_name_object_type_order QConfigs
        see :func:`~torch.ao.quantization.QConfigMapping.set_module_name_object_type_order()` for more info
        """
        self._insert_qconfig_list(
            "module_name_object_type_order_qconfigs",
            [module_name, object_type, index],
            qconfig_list,
        )
        return self

    def __repr__(self):
        return (
            self.__class__.__name__ +
            " [" +
            "".join(f"\n{qconfig_mapping.__repr__()}," for qconfig_mapping in self.qconfig_mappings_list) +
            "\n]"
        )

    @classmethod
    def from_list_qconfig_mapping(
        cls, qconfig_mapping_list: List[QConfigMapping]
    ) -> QConfigMultiMapping:
        """
        Creates a QConfigMultiMapping from a list of QConfigMappings
        """
        new_qconfig_multi_mapping = cls()

        new_qconfig_multi_mapping.qconfig_mappings_list = copy.deepcopy(
            qconfig_mapping_list
        )

        # we need to avoid the issue described in _handle_list_size_mismatch,
        # so we reinsert all the qconfigs using the QConfigMultiMapping
        # set methods

        # go through all qconfig styles
        # note: global can be ignored since it is None by default
        for style in _QCONFIG_STYLE_ORDER[1:]:

            # gather all key+qconfigs for current style
            # into qconfig_dict_list
            qconfig_dict_list: Dict[Any, List[QConfigAny]] = {}
            for qconfig_mapping in qconfig_mapping_list:
                qconfig_dict = getattr(qconfig_mapping, style)
                for key, qconfig in qconfig_dict.items():
                    if key not in qconfig_dict_list:
                        qconfig_dict_list[key] = []
                    qconfig_dict_list[key].append(qconfig)

            # reinsert all gathered key+qconfigs
            set_method_name = _QCONFIG_STYLE_TO_METHOD[style]
            set_method = getattr(new_qconfig_multi_mapping, set_method_name)
            for key, qconfig_list in qconfig_dict_list.items():
                if isinstance(key, tuple):
                    set_method(*key, qconfig_list)
                else:
                    set_method(key, qconfig_list)

        return new_qconfig_multi_mapping
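The invariant described in `_handle_list_size_mismatch` can be seen in a minimal sketch (not part of the uploaded file); `qconfig_a` and `qconfig_b` below are placeholders for any two distinct qconfigs:

import torch
from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping
from torch.ao.quantization import get_default_qconfig

qconfig_a = get_default_qconfig("fbgemm")
qconfig_b = get_default_qconfig("qnnpack")

multi_mapping = (
    QConfigMultiMapping()
    .set_global([qconfig_a, qconfig_b])             # two QConfigMappings now exist
    .set_object_type(torch.nn.Conv2d, [qconfig_a])  # list is padded with None
)
# The second QConfigMapping stores None for nn.Conv2d, so its global
# qconfig_b cannot silently match conv ops.
print(multi_mapping)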
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py
ADDED
@@ -0,0 +1,533 @@
import enum
import operator

import torch
import torch.nn as nn
import torch.ao.nn.intrinsic.quantized as nniq
import torch.ao.nn.quantized as nnq

toq = torch.ops.quantized
from typing import Tuple, Callable, Dict, Set, List, Optional, Union

from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.quantization import (
    ObserverBase,
    FakeQuantizeBase,
)
from torch.ao.quantization.utils import getattr_from_fqn
from torch.ao.quantization.observer import _is_activation_post_process

from .ns_types import NSNodeTargetType, NSResultsType

# TODO(future PR): consider deleting this enum and using the torch types
# directly. This might be tricky because it is not a one to one mapping.
class NodeInputOrOutputType(enum.Enum):
    FP32 = enum.auto()  # torch.float
    INT8 = enum.auto()  # torch.qint8 or torch.quint8
    FP16 = enum.auto()  # torch.float16
    UNKNOWN = enum.auto()  # we cannot determine input/output dtype
    # TODO(future PR): while these functions can support multiple dtypes,
    # for the purposes of numerical debugging we want to get the actual
    # dtype used in the model. We will likely need some kind of dtype
    # propagation to estimate this.
    FP32_OR_INT8 = enum.auto()  # either torch.float or torch.quint8 or torch.qint8
    # TODO(future PRs): dynamic quant, fake quant, etc


def get_node_first_input_and_output_type(
    node: Node,
    gm: GraphModule,
    logger_cls: Callable,
    node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Tuple[NodeInputOrOutputType, NodeInputOrOutputType]:

    # TODO(future PR): clean this up
    FUNS_IO_TYPE_FP32 = node_type_to_io_type_map["funs_io_type_fp32"]
    FUNS_IO_TYPE_FP16 = node_type_to_io_type_map["funs_io_type_fp16"]
    FUNS_IO_TYPE_INT8 = node_type_to_io_type_map["funs_io_type_int8"]
    FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["funs_io_type_fp32_or_int8"]
    MODS_IO_TYPE_FP32 = node_type_to_io_type_map["mods_io_type_fp32"]
    MODS_IO_TYPE_INT8 = node_type_to_io_type_map["mods_io_type_int8"]
    MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
    METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["meths_io_type_fp32_or_int8"]

    if node.op == "call_function":
        if node.target in FUNS_IO_TYPE_FP32:
            return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
        if node.target in FUNS_IO_TYPE_FP16:
            return (NodeInputOrOutputType.FP16, NodeInputOrOutputType.FP16)
        elif node.target in FUNS_IO_TYPE_INT8:
            return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
        elif node.target in FUNS_IO_TYPE_FP32_OR_INT8:
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)
        else:
            return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)

    elif node.op == "call_module":
        assert node.op == "call_module"
        assert isinstance(node.target, str)
        mod = getattr_from_fqn(gm, node.target)
        is_known_fp32_or_int8_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8  # type: ignore[arg-type]
        )
        if (
            isinstance(mod, (logger_cls, ObserverBase, FakeQuantizeBase))  # type: ignore[arg-type]
            or is_known_fp32_or_int8_input_module
        ):
            # A logger or observer's input and output type is the output
            # type of the preceding node.
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)
        is_known_fp32_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32  # type: ignore[arg-type]
        )
        is_known_int8_input_module = any(
            isinstance(mod, target_type) for target_type in MODS_IO_TYPE_INT8  # type: ignore[arg-type]
        )
        if is_known_fp32_input_module:
            return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
        elif is_known_int8_input_module:
            return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
        else:
            return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)

    elif node.op == "call_method":
        if node.target == "dequantize":
            # Dequantize is a special node because it allows multiple input types.
            # So, we look up the output type of the previous node and return that
            # as the input type of this node instance.
            prev_node = get_normalized_nth_input(node, gm, 0)
            assert isinstance(prev_node, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                prev_node, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, NodeInputOrOutputType.FP32)

        elif node.target == "to":
            # to is a special node because it allows multiple input types.
            # So, we look up the output type of the previous node and return that
            # as the input type of this node instance. We also look up the target
            # of to and return the correct output type.
            prev_node = get_normalized_nth_input(node, gm, 0)
            assert isinstance(prev_node, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                prev_node, gm, logger_cls, node_type_to_io_type_map
            )

            cur_node_dtype_target = get_normalized_nth_input(node, gm, 1)
            assert (
                cur_node_dtype_target is torch.float16
            ), f"{cur_node_dtype_target} handling needs to be added"

            return (prev_node_output_type, NodeInputOrOutputType.FP16)

        elif node.target in METHS_IO_TYPE_FP32_OR_INT8:
            first_arg = get_normalized_nth_input(node, gm, 0)
            assert isinstance(first_arg, Node)
            (
                _prev_node_input_type,
                prev_node_output_type,
            ) = get_node_first_input_and_output_type(
                first_arg, gm, logger_cls, node_type_to_io_type_map
            )
            return (prev_node_output_type, prev_node_output_type)

        return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
    else:
        return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)


def get_node_input_qparams(
    node: Node,
    gm: GraphModule,
    node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Optional[Tuple[Union[torch.Tensor, float], Union[torch.Tensor, int]]]:
    """
    Returns the qparams (scale, zero_point) of the first input to `node`,
    if they can be inferred from the graph.
    """
    prev_node = get_normalized_nth_input(node, gm, 0)

    if not isinstance(prev_node, Node):
        return None

    MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]

    def _get_scale_zp_from_function_args(node, gm, scale_arg_idx, zp_arg_idx):
        scale_node = get_normalized_nth_input(node, gm, scale_arg_idx)
        zp_node = get_normalized_nth_input(node, gm, zp_arg_idx)
        assert isinstance(scale_node, Node) and isinstance(scale_node.target, str)
        assert isinstance(zp_node, Node) and isinstance(zp_node.target, str)
        scale_obj = getattr_from_fqn(gm, scale_node.target)
        zp_obj = getattr_from_fqn(gm, zp_node.target)
        return (scale_obj, zp_obj)

    if prev_node.op == "call_function":

        # quantize - read the args directly
        if prev_node.target == torch.quantize_per_tensor:
            return _get_scale_zp_from_function_args(prev_node, gm, 1, 2)
        elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu):
            return _get_scale_zp_from_function_args(prev_node, gm, 2, 3)

        return None
        # TODO(future PR): handle more functionals
        # TODO(future PR): handle functional ops which inherit qparams from input

    elif prev_node.op == "call_module":

        # get type of the module
        assert isinstance(prev_node.target, str)
        module_obj = getattr_from_fqn(gm, prev_node.target)
        if isinstance(
            module_obj,
            (
                nnq.Linear,
                nnq.Conv1d,
                nnq.Conv2d,
                nniq.ConvReLU2d,
                nnq.Conv3d,
                nnq.BatchNorm2d,
                nnq.BatchNorm3d,
                nnq.ConvTranspose1d,
                nnq.ConvTranspose2d,
                nnq.ELU,
                nnq.GroupNorm,
                nnq.InstanceNorm1d,
                nnq.InstanceNorm2d,
                nnq.InstanceNorm3d,
                nnq.LayerNorm,
                nnq.Hardswish,
                nnq.LeakyReLU,
                nnq.ReLU6,
                nniq.BNReLU2d,
                nniq.BNReLU3d,
                nniq.ConvReLU1d,
                nniq.ConvReLU2d,
                nniq.ConvReLU3d,
                nniq.LinearReLU,
            ),
        ):
            return (module_obj.scale, module_obj.zero_point)  # type: ignore[return-value]

        is_known_fp32_or_int8_input_module = any(
            isinstance(module_obj, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8  # type: ignore[arg-type]
        )
        if is_known_fp32_or_int8_input_module:
            return get_node_input_qparams(prev_node, gm, node_type_to_io_type_map)

    return None


def return_first_non_observer_node(
    node: Node,
    gm: GraphModule,
) -> Node:
    """
    If node is not an observer, returns it. If node is an observer,
    navigates up the graph and returns the first parent which is not an
    observer. For example,

    graph: (node_non_obs), node = node_non_obs : returns node_non_obs
    graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs
    graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs
    """
    if node.op == "call_module":
        node_obj = getattr_from_fqn(gm, node.target)  # type: ignore[arg-type]
        if _is_activation_post_process(node_obj):
            assert len(node.args) == 1
            assert isinstance(node.args[0], Node)
            node = node.args[0]
            # code duplication intended, not worth refactoring
            assert isinstance(node.target, str)
            node_obj = getattr_from_fqn(gm, node.target)
            if _is_activation_post_process(node_obj):
                assert len(node.args) == 1
                assert isinstance(node.args[0], Node)
                node = node.args[0]
    return node


def get_number_of_non_param_args(
    node: Node,
    gm: GraphModule,
) -> int:
    """
    Assumes that all non-param args occur first. Returns the number of
    non-param args expected for a node. For example, for

    F.linear(x, weight, bias)

    Returns 1, because x is a non-param arg and weight and bias are params.
    For

    lstm_mod(x, hid)

    Returns 2, because both x and hid are non-param args.
    """
    if node.op == "call_module":
        node_obj = getattr_from_fqn(gm, node.target)  # type: ignore[arg-type]
        if isinstance(node_obj, nn.LSTM):
            return 2

    # default is 1
    return 1


def get_arg_indices_of_inputs_to_log(node: Node) -> List[int]:
    """
    Returns the indices of args of the node which we should attach
    loggers to, if input logging is enabled.

    For example,
    * for (x + y), returns [0, 1]
    * for (1 + y), returns [1]
    * for (x + 1), returns [0]
    * for (linear(x, w, b)) returns [0]
    * by default, returns [0]
    """
    if len(node.args) == 0:
        return []
    if node.op == "call_function" and (
        # TODO(future PR): use relationship map instead of hardcoding
        node.target in (torch.add, torch.ops.quantized.add, operator.add)
        or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
    ):
        result = []
        for i in range(2):
            if type(node.args[i]) == Node:
                result.append(i)
        return result
    return [0]


def get_target_type_str(node: Node, gm: GraphModule) -> str:
    """
    Returns a string representation of the type of the function or module
    pointed to by this node, or '' for other node types.
    """
    target_type = ""
    if node.op in ("call_function", "call_method"):
        target_type = torch.typename(node.target)
    elif node.op == "call_module":
        assert isinstance(node.target, str)
        target_mod = getattr_from_fqn(gm, node.target)
        target_type = torch.typename(target_mod)
    return target_type


def rekey_logger_info_on_node_name_of_model(
    results: NSResultsType,
    model_name: str,
) -> NSResultsType:
    """
    Rekeys the layer name of a results dictionary to use node names
    from `model_name`.

    For example, transforms

        {'base_op_1_0': {'node_output': {'model_a':
          [{'ref_node_name': 'linear1', ...}]}}}

    into

        {'linear1': {'node_output': {'model_a':
          [{'ref_node_name': 'linear1', ...}]}}}

    Note: we cannot use these node names directly because they are not
    guaranteed to be consistent across models. This is why we extract
    the results first and rekey afterwards.
    """
    new_results = {}
    for old_layer_name, result_type_to_results in results.items():
        new_layer_name = None
        for model_name_to_results in result_type_to_results.values():
            for cur_model_name, list_of_results in model_name_to_results.items():
                if cur_model_name == model_name:
                    assert len(list_of_results)
                    new_layer_name = list_of_results[0]["ref_node_name"]
                else:
                    continue
        if new_layer_name is not None:
            new_results[new_layer_name] = result_type_to_results
        else:
            new_results[old_layer_name] = result_type_to_results
    return new_results


def maybe_add_missing_fqns(results: NSResultsType) -> None:
    """
    If `fqn` entries are filled in for one of the models in `results`, copies
    them over to any models which do not have them filled out.

    A common use case benefitting from this is comparing a model prepared by
    quantization to a quantized model. In this case, the model prepared by
    quantization would have `fqn` entries, and the quantized model would not.
    """

    # Check in the first result to find any model with fqn entries defined.
    model_name_with_fqns = None
    for result_type_to_results in results.values():
        for model_name_to_results in result_type_to_results.values():
            for model_name, model_results in model_name_to_results.items():
                if len(model_results) > 0:
                    if model_results[0]["fqn"] is not None:
                        model_name_with_fqns = model_name
                        break
            break
        break

    if model_name_with_fqns:
        for result_type_to_results in results.values():
            for model_name_to_results in result_type_to_results.values():
                ref_model_results = model_name_to_results[model_name_with_fqns]
                for model_name, model_results in model_name_to_results.items():
                    if model_name == model_name_with_fqns:
                        continue
                    for i in range(len(model_results)):
                        fqn = ref_model_results[i]["fqn"]
                        model_results[i]["fqn"] = fqn


def maybe_dequantize_first_two_tensor_args_and_handle_tuples(f):
    def inner(*args, **kwargs):
        a0, a1, *a_other = args

        if (isinstance(a0, tuple) and isinstance(a1, tuple)) or (
            isinstance(a0, list) and isinstance(a1, list)
        ):
            results = []
            for el0, el1 in zip(a0, a1):
                new_args = (el0, el1, *a_other)
                results.append(inner(*new_args, **kwargs))
            return results

        elif isinstance(a0, torch.Tensor) and isinstance(a1, torch.Tensor):
            if a0.is_quantized:
                a0 = a0.dequantize()
            if a1.is_quantized:
                a1 = a1.dequantize()

        # for the purposes of this util, only handle floats
        if a0.dtype != torch.float or a1.dtype != torch.float:
            return None

        new_args = (a0, a1, *a_other)
        return f(*new_args, **kwargs)

    return inner


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the SQNR between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    Ps = torch.norm(x)
    Pn = torch.norm(x - y)
    return 20 * torch.log10(Ps / Pn)


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the normalized L2 error between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())


@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes the cosine similarity between `x` and `y`.

    Args:
        x: Tensor or tuple of tensors
        y: Tensor or tuple of tensors

    Return:
        float or tuple of floats
    """
    # For convolutions, the shape of the quantized weight has one additional
    # dimension compared to the shape of the fp32 weight. Match the shapes
    # to enable cosine similarity comparison.
    x = x.reshape(1, -1)
    y = y.reshape(1, -1)
    return torch.nn.functional.cosine_similarity(x, y)

def op_type_supports_shadowing(node: Node) -> bool:
    if node.op == 'call_function':
        if node.target in (torch.add, torch.mul, operator.add, operator.mul, torch.cat, torch.stack):
            # shadowing for ops with multiple tensor inputs is not implemented yet
            return False
    return True

def get_normalized_nth_input(node: Node, gm: GraphModule, idx: int) -> Node:
    """
    Given a node, gets the n'th input to that node, normalizing
    args and kwargs to the best of its ability.
    """
    try:
        norm_args_and_kwargs = node.normalized_arguments(
            gm, normalize_to_only_use_kwargs=True)
        if norm_args_and_kwargs is not None:
            norm_args, norm_kwargs = norm_args_and_kwargs
            assert len(norm_args) + len(norm_kwargs) > idx
            if idx < len(norm_args):
                return norm_args[idx]
            else:
                # note: in Python 3.7+ dicts are ordered
                return list(norm_kwargs.values())[idx]
        else:
            assert len(node.args) + len(node.kwargs) > idx
            if idx < len(node.args):
                return node.args[idx]  # type: ignore[return-value]
            else:
                kwargs_idx = idx + len(node.args)
                return list(node.kwargs.values())[kwargs_idx]  # type: ignore[return-value]
    except RuntimeError:
        # this RuntimeError happens when node argument normalization
        # requires typehints to proceed, such as for torch.add where
        # either the first, second or both arguments could be tensors
        assert len(node.args) + len(node.kwargs) > idx
        if idx < len(node.args):
            return node.args[idx]  # type: ignore[return-value]
        else:
            kwargs_idx = idx + len(node.args)
            return list(node.kwargs.values())[kwargs_idx]  # type: ignore[return-value]
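The comparison helpers at the end of this file are easy to sanity-check by hand. A minimal sketch (not part of the uploaded file) on toy tensors, with the arithmetic worked out in comments:

import torch
from torch.ao.ns.fx.utils import compute_sqnr, compute_normalized_l2_error

x = torch.ones(4)
y = x + 0.01  # uniform "quantization error" of 0.01
# SQNR = 20 * log10(||x|| / ||x - y||) = 20 * log10(2.0 / 0.02) = 40 dB
print(compute_sqnr(x, y))                 # approximately tensor(40.)
# normalized L2 = sqrt(sum((x - y)^2) / sum(x^2)) = sqrt(0.0004 / 4) = 0.01
print(compute_normalized_l2_error(x, y))  # approximately tensor(0.0100)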
env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py
ADDED
@@ -0,0 +1,275 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
import torch.ao.nn.quantized.dynamic as nnqd
|
5 |
+
import torch.ao.nn.quantized as nnq
|
6 |
+
import torch.ao.nn.intrinsic.qat as nniqat
|
7 |
+
import torch.ao.nn.qat as nnqat
|
8 |
+
import torch.ao.nn.intrinsic as nni
|
9 |
+
import torch.ao.nn.intrinsic.quantized as nniq
|
10 |
+
toq = torch.ops.quantized
|
11 |
+
from torch.fx import GraphModule
|
12 |
+
from torch.fx.graph import Node
|
13 |
+
|
14 |
+
from .utils import (
|
15 |
+
get_target_type_str,
|
16 |
+
getattr_from_fqn,
|
17 |
+
return_first_non_observer_node,
|
18 |
+
)
|
19 |
+
|
20 |
+
from .ns_types import (
|
21 |
+
NSSingleResultValuesType,
|
22 |
+
NSSingleResultType,
|
23 |
+
)
|
24 |
+
|
25 |
+
from typing import List, Optional, Dict, Callable
|
26 |
+
|
27 |
+
def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
|
28 |
+
return mod.weight.detach() # type: ignore[operator]
|
29 |
+
|
30 |
+
def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
|
31 |
+
return mod[0].weight.detach() # type: ignore[index]
|
32 |
+
|
33 |
+
def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
|
34 |
+
return mod._weight_bias()[0] # type: ignore[operator]
|
35 |
+
|
36 |
+
def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
|
37 |
+
res = []
|
38 |
+
for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type]
|
39 |
+
if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
|
40 |
+
param_value = mod._flat_weights[idx].detach() # type: ignore[index]
|
41 |
+
res.append(param_value)
|
42 |
+
return res
|
43 |
+
|
44 |
+
def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
|
45 |
+
res = []
|
46 |
+
for weight_value in mod._all_weight_values: # type: ignore[union-attr]
|
47 |
+
res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
|
48 |
+
res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
|
49 |
+
return res
|
50 |
+
|
51 |
+
def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
|
52 |
+
if (
|
53 |
+
isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d))
|
54 |
+
):
|
55 |
+
return mod.weight.detach()
|
56 |
+
elif (
|
57 |
+
isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d))
|
58 |
+
):
|
59 |
+
return mod[0].weight.detach()
|
60 |
+
else:
|
61 |
+
return mod._weight_bias()[0] # type: ignore[operator]
|
62 |
+
|
63 |
+
def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
|
64 |
+
if isinstance(mod, nn.Linear):
|
65 |
+
return mod.weight.detach()
|
66 |
+
elif isinstance(mod, nni.LinearReLU):
|
67 |
+
return mod[0].weight.detach()
|
68 |
+
else:
|
69 |
+
return mod._weight_bias()[0] # type: ignore[operator]
|
70 |
+
|
71 |
+
def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
|
72 |
+
# TODO(future PR): make more generic, handle everything
|
73 |
+
if isinstance(mod, nn.LSTM):
|
74 |
+
res = []
|
75 |
+
for idx, param_name in enumerate(mod._flat_weights_names):
|
76 |
+
if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
|
77 |
+
param_value = mod._flat_weights[idx].detach()
|
78 |
+
res.append(param_value)
|
79 |
+
return res
|
80 |
+
else:
|
81 |
+
assert isinstance(mod, nnqd.LSTM), f"type {type(mod)} not handled yet"
|
82 |
+
res = []
|
83 |
+
for weight_value in mod._all_weight_values:
|
84 |
+
res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
|
85 |
+
res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
|
86 |
+
return res
|
87 |
+
|
88 |
+
def get_conv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
|
89 |
+
# traverse backwards from the weight arg, accounting for any observers
|
90 |
+
weight_arg_node = node.args[1]
|
91 |
+
assert isinstance(weight_arg_node, Node)
|
92 |
+
weight_node = return_first_non_observer_node(weight_arg_node, gm)
|
93 |
+
assert isinstance(weight_node, Node)
|
94 |
+
assert weight_node.op == 'get_attr'
|
95 |
+
weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
|
96 |
+
return weight.detach()
|
97 |
+
|
98 |
+
def get_qconv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
|
99 |
+
# qconv state is arg 1
|
100 |
+
qconv_state_node = node.args[1]
|
101 |
+
assert isinstance(qconv_state_node, Node)
|
102 |
+
assert qconv_state_node.op == 'get_attr'
|
103 |
+
qconv_state_obj = getattr_from_fqn(gm, qconv_state_node.target) # type: ignore[arg-type]
|
104 |
+
return qconv_state_obj.weight()
|
105 |
+
|
106 |
+
def get_linear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
|
107 |
+
# traverse backwards from the weight arg, accounting for any observers
|
108 |
+
# supported patterns:
|
109 |
+
# weight -> obs -> linear
|
110 |
+
    # weight -> to(torch.float16) -> dequantize -> linear
    linear_second_arg = node.args[1]
    assert isinstance(linear_second_arg, Node)

    if linear_second_arg.op == 'call_module':
        # weight -> obs -> linear
        weight_arg_node = node.args[1]
        assert isinstance(weight_arg_node, Node)
        weight_node = weight_arg_node.args[0]
        assert isinstance(weight_node, Node)
        assert weight_node.op == 'get_attr'
        weight = getattr_from_fqn(gm, weight_node.target)  # type: ignore[arg-type]
        return weight.detach()
    elif linear_second_arg.op == 'call_method':
        # weight -> to(torch.float16) -> dequantize -> linear
        assert linear_second_arg.op == 'call_method'
        dequant_node = node.args[1]
        assert isinstance(dequant_node, Node)
        to_fp16_node = dequant_node.args[0]
        assert isinstance(to_fp16_node, Node)
        # extract the dtype, so we can cast to it before returning
        target_dtype = to_fp16_node.args[1]
        weight_node = to_fp16_node.args[0]
        assert isinstance(weight_node, Node)
        assert weight_node.op == 'get_attr'
        weight = getattr_from_fqn(gm, weight_node.target)  # type: ignore[arg-type]
        # return the weight with fp16 cast
        return weight.detach().to(target_dtype)
    else:
        assert linear_second_arg.op == 'get_attr'
        weight = getattr_from_fqn(gm, linear_second_arg.target)  # type: ignore[arg-type]
        return weight.detach()

def get_qlinear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
    # packed weight is arg 1
    packed_weight_node = node.args[1]
    assert isinstance(packed_weight_node, Node)
    assert packed_weight_node.op == 'get_attr'
    packed_weight = getattr_from_fqn(gm, packed_weight_node.target)  # type: ignore[arg-type]
    # TODO(future PR): why does packed_weight.unpack() not work?
    (weight, _bias), _name = packed_weight.__getstate__()
    return weight

def get_op_to_type_to_weight_extraction_fn() -> Dict[str, Dict[Callable, Callable]]:

    op_to_type_to_weight_extraction_fn: Dict[str, Dict[Callable, Callable]] = {
        'call_module': {
            # Conv1d
            nn.Conv1d: mod_weight_detach,
            nni.ConvReLU1d: mod_0_weight_detach,
            nnq.Conv1d: mod_weight_bias_0,
            nnqat.Conv1d: mod_weight_detach,
            nniqat.ConvBn1d: mod_weight_detach,
            nniqat.ConvBnReLU1d: mod_weight_detach,
            nniqat.ConvReLU1d: mod_weight_detach,
            nniq.ConvReLU1d: mod_weight_bias_0,
            # Conv2d
            nn.Conv2d: mod_weight_detach,
            nni.ConvReLU2d: mod_0_weight_detach,
            nnq.Conv2d: mod_weight_bias_0,
            nnqat.Conv2d: mod_weight_detach,
            nniqat.ConvBn2d: mod_weight_detach,
            nniqat.ConvBnReLU2d: mod_weight_detach,
            nniqat.ConvReLU2d: mod_weight_detach,
            nniq.ConvReLU2d: mod_weight_bias_0,
            # Conv3d
            nn.Conv3d: mod_weight_detach,
            nni.ConvReLU3d: mod_0_weight_detach,
            nnq.Conv3d: mod_weight_bias_0,
            nnqat.Conv3d: mod_weight_detach,
            nniqat.ConvBn3d: mod_weight_detach,
            nniqat.ConvBnReLU3d: mod_weight_detach,
            nniqat.ConvReLU3d: mod_weight_detach,
            nniq.ConvReLU3d: mod_weight_bias_0,
            # Linear
            nn.Linear: mod_weight_detach,
            nnq.Linear: mod_weight_bias_0,
            nni.LinearReLU: mod_0_weight_detach,
            nniq.LinearReLU: mod_weight_bias_0,
            nnqat.Linear: mod_weight_detach,
            nnqd.Linear: mod_weight_bias_0,
            nniqat.LinearReLU: mod_weight_detach,
            nniqat.LinearBn1d: mod_weight_detach,
            nn.modules.linear.NonDynamicallyQuantizableLinear: mod_weight_detach,
            # LSTM
            nn.LSTM: get_lstm_weight,
            nnqd.LSTM: get_qlstm_weight,
        },
        'call_function': {
            # Conv
            F.conv1d: get_conv_fun_weight,
            F.conv2d: get_conv_fun_weight,
            F.conv3d: get_conv_fun_weight,
            toq.conv1d: get_qconv_fun_weight,
            toq.conv2d: get_qconv_fun_weight,
            toq.conv3d: get_qconv_fun_weight,
            toq.conv1d_relu: get_qconv_fun_weight,
            toq.conv2d_relu: get_qconv_fun_weight,
            toq.conv3d_relu: get_qconv_fun_weight,
            # Linear
            F.linear: get_linear_fun_weight,
            toq.linear: get_qlinear_fun_weight,
            toq.linear_relu: get_qlinear_fun_weight,
        },
    }

    return op_to_type_to_weight_extraction_fn

def extract_weight_from_node(
    node: Node,
    gm: GraphModule,
    op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> Optional[NSSingleResultType]:
    res_type = NSSingleResultValuesType.WEIGHT.value

    # Not all graphmodules have _node_name_to_scope, so only fill it
    # out if it exists.
    fqn = None
    if hasattr(gm, '_node_name_to_scope'):
        fqn = gm._node_name_to_scope[node.name][0]  # type: ignore[index]

    if op_to_type_to_weight_extraction_fn is None:
        op_to_type_to_weight_extraction_fn = get_op_to_type_to_weight_extraction_fn()

    ref_node_type = get_target_type_str(node, gm)
    # for extracting weights, these are always the same
    prev_node_type = ref_node_type

    if node.op == 'call_function':
        function_mapping = op_to_type_to_weight_extraction_fn['call_function']
        for target_fn_type, weight_extraction_fn in function_mapping.items():
            if node.target == target_fn_type:
                weight = weight_extraction_fn(node, gm)
                return {
                    'type': res_type,
                    'values': [weight],
                    'prev_node_name': node.name,
                    'prev_node_target_type': prev_node_type,
                    'ref_node_name': node.name,
                    'ref_node_target_type': ref_node_type,
                    'index_within_arg': 0,
                    'index_of_arg': 0,
                    'fqn': fqn,
                }

    elif node.op == 'call_module':
        # for call_module, we need to look up the modules to do the type check
        assert isinstance(node.target, str)
        mod = getattr_from_fqn(gm, node.target)
        module_mapping = op_to_type_to_weight_extraction_fn['call_module']
        for target_mod_type, weight_extraction_fn in module_mapping.items():
            if type(mod) == target_mod_type:
                weight = weight_extraction_fn(mod)
                return {
                    'type': res_type,
                    'values': [weight],
                    'prev_node_name': node.name,
                    'prev_node_target_type': prev_node_type,
                    'ref_node_name': node.name,
                    'ref_node_target_type': ref_node_type,
                    'index_within_arg': 0,
                    'index_of_arg': 0,
                    'fqn': fqn,
                }

    return None
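Before the next file, a minimal usage sketch (an aside, not part of the diff itself) of driving `extract_weight_from_node` over a symbolically traced model; the toy model and loop below are illustrative assumptions, only the imported names come from the file above:

import torch
import torch.nn as nn
from torch.fx import symbolic_trace
from torch.ao.ns.fx.weight_utils import extract_weight_from_node

m = symbolic_trace(nn.Sequential(nn.Linear(4, 4)).eval())
for node in m.graph.nodes:
    if node.op == 'call_module':
        result = extract_weight_from_node(node, m)
        if result is not None:
            # 'values' holds the detached weight tensor(s) for this node
            print(result['ref_node_name'], result['values'][0].shape)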
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py
ADDED
@@ -0,0 +1,23 @@
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, DTypeWithConstraints, ObservationType
from .fbgemm import get_fbgemm_backend_config
from .native import get_native_backend_config, get_native_backend_config_dict
from .qnnpack import get_qnnpack_backend_config
from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
from .executorch import get_executorch_backend_config
from .onednn import get_onednn_backend_config

__all__ = [
    "get_fbgemm_backend_config",
    "get_native_backend_config",
    "get_native_backend_config_dict",
    "get_qnnpack_backend_config",
    "get_tensorrt_backend_config",
    "get_tensorrt_backend_config_dict",
    "get_executorch_backend_config",
    "BackendConfig",
    "BackendPatternConfig",
    "DTypeConfig",
    "DTypeWithConstraints",
    "ObservationType",
    "get_onednn_backend_config",
]
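For orientation (this note and snippet are an aside, not part of the diff): these exports are the entry points users combine with FX graph mode quantization. A minimal sketch, assuming the standard `prepare_fx`/`convert_fx` flow:

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 4),)
backend_config = get_native_backend_config()

prepared = prepare_fx(model, get_default_qconfig_mapping(), example_inputs,
                      backend_config=backend_config)
prepared(*example_inputs)  # calibrate with representative data
quantized = convert_fx(prepared, backend_config=backend_config)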
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (883 Bytes)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc
ADDED
Binary file (13 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc
ADDED
Binary file (3.19 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc
ADDED
Binary file (26.6 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc
ADDED
Binary file (7.71 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc
ADDED
Binary file (2.23 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc
ADDED
Binary file (3.59 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc
ADDED
Binary file (209 Bytes)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc
ADDED
Binary file (10.7 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc
ADDED
Binary file (2.62 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc
ADDED
Binary file (2.08 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (9.6 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc
ADDED
Binary file (2.19 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py
ADDED
@@ -0,0 +1,637 @@
import copy
import operator
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.intrinsic.qat as nniqat
import torch.ao.nn.qat as nnqat
import torch.ao.nn.quantized.reference as nnqr
from collections import namedtuple
from typing import Callable, Dict, List, Union
from .backend_config import (
    BackendPatternConfig,
    DTypeConfig,
    DTypeWithConstraints,
    ObservationType,
)
from ..fuser_method_mappings import (
    _sequential_wrapper2,
    fuse_conv_bn,
    fuse_conv_bn_relu,
    fuse_linear_bn,
    fuse_convtranspose_bn,
)

__all__: List[str] = []

# TODO: rename to be more explicit, e.g. qat_conv_relu
_ConvMetadata = namedtuple(
    "_ConvMetadata",
    ["root", "transpose", "bn", "reference", "transpose_reference",
     "fused_conv_relu", "fused_conv_bn", "fused_conv_bn_relu",
     "qat", "relu_qat", "bn_qat", "bn_relu_qat",
     "func", "func_transpose"])
_Conv1dMetadata = _ConvMetadata(
    nn.Conv1d, nn.ConvTranspose1d, nn.BatchNorm1d, nnqr.Conv1d, nnqr.ConvTranspose1d,
    nni.ConvReLU1d, nni.ConvBn1d, nni.ConvBnReLU1d,
    nnqat.Conv1d, nniqat.ConvReLU1d, nniqat.ConvBn1d, nniqat.ConvBnReLU1d,
    F.conv1d, F.conv_transpose1d)
_Conv2dMetadata = _ConvMetadata(
    nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d, nnqr.Conv2d, nnqr.ConvTranspose2d,
    nni.ConvReLU2d, nni.ConvBn2d, nni.ConvBnReLU2d,
    nnqat.Conv2d, nniqat.ConvReLU2d, nniqat.ConvBn2d, nniqat.ConvBnReLU2d,
    F.conv2d, F.conv_transpose2d)
_Conv3dMetadata = _ConvMetadata(
    nn.Conv3d, nn.ConvTranspose3d, nn.BatchNorm3d, nnqr.Conv3d, nnqr.ConvTranspose3d,
    nni.ConvReLU3d, nni.ConvBn3d, nni.ConvBnReLU3d,
    nnqat.Conv3d, nniqat.ConvReLU3d, nniqat.ConvBn3d, nniqat.ConvBnReLU3d,
    F.conv3d, F.conv_transpose3d)

# Add constraints for fixed qparams ops like sigmoid and tanh to ensure values
# fall within the proper ranges, e.g. [0, 1] for sigmoid, [-1, 1] for tanh
_FIXED_QPARAM_OP_0TO1_CONSTRAINTS = DTypeWithConstraints(
    dtype=torch.quint8,
    quant_min_lower_bound=0,
    quant_max_upper_bound=255,
    scale_exact_match=1.0 / 256.0,
    zero_point_exact_match=0,
)
_FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS = DTypeWithConstraints(
    dtype=torch.quint8,
    quant_min_lower_bound=0,
    quant_max_upper_bound=255,
    scale_exact_match=2.0 / 256.0,
    zero_point_exact_match=128,
)
_FIXED_QPARAMS_OP_TO_CONSTRAINTS: Dict[Union[Callable, str], DTypeWithConstraints] = {
    torch.nn.Hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    torch.nn.functional.hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    "hardsigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    "hardsigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    torch.nn.Sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    torch.sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    "sigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    "sigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    torch.nn.Softmax: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS,
    torch.nn.Tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
    torch.tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
    "tanh": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
    "tanh_": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS,
}
|
84 |
+
binary_op_configs: List[BackendPatternConfig] = []
|
85 |
+
num_tensor_args_to_observation_type_mapping = {
|
86 |
+
# TODO: this is not used right now since we have extra check in prepare
|
87 |
+
# will need to change this to NO_OBSERVER later after we implemented
|
88 |
+
# Tensor dtype inference properly
|
89 |
+
0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
|
90 |
+
1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
|
91 |
+
2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
|
92 |
+
}
|
93 |
+
for op_with_quantized_bop_scalar_variant in [operator.add, torch.add, operator.mul, torch.mul]:
|
94 |
+
bop_patterns = [
|
95 |
+
(op_with_quantized_bop_scalar_variant, nn.ReLU),
|
96 |
+
(op_with_quantized_bop_scalar_variant, F.relu),
|
97 |
+
(op_with_quantized_bop_scalar_variant, torch.relu),
|
98 |
+
op_with_quantized_bop_scalar_variant
|
99 |
+
]
|
100 |
+
for bop_pattern in bop_patterns:
|
101 |
+
binary_op_configs.append(
|
102 |
+
BackendPatternConfig(bop_pattern)
|
103 |
+
.set_dtype_configs(dtype_configs) # noqa: E131
|
104 |
+
._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))
|
105 |
+
# matmul
|
106 |
+
binary_op_configs.append(
|
107 |
+
BackendPatternConfig(torch.matmul)
|
108 |
+
.set_dtype_configs(dtype_configs) # noqa: E131
|
109 |
+
)
|
110 |
+
return binary_op_configs
|
111 |
+
|
112 |
+
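# Illustrative aside (not part of the upstream file): the mapping above keys
# observation on how many of a binary op's arguments are Tensors, e.g.
#   torch.add(x, y)    # 2 tensor args -> output gets its own observer
#   torch.add(x, 2.0)  # 1 tensor arg  -> output shares the input's observer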
def _get_linear_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    """
    Return all configs related to linear modules and ops.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    linear_configs: List[BackendPatternConfig] = []

    # (1) Single linear modules/functions
    # -------------------------------------
    # linear module
    linear_configs.append(
        BackendPatternConfig(torch.nn.Linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear)
        .set_qat_module(nnqat.Linear))
    # linear qat module
    linear_configs.append(
        BackendPatternConfig(nnqat.Linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear))
    # functional linear
    linear_configs.append(
        BackendPatternConfig(torch.nn.functional.linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1, "bias": 2}))

    # (2) Linear + relu
    # -------------------
    # 2.1 linear module + relu fusion config
    # linear relu, linear module + relu module
    linear_configs.append(
        BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU))
        .set_dtype_configs(dtype_configs)  # noqa: E131
        .set_fuser_method(_sequential_wrapper2(nni.LinearReLU))
        .set_fused_module(nni.LinearReLU))
    # linear relu, linear module + functional relu
    linear_configs.append(
        BackendPatternConfig((torch.nn.Linear, torch.nn.functional.relu))
        .set_dtype_configs(dtype_configs)  # noqa: E131
        .set_fuser_method(_sequential_wrapper2(nni.LinearReLU))
        .set_fused_module(nni.LinearReLU))

    # 2.2 linear module + relu, fused module configs
    # linear relu, fused module
    linear_configs.append(
        BackendPatternConfig(nni.LinearReLU)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear)
        .set_qat_module(nniqat.LinearReLU))
    # linear relu, qat fused module
    linear_configs.append(
        BackendPatternConfig(nniqat.LinearReLU)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear))
    # 2.3 functional linear + relu configs
    # linear relu, functional linear + relu module
    linear_configs.append(
        BackendPatternConfig((F.linear, torch.nn.ReLU))
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs))
    # linear relu, functional linear + functional relu
    linear_configs.append(
        BackendPatternConfig((F.linear, F.relu))
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs))

    # (3) Linear + batchnorm
    # ------------------------
    # 3.1 linear bn fusion
    linear_configs.append(
        BackendPatternConfig((nn.Linear, nn.BatchNorm1d))
        .set_dtype_configs(dtype_configs)  # noqa: E131
        .set_fuser_method(fuse_linear_bn)
        .set_fused_module(nni.LinearBn1d))

    # 3.2 linear bn fused
    # linear bn, fused module
    linear_configs.append(
        BackendPatternConfig(nni.LinearBn1d)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear)
        .set_qat_module(nniqat.LinearBn1d))
    # linear bn, qat fused module
    linear_configs.append(
        BackendPatternConfig(nniqat.LinearBn1d)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear))
    return linear_configs
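# Illustrative aside (not part of the upstream file): the configs above describe
# the whole lifecycle of a quantized linear, roughly
#   (nn.Linear, nn.ReLU) --fuse--> nni.LinearReLU --QAT swap--> nniqat.LinearReLU
#   --convert--> reference nnqr.Linear rooted at nn.Linear, plus a requantize.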
def _get_conv_configs(dtype_configs):
    """
    Return all configs related to conv modules and ops.
    """
    conv_configs = []
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    for convs in [_Conv1dMetadata, _Conv2dMetadata, _Conv3dMetadata]:

        # (1) Single conv modules/functions
        # -----------------------------------
        # conv module
        conv_configs.append(
            BackendPatternConfig(convs.root)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
            .set_qat_module(convs.qat))
        # conv qat module
        conv_configs.append(
            BackendPatternConfig(convs.qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference))
        # functional conv
        conv_configs.append(
            BackendPatternConfig(convs.func)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            ._set_input_type_to_index({"weight": 1, "bias": 2}))

        # (2) Conv + relu
        # -----------------
        # 2.1 conv module + relu fusion configs
        # conv relu fusion, conv module + relu module
        conv_configs.append(
            BackendPatternConfig((convs.root, torch.nn.ReLU))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
            .set_fused_module(convs.fused_conv_relu))
        # conv relu fusion, conv module + functional relu
        conv_configs.append(
            BackendPatternConfig((convs.root, F.relu))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
            .set_fused_module(convs.fused_conv_relu))
        # 2.2 conv module + relu fused module configs
        # conv relu, fused module
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_relu)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
            .set_qat_module(convs.relu_qat))
        # conv relu, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.relu_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference))
        # 2.3 functional conv + relu configs
        # conv relu, functional conv + relu module
        conv_configs.append(
            BackendPatternConfig((convs.func, torch.nn.ReLU))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs))
        # conv relu, functional conv + functional relu
        conv_configs.append(
            BackendPatternConfig((convs.func, F.relu))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs))

        # fused conv relu
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_relu)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.relu_qat))

        conv_configs.append(
            BackendPatternConfig(convs.relu_qat)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference))

        # (3) Conv + batchnorm (+ relu)
        # -------------------------------
        # 3.1 conv bn fusion configs
        # conv + bn fusion
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuse_conv_bn)
            .set_fused_module(convs.fused_conv_bn))
        # conv + bn + relu module fusion
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn, nn.ReLU))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuse_conv_bn_relu)
            .set_fused_module(convs.fused_conv_bn_relu))
        # conv + bn + relu functional fusion
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn, F.relu))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_root_module(convs.root)
            .set_fuser_method(fuse_conv_bn_relu)
            .set_fused_module(convs.fused_conv_bn_relu))
        # TODO: we can add fusion for torch.relu as well

        # 3.2 conv + bn (+ relu) fused module configs
        # fused conv bn
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_bn)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.bn_qat))

        # fused conv bn relu
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_bn_relu)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.bn_relu_qat))

        # conv bn, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.bn_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference))
        # conv bn relu, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.bn_relu_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference))

        # (4) conv transpose and its fusion
        # 4.1 conv transpose config
        conv_configs.append(
            BackendPatternConfig(convs.transpose)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_root_module(convs.transpose)
            .set_reference_quantized_module(convs.transpose_reference))

        # 4.2 conv transpose + bn fusion
        conv_configs.append(
            BackendPatternConfig((convs.transpose, convs.bn))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuse_convtranspose_bn)
            .set_root_module(convs.transpose)
            .set_reference_quantized_module(convs.transpose_reference))

        # 4.3 functional conv transpose
        conv_configs.append(
            BackendPatternConfig(convs.func_transpose)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            ._set_input_type_to_index({"weight": 1, "bias": 2}))

    return conv_configs
def _get_cat_config(dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
    return BackendPatternConfig(torch.cat) \
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
        .set_dtype_configs(dtype_configs)

def _get_ln_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    ln_configs = []
    ln_configs.append(
        BackendPatternConfig(torch.nn.LayerNorm)
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
        .set_dtype_configs(dtype_configs)
    )
    ln_configs.append(
        BackendPatternConfig(torch.nn.functional.layer_norm)
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 2, "bias": 3})
    )
    return ln_configs

def _get_default_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    configs = []
    default_ops = [
        torch.nn.ELU,
        torch.nn.LeakyReLU,
        torch.nn.Hardswish,
        torch.nn.InstanceNorm1d,
        torch.nn.InstanceNorm2d,
        torch.nn.InstanceNorm3d,
        torch.nn.Dropout,
        torch.nn.PReLU,
        torch.nn.functional.elu,
        torch.nn.functional.hardswish,
        torch.nn.functional.leaky_relu,
        torch.nn.functional.dropout,
    ]
    for op in default_ops:
        configs.append(
            BackendPatternConfig(op)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs))

    configs.append(
        BackendPatternConfig(torch.nn.functional.group_norm)
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 2, "bias": 3})
    )

    configs.append(
        BackendPatternConfig(torch.nn.functional.instance_norm)
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 3, "bias": 4})
    )
    return configs
def _add_fixed_qparams_to_dtype_configs(
    dtype_configs: List[DTypeConfig],
    constraints: DTypeWithConstraints,
) -> List[DTypeConfig]:
    """
    Return a copy of the list of DTypeConfigs where activations are subject to the specified
    constraints required for fixed qparams ops.

    If the data type doesn't match the one in the constraints, simply leave the corresponding
    DTypeConfig unchanged.

    If `scale_min_lower_bound` or `scale_max_upper_bound` is specified in the activations,
    throw an exception since these settings are incompatible with fixed qparams ops.
    """
    new_dtype_configs = []
    for dtype_config in dtype_configs:
        dc = copy.deepcopy(dtype_config)
        for orig_constraints in [dc.input_dtype_with_constraints, dc.output_dtype_with_constraints]:
            if orig_constraints.dtype != constraints.dtype:
                continue
            if orig_constraints.scale_min_lower_bound is not None:
                raise ValueError(f"scale_min_lower_bound is invalid for fixed qparams ops: {dtype_config}")
            if orig_constraints.scale_max_upper_bound is not None:
                raise ValueError(f"scale_max_upper_bound is invalid for fixed qparams ops: {dtype_config}")
            orig_constraints.quant_min_lower_bound = constraints.quant_min_lower_bound
            orig_constraints.quant_max_upper_bound = constraints.quant_max_upper_bound
            orig_constraints.scale_exact_match = constraints.scale_exact_match
            orig_constraints.zero_point_exact_match = constraints.zero_point_exact_match
        new_dtype_configs.append(dc)
    return new_dtype_configs

def _get_fixed_qparams_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    fixed_qparams_op_configs = []
    for fixed_qparam_op, constraints in _FIXED_QPARAMS_OP_TO_CONSTRAINTS.items():
        new_dtype_configs = _add_fixed_qparams_to_dtype_configs(dtype_configs, constraints)
        fixed_qparams_op_configs.append(
            BackendPatternConfig(fixed_qparam_op)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(new_dtype_configs))
    return fixed_qparams_op_configs
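# Illustrative aside (not part of the upstream file): stamping the sigmoid
# constraints onto a plain quint8 activation config would look like
#   cfgs = _add_fixed_qparams_to_dtype_configs(
#       [DTypeConfig(input_dtype=torch.quint8, output_dtype=torch.quint8)],
#       _FIXED_QPARAM_OP_0TO1_CONSTRAINTS)
#   assert cfgs[0].output_dtype_with_constraints.scale_exact_match == 1.0 / 256.0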
def _get_share_qparams_op_configs(dtype_configs):
    """ Get the operator configs for operators that work for both float and
    quantized input: if the input is quantized, the output Tensor shares the
    same quantization parameters with the input.
    Example operators: avgpool2d, reshape, transpose, maxpool2d
    Example observed operator:
    observer_0 - avgpool2d - observer_0 (same observer instance as input)
    """

    def _get_share_qprams_op_backend_config(op):
        return BackendPatternConfig(op) \
            .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
            .set_dtype_configs(dtype_configs)

    share_qparams_ops = [
        torch.nn.AdaptiveAvgPool1d,
        torch.nn.AdaptiveAvgPool2d,
        torch.nn.AdaptiveAvgPool3d,
        torch.nn.AvgPool1d,
        torch.nn.AvgPool2d,
        torch.nn.AvgPool3d,
        torch.nn.Hardtanh,
        torch.nn.Identity,
        torch.nn.MaxPool1d,
        torch.nn.MaxPool2d,
        torch.nn.MaxPool3d,
        torch.nn.PixelShuffle,
        torch.nn.PixelUnshuffle,
        torch.nn.ReLU,
        torch.nn.ReLU6,
        torch.adaptive_avg_pool1d,
        torch.nn.functional.adaptive_avg_pool2d,
        torch.nn.functional.adaptive_avg_pool3d,
        torch.nn.functional.hardtanh,
        torch.nn.functional.hardtanh_,
        torch.nn.functional.interpolate,
        torch.nn.functional.max_pool1d,
        torch.nn.functional.max_pool2d,
        torch.nn.functional.max_pool3d,
        torch.nn.functional.pixel_shuffle,
        torch.nn.functional.pixel_unshuffle,
        torch.nn.functional.relu,
        torch.nn.functional.relu6,
        torch.avg_pool1d,
        torch._C._nn.avg_pool2d,
        torch._C._nn.avg_pool3d,
        torch.clamp,
        torch.flatten,
        torch.mean,
        torch.narrow,
        torch.repeat_interleave,
        torch.transpose,
        torch.squeeze,
        torch.stack,
        torch.unsqueeze,
        operator.floordiv,
        "contiguous",
        "clamp",
        "detach",
        "detach_",
        "mean",
        "permute",
        "repeat",
        "repeat_interleave",
        "reshape",
        "resize_",
        "relu",
        "relu_",
        "squeeze",
        "squeeze_",
        "transpose",
        "unsqueeze",
        "unsqueeze_",
        "view"
    ]
    return [_get_share_qprams_op_backend_config(op) for op in share_qparams_ops]
def _get_bn_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    """ Get configs related to batchnorm. """
    bn_configs = []
    bn_to_fused_bn = {
        torch.nn.BatchNorm2d: nni.BNReLU2d,
        torch.nn.BatchNorm3d: nni.BNReLU3d,
    }
    for bn in bn_to_fused_bn.keys():
        fused_bn = bn_to_fused_bn[bn]
        # bn module + relu module fusion config
        bn_configs.append(
            BackendPatternConfig((bn, nn.ReLU))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(fused_bn))
            .set_fused_module(fused_bn))
        # bn module + F.relu fusion config
        bn_configs.append(
            BackendPatternConfig((bn, F.relu))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(fused_bn))
            .set_fused_module(fused_bn))
        bn_configs.append(
            BackendPatternConfig(bn)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs))

    # fused bn configs
    for fused_bn in bn_to_fused_bn.values():
        bn_configs.append(
            BackendPatternConfig(fused_bn)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs))
    return bn_configs

def _get_rnn_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    rnn_op_configs = []
    for rnn_op, ref_rnn_op in [
            (nn.GRUCell, nnqr.GRUCell),
            (nn.LSTMCell, nnqr.LSTMCell),
            (nn.RNNCell, nnqr.RNNCell),
            (nn.LSTM, nnqr.LSTM),
            (nn.GRU, nnqr.GRU)
    ]:
        rnn_op_configs.append(
            BackendPatternConfig(rnn_op)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(rnn_op)
            .set_reference_quantized_module(ref_rnn_op))
    return rnn_op_configs

def _get_embedding_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
    embedding_op_configs = []
    for embedding_op, qat_embedding_op, ref_embedding_op in [
            (nn.Embedding, nnqat.Embedding, nnqr.Embedding),
            (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag),
    ]:
        embedding_op_configs.append(
            BackendPatternConfig(embedding_op)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_qat_module(qat_embedding_op)
            .set_root_module(embedding_op)
            .set_reference_quantized_module(ref_embedding_op))

        # config for qat op
        embedding_op_configs.append(
            BackendPatternConfig(qat_embedding_op)
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(embedding_op)
            .set_reference_quantized_module(ref_embedding_op))
    return embedding_op_configs

def _get_tensor_info_op_configs(dtype_configs):
    """
    These ops work on tensors of different dtypes but return non-tensors
    containing information about the input tensor.
    """

    def _get_config(op):
        return BackendPatternConfig(op) \
            .set_observation_type(ObservationType.INPUT_OUTPUT_NOT_OBSERVED) \
            .set_dtype_configs(dtype_configs)

    return [_get_config(op) for op in ("shape", "size")]
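As an aside before the next file: the `_get_*` helpers above are private and are stitched together by the concrete backend config modules (e.g. native.py). A minimal sketch of that assembly, under the assumption that these private names stay importable:

import torch
from torch.ao.quantization.backend_config import BackendConfig, DTypeConfig
from torch.ao.quantization.backend_config._common_operator_config_utils import (
    _get_binary_op_configs,
    _get_conv_configs,
    _get_linear_configs,
)

weighted_int8 = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

# register pattern configs the same way _qnnpack_pt2e.py does below
my_backend_config = (
    BackendConfig("my_backend")
    .set_backend_pattern_configs(_get_binary_op_configs([weighted_int8]))
    .set_backend_pattern_configs(_get_conv_configs([weighted_int8]))
    .set_backend_pattern_configs(_get_linear_configs([weighted_int8]))
)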
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py
ADDED
@@ -0,0 +1,160 @@
import operator
import torch
from torch.ao.quantization.backend_config import (
    BackendConfig,
    DTypeConfig,
    ObservationType,
    BackendPatternConfig,
)
from typing import List

weighted_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

def get_linear_configs():
    linear_configs = []
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]

    # TODO: need to fix the way we insert observers for this pattern
    # should be solved in the new fusion API
    # reason that this doesn't work: the pattern is a bit complicated and we don't
    # have a way to specify which input of the pattern we would like to observe
    # pattern:
    # bias input weight
    #  \    |    /
    #   \   |   t
    #    \  |  /
    #     addmm
    # we want to observe "weight" as weight, but there is no way to convey this
    # information with the current pattern language
    #
    # right now:
    # original:
    #   weight - t \
    #   input - addmm
    # observed (no hack):
    #   weight - t - observer \
    #   input - observer - addmm
    # target:
    #   weight - observer - t \
    #   input - observer - addmm

    # def root_node_getter(node_pattern):
    #     addmm, bias, act, weight = node_pattern
    #     return addmm

    # linear_configs.append(
    #     BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.aten.t.default))
    #     .set_observation_type(observation_type)  # noqa: E131
    #     .set_dtype_configs(dtype_configs)
    #     ._set_root_node_getter(root_node_getter))

    linear_configs.append(
        BackendPatternConfig(torch.ops.aten.addmm.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 2, "bias": 0})
    )
    # linear is decomposed to `t - mm` if bias is not present
    linear_configs.append(
        BackendPatternConfig(torch.ops.aten.mm.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1})
    )
    return linear_configs

def get_conv_configs():
    conv_configs = []
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]
    conv_configs.append(
        BackendPatternConfig(torch.ops.aten.convolution.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1, "bias": 2})
    )
    conv_configs.append(
        BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu.default))
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1, "bias": 2})
    )
    # TODO: remove when functionalization is supported in PT2 mode
    conv_configs.append(
        BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu_.default))
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1, "bias": 2})
    )
    return conv_configs

def get_pooling_configs():
    backend_pattern_configs = []
    observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]

    def root_node_getter(node_pattern):
        getitem, maxpool, index = node_pattern
        return maxpool

    backend_pattern_configs.append(
        BackendPatternConfig()
        ._set_pattern_complex_format((operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0))
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_root_node_getter(root_node_getter)
    )

    return backend_pattern_configs

def get_relu_configs():
    backend_pattern_configs = []
    observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]
    backend_pattern_configs.append(
        BackendPatternConfig(torch.ops.aten.relu.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs))
    return backend_pattern_configs

def get_binary_op_configs():
    binary_op_configs: List[BackendPatternConfig] = []
    dtype_configs = [weighted_op_quint8_dtype_config]
    num_tensor_args_to_observation_type_mapping = {
        # TODO: this is not used right now since we have extra check in prepare
        # will need to change this to NO_OBSERVER later after we implemented
        # Tensor dtype inference properly
        0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
        1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
        2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
    }
    for op_with_quantized_bop_scalar_variant in [torch.ops.aten.add.Tensor, torch.ops.aten.add_.Tensor]:
        bop_patterns = [
            (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu.default),
            op_with_quantized_bop_scalar_variant,
            # TODO: remove when functionalization is supported in pt2_mode
            (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default),
        ]
        for bop_pattern in bop_patterns:
            binary_op_configs.append(
                BackendPatternConfig(bop_pattern)
                .set_dtype_configs(dtype_configs)  # noqa: E131
                ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))

    return binary_op_configs

def get_qnnpack_pt2e_backend_config():
    return (
        BackendConfig("qnnpack_pytorch_2.0_export")
        .set_backend_pattern_configs(get_linear_configs())
        .set_backend_pattern_configs(get_binary_op_configs())
        .set_backend_pattern_configs(get_conv_configs())
        .set_backend_pattern_configs(get_pooling_configs())
        .set_backend_pattern_configs(get_relu_configs())
    )
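A quick inspection sketch for the config built above (an aside, not part of the diff); the `configs` and `pattern` accessors are assumed from the BackendConfig/BackendPatternConfig API in backend_config.py below:

from torch.ao.quantization.backend_config._qnnpack_pt2e import (
    get_qnnpack_pt2e_backend_config,
)

backend_config = get_qnnpack_pt2e_backend_config()
for cfg in backend_config.configs:
    print(cfg.pattern, cfg.observation_type)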
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py
ADDED
@@ -0,0 +1,659 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Type, Union

import torch
from torch.ao.quantization.utils import Pattern
from enum import Enum


__all__ = [
    "BackendConfig",
    "BackendPatternConfig",
    "DTypeConfig",
    "DTypeWithConstraints",
    "ObservationType",
]


# DTypeConfig dict keys
INPUT_DTYPE_DICT_KEY = "input_dtype"
OUTPUT_DTYPE_DICT_KEY = "output_dtype"
WEIGHT_DTYPE_DICT_KEY = "weight_dtype"
BIAS_DTYPE_DICT_KEY = "bias_dtype"
IS_DYNAMIC_DICT_KEY = "is_dynamic"

# BackendConfig dict keys
NAME_DICT_KEY = "name"
CONFIGS_DICT_KEY = "configs"

# BackendPatternConfig dict keys
PATTERN_DICT_KEY = "pattern"
PATTERN_COMPLEX_FORMAT_DICT_KEY = "pattern_complex_format"
OBSERVATION_TYPE_DICT_KEY = "observation_type"
DTYPE_CONFIGS_DICT_KEY = "dtype_configs"
ROOT_MODULE_DICT_KEY = "root_module"
QAT_MODULE_DICT_KEY = "qat_module"
REFERENCE_QUANTIZED_MODULE_DICT_KEY = "reference_quantized_module_for_root"
FUSED_MODULE_DICT_KEY = "fused_module"
FUSER_METHOD_DICT_KEY = "fuser_method"
ROOT_NODE_GETTER_DICT_KEY = "root_node_getter"
EXTRA_INPUTS_GETTER_DICT_KEY = "extra_inputs_getter"
NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY = "num_tensor_args_to_observation_type"
INPUT_TYPE_TO_INDEX_DICT_KEY = "input_type_to_index"


# TODO: maybe rename this to something that's not related to observer
# e.g. QParamsType
class ObservationType(Enum):
    """ An enum that represents different ways of how an operator/operator pattern
    should be observed
    """

    OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
    """this means input and output are observed with different observers, based
    on qconfig.activation
    example: conv, linear, softmax
    """

    OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1
    """this means the output will use the same observer instance as input, based
    on qconfig.activation
    example: torch.cat, maxpool
    """

    INPUT_OUTPUT_NOT_OBSERVED = 2
    """this means the input and output are never observed
    example: x.shape, x.size
    """

@dataclass
class DTypeWithConstraints:
    """
    Config for specifying additional constraints for a given dtype, such as quantization
    value ranges, scale value ranges, and fixed quantization params, to be used in
    :class:`~torch.ao.quantization.backend_config.DTypeConfig`.

    The constraints currently supported are:

    * `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper
      bounds for the minimum and maximum quantized values respectively. If
      the QConfig’s `quant_min` and `quant_max` fall outside this range,
      then the QConfig will be ignored.

    * `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper
      bounds for the minimum and maximum scale values respectively. If the
      QConfig’s minimum scale value (currently exposed as `eps`) falls below
      the lower bound, then the QConfig will be ignored. Note that the upper
      bound is currently not enforced.

    * `scale_exact_match` and `zero_point_exact_match`: Exact match requirements
      for scale and zero point, to be used for operators with fixed quantization
      parameters such as sigmoid and tanh. If the observer specified in the QConfig
      is neither `FixedQParamsObserver` nor `FixedQParamsFakeQuantize`, or if
      the quantization parameters don't match, then the QConfig will be ignored.
    """
    dtype: Optional[torch.dtype] = None
    quant_min_lower_bound: Union[int, float, None] = None
    quant_max_upper_bound: Union[int, float, None] = None
    scale_min_lower_bound: Union[int, float, None] = None
    scale_max_upper_bound: Union[int, float, None] = None
    scale_exact_match: Optional[float] = None
    zero_point_exact_match: Optional[int] = None

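# Illustrative aside (not part of the upstream file): a constrained weight
# dtype, e.g. qint8 restricted to a symmetric [-127, 127] range, would be
#   DTypeWithConstraints(dtype=torch.qint8,
#                        quant_min_lower_bound=-127,
#                        quant_max_upper_bound=127)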
@dataclass
|
107 |
+
class DTypeConfig:
|
108 |
+
"""
|
109 |
+
Config object that specifies the supported data types passed as arguments to
|
110 |
+
quantize ops in the reference model spec, for input and output activations,
|
111 |
+
weights, and biases.
|
112 |
+
|
113 |
+
For example, consider the following reference model:
|
114 |
+
|
115 |
+
quant1 - [dequant1 - fp32_linear - quant2] - dequant2
|
116 |
+
|
117 |
+
The pattern in the square brackets refers to the reference pattern of
|
118 |
+
statically quantized linear. Setting the input dtype as `torch.quint8`
|
119 |
+
in the DTypeConfig means we pass in `torch.quint8` as the dtype argument
|
120 |
+
to the first quantize op (quant1). Similarly, setting the output dtype as
|
121 |
+
`torch.quint8` means we pass in `torch.quint8` as the dtype argument to
|
122 |
+
the second quantize op (quant2).
|
123 |
+
|
124 |
+
Note that the dtype here does not refer to the interface dtypes of the
|
125 |
+
op. For example, the "input dtype" here is not the dtype of the input
|
126 |
+
tensor passed to the quantized linear op. Though it can still be the
|
127 |
+
same as the interface dtype, this is not always the case, e.g. the
|
128 |
+
interface dtype is fp32 in dynamic quantization but the "input dtype"
|
129 |
+
specified in the DTypeConfig would still be quint8. The semantics of
|
130 |
+
dtypes here are the same as the semantics of the dtypes specified in
|
131 |
+
the observers.
|
132 |
+
|
133 |
+
These dtypes are matched against the ones specified in the user’s
|
134 |
+
QConfig. If there is a match, and the QConfig satisfies the constraints
|
135 |
+
specified in the DTypeConfig (if any), then we will quantize the given
|
136 |
+
pattern using this DTypeConfig. Otherwise, the QConfig is ignored and
|
137 |
+
the pattern will not be quantized.
|
138 |
+
|
139 |
+
Example usage::
|
140 |
+
|
141 |
+
>>> # xdoctest: +SKIP(failing)
|
142 |
+
>>> dtype_config1 = DTypeConfig(
|
143 |
+
... input_dtype=torch.quint8,
|
144 |
+
... output_dtype=torch.quint8,
|
145 |
+
... weight_dtype=torch.qint8,
|
146 |
+
... bias_dtype=torch.float)
|
147 |
+
|
148 |
+
>>> dtype_config2 = DTypeConfig(
|
149 |
+
... input_dtype=DTypeWithConstraints(
|
150 |
+
... dtype=torch.quint8,
|
151 |
+
... quant_min_lower_bound=0,
|
152 |
+
... quant_max_upper_bound=255,
|
153 |
+
... ),
|
154 |
+
... output_dtype=DTypeWithConstraints(
|
155 |
+
... dtype=torch.quint8,
|
156 |
+
... quant_min_lower_bound=0,
|
157 |
+
... quant_max_upper_bound=255,
|
158 |
+
... ),
|
159 |
+
... weight_dtype=DTypeWithConstraints(
|
160 |
+
... dtype=torch.qint8,
|
161 |
+
... quant_min_lower_bound=-128,
|
162 |
+
... quant_max_upper_bound=127,
|
163 |
+
... ),
|
164 |
+
... bias_dtype=torch.float)
|
165 |
+
|
166 |
+
>>> dtype_config1.input_dtype
|
167 |
+
torch.quint8
|
168 |
+
|
169 |
+
>>> dtype_config2.input_dtype
|
170 |
+
torch.quint8
|
171 |
+
|
172 |
+
>>> dtype_config2.input_dtype_with_constraints
|
173 |
+
DTypeWithConstraints(dtype=torch.quint8, quant_min_lower_bound=0, quant_max_upper_bound=255, \
|
174 |
+
scale_min_lower_bound=None, scale_max_upper_bound=None)
|
175 |
+
"""
    input_dtype_with_constraints: DTypeWithConstraints
    output_dtype_with_constraints: DTypeWithConstraints
    weight_dtype_with_constraints: DTypeWithConstraints
    bias_dtype: Optional[torch.dtype]
    is_dynamic: Optional[bool]

    def __init__(
        self,
        input_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        output_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        weight_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        bias_dtype: Optional[torch.dtype] = None,
        is_dynamic: Optional[bool] = None,
    ):
        if isinstance(input_dtype, DTypeWithConstraints):
            self.input_dtype_with_constraints = input_dtype
        else:
            self.input_dtype_with_constraints = DTypeWithConstraints(dtype=input_dtype)

        if isinstance(output_dtype, DTypeWithConstraints):
            self.output_dtype_with_constraints = output_dtype
        else:
            self.output_dtype_with_constraints = DTypeWithConstraints(dtype=output_dtype)

        if isinstance(weight_dtype, DTypeWithConstraints):
            self.weight_dtype_with_constraints = weight_dtype
        else:
            self.weight_dtype_with_constraints = DTypeWithConstraints(dtype=weight_dtype)

        self.bias_dtype = bias_dtype
        self.is_dynamic = is_dynamic

    @property
    def input_dtype(self) -> Optional[torch.dtype]:
        return self.input_dtype_with_constraints.dtype

    @property
    def output_dtype(self) -> Optional[torch.dtype]:
        return self.output_dtype_with_constraints.dtype

    @property
    def weight_dtype(self) -> Optional[torch.dtype]:
        return self.weight_dtype_with_constraints.dtype

    @classmethod
    def from_dict(cls, dtype_config_dict: Dict[str, Any]) -> DTypeConfig:
        """
        Create a ``DTypeConfig`` from a dictionary with the following items (all optional):

            "input_dtype": torch.dtype or ``DTypeWithConstraints``
            "output_dtype": torch.dtype or ``DTypeWithConstraints``
            "weight_dtype": torch.dtype or ``DTypeWithConstraints``
            "bias_dtype": torch.dtype
            "is_dynamic": bool
        """
        input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None)
        if input_dtype is not None and not isinstance(input_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected input_dtype to be a torch.dtype or DTypeWithConstraints")
        output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None)
        if output_dtype is not None and not isinstance(output_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected output_dtype to be a torch.dtype or DTypeWithConstraints")
        weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None)
        if weight_dtype is not None and not isinstance(weight_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected weight_dtype to be a torch.dtype or DTypeWithConstraints")
        bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None)
        is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None)
        return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic)

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``DTypeConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.backend_config.DTypeConfig.from_dict`.
        """
        dtype_config_dict: Dict[str, Any] = {}
        if self.input_dtype is not None:
            dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype_with_constraints
        if self.output_dtype is not None:
            dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype_with_constraints
        if self.weight_dtype is not None:
            dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype_with_constraints
        if self.bias_dtype is not None:
            dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype
        if self.is_dynamic is not None:
            dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic
        return dtype_config_dict
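

# Illustrative usage sketch (not part of the library source): round-tripping a
# DTypeConfig through its dictionary form, using the dict items documented in
# ``DTypeConfig.from_dict`` above. Wrapped in a helper so nothing runs at
# import time.
def _example_dtype_config_round_trip() -> DTypeConfig:
    dtype_config = DTypeConfig.from_dict({
        "input_dtype": torch.quint8,
        "output_dtype": torch.quint8,
        "weight_dtype": torch.qint8,
        "bias_dtype": torch.float,
    })
    # Plain dtypes are wrapped in DTypeWithConstraints internally, but the
    # convenience properties still return the raw torch.dtype values.
    assert dtype_config.input_dtype == torch.quint8
    assert dtype_config.to_dict()["bias_dtype"] == torch.float
    return dtype_config
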
class BackendConfig:
    # TODO: refer to NativeBackendConfig once that is implemented
    """Config that defines the set of patterns that can be quantized on a given backend, and how reference
    quantized models can be produced from these patterns.

    A pattern in this context refers to a module, a functional, an operator, or a directed acyclic graph
    of the above. Each pattern supported on the target backend can be individually configured through
    :class:`~torch.ao.quantization.backend_config.BackendPatternConfig` in terms of:

    (1) The supported input/output activation, weight, and bias data types

    (2) How observers and quant/dequant ops are inserted in order to construct the reference pattern, and

    (3) (Optionally) Fusion, QAT, and reference module mappings.

    The format of the patterns is described in:
    https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md

    Example usage::

        import torch
        from torch.ao.quantization.backend_config import (
            BackendConfig,
            BackendPatternConfig,
            DTypeConfig,
            ObservationType,
        )

        weighted_int8_dtype_config = DTypeConfig(
            input_dtype=torch.quint8,
            output_dtype=torch.quint8,
            weight_dtype=torch.qint8,
            bias_dtype=torch.float)

        def fuse_conv2d_relu(is_qat, conv, relu):
            return torch.ao.nn.intrinsic.ConvReLU2d(conv, relu)

        # For quantizing Linear
        linear_config = BackendPatternConfig(torch.nn.Linear) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_root_module(torch.nn.Linear) \
            .set_qat_module(torch.ao.nn.qat.Linear) \
            .set_reference_quantized_module(torch.ao.nn.quantized.reference.Linear)

        # For fusing Conv2d + ReLU into ConvReLU2d
        conv_relu_config = BackendPatternConfig((torch.nn.Conv2d, torch.nn.ReLU)) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_fused_module(torch.ao.nn.intrinsic.ConvReLU2d) \
            .set_fuser_method(fuse_conv2d_relu)

        # For quantizing ConvReLU2d
        fused_conv_relu_config = BackendPatternConfig(torch.ao.nn.intrinsic.ConvReLU2d) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_root_module(torch.nn.Conv2d) \
            .set_qat_module(torch.ao.nn.intrinsic.qat.ConvReLU2d) \
            .set_reference_quantized_module(torch.ao.nn.quantized.reference.Conv2d)

        backend_config = BackendConfig("my_backend") \
            .set_backend_pattern_config(linear_config) \
            .set_backend_pattern_config(conv_relu_config) \
            .set_backend_pattern_config(fused_conv_relu_config)

    """
    def __init__(self, name: str = ""):
        self.name = name
        # Store all BackendPatternConfigs in a map to handle duplicates
        # Note: the key in this map uses the complex reversed tuple format.
        # This is intended only for internal use; users who wish to access
        # the original patterns should go through `self.configs` instead.
        self._pattern_complex_format_to_config: Dict[Pattern, BackendPatternConfig] = {}

    def __repr__(self):
        return f"BackendConfig({self.__dict__})"

    def set_name(self, name: str) -> BackendConfig:
        """
        Set the name of the target backend.
        """
        self.name = name
        return self

    def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig:
        """
        Set the config for a pattern that can be run on the target backend.
        This overrides any existing config for the given pattern.
        """
        # Avoid circular dependencies
        pattern_complex_format = torch.ao.quantization.backend_config.utils \
            ._get_pattern_in_reversed_nested_tuple_format(config)  # type: ignore[attr-defined]
        self._pattern_complex_format_to_config[pattern_complex_format] = config
        return self

    def set_backend_pattern_configs(self, configs: List[BackendPatternConfig]) -> BackendConfig:
        """
        Set the configs for patterns that can be run on the target backend.
        This overrides any existing config for a pattern that was previously registered.
        """
        for conf in configs:
            self.set_backend_pattern_config(conf)
        return self

    @property
    def configs(self) -> List[BackendPatternConfig]:
        """
        Return a copy of the list of configs set in this `BackendConfig`.
        """
        return list(self._pattern_complex_format_to_config.values())

    @classmethod
    def from_dict(cls, backend_config_dict: Dict[str, Any]) -> BackendConfig:
        """
        Create a ``BackendConfig`` from a dictionary with the following items:

            "name": the name of the target backend

            "configs": a list of dictionaries, each representing a `BackendPatternConfig`

        """
        conf = cls(backend_config_dict.get(NAME_DICT_KEY, ""))
        for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):
            if isinstance(d, BackendPatternConfig):
                conf.set_backend_pattern_config(d)
            elif isinstance(d, Dict):
                conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))
            else:
                raise ValueError(f"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary")
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``BackendConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.backend_config.BackendConfig.from_dict`.
        """
        return {
            NAME_DICT_KEY: self.name,
            CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs],
        }
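

# Illustrative usage sketch (not part of the library source): serializing a
# BackendConfig and reconstructing it via ``from_dict``. Wrapped in a helper
# so nothing runs at import time.
def _example_backend_config_round_trip(backend_config: BackendConfig) -> BackendConfig:
    # ``to_dict`` produces {"name": ..., "configs": [...]}, which ``from_dict``
    # accepts back, re-registering each pattern config.
    restored = BackendConfig.from_dict(backend_config.to_dict())
    assert restored.name == backend_config.name
    assert len(restored.configs) == len(backend_config.configs)
    return restored
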
class BackendPatternConfig:
    """
    Config object that specifies quantization behavior for a given operator pattern.
    For a detailed example usage, see :class:`~torch.ao.quantization.backend_config.BackendConfig`.
    """
    def __init__(self, pattern: Optional[Pattern] = None):
        self.pattern: Optional[Pattern] = pattern
        self.observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
        self.dtype_configs: List[DTypeConfig] = []
        self.root_module: Optional[Type[torch.nn.Module]] = None
        self.qat_module: Optional[Type[torch.nn.Module]] = None
        self.reference_quantized_module: Optional[Type[torch.nn.Module]] = None
        self.fused_module: Optional[Type[torch.nn.Module]] = None
        self.fuser_method: Optional[Callable] = None

        # Temporary/internal configs
        self._root_node_getter: Optional[Callable] = None
        self._extra_inputs_getter: Optional[Callable] = None
        self._num_tensor_args_to_observation_type: Dict[int, ObservationType] = {}
        self._input_type_to_index: Dict[str, int] = {}
        self._pattern_complex_format: Optional[Pattern] = None

    def __repr__(self):
        dict_nonempty = {
            k: v for k, v in self.__dict__.items()
            if (
                (not isinstance(v, (list, dict)) and v is not None)
                or (isinstance(v, (list, dict)) and len(v) > 0)
            )
        }
        return f"BackendPatternConfig({dict_nonempty})"

    def set_pattern(self, pattern: Pattern) -> BackendPatternConfig:
        """
        Set the pattern to configure.

        The pattern can be a float module, functional operator, PyTorch operator, or a tuple
        combination of the above. Tuple patterns are treated as sequential patterns, and
        currently only tuples of 2 or 3 elements are supported.
        """
        if self._pattern_complex_format is not None:
            raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
        self.pattern = pattern
        return self

    def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig:
        """
        Set how observers should be inserted in the graph for this pattern.

        Observation type here refers to how observers (or quant-dequant ops) will be placed
        in the graph. This is used to produce the desired reference patterns understood by
        the backend. Weighted ops such as linear and conv require different observers
        (or quantization parameters passed to quantize ops in the reference model) for the
        input and the output.

        There are two observation types:

        `OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT` (default): the output observer instance
        will be different from the input. This is the most common observation type.

        `OUTPUT_SHARE_OBSERVER_WITH_INPUT`: the output observer instance will be the
        same as the input. This is useful for operators like `cat`.

        Note: This will be renamed in the near future, since we will soon insert QuantDeQuantStubs
        with observers (and fake quantizes) attached instead of observers themselves.
        """
        self.observation_type = observation_type
        return self

    def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig:
        """
        Add a set of supported data types passed as arguments to quantize ops in the
        reference model spec.
        """
        self.dtype_configs.append(dtype_config)
        return self

    def set_dtype_configs(self, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
        """
        Set the supported data types passed as arguments to quantize ops in the
        reference model spec, overriding all previously registered data types.
        """
        self.dtype_configs = dtype_configs
        return self

    def set_root_module(self, root_module: Type[torch.nn.Module]) -> BackendPatternConfig:
        """
        Set the module that represents the root for this pattern.

        When we construct the reference quantized model during the convert phase,
        the root modules (e.g. torch.nn.Linear for torch.ao.nn.intrinsic.LinearReLU)
        will be swapped to the corresponding reference quantized modules (e.g.
        torch.ao.nn.reference.quantized.Linear). This allows custom backends to
        specify custom reference quantized module implementations to match the
        numerics of their lowered operators. Since this is a one-to-one mapping,
        both the root module and the reference quantized module must be specified
        in the same BackendPatternConfig in order for the conversion to take place.
        """
        self.root_module = root_module
        return self

    def set_qat_module(self, qat_module: Type[torch.nn.Module]) -> BackendPatternConfig:
        """
        Set the module that represents the QAT implementation for this pattern.
        """
        self.qat_module = qat_module
        return self

    def set_reference_quantized_module(self, reference_quantized_module: Type[torch.nn.Module]) -> BackendPatternConfig:
        """
        Set the module that represents the reference quantized implementation for
        this pattern's root module.

        For more detail, see :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.set_root_module`.
        """
        self.reference_quantized_module = reference_quantized_module
        return self

    def set_fused_module(self, fused_module: Type[torch.nn.Module]) -> BackendPatternConfig:
        """
        Set the module that represents the fused implementation for this pattern.
        """
        self.fused_module = fused_module
        return self

    def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig:
        """
        Set the function that specifies how to fuse this BackendPatternConfig's pattern.

        The first argument of this function should be `is_qat`, and the rest of the arguments
        should be the items in the tuple pattern. The return value of this function should be
        the resulting fused module.

        For example, the fuser method for the pattern `(torch.nn.Linear, torch.nn.ReLU)` can be:

            def fuse_linear_relu(is_qat, linear, relu):
                return torch.ao.nn.intrinsic.LinearReLU(linear, relu)

        For a more complicated example, see https://gist.github.com/jerryzh168/8bea7180a8ba3c279f2c9b050f2a69a6.
        """
        self.fuser_method = fuser_method
        return self

    def _set_root_node_getter(self, root_node_getter: Callable) -> BackendPatternConfig:
        self._root_node_getter = root_node_getter
        return self

    def _set_extra_inputs_getter(self, extra_inputs_getter: Callable) -> BackendPatternConfig:
        self._extra_inputs_getter = extra_inputs_getter
        return self

    def _set_num_tensor_args_to_observation_type(
            self, num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> BackendPatternConfig:
        self._num_tensor_args_to_observation_type = num_tensor_args_to_observation_type
        return self

    def _set_input_type_to_index(self, input_type_to_index: Dict[str, int]) -> BackendPatternConfig:
        self._input_type_to_index = input_type_to_index
        return self

    def _set_pattern_complex_format(self, pattern: Pattern) -> BackendPatternConfig:
        """
        Set the pattern to configure, using the reversed nested tuple format.

        See the BackendConfig README for more detail:
        https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md#advanced-pattern-specification
        """
        if self.pattern is not None:
            raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
        self._pattern_complex_format = pattern
        return self

    @classmethod
    def from_dict(cls, backend_pattern_config_dict: Dict[str, Any]) -> BackendPatternConfig:
        """
        Create a ``BackendPatternConfig`` from a dictionary with the following items:

            "pattern": the pattern being configured
            "observation_type": the :class:`~torch.ao.quantization.backend_config.ObservationType` that specifies how
            observers should be inserted for this pattern
            "dtype_configs": a list of dictionaries, each representing a :class:`~torch.ao.quantization.backend_config.DTypeConfig`
            "root_module": a :class:`torch.nn.Module` that represents the root for this pattern
            "qat_module": a :class:`torch.nn.Module` that represents the QAT implementation for this pattern
            "reference_quantized_module": a :class:`torch.nn.Module` that represents the reference quantized
            implementation for this pattern's root module
            "fused_module": a :class:`torch.nn.Module` that represents the fused implementation for this pattern
            "fuser_method": a function that specifies how to fuse this pattern
            "pattern_complex_format": the pattern specified in the reversed nested tuple format (deprecated)

        """
        def _get_dtype_config(obj: Any) -> DTypeConfig:
            """
            Convert the given object into a ``DTypeConfig`` if possible, else throw an exception.
            """
            if isinstance(obj, DTypeConfig):
                return obj
            if isinstance(obj, Dict):
                return DTypeConfig.from_dict(obj)
            raise ValueError(
                f"Expected a list of DTypeConfigs in "
                f"backend_pattern_config_dict[\"{DTYPE_CONFIGS_DICT_KEY}\"], got '{type(obj)}'"
            )

        conf = cls()
        if PATTERN_DICT_KEY in backend_pattern_config_dict:
            conf.set_pattern(backend_pattern_config_dict[PATTERN_DICT_KEY])
        if OBSERVATION_TYPE_DICT_KEY in backend_pattern_config_dict:
            conf.set_observation_type(backend_pattern_config_dict[OBSERVATION_TYPE_DICT_KEY])
        for d in backend_pattern_config_dict.get(DTYPE_CONFIGS_DICT_KEY, []):
            conf.add_dtype_config(_get_dtype_config(d))
        conf.set_root_module(backend_pattern_config_dict.get(ROOT_MODULE_DICT_KEY, None))
        conf.set_qat_module(backend_pattern_config_dict.get(QAT_MODULE_DICT_KEY, None))
        conf.set_reference_quantized_module(backend_pattern_config_dict.get(REFERENCE_QUANTIZED_MODULE_DICT_KEY, None))
        conf.set_fused_module(backend_pattern_config_dict.get(FUSED_MODULE_DICT_KEY, None))
        conf.set_fuser_method(backend_pattern_config_dict.get(FUSER_METHOD_DICT_KEY, None))
        conf._set_root_node_getter(backend_pattern_config_dict.get(ROOT_NODE_GETTER_DICT_KEY, None))
        conf._set_extra_inputs_getter(backend_pattern_config_dict.get(EXTRA_INPUTS_GETTER_DICT_KEY, None))
        conf._set_num_tensor_args_to_observation_type(
            backend_pattern_config_dict.get(NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY, {}))
        conf._set_input_type_to_index(backend_pattern_config_dict.get(INPUT_TYPE_TO_INDEX_DICT_KEY, {}))
        if PATTERN_COMPLEX_FORMAT_DICT_KEY in backend_pattern_config_dict:
            conf._set_pattern_complex_format(backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY])
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``BackendPatternConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.from_dict`.
        """
        backend_pattern_config_dict: Dict[str, Any] = {
            OBSERVATION_TYPE_DICT_KEY: self.observation_type,
            DTYPE_CONFIGS_DICT_KEY: [c.to_dict() for c in self.dtype_configs],
        }
        if self.pattern is not None:
            backend_pattern_config_dict[PATTERN_DICT_KEY] = self.pattern
        if self.root_module is not None:
            backend_pattern_config_dict[ROOT_MODULE_DICT_KEY] = self.root_module
        if self.qat_module is not None:
            backend_pattern_config_dict[QAT_MODULE_DICT_KEY] = self.qat_module
        if self.reference_quantized_module is not None:
            backend_pattern_config_dict[REFERENCE_QUANTIZED_MODULE_DICT_KEY] = self.reference_quantized_module
        if self.fused_module is not None:
            backend_pattern_config_dict[FUSED_MODULE_DICT_KEY] = self.fused_module
        if self.fuser_method is not None:
            backend_pattern_config_dict[FUSER_METHOD_DICT_KEY] = self.fuser_method
        if self._root_node_getter is not None:
            backend_pattern_config_dict[ROOT_NODE_GETTER_DICT_KEY] = self._root_node_getter
        if self._extra_inputs_getter is not None:
            backend_pattern_config_dict[EXTRA_INPUTS_GETTER_DICT_KEY] = self._extra_inputs_getter
        if len(self._num_tensor_args_to_observation_type) > 0:
            backend_pattern_config_dict[NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY] = self._num_tensor_args_to_observation_type
        if len(self._input_type_to_index) > 0:
            backend_pattern_config_dict[INPUT_TYPE_TO_INDEX_DICT_KEY] = self._input_type_to_index
        if self._pattern_complex_format is not None:
            backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY] = self._pattern_complex_format
        return backend_pattern_config_dict
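

# Illustrative usage sketch (not part of the library source): a pattern config
# for the functional linear op. ``_set_input_type_to_index`` tells the prepare
# step which argument positions hold the weight and bias, mirroring how the
# backend-specific configs (e.g. for F.linear and F.conv2d) use it.
def _example_functional_linear_pattern_config(dtype_config: DTypeConfig) -> BackendPatternConfig:
    return (
        BackendPatternConfig(torch.nn.functional.linear)
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
        .add_dtype_config(dtype_config)
        ._set_input_type_to_index({"weight": 1, "bias": 2})
    )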
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py
ADDED
@@ -0,0 +1,494 @@
# TODO: rename executorch to qnnpack_executorch since executorch is a general runtime,
# not a specific backend

import operator
from typing import List

import torch
import torch.ao.nn.qat as nnqat
import torch.ao.nn.quantized.reference as nnqr
import torch.nn as nn
import torch.nn.functional as F

from ..fuser_method_mappings import (
    _sequential_wrapper2,
    fuse_conv_bn,
    fuse_conv_bn_relu,
)
from ._common_operator_config_utils import _Conv2dMetadata
from .backend_config import (
    BackendConfig,
    BackendPatternConfig,
    DTypeConfig,
    DTypeWithConstraints,
    ObservationType,
)
from .qnnpack import (
    qnnpack_default_op_qint8_symmetric_dtype_config,
    qnnpack_weighted_op_qint8_symmetric_dtype_config,
)


__all__ = [
    "get_executorch_backend_config",
]


# ===================
# |  DTYPE CONFIGS  |
# ===================

executorch_weighted_op_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

executorch_default_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

executorch_default_dynamic_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

executorch_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
    dtype=torch.qint8,
    scale_min_lower_bound=2**-12,
)

executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
    dtype=torch.qint8,
    quant_min_lower_bound=-127,
    quant_max_upper_bound=127,
    scale_min_lower_bound=2**-12,
)

executorch_default_dynamic_qint8_dtype_config = DTypeConfig(
    input_dtype=executorch_act_qint8_scale_min_2_neg_12,
    output_dtype=torch.float,
    weight_dtype=executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
    bias_dtype=torch.float,
    is_dynamic=True,
)

executorch_default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    is_dynamic=True,
)

executorch_weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)


# =============================
# |  BACKEND PATTERN CONFIGS  |
# =============================


def _get_linear_configs() -> List[BackendPatternConfig]:
    """
    Return all configs related to linear modules and ops.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [
        qnnpack_weighted_op_qint8_symmetric_dtype_config,
        executorch_weighted_op_int8_dtype_config,
        executorch_default_dynamic_quint8_dtype_config,
        executorch_default_dynamic_qint8_dtype_config,
        executorch_default_dynamic_float16_dtype_config,
    ]
    linear_configs: List[BackendPatternConfig] = []
    # linear module
    linear_configs.append(
        BackendPatternConfig(torch.nn.Linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear)
        .set_qat_module(nnqat.Linear)
    )
    # linear qat module
    linear_configs.append(
        BackendPatternConfig(nnqat.Linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        .set_root_module(torch.nn.Linear)
        .set_reference_quantized_module(nnqr.Linear)
    )
    # functional linear
    linear_configs.append(
        BackendPatternConfig(torch.nn.functional.linear)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1, "bias": 2})
    )
    return linear_configs


def _get_conv_configs() -> List[BackendPatternConfig]:
    """
    Return all configs related to conv modules and ops.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [
        qnnpack_weighted_op_qint8_symmetric_dtype_config,
        executorch_weighted_op_int8_dtype_config,
    ]
    conv_configs = []
    for convs in [_Conv2dMetadata]:
        # (1) Single conv modules/functions
        # -----------------------------------
        # conv module
        conv_configs.append(
            BackendPatternConfig(convs.root)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
            .set_qat_module(convs.qat)
        )
        # conv qat module
        conv_configs.append(
            BackendPatternConfig(convs.qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
        )
        # functional conv
        conv_configs.append(
            BackendPatternConfig(convs.func)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            ._set_input_type_to_index({"weight": 1, "bias": 2})
        )

        # (2) Conv + relu
        # -----------------------------------
        # conv module + relu module
        conv_configs.append(
            BackendPatternConfig((convs.root, nn.ReLU))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
            .set_fused_module(convs.fused_conv_relu)
        )
        # conv module + functional relu
        conv_configs.append(
            BackendPatternConfig((convs.root, F.relu))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu))
            .set_fused_module(convs.fused_conv_relu)
        )
        # fused conv relu module
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_relu)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
            .set_qat_module(convs.relu_qat)
        )
        # conv relu, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.relu_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
        )
        # functional conv + relu module
        conv_configs.append(
            BackendPatternConfig((convs.func, nn.ReLU))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
        )
        # functional conv + functional relu
        conv_configs.append(
            BackendPatternConfig((convs.func, F.relu))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
        )
        # fused conv relu
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_relu)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.relu_qat)
        )

        conv_configs.append(
            BackendPatternConfig(convs.relu_qat)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
        )

        # (3) Conv + batchnorm (+ relu)
        # -------------------------------
        # conv + batchnorm (+ relu)
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuse_conv_bn)
            .set_fused_module(convs.fused_conv_bn)
        )
        # conv + bn + relu module fusion
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn, nn.ReLU))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuse_conv_bn_relu)
            .set_fused_module(convs.fused_conv_bn_relu)
        )
        # conv + bn + relu functional fusion
        conv_configs.append(
            BackendPatternConfig((convs.root, convs.bn, F.relu))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_root_module(convs.root)
            .set_fuser_method(fuse_conv_bn_relu)
            .set_fused_module(convs.fused_conv_bn_relu)
        )
        # TODO: we can add fusion for torch.relu as well
        # 3.2 conv + bn (+ relu) fused module configs
        # fused conv bn
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_bn)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.bn_qat)
        )

        # fused conv bn relu
        conv_configs.append(
            BackendPatternConfig(convs.fused_conv_bn_relu)
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_qat_module(convs.bn_relu_qat)
        )

        # conv bn, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.bn_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
        )
        # conv bn relu, qat fused module
        conv_configs.append(
            BackendPatternConfig(convs.bn_relu_qat)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(convs.root)
            .set_reference_quantized_module(convs.reference)
        )
    return conv_configs


def _get_binary_ops_configs() -> List[BackendPatternConfig]:
    """
    Return all configs related to binary ops.
    """
    dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        executorch_weighted_op_int8_dtype_config,
    ]
    num_tensor_args_to_observation_type_mapping = {
        # TODO: this is not used right now since we have an extra check in prepare;
        # will need to change this to NO_OBSERVER later after we implement
        # Tensor dtype inference properly
        0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
        1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
        2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
    }
    binary_op_configs: List[BackendPatternConfig] = []
    for op in [operator.add, torch.add, operator.sub, torch.sub, operator.mul, torch.mul]:
        bop_patterns = [
            (op, torch.nn.ReLU),
            (op, torch.nn.functional.relu),
            (op, torch.relu),
            op,
        ]
        for bop_pattern in bop_patterns:
            binary_op_configs.append(
                BackendPatternConfig(bop_pattern)
                .set_dtype_configs(dtype_configs)  # noqa: E131
                ._set_num_tensor_args_to_observation_type(
                    num_tensor_args_to_observation_type_mapping
                )
            )
    return binary_op_configs


def _get_share_qparams_ops_configs() -> List[BackendPatternConfig]:
    """
    Return the operator configs for operators that work for both float and
    quantized input. If the input is quantized, the output Tensor shares
    the same quantization parameters with the input.

    Example operators: avgpool2d, reshape, transpose, maxpool2d
    Example observed operator:
        observer_0 - avgpool2d - observer_0 (same observer instance as input)
    """
    observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
    dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        executorch_default_op_quint8_dtype_config,
    ]
    share_qparams_ops = [
        torch.nn.Flatten,
        F.adaptive_avg_pool2d,
        F.elu,
        F.hardtanh,
        F.max_pool2d,
        F.pad,
        F.relu,
        F.relu6,
        F.leaky_relu,
        F.leaky_relu_,
        torch.nn.AdaptiveAvgPool2d,
        torch.nn.ConstantPad2d,
        torch.nn.ELU,
        torch.nn.MaxPool2d,
        torch.nn.ReLU6,
        torch.nn.Hardtanh,
        torch.nn.LeakyReLU,
        torch.clamp,
        torch.flatten,
        torch.mean,
        torch.permute,
        torch.permute_copy,
        torch.squeeze,
        "clamp",
        "mean",
        "permute",
        "reshape",
        "relu",
        "relu_",
        "squeeze",
        "squeeze_",
        "leaky_relu",
    ]
    share_qparams_op_configs: List[BackendPatternConfig] = []
    for op in share_qparams_ops:
        share_qparams_op_configs.append(
            BackendPatternConfig(op)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
        )
    return share_qparams_op_configs


def _get_bn_configs() -> List[BackendPatternConfig]:
    """
    Return all configs related to batchnorm.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        executorch_default_op_quint8_dtype_config,
    ]
    bn_configs = []
    bn_configs.append(
        BackendPatternConfig(nn.BatchNorm2d)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
    )
    return bn_configs


def _get_cat_configs() -> List[BackendPatternConfig]:
    dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        executorch_default_op_quint8_dtype_config,
    ]
    cat_configs = []
    cat_configs.append(
        BackendPatternConfig(torch.cat)
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
        .set_dtype_configs(dtype_configs)
    )
    cat_configs.append(
        BackendPatternConfig(torch.concat)
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
        .set_dtype_configs(dtype_configs)
    )
    cat_configs.append(
        BackendPatternConfig(torch.concatenate)
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
        .set_dtype_configs(dtype_configs)
    )
    return cat_configs


def _get_embedding_op_configs() -> List[BackendPatternConfig]:
    dtype_configs = [
        executorch_weight_only_quint8_dtype_config,
    ]
    embedding_op_configs = []
    for embedding_op, qat_embedding_op, ref_embedding_op in [
        (nn.Embedding, nnqat.Embedding, nnqr.Embedding),
        (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag),
    ]:
        embedding_op_configs.append(
            BackendPatternConfig(embedding_op)
            .set_observation_type(
                ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
            )  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_qat_module(qat_embedding_op)
            .set_root_module(embedding_op)
            .set_reference_quantized_module(ref_embedding_op)
        )
        # config for qat op
        embedding_op_configs.append(
            BackendPatternConfig(qat_embedding_op)
            .set_observation_type(
                ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
            )  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(embedding_op)
            .set_reference_quantized_module(ref_embedding_op)
        )

    # config for functional embedding
    embedding_op_configs.append(
        BackendPatternConfig(torch.nn.functional.embedding)
        .set_observation_type(
            ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
        )  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1})
    )
    return embedding_op_configs


# =====================
# |  BACKEND CONFIGS  |
# =====================


def get_executorch_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for backends PyTorch lowers to through the Executorch stack.
    """
    return (
        BackendConfig("executorch")
        .set_backend_pattern_configs(_get_linear_configs())
        .set_backend_pattern_configs(_get_conv_configs())
        .set_backend_pattern_configs(_get_binary_ops_configs())
        .set_backend_pattern_configs(_get_share_qparams_ops_configs())
        .set_backend_pattern_configs(_get_bn_configs())
        .set_backend_pattern_configs(_get_cat_configs())
        .set_backend_pattern_configs(_get_embedding_op_configs())
    )
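

# Illustrative usage sketch (not part of the library source): a BackendConfig
# like the one above is consumed through the FX quantization entry points,
# which accept a ``backend_config`` argument. The tiny model below is a
# placeholder chosen only to make the sketch self-contained.
def _example_prepare_with_executorch_config():
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import prepare_fx

    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 4),)
    qconfig_mapping = get_default_qconfig_mapping("qnnpack")
    return prepare_fx(
        model,
        qconfig_mapping,
        example_inputs,
        backend_config=get_executorch_backend_config(),
    )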
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py
ADDED
@@ -0,0 +1,116 @@
import torch
from ._common_operator_config_utils import (
    _get_binary_op_configs,
    _get_bn_configs,
    _get_cat_config,
    _get_conv_configs,
    _get_default_op_configs,
    _get_embedding_op_configs,
    _get_fixed_qparams_op_configs,
    _get_linear_configs,
    _get_rnn_op_configs,
    _get_share_qparams_op_configs,
    _get_tensor_info_op_configs,
)
from .backend_config import BackendConfig, DTypeConfig

__all__ = [
    "get_fbgemm_backend_config",
]

# ===================
# |  DTYPE CONFIGS  |
# ===================

# TODO: For now, these DTypeConfigs are identical to the ones defined in native.py
# In the future, once we support specifying quant_min/quant_max and scale_min/scale_max,
# these will diverge. In particular, for FBGEMM, we will restrict the activation quantized
# values to within [0, 127].

fbgemm_weighted_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

fbgemm_default_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

fbgemm_default_op_fp16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float16,
    weight_dtype=torch.float16,
    bias_dtype=torch.float16,
)

fbgemm_default_dynamic_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

fbgemm_default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    is_dynamic=True,
)

fbgemm_weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)

fbgemm_weight_only_quint4x2_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint4x2,
)
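
# Illustrative sketch (not part of the library source): once quant_min/quant_max
# and scale_min/scale_max constraints are supported here (see the TODO above),
# the FBGEMM activation restriction to [0, 127] could be expressed with
# DTypeWithConstraints, in the same style the executorch configs use. This is
# an assumption about the future config, not the shipped FBGEMM behavior:
#
#     from .backend_config import DTypeWithConstraints
#
#     fbgemm_act_quint8_0_to_127 = DTypeWithConstraints(
#         dtype=torch.quint8,
#         quant_min_lower_bound=0,
#         quant_max_upper_bound=127,
#     )
#     fbgemm_constrained_weighted_op_dtype_config = DTypeConfig(
#         input_dtype=fbgemm_act_quint8_0_to_127,
#         output_dtype=fbgemm_act_quint8_0_to_127,
#         weight_dtype=torch.qint8,
#         bias_dtype=torch.float,
#     )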

# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_fbgemm_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch's native FBGEMM backend.
    """
    conv_dtype_configs = [fbgemm_weighted_op_quint8_dtype_config]
    linear_dtype_configs = [
        fbgemm_weighted_op_quint8_dtype_config,
        fbgemm_default_dynamic_int8_dtype_config,
        fbgemm_default_dynamic_float16_dtype_config,
    ]
    binary_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
    default_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
    fixed_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
    share_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
    tensor_info_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config]
    rnn_op_dtype_configs = [
        fbgemm_default_dynamic_int8_dtype_config,
        fbgemm_default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        fbgemm_weight_only_quint8_dtype_config,
        fbgemm_weight_only_quint4x2_dtype_config,
    ]
    return BackendConfig("fbgemm") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
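

# Illustrative usage sketch (not part of the library source): inspecting the
# assembled config. ``BackendConfig.configs`` returns one BackendPatternConfig
# per registered pattern.
def _example_inspect_fbgemm_config() -> None:
    backend_config = get_fbgemm_backend_config()
    assert backend_config.name == "fbgemm"
    for pattern_config in backend_config.configs:
        print(pattern_config.pattern, len(pattern_config.dtype_configs))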
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py
ADDED
@@ -0,0 +1,204 @@
1 |
+
import torch
|
2 |
+
from ._common_operator_config_utils import (
|
3 |
+
_get_binary_op_configs,
|
4 |
+
_get_bn_configs,
|
5 |
+
_get_cat_config,
|
6 |
+
_get_conv_configs,
|
7 |
+
_get_default_op_configs,
|
8 |
+
_get_embedding_op_configs,
|
9 |
+
_get_fixed_qparams_op_configs,
|
10 |
+
_get_linear_configs,
|
11 |
+
_get_ln_configs,
|
12 |
+
_get_rnn_op_configs,
|
13 |
+
_get_share_qparams_op_configs,
|
14 |
+
_get_tensor_info_op_configs,
|
15 |
+
)
|
16 |
+
from .backend_config import BackendConfig, DTypeConfig
|
17 |
+
|
18 |
+
__all__ = [
|
19 |
+
"get_test_only_legacy_native_backend_config",
|
20 |
+
"default_op_quint8_dtype_config",
|
21 |
+
"default_op_fp16_dtype_config",
|
22 |
+
"default_dynamic_int8_dtype_config",
|
23 |
+
"default_dynamic_float16_dtype_config",
|
24 |
+
"input_output_only_quint8_dtype_config",
|
25 |
+
"weight_only_quint8_dtype_config",
|
26 |
+
"weight_only_quint4x2_dtype_config",
|
27 |
+
"get_native_backend_config",
|
28 |
+
"get_native_backend_config_dict",
|
29 |
+
"get_test_only_legacy_native_backend_config_dict",
|
30 |
+
]
|
31 |
+
|
32 |
+
# ===================
|
33 |
+
# | DTYPE CONFIGS |
|
34 |
+
# ===================
|
35 |
+
|
36 |
+
# weighted op int8 dtype config
|
37 |
+
# this is config for ops that has quantized weights, like linear, conv
|
38 |
+
weighted_op_quint8_dtype_config = DTypeConfig(
|
39 |
+
input_dtype=torch.quint8,
|
40 |
+
output_dtype=torch.quint8,
|
41 |
+
weight_dtype=torch.qint8,
|
42 |
+
bias_dtype=torch.float,
|
43 |
+
)
|
44 |
+
|
45 |
+
default_op_quint8_dtype_config = DTypeConfig(
|
46 |
+
input_dtype=torch.quint8,
|
47 |
+
output_dtype=torch.quint8,
|
48 |
+
)
|
49 |
+
|
50 |
+
default_op_fp16_dtype_config = DTypeConfig(
|
51 |
+
input_dtype=torch.float16,
|
52 |
+
output_dtype=torch.float16,
|
53 |
+
weight_dtype=torch.float16,
|
54 |
+
bias_dtype=torch.float16,
|
55 |
+
)
|
56 |
+
|
57 |
+
default_dynamic_int8_dtype_config = DTypeConfig(
|
58 |
+
input_dtype=torch.quint8,
|
59 |
+
output_dtype=torch.float,
|
60 |
+
weight_dtype=torch.qint8,
|
61 |
+
bias_dtype=torch.float,
|
62 |
+
# currently the dtype check is not yet enabled, so we provided the dtype_configs but
|
63 |
+
# it is not really used yet,
|
64 |
+
# we will enable it a bit later after we moved everything to backend_config_dict
|
65 |
+
is_dynamic=True,
|
66 |
+
)
|
67 |
+
|
68 |
+
default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    # currently the dtype check is not yet enabled, so we provided the dtype_configs but
    # it is not really used yet,
    # we will enable it a bit later after we moved everything to backend_config_dict
    is_dynamic=True,
)

# Needed for LayerNorm and f.layer_norm, since currently the kernel only supports float weights
input_output_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.float,
    bias_dtype=torch.float,
)

weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)

weight_only_quint4x2_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint4x2,
)


# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_test_only_legacy_native_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops.
    """
    conv_dtype_configs = [weighted_op_quint8_dtype_config]
    linear_dtype_configs = [
        weighted_op_quint8_dtype_config,
        default_dynamic_int8_dtype_config,
        default_dynamic_float16_dtype_config,
        default_op_fp16_dtype_config,
    ]
    binary_op_dtype_configs = [
        default_op_quint8_dtype_config,
        default_op_fp16_dtype_config,
    ]
    default_op_dtype_configs = [default_op_quint8_dtype_config]
    fixed_qparams_op_dtype_configs = [
        default_op_quint8_dtype_config,
        default_op_fp16_dtype_config,
    ]
    share_qparams_op_dtype_configs = [
        default_op_quint8_dtype_config,
        default_op_fp16_dtype_config
    ]
    tensor_info_op_dtype_configs = [
        default_op_quint8_dtype_config,
    ]
    rnn_op_dtype_configs = [
        default_dynamic_int8_dtype_config,
        default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        weight_only_quint8_dtype_config,
        weight_only_quint4x2_dtype_config,
    ]
    layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
    return BackendConfig("_native_and_fp16") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))

def get_native_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack).
    """
    # TODO: express this BackendConfig as a union of the FBGEMM and QNNPACK BackendConfigs
    conv_dtype_configs = [weighted_op_quint8_dtype_config]
    linear_dtype_configs = [
        weighted_op_quint8_dtype_config,
        default_dynamic_int8_dtype_config,
        default_dynamic_float16_dtype_config,
    ]
    binary_op_dtype_configs = [default_op_quint8_dtype_config]
    default_op_dtype_configs = [default_op_quint8_dtype_config]
    fixed_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
    share_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
    tensor_info_op_dtype_configs = [default_op_quint8_dtype_config]
    rnn_op_dtype_configs = [
        default_dynamic_int8_dtype_config,
        default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        weight_only_quint8_dtype_config,
        weight_only_quint4x2_dtype_config,
    ]
    layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
    return BackendConfig("native") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))

def get_native_backend_config_dict():
    """
    Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.
    """
    return get_native_backend_config().to_dict()

def get_test_only_legacy_native_backend_config_dict():
    """
    Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional
    fp16 ops in dictionary form.
    """
    return get_test_only_legacy_native_backend_config().to_dict()
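A minimal usage sketch for the file above, assuming a recent torch build with FX graph mode quantization; the toy model, shapes, and calibration call are illustrative only:

import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

# Toy float model and example inputs (made up for illustration)
model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

backend_config = get_native_backend_config()
prepared = prepare_fx(model, get_default_qconfig_mapping("fbgemm"), example_inputs,
                      backend_config=backend_config)
prepared(*example_inputs)  # calibrate observers with representative data
quantized = convert_fx(prepared, backend_config=backend_config)
print(quantized(*example_inputs).shape)  # torch.Size([1, 8])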
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py
ADDED
@@ -0,0 +1,542 @@
import torch
import torch.nn as nn
import torch.ao.nn.intrinsic as nni
import torch.nn.functional as F
import torch.ao.nn.quantized.reference as nnqr
from ._common_operator_config_utils import (
    _get_conv_configs,
    _get_linear_configs,
    _get_binary_op_configs,
    _get_bn_configs,
    _get_cat_config,
    _get_default_op_configs,
    _get_embedding_op_configs,
    _get_fixed_qparams_op_configs,
    _get_ln_configs,
    _get_rnn_op_configs,
    _get_share_qparams_op_configs,
)
from .backend_config import (
    BackendPatternConfig,
    BackendConfig,
    DTypeConfig,
    ObservationType,
)
from ..fuser_method_mappings import (
    _sequential_wrapper2,
)
import operator
from torch.ao.quantization.utils import MatchAllNode
import itertools

# ===================
# |  DTYPE CONFIGS  |
# ===================

onednn_weighted_op_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

onednn_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

onednn_dynamic_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

onednn_weight_only_qint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
)

onednn_input_output_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.float,
    bias_dtype=torch.float,
)

# ===================
# |  FUSER METHODS  |
# ===================

def _fuse_linear_bn_leaky_relu(is_qat, linear, bn, leaky_relu):
    r"""Given the linear, bn and leaky_relu modules, fuses them and returns the fused module
    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
                or post training quantization fusion
        linear: Module instance of type Linear
        bn: BatchNorm1d instance that needs to be fused with the linear layer
        leaky_relu: LeakyReLU instance that needs to be fused with the linear layer
    Examples::
        >>> # xdoctest: +SKIP(failing)
        >>> m1 = nn.Linear(20, 10)
        >>> b1 = nn.BatchNorm1d(10)
        >>> lr = nn.LeakyReLU(0.01)
        >>> m2 = _fuse_linear_bn_leaky_relu(m1, b1, lr)
    """
    assert(linear.training == bn.training and bn.training == leaky_relu.training),\
        "Linear, BN and LeakyReLU all must be in the same mode (train or eval)."

    if is_qat:
        raise NotImplementedError(f"Cannot fuse train modules: {(linear, bn, leaky_relu)}")
    else:
        map_to_fused_module_eval = {
            nn.Linear: nni.LinearLeakyReLU,
        }
        fused_module = map_to_fused_module_eval.get(type(linear), None)
        if fused_module is not None:
            fused_linear = nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
            fm = fused_module(fused_linear, leaky_relu)
            return fm
        else:
            raise NotImplementedError(f"Cannot fuse eval modules: {(linear, bn, leaky_relu)}")

# ======================
# |  CONFIGS FOR CONV  |
# ======================
observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT

conv_dtype_configs = [onednn_weighted_op_int8_dtype_config]
conv_configs = _get_conv_configs(conv_dtype_configs)

# (1) Conv2d + Add

# conv2d   Y
#      \   /
#       add

# include:
# conv2d   conv2d
#      \   /
#       add

def _fuse_conv_add_left(is_qat, add, conv, _):
    return nni.ConvAdd2d(conv, add)

def _conv_add_root_node_getter_left(pattern):
    _, conv, _ = pattern
    return conv

def _conv_add_extra_inputs_getter_left(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    _, conv, extra_input = pattern
    return [extra_input]

# conv2d
#  \
#   bn   Y
#    \   /
#     add

def _fuse_conv_bn_add_left(is_qat, add, bn_conv, _):
    bn, conv = bn_conv
    if is_qat:
        raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}")
    else:
        fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
        return nni.ConvAdd2d(fused_conv, add)

def _conv_bn_add_root_node_getter_left(add_pattern):
    _, bn_conv, _ = add_pattern
    bn, conv = bn_conv
    return conv

def _conv_bn_add_extra_inputs_getter_left(add_pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    _, bn_conv, extra_input = add_pattern
    bn, conv = bn_conv
    return [extra_input]

conv_add_left_options = itertools.product(
    [True, False],  # with_bn
    [torch.add, operator.add],  # add_op
)

for with_bn, add_op in conv_add_left_options:
    if with_bn:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_bn_add_left)
                ._set_root_node_getter(_conv_bn_add_root_node_getter_left)
                ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_left)
                .set_fused_module(nni.ConvAdd2d))
    else:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((add_op, nn.Conv2d, MatchAllNode))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_add_left)
                ._set_root_node_getter(_conv_add_root_node_getter_left)
                ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_left)
                .set_fused_module(nni.ConvAdd2d))

#  Y   conv2d
#   \   /
#    add

def _fuse_conv_add_right(is_qat, add, _, conv):
    return nni.ConvAdd2d(conv, add)

def _conv_add_root_node_getter_right(pattern):
    add, _, conv = pattern
    return conv

def _conv_add_extra_inputs_getter_right(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    _, extra_input, conv = pattern
    return [extra_input]

#      conv2d
#        /
#  Y    bn
#   \   /
#    add

def _fuse_conv_bn_add_right(is_qat, add, _, bn_conv):
    bn, conv = bn_conv
    if is_qat:
        raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}")
    else:
        fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
        return nni.ConvAdd2d(fused_conv, add)

def _conv_bn_add_root_node_getter_right(pattern):
    add, _, bn_conv = pattern
    bn, conv = bn_conv
    return conv

def _conv_bn_add_extra_inputs_getter_right(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    _, extra_input, bn_conv = pattern
    bn, conv = bn_conv
    return [extra_input]

conv_add_options = itertools.product(
    [True, False],  # with_bn
    [torch.add, operator.add],  # add_op
)

for with_bn, add_op in conv_add_options:
    if with_bn:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d)))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_bn_add_right)
                ._set_root_node_getter(_conv_bn_add_root_node_getter_right)
                ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_right)
                .set_fused_module(nni.ConvAdd2d))
    else:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((add_op, MatchAllNode, nn.Conv2d))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_add_right)
                ._set_root_node_getter(_conv_add_root_node_getter_right)
                ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_right)
                .set_fused_module(nni.ConvAdd2d))

conv_configs.append(
    BackendPatternConfig(nni.ConvAdd2d)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(conv_dtype_configs)
        .set_root_module(nn.Conv2d)
        .set_reference_quantized_module(nnqr.Conv2d))

# (2) Conv2d + Add + Relu

# conv2d   Y
#      \   /
#       add
#        \
#        relu

def _fuse_conv_add_relu_left(is_qat, relu, add_pattern):
    add, conv, _ = add_pattern
    return nni.ConvAddReLU2d(conv, add, relu)

def _conv_add_relu_root_node_getter_left(pattern):
    relu, add_pattern = pattern
    _, conv, _ = add_pattern
    return conv

def _conv_add_relu_extra_inputs_getter_left(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    relu, add_pattern = pattern
    _, conv, extra_input = add_pattern
    return [extra_input]

# conv2d
#  \
#   bn   Y
#    \   /
#     add
#      \
#      relu

def _fuse_conv_bn_add_relu_left(is_qat, relu, add_pattern):
    add, bn_conv, _ = add_pattern
    bn, conv = bn_conv
    if is_qat:
        raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
    else:
        fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
        return nni.ConvAddReLU2d(fused_conv, add, relu)

def _conv_bn_add_relu_root_node_getter_left(pattern):
    relu, add_pattern = pattern
    _, bn_conv, _ = add_pattern
    bn, conv = bn_conv
    return conv

def _conv_bn_add_relu_extra_inputs_getter_left(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    relu, add_pattern = pattern
    _, bn_conv, extra_input = add_pattern
    bn, conv = bn_conv
    return [extra_input]

conv_add_relu_left_options = itertools.product(
    [True, False],  # with_bn
    [torch.add, operator.add],  # add_op
)

for with_bn, add_op in conv_add_relu_left_options:
    if with_bn:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((nn.ReLU, (add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode)))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_bn_add_relu_left)
                ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_left)
                ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_left)
                .set_fused_module(nni.ConvAddReLU2d))
    else:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((nn.ReLU, (add_op, nn.Conv2d, MatchAllNode)))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_add_relu_left)
                ._set_root_node_getter(_conv_add_relu_root_node_getter_left)
                ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_left)
                .set_fused_module(nni.ConvAddReLU2d))

#  Y   conv2d
#   \   /
#    add
#     \
#     relu

def _fuse_conv_add_relu_right(is_qat, relu, add_pattern):
    add, _, conv = add_pattern
    return nni.ConvAddReLU2d(conv, add, relu)

def _conv_add_relu_root_node_getter_right(pattern):
    relu, add_pattern = pattern
    _, _, conv = add_pattern
    return conv

def _conv_add_relu_extra_inputs_getter_right(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    relu, add_pattern = pattern
    _, extra_input, conv = add_pattern
    return [extra_input]

#      conv2d
#        /
#  Y    bn
#   \   /
#    add
#     \
#     relu

def _fuse_conv_bn_add_relu_right(is_qat, relu, add_pattern):
    add, _, bn_conv = add_pattern
    bn, conv = bn_conv
    if is_qat:
        raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}")
    else:
        fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
        return nni.ConvAddReLU2d(fused_conv, add, relu)

def _conv_bn_add_relu_root_node_getter_right(pattern):
    relu, add_pattern = pattern
    _, _, bn_conv = add_pattern
    bn, conv = bn_conv
    return conv

def _conv_bn_add_relu_extra_inputs_getter_right(pattern):
    """ get inputs pattern for extra inputs, inputs for root node
    are assumed to be copied over from root node to the fused node
    """
    relu, add_pattern = pattern
    _, extra_input, bn_conv = add_pattern
    bn, conv = bn_conv
    return [extra_input]

conv_add_relu_options = itertools.product(
    [True, False],  # with_bn
    [torch.add, operator.add],  # add_op
)

for with_bn, add_op in conv_add_relu_options:
    if with_bn:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_bn_add_relu_right)
                ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_right)
                ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_right)
                .set_fused_module(nni.ConvAddReLU2d))
    else:
        conv_configs.append(
            BackendPatternConfig()
                ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, nn.Conv2d)))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(conv_dtype_configs)
                .set_fuser_method(_fuse_conv_add_relu_right)
                ._set_root_node_getter(_conv_add_relu_root_node_getter_right)
                ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_right)
                .set_fused_module(nni.ConvAddReLU2d))

conv_configs.append(
    BackendPatternConfig(nni.ConvAddReLU2d)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(conv_dtype_configs)
        .set_root_module(nn.Conv2d)
        .set_reference_quantized_module(nnqr.Conv2d))

# ========================
# |  CONFIGS FOR LINEAR  |
# ========================

linear_dtype_configs = [
    onednn_weighted_op_int8_dtype_config,
    onednn_dynamic_int8_dtype_config,
]
linear_configs = _get_linear_configs(linear_dtype_configs)

def _add_eltwise_fusion_configs(configs, root_module, root_op, post_module, post_op,
                                dtype_configs, fuser_method, fused_module, observation_type,
                                ref_quant_module):
    # 1 base module + op module fusion config
    configs.append(
        BackendPatternConfig((root_module, post_module))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuser_method)
            .set_fused_module(fused_module))
    # base module + functional post op
    configs.append(
        BackendPatternConfig((root_module, post_op))
            .set_dtype_configs(dtype_configs)  # noqa: E131
            .set_fuser_method(fuser_method)
            .set_fused_module(fused_module))

    # 2 fused module configs
    configs.append(
        BackendPatternConfig(fused_module)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            .set_root_module(root_module)
            .set_reference_quantized_module(ref_quant_module))

    # 3 functional base op + post op configs
    configs.append(
        BackendPatternConfig((root_op, post_module))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs))
    configs.append(
        BackendPatternConfig((root_op, post_op))
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs))

# Configs for linear + leaky_relu fusion
_add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear,
                            nn.LeakyReLU, F.leaky_relu, linear_dtype_configs,
                            _sequential_wrapper2(nni.LinearLeakyReLU),
                            nni.LinearLeakyReLU, observation_type, nnqr.Linear)

# Configs for linear module + batchnorm + leaky_relu
linear_configs.append(
    BackendPatternConfig((nn.Linear, nn.BatchNorm1d, nn.LeakyReLU))
        .set_dtype_configs(linear_dtype_configs)  # noqa: E131
        .set_fuser_method(_fuse_linear_bn_leaky_relu)
        .set_fused_module(nni.LinearLeakyReLU))

# Configs for linear + tanh fusion
_add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear,
                            nn.Tanh, torch.tanh, linear_dtype_configs,
                            _sequential_wrapper2(nni.LinearTanh),
                            nni.LinearTanh, observation_type, nnqr.Linear)

# ===========================
# |  CONFIGS FOR OTHER OPS  |
# ===========================

binary_op_dtype_configs = [onednn_op_quint8_dtype_config]
default_op_dtype_configs = [onednn_op_quint8_dtype_config]
fixed_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config]
share_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config]
rnn_op_dtype_configs = [onednn_dynamic_int8_dtype_config]
embedding_op_dtype_configs = [onednn_weight_only_qint8_dtype_config]
layer_norm_op_dtype_configs = [onednn_input_output_only_quint8_dtype_config]

# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_onednn_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch's native ONEDNN backend.
    """
    return BackendConfig("onednn") \
        .set_backend_pattern_configs(conv_configs) \
        .set_backend_pattern_configs(linear_configs) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))

__all__ = [
    "get_onednn_backend_config",
]
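For orientation, the "left" and "right" variants above differ only in which operand of the add comes from the conv. A minimal sketch of a module whose traced graph would match the left pattern (add_op, nn.Conv2d, MatchAllNode); the toy module and shapes are made up for illustration:

import torch
import torch.nn as nn

class ConvAddLeft(nn.Module):
    """Toy module: the conv output is the left operand of the add,
    and the extra input y is the node matched by MatchAllNode."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 3, padding=1)

    def forward(self, x, y):
        return self.conv(x) + y  # matches (operator.add, nn.Conv2d, MatchAllNode)

m = ConvAddLeft().eval()
x = torch.randn(1, 3, 8, 8)
y = torch.randn(1, 3, 8, 8)
print(m(x, y).shape)  # torch.Size([1, 3, 8, 8])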
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py
ADDED
@@ -0,0 +1,160 @@
import torch
from ._common_operator_config_utils import (
    _get_binary_op_configs,
    _get_bn_configs,
    _get_cat_config,
    _get_conv_configs,
    _get_default_op_configs,
    _get_embedding_op_configs,
    _get_fixed_qparams_op_configs,
    _get_linear_configs,
    _get_rnn_op_configs,
    _get_share_qparams_op_configs,
)
from .backend_config import BackendConfig, DTypeConfig, DTypeWithConstraints

__all__ = [
    "get_qnnpack_backend_config",
]

# ===================
# |  DTYPE CONFIGS  |
# ===================

qnnpack_weighted_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

qnnpack_default_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

qnnpack_default_op_fp16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float16,
    weight_dtype=torch.float16,
    bias_dtype=torch.float16,
)

qnnpack_default_dynamic_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

qnnpack_default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    is_dynamic=True,
)

qnnpack_weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)

qnnpack_weight_only_quint4x2_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint4x2,
)

# xnnpack compatible dtype configs

# We restrict scale values to be 2 ** -12 to ensure the
# requantization scale never falls below the xnnpack lower
# threshold. Additionally, for qint8 weight, we restrict
# the quantization values to [-127, +127], excluding -128.
# For more detail, refer to the description of
# `default_symmetric_qnnpack_qconfig`.

# TODO: add additional restriction on qscheme to ensure it
# is either per_tensor_symmetric or per_channel_symmetric

qnnpack_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
    dtype=torch.qint8,
    scale_min_lower_bound=2 ** -12,
)

qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
    dtype=torch.qint8,
    quant_min_lower_bound=-127,
    quant_max_upper_bound=127,
    scale_min_lower_bound=2 ** -12,
)

qnnpack_weighted_op_qint8_symmetric_dtype_config = DTypeConfig(
    input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
    output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
    weight_dtype=qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
    bias_dtype=torch.float,
)

qnnpack_default_op_qint8_symmetric_dtype_config = DTypeConfig(
    input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
    output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
)


# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_qnnpack_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch's native QNNPACK backend.
    """
    conv_dtype_configs = [
        qnnpack_weighted_op_qint8_symmetric_dtype_config,
        qnnpack_weighted_op_quint8_dtype_config,
    ]
    linear_dtype_configs = [
        qnnpack_weighted_op_qint8_symmetric_dtype_config,
        qnnpack_weighted_op_quint8_dtype_config,
        qnnpack_default_dynamic_int8_dtype_config,
        qnnpack_default_dynamic_float16_dtype_config,
    ]
    binary_op_dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        qnnpack_default_op_quint8_dtype_config,
    ]
    default_op_dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        qnnpack_default_op_quint8_dtype_config,
    ]
    fixed_qparams_op_dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        qnnpack_default_op_quint8_dtype_config,
    ]
    share_qparams_op_dtype_configs = [
        qnnpack_default_op_qint8_symmetric_dtype_config,
        qnnpack_default_op_quint8_dtype_config,
    ]
    rnn_op_dtype_configs = [
        qnnpack_default_dynamic_int8_dtype_config,
        qnnpack_default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        qnnpack_weight_only_quint8_dtype_config,
        qnnpack_weight_only_quint4x2_dtype_config,
    ]
    return BackendConfig("qnnpack") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
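The DTypeWithConstraints entries above attach numeric restrictions to a dtype rather than just naming it. A minimal sketch of what the 2 ** -12 lower bound means for a candidate quantization scale (the proposed scale value here is made up):

import torch
from torch.ao.quantization.backend_config import DTypeWithConstraints

constraint = DTypeWithConstraints(dtype=torch.qint8, scale_min_lower_bound=2 ** -12)
proposed_scale = 1e-5
# 2 ** -12 is roughly 2.44e-4, so a scale of 1e-5 falls below the
# xnnpack-compatible lower bound and would be rejected
print(proposed_scale >= constraint.scale_min_lower_bound)  # False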
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py
ADDED
@@ -0,0 +1,81 @@
import torch
from .backend_config import (
    BackendConfig,
    BackendPatternConfig,
    DTypeConfig,
    ObservationType
)
from ._common_operator_config_utils import (
    _get_binary_op_configs,
    _get_linear_configs,
    _get_conv_configs,
    _get_share_qparams_op_configs,
    _get_tensor_info_op_configs,
)

__all__ = [
    "get_tensorrt_backend_config",
    "get_tensorrt_backend_config_dict",
]

def get_tensorrt_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for the TensorRT backend.
    NOTE: Current api will change in the future, it's just to unblock experimentation for
    new backends, please don't use it right now.
    TODO: add a README when it's more stable
    """
    # dtype configs
    weighted_op_qint8_dtype_config = DTypeConfig(
        input_dtype=torch.qint8,
        output_dtype=torch.qint8,
        weight_dtype=torch.qint8,
        bias_dtype=torch.float,
    )
    non_weighted_op_qint8_dtype_config = DTypeConfig(
        input_dtype=torch.qint8,
        output_dtype=torch.qint8,
    )

    addmm_config = BackendPatternConfig(torch.addmm) \
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
        .add_dtype_config(weighted_op_qint8_dtype_config) \
        ._set_input_type_to_index({
            "bias": 0,
            "input": 1,
            "weight": 2,
        })
    cat_config = BackendPatternConfig(torch.cat) \
        .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
        .add_dtype_config(non_weighted_op_qint8_dtype_config)
    conv_dtype_configs = [
        weighted_op_qint8_dtype_config,
    ]
    linear_dtype_configs = [
        weighted_op_qint8_dtype_config,
    ]
    binary_op_dtype_configs = [
        weighted_op_qint8_dtype_config,
    ]
    share_qparams_op_dtype_configs = [
        non_weighted_op_qint8_dtype_config,
    ]
    tensor_info_op_dtype_configs = [
        non_weighted_op_qint8_dtype_config,
    ]
    # there might be things not supported in fx2trt, but it will error out
    # during fx2trt conversion and can support them after that
    return BackendConfig("tensorrt") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_config(addmm_config) \
        .set_backend_pattern_config(cat_config) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs))

def get_tensorrt_backend_config_dict():
    """
    Return the `BackendConfig` for the TensorRT backend in dictionary form.
    """
    return get_tensorrt_backend_config().to_dict()
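The _set_input_type_to_index mapping above records which positional argument of torch.addmm plays which quantization role. A quick check of the argument order it encodes (the shapes are arbitrary):

import torch

bias = torch.randn(4)       # "bias" is positional argument 0
inp = torch.randn(2, 3)     # "input" (the activation) is argument 1
weight = torch.randn(3, 4)  # "weight" is argument 2
out = torch.addmm(bias, inp, weight)
print(torch.allclose(out, bias + inp @ weight))  # True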
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py
ADDED
@@ -0,0 +1,279 @@
from typing import Dict, Any, List, Callable, Union, Tuple, Type

import torch
import torch.nn as nn
import torch.nn.functional as F
from .backend_config import (
    BackendConfig,
    BackendPatternConfig,
    DTypeConfig,
)
from ..utils import Pattern
from ..fuser_method_mappings import (
    _reverse2,
    _reverse3,
)

__all__ = [
    "get_pattern_to_dtype_configs",
    "get_qat_module_classes",
    "get_fused_module_classes",
    "get_pattern_to_input_type_to_index",
    "get_root_module_to_quantized_reference_module",
    "get_fuser_method_mapping",
    "get_module_to_qat_module",
    "get_fusion_pattern_to_root_node_getter",
    "get_fusion_pattern_to_extra_inputs_getter",
    "remove_boolean_dispatch_from_name",
    "pattern_to_human_readable",
    "entry_to_pretty_str",
]

def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]:
    pattern_to_dtype_configs: Dict[Pattern, List[DTypeConfig]] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        pattern_to_dtype_configs[pattern] = config.dtype_configs
    return pattern_to_dtype_configs

def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
    qat_module_classes = []
    for config in backend_config.configs:
        if config.qat_module is not None:
            qat_module_classes.append(config.qat_module)
    return tuple(set(qat_module_classes))

def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
    fused_module_classes = []
    for config in backend_config.configs:
        if config.fused_module is not None:
            fused_module_classes.append(config.fused_module)
    return tuple(set(fused_module_classes))

def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]:
    pattern_to_input_type_to_index: Dict[Pattern, Dict[str, int]] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        pattern_to_input_type_to_index[pattern] = config._input_type_to_index
    return pattern_to_input_type_to_index

def get_root_module_to_quantized_reference_module(
        backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]:
    mapping: Dict[Type[torch.nn.Module], Type[torch.nn.Module]] = {}
    for config in backend_config.configs:
        if config.root_module is not None and config.reference_quantized_module is not None:
            mapping[config.root_module] = config.reference_quantized_module
    return mapping

def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]:
    fuser_method_mapping: Dict[Pattern, Union[nn.Sequential, Callable]] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        if config.fuser_method is not None:
            # Note: both the fuser method and the pattern are specified in forward order in the
            # BackendConfig, but the internal pattern matching code uses the reversed nested tuple
            # format, so we need to convert both to the internal format
            fuser_method = _get_fuser_method_in_reversed_nested_tuple_format(config)
            fuser_method_mapping[pattern] = fuser_method
    return fuser_method_mapping

def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]:
    module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        if config.qat_module is not None:
            module_to_qat_module[pattern] = config.qat_module
    return module_to_qat_module

def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
    """ Get a map from fusion pattern to a function that returns the root node
    from the fusion pattern, e.g. the most common one is:
    def get_root_node(node_pattern):
        while not isinstance(node_pattern[-1], Node):
            node_pattern = node_pattern[-1]
        return node_pattern[-1]
    This can work for all patterns whose root node is the "last node" in the pattern,
    e.g. (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d))
    """
    root_node_getter_mapping: Dict[Pattern, Callable] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        if config._root_node_getter is not None:
            root_node_getter_mapping[pattern] = config._root_node_getter
    return root_node_getter_mapping

def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
    """ Get a map from fusion pattern to a function that returns extra input nodes
    from the fusion pattern, in the order required by the root node. This is optional,
    if not specified, we will not copy over any extra inputs for the root node.
    Example:
    # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
    # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra
    # argument to the fused module, we can unpack the pattern and return the node at
    # MatchAllNode here
    # we can implement extra_inputs_getter as follows:
    def extra_inputs_getter(pattern) -> List[Any]:
        add, extra_input, conv_pattern = pattern
        return [extra_input]
    """
    extra_inputs_getter_mapping: Dict[Pattern, Callable] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        if config._extra_inputs_getter is not None:
            extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter
    return extra_inputs_getter_mapping

def remove_boolean_dispatch_from_name(p) -> Any:
    """
    Some ops have a default string representation such as
    '<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
    this function replaces them with the hardcoded function names.
    """
    if p is F.fractional_max_pool2d:
        return "torch.nn.functional.fractional_max_pool2d"
    elif p is F.fractional_max_pool3d:
        return "torch.nn.functional.fractional_max_pool3d"
    elif p is F.max_pool1d:
        return "torch.nn.functional.max_pool1d"
    elif p is F.max_pool2d:
        return "torch.nn.functional.max_pool2d"
    elif p is F.max_pool3d:
        return "torch.nn.functional.max_pool3d"
    elif p is F.adaptive_max_pool1d:
        return "torch.nn.functional.adaptive_max_pool1d"
    elif p is F.adaptive_max_pool2d:
        return "torch.nn.functional.adaptive_max_pool2d"
    elif p is F.adaptive_max_pool3d:
        return "torch.nn.functional.adaptive_max_pool3d"
    assert "boolean_dispatch" not in str(p), \
        f"{p} does not have a human readable representation in " + \
        "quantization documentation"
    return p

def pattern_to_human_readable(p) -> Any:
    if isinstance(p, tuple):
        # nested patterns, recurse
        return tuple(pattern_to_human_readable(inner_p) for inner_p in p)
    elif isinstance(p, str):
        # method names are already human readable
        return p
    else:
        p = remove_boolean_dispatch_from_name(p)
        return p

# TODO(future PR): move backend_config_dict to use dataclass and move this logic to
# the corresponding __str__ function
def entry_to_pretty_str(entry) -> str:
    """
    Given a backend_config_dict entry, returns a string with the human readable
    representation of it.
    """
    s = "{\n"

    # always output the pattern first
    if "pattern" in entry:
        pattern_str = pattern_to_human_readable(entry["pattern"])

        s += f"  'pattern': {pattern_str},\n"

    # custom output for dtype_configs to make it look nice
    if "dtype_configs" in entry:
        s += "  'dtype_configs': [\n"
        for dtype_config in entry["dtype_configs"]:
            s += "    {\n"
            for k, v in dtype_config.items():
                s += f"      '{k}': {v},\n"
            s += "    },\n"
        s += "  ],\n"

    # custom output for num_tensor_args_to_observation_type to make it look nice
    if "num_tensor_args_to_observation_type" in entry:
        s += "  'num_tensor_args_to_observation_type': {\n"
        for k, v in entry["num_tensor_args_to_observation_type"].items():
            s += f"    {k}: {v},\n"
        s += "  },\n"

    # output all the other fields
    custom_handled_fields = [
        "pattern",
        "dtype_configs",
        "num_tensor_args_to_observation_type",
    ]
    for field_name in entry:
        if field_name in custom_handled_fields:
            continue
        s += f"  '{field_name}': {entry[field_name]},\n"

    s += "}"
    return s

def _get_pattern_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Pattern:
    """
    Return the pattern specified in the given config in the reversed nested tuple format
    used internally in the quantization pattern matching code.

    If the pattern is not a tuple, or the pattern is already specified in the reversed
    nested tuple format, return the pattern as is. Otherwise:

    For 2-tuples (a, b), return (b, a).
    For 3-tuples (a, b, c), return (c, (b, a)).

    For example:
    * Given nn.Linear, return nn.Linear
    * Given (nn.Linear, nn.ReLU), return (nn.ReLU, nn.Linear)
    * Given (nn.Conv2d, nn.BatchNorm2d, nn.ReLU), return
      (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))

    For context, the reason why this is needed is the user-facing BackendConfig
    API accepts the flat 2-or-3-tuple format in forward order. While this simple
    format handles the vast majority of use cases, it does not handle the more
    complex ones, and so the internal pattern matching code for quantization uses
    the following, more general reversed nested tuple format instead:

    operator = module_type | functional | torch op | native op | MatchAllNode
    Pattern = (operator, Pattern, Pattern, ...) | operator

    In the future, we expect to replace the above complex format with the one used
    by the subgraph rewriter in torch.fx, so we don't have to maintain our own
    complex pattern matching code. Then we won't need this helper function anymore.
    """
    if config._pattern_complex_format is not None:
        return config._pattern_complex_format
    if config.pattern is None:
        raise ValueError("Either 'pattern' or 'pattern_complex_format' must be specified")
    if not isinstance(config.pattern, tuple):
        return config.pattern

    # Pattern is specified in the simple tuple format, need to convert
    if len(config.pattern) == 2:
        (a, b) = config.pattern
        return (b, a)
    elif len(config.pattern) == 3:
        (a, b, c) = config.pattern
        return (c, (b, a))
    else:
        raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)

def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:
    """
    Return the fuser method specified in the given config in the reversed nested
    tuple format used internally in the quantization pattern matching code.

    If pattern is specified in the reversed nested tuple format, we assume the
    fuser method is also specified in this format and simply return it as is.
    Otherwise, we convert the fuser method as follows:

    * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv)
    * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv),
      where bn_conv is a 2-tuple (bn, conv)

    The first argument of a fuser method is always `is_qat` and is not affected
    in the conversion. We currently only support functions with 3 or 4 arguments.
    """
    assert config.fuser_method is not None
    if config._pattern_complex_format is not None:
        return config.fuser_method
    if not isinstance(config.pattern, tuple):
        raise ValueError("Expected pattern to be a tuple, got: ", config.pattern)

    # Pattern is specified in the simple tuple format, need to convert
    if len(config.pattern) == 2:
        return _reverse2(config.fuser_method)
    elif len(config.pattern) == 3:
        return _reverse3(config.fuser_method)
    else:
        raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
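The forward-to-reversed conversion documented above can be restated standalone. In this sketch, to_reversed_nested is a hypothetical helper mirroring the docstring's rules, not the private function itself:

import torch.nn as nn

def to_reversed_nested(pattern):
    # Non-tuples pass through; 2-tuples reverse; 3-tuples nest the first two
    if not isinstance(pattern, tuple):
        return pattern
    if len(pattern) == 2:
        a, b = pattern
        return (b, a)
    if len(pattern) == 3:
        a, b, c = pattern
        return (c, (b, a))
    raise ValueError("Expected a tuple with 2 or 3 elements")

print(to_reversed_nested((nn.Conv2d, nn.BatchNorm2d, nn.ReLU)))
# (ReLU, (BatchNorm2d, Conv2d)) as class objects, i.e. the reversed
# nested tuple format used by the pattern matcher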
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py
ADDED
@@ -0,0 +1,113 @@
import torch
from ._common_operator_config_utils import (
    _get_binary_op_configs,
    _get_bn_configs,
    _get_cat_config,
    _get_conv_configs,
    _get_default_op_configs,
    _get_embedding_op_configs,
    _get_fixed_qparams_op_configs,
    _get_linear_configs,
    _get_rnn_op_configs,
    _get_share_qparams_op_configs,
    _get_tensor_info_op_configs,
)
from .backend_config import BackendConfig, DTypeConfig

__all__ = [
    "get_x86_backend_config",
]

# ===================
# |  DTYPE CONFIGS  |
# ===================

# X86 aligns with FBGEMM for now

x86_weighted_op_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

x86_default_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

x86_default_op_fp16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float16,
    weight_dtype=torch.float16,
    bias_dtype=torch.float16,
)

x86_default_dynamic_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

x86_default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    is_dynamic=True,
)

x86_weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)

x86_weight_only_quint4x2_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint4x2,
)


# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_x86_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch's native x86 backend.
    """
    conv_dtype_configs = [x86_weighted_op_int8_dtype_config]
    linear_dtype_configs = [
        x86_weighted_op_int8_dtype_config,
        x86_default_dynamic_int8_dtype_config,
        x86_default_dynamic_float16_dtype_config,
    ]
    binary_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
    default_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    fixed_qparams_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
    share_qparams_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    tensor_info_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    rnn_op_dtype_configs = [
        x86_default_dynamic_int8_dtype_config,
        x86_default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        x86_weight_only_quint8_dtype_config,
        x86_weight_only_quint4x2_dtype_config,
    ]
    return BackendConfig("x86") \
        .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
        .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
        .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
        .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
        .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
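Since x86 mirrors FBGEMM for now, one quick sanity check on a built config is to inspect the public configs property that the utils above iterate over; a minimal sketch assuming only the API shown in this diff:

from torch.ao.quantization.backend_config import get_x86_backend_config

cfg = get_x86_backend_config()
print(cfg.name)                      # "x86"
print(len(cfg.configs))              # number of registered pattern configs
print(cfg.configs[0].dtype_configs)  # dtype configs of the first pattern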
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (191 Bytes)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc
ADDED
Binary file (2.05 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/eval_utils.cpython-310.pyc
ADDED
Binary file (3.08 kB)
env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc
ADDED
Binary file (695 Bytes)