diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da3dc2973b129caf6a8aa59282bdbe18cdb613ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0df31dfd2a1de31b7f10e3d870f6cc8a80757b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py @@ -0,0 +1,526 @@ +import torch +import torch.nn as nn +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +from torch.ao.quantization import prepare +from typing import Dict, List, Optional, Any, Union, Callable, Set + +from torch.ao.quantization.quantization_mappings import ( + get_default_compare_output_module_list, +) + +NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST = { + nnqd.Linear, + nnq.Linear, + nnqd.LSTM, + nn.LSTM, +} + + +def _find_match( + str_list: Union[Dict[str, Any], List[str]], key_str: str, + postfix: str, +) -> Optional[str]: + split_str = key_str.split(".") + if split_str[-1] == postfix: + match_string = "".join(key_str.split(".")[0:-1]) + for s2 in str_list: + pattern1 = "".join(s2.split(".")[0:-1]) + pattern2 = "".join(s2.split(".")[0:-2]) + if match_string == pattern1: + return s2 + if match_string == pattern2: + return s2 + + # For matching "fc.weight" and "fc._packed_params._packed_params" + if postfix == "_packed_params": + match_string = "".join(key_str.split(".")[0:-2]) + if len(match_string) == 0: + return None + for s2 in str_list: + pattern1 = "".join(s2.split(".")[0:-1]) + pattern2 = "".join(s2.split(".")[0:-2]) + if match_string == pattern1: + return s2 + if match_string == pattern2: + return s2 + return None + else: + return None + + +def compare_weights( + float_dict: Dict[str, Any], quantized_dict: Dict[str, Any] +) -> Dict[str, Dict[str, torch.Tensor]]: + r"""Compare the weights of the float module with its corresponding quantized + module. Return a dict with key corresponding to module names and each entry being + a dictionary with two keys 'float' and 'quantized', containing the float and + quantized weights. This dict can be used to compare and compute the quantization + error of the weights of float and quantized models. 
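# Note: the docstring examples in this file call a `compute_error` helper that
# is not defined here. A minimal sketch, assuming the SQNR definition used in
# the PyTorch numeric suite tutorials (this helper is an illustration, not part
# of the patch):

def compute_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # SQNR in dB: 20 * log10(||x|| / ||x - y||)
    Ps = torch.norm(x)
    Pn = torch.norm(x - y)
    return 20 * torch.log10(Ps / Pn)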
+ + Example usage:: + + wt_compare_dict = compare_weights( + float_model.state_dict(), qmodel.state_dict()) + for key in wt_compare_dict: + print( + key, + compute_error( + wt_compare_dict[key]['float'], + wt_compare_dict[key]['quantized'].dequantize() + ) + ) + + Args: + float_dict: state dict of the float model + quantized_dict: state dict of the quantized model + + Return: + weight_dict: dict with key corresponding to module names and each entry being + a dictionary with two keys 'float' and 'quantized', containing the float and + quantized weights + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_weights") + weight_dict: Dict[str, Dict] = {} + for key in quantized_dict: + match_key = _find_match(float_dict, key, "weight") + if match_key is not None: + weight_dict[key] = {} + weight_dict[key]["float"] = float_dict[match_key] + weight_dict[key]["quantized"] = quantized_dict[key] + continue + + # For matching "fc.weight" and "fc._packed_params._packed_params" + match_key = _find_match(float_dict, key, "_packed_params") + if match_key is not None: + weight_dict[key] = {} + weight_dict[key]["float"] = float_dict[match_key] + weight_dict[key]["quantized"] = quantized_dict[key][0] + + # For LSTM + split_str = key.split(".") + if split_str[-1] == "param" and split_str[-3] == "_all_weight_values": + layer = split_str[-2] + module_name = ".".join(split_str[:-3]) + float_weight_ih_key = module_name + ".weight_ih_l" + layer + float_weight_hh_key = module_name + ".weight_hh_l" + layer + if float_weight_ih_key in float_dict and float_weight_hh_key in float_dict: + weight_dict[key] = {} + weight_dict[key]["float"] = float_dict[float_weight_ih_key] + weight_dict[key]["quantized"] = ( + quantized_dict[key].__getstate__()[0][4][0].__getstate__()[0][0] + ) + weight_dict[key]["float"] = float_dict[float_weight_hh_key] + weight_dict[key]["quantized"] = ( + quantized_dict[key].__getstate__()[0][4][1].__getstate__()[0][0] + ) + + return weight_dict + + +def _get_logger_dict_helper( + mod: nn.Module, target_dict: Dict[str, Any], + prefix: str = "", +) -> None: + r"""This is the helper function for get_logger_dict + + Args: + mod: module we want to save all logger stats + prefix: prefix for the current module + target_dict: the dictionary used to save all logger stats + """ + + def get_prefix(prefix): + return prefix if prefix == "" else prefix + "." + + for name, child in mod.named_children(): + if isinstance(child, Logger): + target_dict[get_prefix(prefix) + "stats"] = child.stats + break + + for name, child in mod.named_children(): + module_prefix = get_prefix(prefix) + name if prefix else name + _get_logger_dict_helper(child, target_dict, module_prefix) + + +def get_logger_dict(mod: nn.Module, prefix: str = "") -> Dict[str, Dict]: + r"""Traverse the modules and save all logger stats into target dict. + This is mainly used for quantization accuracy debug. 
+ + Type of loggers supported: + ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module, + OutputLogger: used to log the outputs of the modules + + Args: + mod: module we want to save all logger stats + prefix: prefix for the current module + + Return: + target_dict: the dictionary used to save all logger stats + + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.get_logger_dict") + + target_dict: Dict[str, Dict] = {} + _get_logger_dict_helper(mod, target_dict, prefix) + return target_dict + + +class Logger(nn.Module): + r"""Base class for stats logging + """ + + def __init__(self): + super().__init__() + self.stats = {} + # We only insert observer if the op is quantized with static quantization, + # which is identified by activation_observer.dtype == quint8. This is needed + # when attaching Logger as observer for FX mode + self.dtype = torch.quint8 + + def forward(self, x): + """ + """ # blank docblock to make autodoc happy + pass + + +class ShadowLogger(Logger): + r"""Class used in Shadow module to record the outputs of the original and + shadow modules. + """ + + def __init__(self): + super().__init__() + self.stats["float"] = [] + self.stats["quantized"] = [] + + def forward(self, x, y): + """ + """ # blank docblock to make autodoc happy + if len(x) > 1: + x = x[0] + if len(y) > 1: + y = y[0] + self.stats["quantized"].append(x.detach()) + self.stats["float"].append(y.detach()) + + +class OutputLogger(Logger): + r"""Class used to log the outputs of the module + """ + + def __init__(self): + super().__init__() + self.stats["tensor_val"] = [] + + + def forward(self, x): + """ + """ # blank docblock to make autodoc happy + self.stats["tensor_val"].append(x) + return x + + +def _convert_tuple_to_list(t: Any) -> Any: + return [_convert_tuple_to_list(x) for x in t] if type(t) is tuple else t + + +def _dequantize_tensor_list(t: Any) -> Any: + return ( + [_dequantize_tensor_list(x) for x in t] + if type(t) is list + else t.dequantize() + if t.is_quantized + else t + ) + + +class Shadow(nn.Module): + r"""Shadow module attaches the float module to its matching quantized module + as the shadow. Then it uses Logger module to process the outputs of both + modules. + + Args: + q_module: module quantized from float_module that we want to shadow + float_module: float module used to shadow q_module + logger_cls: type of logger used to process the outputs of q_module and + float_module. ShadowLogger or custom loggers can be used. 
+ """ + + def __init__(self, q_module, float_module, logger_cls): + super().__init__() + self.orig_module = q_module + self.shadow_module = float_module + self.dequant = nnq.DeQuantize() + self.logger = logger_cls() + + def forward(self, *x) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + xl = _convert_tuple_to_list(x) + output = self.orig_module(*xl) + xl_float = _dequantize_tensor_list(xl) + shadow_output = self.shadow_module(*xl_float) + self.logger(output, shadow_output) + return output + + def add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.add(x, y) + x = x.dequantize() + y = y.dequantize() + shadow_output = self.shadow_module.add(x, y) + self.logger(output, shadow_output) + return output + + def add_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.add_scalar(x, y) + x = x.dequantize() + shadow_output = self.shadow_module.add_scalar(x, y) + self.logger(output, shadow_output) + return output + + def mul(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.mul(x, y) + x = x.dequantize() + y = y.dequantize() + shadow_output = self.shadow_module.mul(x, y) + self.logger(output, shadow_output) + return output + + def mul_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.mul_scalar(x, y) + x = x.dequantize() + shadow_output = self.shadow_module.mul_scalar(x, y) + self.logger(output, shadow_output) + return output + + def cat(self, x: List[torch.Tensor], dim: int = 0) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.cat(x, dim) + x = [y.dequantize() for y in x] + shadow_output = self.shadow_module.cat(x, dim) + self.logger(output, shadow_output) + return output + + def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + """ # blank docblock to make autodoc happy + output = self.orig_module.add_relu(x, y) + x = x.dequantize() + y = y.dequantize() + shadow_output = self.shadow_module.add_relu(x, y) + self.logger(output, shadow_output) + return output + + +def prepare_model_with_stubs( + float_module: nn.Module, q_module: nn.Module, + module_swap_list: Set[type], logger_cls: Callable, +) -> None: + r"""Prepare the model by attaching the float module to its matching quantized + module as the shadow if the float module type is in module_swap_list. 
+ + Example usage:: + + prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger) + q_model(data) + ob_dict = get_logger_dict(q_model) + + Args: + float_module: float module used to generate the q_module + q_module: module quantized from float_module + module_swap_list: list of float module types to attach the shadow + logger_cls: type of logger to be used in shadow module to process the outputs of + quantized module and its float shadow module + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_with_stubs") + + float_module_children = {} + for name, mod in float_module.named_children(): + float_module_children[name] = mod + + reassign = {} + for name, mod in q_module.named_children(): + + if name not in float_module_children: + continue + + float_mod = float_module_children[name] + + if type(float_mod) not in module_swap_list: + prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls) + + # Insert shadow module only if the module is not of the same type as + # the floating point module + if type(float_mod) in module_swap_list and not _is_identical_module_type(mod, float_mod): + reassign[name] = Shadow(mod, float_mod, logger_cls) + + for key, value in reassign.items(): + q_module._modules[key] = value + +def _is_identical_module_type(mod1, mod2): + # Compare if two modules have the same dtype + mod1_module_types = [type(mod) for mod in mod1.modules()] + mod2_module_types = [type(mod) for mod in mod2.modules()] + return mod1_module_types == mod2_module_types + + + +def compare_model_stub( + float_model: nn.Module, q_model: nn.Module, module_swap_list: Set[type], + *data, logger_cls=ShadowLogger +) -> Dict[str, Dict]: + r"""Compare quantized module in a model with its floating point counterpart, + feeding both of them the same input. Return a dict with key corresponding to + module names and each entry being a dictionary with two keys 'float' and + 'quantized', containing the output tensors of quantized and its matching + float shadow module. This dict can be used to compare and compute the module + level quantization error. + + This function first call prepare_model_with_stubs() to swap the quantized + module that we want to compare with the Shadow module, which takes quantized + module, corresponding float module and logger as input, and creates a forward + path inside to make the float module to shadow quantized module sharing the + same input. The logger can be customizable, default logger is ShadowLogger + and it will save the outputs of the quantized module and float module that + can be used to compute the module level quantization error. + + Example usage:: + + module_swap_list = [torchvision.models.quantization.resnet.QuantizableBasicBlock] + ob_dict = compare_model_stub(float_model,qmodel,module_swap_list, data) + for key in ob_dict: + print(key, compute_error(ob_dict[key]['float'], ob_dict[key]['quantized'].dequantize())) + + Args: + float_model: float model used to generate the q_model + q_model: model quantized from float_model + module_swap_list: list of float module types at which shadow modules will + be attached. 
+ data: input data used to run the prepared q_model + logger_cls: type of logger to be used in shadow module to process the outputs of + quantized module and its float shadow module + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_stub") + prepare_model_with_stubs(float_model, q_model, module_swap_list, logger_cls) + q_model(*data) + ob_dict = get_logger_dict(q_model) + return ob_dict + + +def get_matching_activations( + float_module: nn.Module, q_module: nn.Module, +) -> Dict[str, Dict[str, torch.Tensor]]: + r"""Find the matching activation between float and quantized modules. + + Args: + float_module: float module used to generate the q_module + q_module: module quantized from float_module + + Return: + act_dict: dict with key corresponding to quantized module names and each + entry being a dictionary with two keys 'float' and 'quantized', containing + the matching float and quantized activations + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.get_matching_activations") + float_dict = get_logger_dict(float_module) + quantized_dict = get_logger_dict(q_module) + act_dict: Dict[str, Dict] = {} + for key in quantized_dict: + if len(quantized_dict[key]["tensor_val"]) == 0: + continue + match_key = _find_match(sorted(float_dict, reverse=True), key, "stats") + if match_key is not None: + act_dict[key] = {} + act_dict[key]["float"] = float_dict[match_key]["tensor_val"] + act_dict[key]["quantized"] = quantized_dict[key]["tensor_val"] + return act_dict + + +def prepare_model_outputs( + float_module: nn.Module, + q_module: nn.Module, + logger_cls=OutputLogger, + allow_list=None +) -> None: + r"""Prepare the model by attaching the logger to both float module + and quantized module if they are in the allow_list. + + Args: + float_module: float module used to generate the q_module + q_module: module quantized from float_module + logger_cls: type of logger to be attached to float_module and q_module + allow_list: list of module types to attach logger + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_outputs") + if allow_list is None: + allow_list = get_default_compare_output_module_list() + + qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None) + float_module.qconfig = qconfig_debug # type: ignore[assignment] + prepare(float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={}) + q_module.qconfig = qconfig_debug # type: ignore[assignment] + prepare( + q_module, + inplace=True, + allow_list=allow_list, + observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST, + prepare_custom_config_dict={} + ) + + +def compare_model_outputs( + float_model: nn.Module, + q_model: nn.Module, + *data, + logger_cls=OutputLogger, + allow_list=None +) -> Dict[str, Dict[str, torch.Tensor]]: + r"""Compare output activations between float and quantized models at + corresponding locations for the same input. Return a dict with key corresponding + to quantized module names and each entry being a dictionary with two keys + 'float' and 'quantized', containing the activations of quantized model and + float model at matching locations. This dict can be used to compare and + compute the propagation quantization error. 
+ + Example usage:: + + act_compare_dict = compare_model_outputs(float_model, qmodel, data) + for key in act_compare_dict: + print( + key, + compute_error( + act_compare_dict[key]['float'], + act_compare_dict[key]['quantized'].dequantize() + ) + ) + + Args: + float_model: float model used to generate the q_model + q_model: model quantized from float_model + data: input data used to run the prepared float_model and q_model + logger_cls: type of logger to be attached to float_module and q_module + allow_list: list of module types to attach logger + + Return: + act_compare_dict: dict with key corresponding to quantized module names + and each entry being a dictionary with two keys 'float' and 'quantized', + containing the matching float and quantized activations + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_outputs") + if allow_list is None: + allow_list = get_default_compare_output_module_list() + prepare_model_outputs(float_model, q_model, logger_cls, allow_list) + float_model(*data) + q_model(*data) + act_compare_dict = get_matching_activations(float_model, q_model) + return act_compare_dict diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..b26dbadc006823cdbb2fb9f9cfce537336abf842 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite_fx.py @@ -0,0 +1,1025 @@ +""" +This module contains tooling to compare weights and activations +across models. Example usage:: + + import copy + import torch + import torch.ao.quantization.quantize_fx as quantize_fx + import torch.ao.ns._numeric_suite_fx as ns + + m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)).eval() + mp = quantize_fx.prepare_fx(m, {'': torch.ao.quantization.default_qconfig}) + # We convert a copy because we need the original prepared model + # to be available for comparisons, and `quantize_fx.convert_fx` is inplace. + mq = quantize_fx.convert_fx(copy.deepcopy(mp)) + + # + # Comparing weights + # + + # extract weight pairs + weight_comparison = ns.extract_weights('a', mp, 'b', mq) + + # add SQNR for each comparison, inplace + ns.extend_logger_results_with_comparison( + weight_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr, + 'sqnr') + + # weight_comparison contains the weights from `mp` and `mq` stored + # in pairs, and can be used for further analysis. + + + # + # Comparing activations, with error propagation + # + + # add loggers + mp_ns, mq_ns = ns.add_loggers( + 'a', copy.deepcopy(mp), + 'b', copy.deepcopy(mq), + ns.OutputLogger) + + # send an example datum to capture intermediate activations + datum = torch.randn(1, 1, 1, 1) + mp_ns(datum) + mq_ns(datum) + + # extract intermediate activations + act_comparison = ns.extract_logger_info( + mp_ns, mq_ns, ns.OutputLogger, 'b') + + # add SQNR for each comparison, inplace + ns.extend_logger_results_with_comparison( + act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr, + 'sqnr') + + # act_comparison contains the activations from `mp_ns` and `mq_ns` stored + # in pairs, and can be used for further analysis. 
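# A hedged sketch of how the `act_comparison` results above might be consumed
# once `extend_logger_results_with_comparison` has attached the 'sqnr' key.
# The nesting (layer name -> results type -> model name -> list of result
# dicts) follows the NSResultsType structure populated later in this file; the
# helper name is an illustration:

def print_act_sqnr(act_comparison):
    for layer_name, results_type_to_results in act_comparison.items():
        for results_type, model_name_to_results in results_type_to_results.items():
            for model_name, results_list in model_name_to_results.items():
                for result in results_list:
                    # 'sqnr' is only attached to entries of the model passed as
                    # `model_name_2` to extend_logger_results_with_comparison
                    if 'sqnr' in result:
                        print(layer_name, model_name, result['sqnr'])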
+ + # + # Comparing activations, without error propagation + # + + # create shadow model + mp_shadows_mq = ns.add_shadow_loggers( + 'a', copy.deepcopy(mp), + 'b', copy.deepcopy(mq), + ns.OutputLogger) + + # send an example datum to capture intermediate activations + datum = torch.randn(1, 1, 1, 1) + mp_shadows_mq(datum) + + # extract intermediate activations + shadow_act_comparison = ns.extract_shadow_logger_info( + mp_shadows_mq, ns.OutputLogger, 'b') + + # add SQNR for each comparison, inplace + ns.extend_logger_results_with_comparison( + shadow_act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr, + 'sqnr') + + # shadow_act_comparison contains the activations from `mp_ns` and `mq_ns` stored + # in pairs, and can be used for further analysis. + +""" + +import collections + +import torch +import torch.nn as nn +import torch.ao.quantization.quantize_fx as quantize_fx +from torch.fx import GraphModule +from torch.fx.graph import Node +from torch.ao.ns.fx.mappings import ( + get_base_name_to_sets_of_related_ops, +) +from torch.ao.ns.fx.graph_matcher import ( + get_matching_subgraph_pairs, + get_type_a_related_to_b, +) + +from .fx.weight_utils import ( + extract_weight_from_node, +) + +from .fx.graph_passes import ( + add_loggers_to_model, + create_a_shadows_b, +) + +from .fx.utils import ( + rekey_logger_info_on_node_name_of_model, + maybe_add_missing_fqns, + get_target_type_str, +) + +from .fx.ns_types import ( + NSSingleResultValuesType, + NSResultsType, + NSNodeTargetType, +) +from torch.ao.quantization.backend_config.utils import get_fusion_pattern_to_root_node_getter +from torch.ao.quantization.backend_config import BackendConfig +from torch.ao.quantization.fx.match_utils import _find_matches +from torch.ao.quantization.fx.graph_module import _get_observed_graph_module_attr +from torch.ao.quantization.fx.qconfig_mapping_utils import _generate_node_name_to_qconfig +from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers +from torch.ao.quantization.qconfig import QConfigAny +from torch.ao.quantization import QConfigMapping +from torch.ao.ns.fx.n_shadows_utils import ( + OutputProp, + _get_dedup_subgraphs, + SHADOW_WRAPPER_NODE_NAME_PREFIX, + group_results_by_subgraph, + create_results_comparison, + print_n_shadows_summary, + create_n_transformed_and_logged_copies_of_subgraph, + create_add_loggers_graph, + extract_weight_comparison, +) +from torch.ao.ns.fx.qconfig_multi_mapping import QConfigMultiMapping + +from typing import Dict, Tuple, Callable, List, Optional, Set, Any, Type + +RNNReturnType = Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] + +class OutputLogger(nn.Module): + """ + Base class for capturing intermediate values. + """ + stats: List[torch.Tensor] + stats_rnn: List[RNNReturnType] + + # Mark as impure so that calls to it will not be removed during DCE. + _is_impure = True + + def __init__( + self, + ref_node_name: str, + prev_node_name: str, + model_name: str, + ref_name: str, + prev_node_target_type: str, + ref_node_target_type: str, + results_type: str, + index_within_arg: int, + index_of_arg: int, + fqn: Optional[str], + qconfig_str: Optional[str] = '', + ): + super().__init__() + self.stats: List[torch.Tensor] = [] + self.stats_rnn: List[RNNReturnType] = [] + + # name of the node which was responsible for adding this logger + # Note: + # - if we are logging node outputs, this is the same as prev_node_name + # - if we are logging node inputs, this is the name of the node + # whose input this logger is logging. 
+ # + # example, where logger1 is logging input of op1 and logger2 is logging + # the output of op1: + # + # x1 -> logger1 -> op1 -> logger2 -> x2 + # + # in this example, + # - logger1's prev_node_name is x1 and ref_node_name is op1 + # - logger2's prev_node_name is op1 and ref_node_name is op1 + self.ref_node_name = ref_node_name + # name of the node whose output this Logger is capturing + self.prev_node_name = prev_node_name + + # name of the model from which the node originated from + self.model_name = model_name + # reference name, used to match loggers from separate models + # to each other + self.ref_name = ref_name + # type of the target of the node whose output this logger is logging + self.prev_node_target_type = prev_node_target_type + # type of the target of the node which was responsible for adding this + # logger + self.ref_node_target_type = ref_node_target_type + # what kind of values are inside of stats + self.results_type = results_type + # index of this node within the arg of the input/output node + # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1 + self.index_within_arg = index_within_arg + # index of this node within the args of the input/output node + # for example, in add(x1, x2), x2 would have index_of_arg == 1 + self.index_of_arg = index_of_arg + # fully qualified name + self.fqn = fqn + # if loggers are added before prepare_fx, but we do not want + # collect results of calibration, only results after convert_fx + # so, we add a flag to control whether this logger collects data + self.enabled = True + # string representation of qconfig + self.qconfig_str = qconfig_str + # this can be turned off to reduce memory usage during calibration + self.save_activations = True + + # Note: cannot annotate the type of x because TorchScript does not support + # the Union type. + def forward(self, x): + """ + """ # blank docblock to make autodoc happy + # TODO(future PR): consider designing this better, as the difference + # between these two flags is subtle and not obvious. 
+ if not self.enabled: + return x + if not self.save_activations: + return x + # TODO(future PR): consider refactoring this to better reuse the parent + # class + if isinstance(x, torch.Tensor): + self.stats.append(x.detach()) + elif isinstance(x, tuple) and len(x) == 2 and len(x[1]) == 2: + new_res = (x[0].detach(), (x[1][0].detach(), x[1][1].detach())) + self.stats_rnn.append(new_res) + return x + + def __repr__(self): + clean_dict = { + k: v + for k, v in self.__dict__.items() + # skip nn.Module keys + if (k != 'training') and not k.startswith('_') + } + return f"OutputLogger({clean_dict})" + + +class OutputComparisonLogger(OutputLogger): + """ + Same as OutputLogger, but also requires the original activation + in order to calculate the comparison at calibration time + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # TODO(future PR): make the comparison function configurable + self.comparison_fn = torch.ao.ns.fx.utils.compute_sqnr + self.comparison_fn_name = 'sqnr' + # precalculated comparisons of logger output versus reference + self.comparisons = [] + # precalculated comparisons function + + def forward(self, x, x_ref): + """ + """ # blank docblock to make autodoc happy + if not self.enabled: + return x + assert isinstance(x, torch.Tensor), 'non-tensor inputs not yet supported' + if self.save_activations: + # save the activation, for debugging + self.stats.append(x.detach()) + # save the comparison + self.comparisons.append(self.comparison_fn(x, x_ref)) + return x + + def __repr__(self): + clean_dict = { + k: v + for k, v in self.__dict__.items() + # skip nn.Module keys + if (k != 'training') and not k.startswith('_') + } + return f"OutputComparisonLogger({clean_dict})" + + +class NSTracer(quantize_fx.QuantizationTracer): + """ + Just like a regular FX quantization tracer, but treats observers and fake_quantize + modules as leaf modules. 
+ """ + def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: + """ + """ # blank docblock to make autodoc happy + if isinstance(m, torch.ao.quantization.ObserverBase): + return True + elif isinstance(m, torch.ao.quantization.FakeQuantizeBase): + return True + return super().is_leaf_module(m, module_qualified_name) + + +def _extract_weights_one_model( + model_name: str, + model: GraphModule, + nodes_and_names_to_instrument: List[Tuple[Node, str]], + results: NSResultsType, + op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None, +) -> None: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model") + for node, ref_name in nodes_and_names_to_instrument: + res_type = NSSingleResultValuesType.WEIGHT.value + extracted_weight = extract_weight_from_node( + node, model, op_to_type_to_weight_extraction_fn) + if extracted_weight: + if ref_name not in results: + results[ref_name] = {res_type: {}} + results[ref_name][res_type][model_name] = [extracted_weight] + + +def _extract_weights_impl( + model_name_a: str, + gm_a: GraphModule, + model_name_b: str, + gm_b: GraphModule, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None, +) -> NSResultsType: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl") + matched_subgraph_pairs = get_matching_subgraph_pairs( + gm_a, gm_b, base_name_to_sets_of_related_ops, + unmatchable_types_map) + + # split the subgraph pairs into one data structure for each model + nodes_and_names_to_instrument_a: List[Tuple[Node, str]] = [] + nodes_and_names_to_instrument_b: List[Tuple[Node, str]] = [] + for match_name, match in matched_subgraph_pairs.items(): + subgraph_a, subgraph_b = match + nodes_and_names_to_instrument_a.append((subgraph_a.base_op_node, match_name)) + nodes_and_names_to_instrument_b.append((subgraph_b.base_op_node, match_name)) + + # populate the results, one model at a time + results: NSResultsType = {} + _extract_weights_one_model( + model_name_a, gm_a, nodes_and_names_to_instrument_a, results, + op_to_type_to_weight_extraction_fn) + _extract_weights_one_model( + model_name_b, gm_b, nodes_and_names_to_instrument_b, results, + op_to_type_to_weight_extraction_fn) + + # fill in missing fqn entries + maybe_add_missing_fqns(results) + + # rekey on names of nodes in gm_b + results = rekey_logger_info_on_node_name_of_model(results, model_name_b) + + return results + + +def extract_weights( + model_name_a: str, + model_a: nn.Module, + model_name_b: str, + model_b: nn.Module, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None, +) -> NSResultsType: + """ + Extract weights from model A and model B, and return a comparison. 
+ + Args: + model_name_a: string name of model A to use in results + model_a: model A + model_name_b: string name of model B to use in results + model_b: model B + base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change + unmatchable_types_map: optional override of unmatchable types, subject to change + op_to_type_to_weight_extraction_fn: optional override of function which extracts weight + from a type, subject to change + + Return: + NSResultsType, containing the weight comparisons + """ + + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights") + if base_name_to_sets_of_related_ops is None: + base_name_to_sets_of_related_ops = \ + get_base_name_to_sets_of_related_ops() + type_a_related_to_b = \ + get_type_a_related_to_b(base_name_to_sets_of_related_ops) + + # TODO(future PR): expose these + skipped_module_names: List[str] = [] + skipped_module_classes: List[Callable] = [] + tracer_a = NSTracer(skipped_module_names, skipped_module_classes) + tracer_b = NSTracer(skipped_module_names, skipped_module_classes) + gm_a = GraphModule(model_a, tracer_a.trace(model_a)) + maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope') + if maybe_model_a_node_name_to_scope is not None: + gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope + gm_b = GraphModule(model_b, tracer_b.trace(model_b)) + maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope') + if maybe_model_b_node_name_to_scope is not None: + gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope + return _extract_weights_impl( + model_name_a, gm_a, model_name_b, gm_b, base_name_to_sets_of_related_ops, + unmatchable_types_map, op_to_type_to_weight_extraction_fn) + + +def _add_loggers_one_model( + model_name: str, + model: GraphModule, + nodes_and_names_to_instrument_inputs: List[Tuple[Node, str, str]], + nodes_and_names_to_instrument_outputs: List[Tuple[Node, str, str]], + logger_cls: Callable, +) -> nn.Module: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_one_model") + + # TODO(future PR): do not observe nodes we do not care + # about (both fp32, denylist, etc) + node_to_instrument_inputs_to_ref_name: Dict[Node, Tuple[str, str]] = {} + node_to_instrument_outputs_to_ref_name: Dict[Node, Tuple[str, str]] = {} + for node, ref_name, ref_node_type in nodes_and_names_to_instrument_inputs: + node_to_instrument_inputs_to_ref_name[node] = (ref_name, ref_node_type) + for node, ref_name, ref_node_type in nodes_and_names_to_instrument_outputs: + node_to_instrument_outputs_to_ref_name[node] = (ref_name, ref_node_type) + + model = add_loggers_to_model( + model, node_to_instrument_inputs_to_ref_name, + node_to_instrument_outputs_to_ref_name, logger_cls, model_name) + return model + + +def _add_loggers_impl( + name_a: str, + gm_a: GraphModule, + name_b: str, + gm_b: GraphModule, + logger_cls: Callable, + should_log_inputs: bool, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> Tuple[nn.Module, nn.Module]: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_impl") + matched_subgraph_pairs = get_matching_subgraph_pairs( + gm_a, gm_b, + base_name_to_sets_of_related_ops, unmatchable_types_map) + nodes_and_names_to_instrument_inputs_a = [] + nodes_and_names_to_instrument_inputs_b = [] + nodes_and_names_to_instrument_outputs_a = 
[] + nodes_and_names_to_instrument_outputs_b = [] + for match_name, (subgraph_a, subgraph_b) in matched_subgraph_pairs.items(): + ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a) + ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b) + # Note: for matching inputs we use start_node, such as observing + # the input of linear in linear-relu + if should_log_inputs: + nodes_and_names_to_instrument_inputs_a.append( + (subgraph_a.start_node, match_name, ref_node_type_a)) + nodes_and_names_to_instrument_inputs_b.append( + (subgraph_b.start_node, match_name, ref_node_type_b)) + # Note: for matching activations we always use end_node, + # such as observing the output of relu in linear-relu + nodes_and_names_to_instrument_outputs_a.append( + (subgraph_a.end_node, match_name, ref_node_type_a)) + nodes_and_names_to_instrument_outputs_b.append( + (subgraph_b.end_node, match_name, ref_node_type_b)) + + new_model_a = _add_loggers_one_model( + name_a, gm_a, nodes_and_names_to_instrument_inputs_a, + nodes_and_names_to_instrument_outputs_a, logger_cls) + new_model_b = _add_loggers_one_model( + name_b, gm_b, nodes_and_names_to_instrument_inputs_b, + nodes_and_names_to_instrument_outputs_b, logger_cls) + return (new_model_a, new_model_b) + + +def add_loggers( + name_a: str, + model_a: nn.Module, + name_b: str, + model_b: nn.Module, + logger_cls: Callable, + should_log_inputs : bool = False, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> Tuple[nn.Module, nn.Module]: + """ + Instrument model A and model B with loggers. + + Args: + name_a: string name of model A to use in results + model_a: model A + name_b: string name of model B to use in results + model_b: model B + logger_cls: class of Logger to use + base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change + unmatchable_types_map: optional override of unmatchable types, subject to change + + Return: + Returns a tuple of (model_a_with_loggers, model_b_with_loggers). Modifies both models inplace. 
+ """ + + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_loggers") + # TODO(future PR): expose these + skipped_module_names: List[str] = [] + skipped_module_classes: List[Callable] = [] + tracer_a = NSTracer(skipped_module_names, skipped_module_classes) + tracer_b = NSTracer(skipped_module_names, skipped_module_classes) + gm_a = GraphModule(model_a, tracer_a.trace(model_a)) + maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope') + if maybe_model_a_node_name_to_scope is not None: + gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope + gm_b = GraphModule(model_b, tracer_b.trace(model_b)) + maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope') + if maybe_model_b_node_name_to_scope is not None: + gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope + return _add_loggers_impl( + name_a, gm_a, name_b, gm_b, logger_cls, + should_log_inputs=should_log_inputs, + base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops, + unmatchable_types_map=unmatchable_types_map) + + +def _extract_logger_info_one_model( + model: nn.Module, + results: NSResultsType, + logger_cls: Callable, +) -> None: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_logger_info_one_model") + for gm_name, mod in model.named_modules(): + # TODO(future PR): better check when scripted + is_logger = ( + isinstance(mod, logger_cls) # type: ignore[arg-type] + or ( + isinstance(mod, torch.jit.RecursiveScriptModule) + and mod.original_name == 'OutputLogger' + ) + ) + if is_logger: + key = mod.ref_name + if key not in results: + results[key] = {} + assert mod.model_name not in results[key], \ + f"{mod.model_name} is already present in results" + if mod.results_type not in results[key]: + results[key][mod.results_type] = {} + if mod.model_name not in results[key][mod.results_type]: + results[key][mod.results_type][mod.model_name] = [] + stats_to_use = mod.stats + if len(mod.stats_rnn) > 0: + stats_to_use = mod.stats_rnn + data = { + 'type': mod.results_type, + 'values': stats_to_use, + 'ref_node_name': mod.ref_node_name, + 'ref_node_target_type': mod.ref_node_target_type, + 'prev_node_name': mod.prev_node_name, + 'prev_node_target_type': mod.prev_node_target_type, + 'index_within_arg': mod.index_within_arg, + 'index_of_arg': mod.index_of_arg, + 'fqn': mod.fqn, + 'qconfig_str': mod.qconfig_str, + } + if hasattr(mod, 'comparisons'): + data['comparisons'] = mod.comparisons + data['comparison_fn_name'] = mod.comparison_fn_name + else: + data['comparisons'] = [] + data['comparison_fn_name'] = '' + results[key][mod.results_type][mod.model_name].append(data) + # ensure the list stays sorted + results[key][mod.results_type][mod.model_name].sort( + key=lambda res: + f"{res['index_of_arg']}:{res['index_within_arg']}" + ) + + +# TODO(future PR): align on naming +# this is equivalent of just the comparison extraction part of `ns.compare_model_outputs` +def extract_logger_info( + model_a: nn.Module, + model_b: nn.Module, + logger_cls: Callable, + model_name_to_use_for_layer_names: str, +) -> NSResultsType: + """ + Traverse all loggers in `model_a` and `model_b`, and extract the logged + information. 
+ + Args: + model_a: model A + model_b: model B + logger_cls: class of Logger to use + model_name_to_use_for_layer_names: string name of model to use for + layer names in the output + + Return: + NSResultsType, containing the logged comparisons + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_logger_info") + results: NSResultsType = {} + for model in (model_a, model_b): + _extract_logger_info_one_model(model, results, logger_cls) + # fill in missing fqn entries + maybe_add_missing_fqns(results) + # rekey on the name of model b + results = rekey_logger_info_on_node_name_of_model( + results, model_name_to_use_for_layer_names) + return results + + +def _add_shadow_loggers_impl( + name_a: str, + gm_a: GraphModule, + name_b: str, + gm_b: GraphModule, + logger_cls: Callable, + should_log_inputs: bool, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> nn.Module: + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_shadow_loggers_impl") + matched_subgraph_pairs = get_matching_subgraph_pairs( + gm_a, gm_b, base_name_to_sets_of_related_ops, + unmatchable_types_map) + gm_a_shadows_b = create_a_shadows_b( + name_a, gm_a, name_b, gm_b, matched_subgraph_pairs, logger_cls, + should_log_inputs=should_log_inputs, + node_type_to_io_type_map=node_type_to_io_type_map) + return gm_a_shadows_b + + +def add_shadow_loggers( + name_a: str, + model_a: nn.Module, + name_b: str, + model_b: nn.Module, + logger_cls: Callable, + should_log_inputs: bool = False, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> nn.Module: + """ + Instrument model A and model B with shadow loggers. 
+ + Args: + name_a: string name of model A to use in results + model_a: model A + name_b: string name of model B to use in results + model_b: model B + logger_cls: class of Logger to use + should_log_inputs: whether to log inputs + base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change + unmatchable_types_map: optional override of unmatchable types, subject to change + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_shadow_loggers") + # TODO(future PR): expose these + skipped_module_names: List[str] = [] + skipped_module_classes: List[Callable] = [] + tracer_a = NSTracer(skipped_module_names, skipped_module_classes) + tracer_b = NSTracer(skipped_module_names, skipped_module_classes) + gm_a = GraphModule(model_a, tracer_a.trace(model_a)) + maybe_model_a_node_name_to_scope = _get_observed_graph_module_attr(model_a, 'node_name_to_scope') + if maybe_model_a_node_name_to_scope is not None: + gm_a._node_name_to_scope = maybe_model_a_node_name_to_scope + gm_b = GraphModule(model_b, tracer_b.trace(model_b)) + maybe_model_b_node_name_to_scope = _get_observed_graph_module_attr(model_b, 'node_name_to_scope') + if maybe_model_b_node_name_to_scope is not None: + gm_b._node_name_to_scope = maybe_model_b_node_name_to_scope + return _add_shadow_loggers_impl( + name_a, gm_a, name_b, gm_b, logger_cls, + should_log_inputs=should_log_inputs, + base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops, + node_type_to_io_type_map=node_type_to_io_type_map, + unmatchable_types_map=unmatchable_types_map) + + +def extract_shadow_logger_info( + model_a_shadows_b: nn.Module, + logger_cls: Callable, + model_name_to_use_for_layer_names: str, +) -> NSResultsType: + """ + Traverse all loggers in a shadow model, and extract the logged + information. + + Args: + model_a_shadows_b: shadow model + logger_cls: class of Logger to use + model_name_to_use_for_layer_names: string name of model to use for + layer names in the output + + Return: + NSResultsType, containing the logged comparisons + """ + torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_shadow_logger_info") + results: NSResultsType = collections.defaultdict(dict) + _extract_logger_info_one_model(model_a_shadows_b, results, logger_cls) + # fill in missing fqn entries + maybe_add_missing_fqns(results) + # rekey on the name of model b + results = rekey_logger_info_on_node_name_of_model( + results, model_name_to_use_for_layer_names) + return dict(results) + + +def extend_logger_results_with_comparison( + results: NSResultsType, + model_name_1: str, + model_name_2: str, + comparison_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], + comparison_name: str, +) -> None: + """ + Compares the logged values from `model_name_2` against the corresponding + values in `model_name_1`, using `comparison_fn`. Records the result + in `model_name_2`'s results under `comparison_name`. Modifies `results` inplace. + + Args: + results: the result data structure from `extract_logger_info` or + `extract_shadow_logger_info`. 
+ model_name_1: string name of model 1 + model_name_2: string name of model 2 + comparison_fn: function to compare two Tensors + comparison_name: string name of model to use for + layer names in the output + """ + for results_type_to_results in results.values(): + for model_name_to_results in results_type_to_results.values(): + assert model_name_1 in model_name_to_results, \ + f"{model_name_1} not found in results" + assert model_name_2 in model_name_to_results, \ + f"{model_name_2} not found in results" + + results_1 = model_name_to_results[model_name_1] + results_2 = model_name_to_results[model_name_2] + + for result_2 in results_2: + index_within_arg_2 = result_2['index_within_arg'] + index_of_arg_2 = result_2['index_of_arg'] + # find corresponding result_1 + result_1 = None + for cur_result_1 in results_1: + index_within_arg_1 = cur_result_1['index_within_arg'] + index_of_arg_1 = cur_result_1['index_of_arg'] + if ( + (index_within_arg_1 == index_within_arg_2) and + (index_of_arg_1 == index_of_arg_2) + ): + result_1 = cur_result_1 + break + assert result_1 is not None + + values_1 = result_1['values'] + values_2 = result_2['values'] + result_2[comparison_name] = [] + for value_1, value_2 in zip(values_1, values_2): + comparison_result = comparison_fn(value_1, value_2) + result_2[comparison_name].append(comparison_result) + +def prepare_n_shadows_model( + model: torch.nn.Module, + example_inputs: Any, + qconfig_multi_mapping: QConfigMultiMapping, + backend_config: BackendConfig, + custom_prepare_fn: Optional[Callable] = None, + custom_prepare_kwargs: Optional[Dict[str, Any]] = None, + custom_tracer: Any = None, +) -> GraphModule: + """ + Given a model with a graph with M ops such as + + + args_kwargs_m -> op_m -> output_m + + + And a set of N qconfigs for each op, creates a new model, with + each of the subgraph of `op_m` transformed into + + .. code:: + + |---------> op_m_n -> log_m_n + | / + args_kwargs_m ---------> op_m -> log_m_0 + + Where op_m_n is op_m wrapped in a submodule and transformed with + qconfig_n, and its inner graph looks like + + .. code:: + + args_m -------- op_m_prepared_with_qconfig_n -> out_m_n + / + kwargs_m --- + + This is useful for testing different quantization of multiple layers in + a single pass through the model. + + High level TODOs for future PRs: + * figure out a better way to name the output structure + * return a results data structure instead of printing it out + * add examples to docblocks + """ + + if custom_tracer is None: + tracer = quantize_fx.QuantizationTracer([], []) + else: + tracer = custom_tracer + mt = torch.fx.GraphModule(model, tracer.trace(model)) + # this is necessary to ensure logger FQNs get populated + mt._node_name_to_scope = tracer.node_name_to_scope + + # run example input propagation, we need this to call prepare_fx on + # individual subgraphs + output_prop = OutputProp(mt) + output_prop.propagate(*example_inputs) + + # Find the set of subgraphs in the original graph which we need to + # consider. 
+ modules = dict(mt.named_modules(remove_duplicate=False)) + patterns = _get_pattern_to_quantize_handlers(backend_config) + root_node_getter_mapping = \ + get_fusion_pattern_to_root_node_getter(backend_config) + standalone_module_names: List[str] = [] + standalone_module_classes: List[Type] = [] + custom_module_classes: List[Type] = [] + matches = _find_matches( + mt.graph, modules, patterns, root_node_getter_mapping, + standalone_module_names, standalone_module_classes, custom_module_classes) + subgraphs_dedup: Dict[str, List[Node]] = \ + _get_dedup_subgraphs(matches) + + # generate node to qconfig for each subgraph + # TODO(future PR): deduplicate repeating entries + list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]] = [] + for qconfig_mapping in qconfig_multi_mapping.qconfig_mappings_list: + node_name_to_qconfig = _generate_node_name_to_qconfig( + mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope) + list_of_node_name_to_qconfig.append(node_name_to_qconfig) + + # For each region in the model, do the following: + # For each qconfig for that region, do the following: + # 1. create a copy of the region wrapped in a module + # 2. pass original args, original kwargs, and expected output to module + # 3. add an output comparison logger and hook it up to compare + # actual output to expected output + # 4. run `prepare_fx` on the module + for (subgraph_idx, (match_name, nodes_in_this_subgraph)) in \ + enumerate(subgraphs_dedup.items()): + create_n_transformed_and_logged_copies_of_subgraph( + mt, subgraph_idx, match_name, nodes_in_this_subgraph, + qconfig_multi_mapping.qconfig_mappings_list, list_of_node_name_to_qconfig, + custom_prepare_fn, custom_prepare_kwargs # type: ignore[arg-type] + ) + + return mt + +# TODO(future PR): we should rethink the names of all the PNP APIs +def _prepare_n_shadows_add_loggers_model( + model: torch.nn.Module, + example_inputs: Any, + qconfig_mapping: QConfigMapping, + backend_config: BackendConfig, +) -> torch.nn.Module: + r""" + Note: this API is not recommended for wide usage, it is only + provided for customers who need to migrate from the `add_loggers` + API. + + This creates a model which provides logging for the following + problem: if we quantize `model` with `qconfig_mapping` and feed + the same input through both models, log the comparisons of + corresponding intermediate layers. + + The problem is solved with a single model. Specifically, we + partition `model` into N subgraphs, create a copy of each relevant + subgraph, wrap it in a module, apply the quantization API to that + module, and hook up loggers to measure the comparisons. + + Example starting graph: + + x0 -> op0 -> x1 -> op1 -> x2 + + Example config: quantize op0 to int8, do nothing to op1. + The following graph will be created: + + .. code:: + + x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log + \ \ \ # noqa: W605 + ---> op0_1 -> x1_1 ----> clog -> op1_0 -> x2_1 ----> clog + + Where op0_0 is op0, op0_1 is op0 wrapped in a submodule and quantized + to int8, op1_0 is op1 (appearing in the graph twice), log is a logger, + and clog is a comparison logger. 
+ """ + + tracer = quantize_fx.QuantizationTracer([], []) + mt = torch.fx.GraphModule(model, tracer.trace(model)) + # this is necessary to ensure logger FQNs get populated + mt._node_name_to_scope = tracer.node_name_to_scope + + # run example input propagation, we need this to call prepare_fx on + # individual subgraphs + output_prop = OutputProp(mt) + output_prop.propagate(*example_inputs) + + # Find the set of subgraphs in the original graph which we need to + # consider. + modules = dict(mt.named_modules(remove_duplicate=False)) + patterns = _get_pattern_to_quantize_handlers(backend_config) + root_node_getter_mapping = \ + get_fusion_pattern_to_root_node_getter(backend_config) + standalone_module_names: List[str] = [] + standalone_module_classes: List[Type] = [] + custom_module_classes: List[Type] = [] + matches = _find_matches( + mt.graph, modules, patterns, root_node_getter_mapping, + standalone_module_names, standalone_module_classes, custom_module_classes) + subgraphs_dedup: Dict[str, List[Node]] = \ + _get_dedup_subgraphs(matches) + + # generate node to qconfig for each subgraph + node_name_to_qconfig = _generate_node_name_to_qconfig( + mt, modules, mt.graph, qconfig_mapping, tracer.node_name_to_scope) + + # Now, mutate the graph to be the add_loggers graph with propagation + # error. + create_add_loggers_graph( + mt, subgraphs_dedup, qconfig_mapping, node_name_to_qconfig) + + return mt + +# TODO(future PR): we should rethink the names of all the PNP APIs +def _n_shadows_compare_weights( + model: torch.nn.Module, + example_inputs: Any, + qconfig_mapping: QConfigMapping, + backend_config: BackendConfig, +) -> NSResultsType: + """ + Note: this API is not recommended for wide usage, it is only + provided for customers who need to migrate from the `add_loggers` + API. + """ + qconfig_multi_mapping = \ + QConfigMultiMapping.from_list_qconfig_mapping([qconfig_mapping]) + mp = prepare_n_shadows_model( + model, example_inputs, qconfig_multi_mapping, backend_config) + # passing inputs through the model is necessary to populate + # observers which observe weights with real values + mp(*example_inputs) + mq = convert_n_shadows_model(mp) + weight_comparison = extract_weight_comparison(mq) + return weight_comparison + +# TODO(future PR): consider aligning API signature with other similar quantization +# functions (enable_fake_quant, etc) +def loggers_set_enabled(model: torch.nn.Module, enabled: bool) -> None: + """ + Sets the `enabled` setting on a `model`'s loggers + """ + for name, child in model.named_modules(): + if isinstance(child, OutputLogger): + child.enabled = enabled + +# TODO(future PR): consider aligning API signature with other similar quantization +# functions (enable_fake_quant, etc) +def loggers_set_save_activations( + model: torch.nn.Module, + save_activations: bool, +) -> None: + """ + Sets the `save_activations` setting on a `model`'s loggers + """ + for name, child in model.named_modules(): + if isinstance(child, OutputLogger): + child.save_activations = save_activations + +def convert_n_shadows_model( + model: GraphModule, + custom_convert_fn: Optional[Callable] = None, + custom_convert_kwargs: Optional[Dict[str, Any]] = None +) -> GraphModule: + """ + Given a model from `prepare_n_shadows_model`, runs `convert_fx` + on each shadow submodule. 
+ """ + for node in model.graph.nodes: + # TODO(future PR): consider matching in a safer way than + # node name string match + if node.name.startswith(SHADOW_WRAPPER_NODE_NAME_PREFIX): + orig_mod = getattr(model, node.name) + if custom_convert_fn is None: + converted_mod = torch.ao.quantization.quantize_fx.convert_fx( + orig_mod) + else: + if custom_convert_kwargs is None: + custom_convert_kwargs = {} + converted_mod = custom_convert_fn(orig_mod, **custom_convert_kwargs) + setattr(model, node.name, converted_mod) + + return model + +def extract_results_n_shadows_model(model: torch.nn.Module) -> NSResultsType: + """ + Extracts logger results from `model`. + """ + results: NSResultsType = {} + _extract_logger_info_one_model(model, results, OutputLogger) + return results + +def print_comparisons_n_shadows_model(results: NSResultsType) -> None: + """ + Prints a summary of extracted `results`. + """ + results_grouped = group_results_by_subgraph(results) + results_comparison = create_results_comparison(results_grouped) + print_n_shadows_summary(results_comparison) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68589c1a654b7aced7c25b314b5924fdc88aa692 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_matcher.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d7b95ebfe578dcbcc416b0d3ec87c341424a23c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/graph_passes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78a818d16f3d997d631c4f41ae8fc219130fc233 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/mappings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d22f1d2febf689307761e38749a1e6dd7f206d8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/pattern_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e01a8b2aa94edd87180c50035208ee8bab0b1d86 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/weight_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..8db946ec707a71c752bb13a73fbc48fb0a003cf1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_matcher.py @@ -0,0 +1,460 @@ +import collections +import enum + +import torch +toq = torch.ops.quantized + +from torch.fx import GraphModule +from torch.fx.graph import Graph, Node + +from torch.ao.quantization.utils import getattr_from_fqn +from .ns_types import NSSubgraph, NSNodeTargetType +from .mappings import ( + get_base_name_to_sets_of_related_ops, + get_unmatchable_types_map, +) +from .pattern_utils import ( + get_type_a_related_to_b, + get_reversed_fusions, + end_node_matches_reversed_fusion, +) +from torch.ao.quantization import ( + ObserverBase, + FakeQuantizeBase, +) + +from typing import Dict, Tuple, List, Optional, Set, Any + +def _get_output_nodes(g: Graph) -> List[Node]: + return [n for n in g.nodes if n.op == 'output'] + +class _NSGraphMatchableSubgraphsIterator: + """ + Iterates through the graph of gm, starting with the output nodes + and continuing backwards. + 1. Returns matchable subgraphs, in order. A subgraph is defined by + (start_node, end_node). + 2. Skips over non-matchable subgraphs + """ + def __init__( + self, + gm: GraphModule, + non_matchable_functions: Set[NSNodeTargetType], + non_matchable_modules: Set[NSNodeTargetType], + non_matchable_methods: Set[NSNodeTargetType], + ): + self.gm: GraphModule = gm + self.non_matchable_functions: Set[NSNodeTargetType] = non_matchable_functions + self.non_matchable_modules: Set[NSNodeTargetType] = non_matchable_modules + self.non_matchable_methods: Set[NSNodeTargetType] = non_matchable_methods + self.seen_nodes: Set[Node] = set() + self.stack: List[Node] = [] + for start_node in _get_output_nodes(self.gm.graph): + self.stack.append(start_node) + + def __iter__(self): + return self + + def __next__(self) -> NSSubgraph: + """ + Returns the next matchable subgraph. + """ + while len(self.stack) > 0: + cur_end_node = self.stack.pop() + if cur_end_node in self.seen_nodes: + continue + + # for subgraphs which are single nodes, start_node == end_node + # for subgraphs with more than one node, start node != end_node + cur_start_node = cur_end_node + # Subgraphs like linear-relu have the base node as the start node. + # Subgraphs like dequantize-linear-relu-to(torch.float16) have the + # base node as the second node. + # The cur_base_op_node var will move to the actual node during + # the fusion matching later in this code block. + cur_base_op_node = cur_end_node + + # Check for potential fusions. For now, we are greedy + # and always skip all non-base nodes of a fusion. For example, + # if we match linear-relu backwards, we will always skip the + # relu node and attempt to match the linear node. This can + # be made configurable later if needed. 
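            # Illustrative walk-through (sketch): for a trace ending in
            #   ... -> linear -> relu -> output
            # iteration starts at `relu` (cur_end_node). A reversed fusion
            # such as (relu, linear) matches, so cur_start_node walks back
            # to `linear`, which also becomes cur_base_op_node; the returned
            # subgraph is (start=linear, base_op=linear, end=relu).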
+ for _reverse_fusion_ops, base_op_idx in get_reversed_fusions(): + is_match = end_node_matches_reversed_fusion( + cur_end_node, _reverse_fusion_ops, self.gm, self.seen_nodes) + if is_match: + # navigate to the base node + for rev_fusion_idx in range(len(_reverse_fusion_ops) - 1): + self.seen_nodes.add(cur_start_node) + # for now, assume that there are no other nodes + # which need to be added to the stack + cur_start_node = cur_start_node.args[0] # type: ignore[assignment] + # if the base op index matches the current node, set it + rev_base_op_idx = \ + len(_reverse_fusion_ops) - 2 - base_op_idx + if rev_fusion_idx == rev_base_op_idx: + cur_base_op_node = cur_start_node + break + + self.seen_nodes.add(cur_start_node) + # add args of previous nodes to stack + for arg in cur_start_node.all_input_nodes: + self._recursively_add_node_arg_to_stack(arg) + + # skip unmatchable nodes + # note: this check is done on the start_node, i.e. + # if we are matching linear-relu in reverse, this would do the matchable + # check on the linear + if not self._is_matchable(cur_base_op_node): + continue + + # If an observer or a fake_quant was not matched as a part of + # a pattern of multiple nodes, ignore it. One case where this is + # relevant is an observer on a graph input, which was added because + # it is necessary for the next node. + if cur_end_node.op == 'call_module' and cur_start_node is cur_end_node: + maybe_obs = getattr_from_fqn(self.gm, cur_end_node.target) # type: ignore[arg-type] + if isinstance(maybe_obs, (ObserverBase, FakeQuantizeBase)): + continue + + return NSSubgraph( + start_node=cur_start_node, end_node=cur_end_node, + base_op_node=cur_base_op_node) + + raise StopIteration + + def _recursively_add_node_arg_to_stack(self, arg: Any) -> None: + """ + Adds all of the nodes in this arg to the stack, properly navigating + through list, dicts and tuples. + """ + if isinstance(arg, Node): + self.stack.append(arg) + elif isinstance(arg, torch.fx.immutable_collections.immutable_list) or type(arg) is tuple: + for inner_arg in arg: + self._recursively_add_node_arg_to_stack(inner_arg) + elif isinstance(arg, torch.fx.immutable_collections.immutable_dict): + for value in arg.values(): + self._recursively_add_node_arg_to_stack(value) + + def _is_matchable(self, node: Node) -> bool: + if node.op == 'call_function': + return node.target not in self.non_matchable_functions + elif node.op == 'call_module': + assert isinstance(node.target, str) + target_mod = getattr_from_fqn(self.gm, node.target) + return not \ + any(isinstance(target_mod, t) # type: ignore[arg-type] + for t in self.non_matchable_modules) + elif node.op == 'call_method': + return node.target not in self.non_matchable_methods + else: + return False + +class GraphMatchingException(Exception): + """ + Exception raised when two graphs cannot be matched. + """ + pass + +class SubgraphTypeRelationship(enum.Enum): + # same type, known + # example: F.linear and F.linear, or nn.Conv2d and nn.Conv2d + EQUAL = enum.auto() + # same type, but the type is not known to Numerical Suite + # (user defined type, etc). 
+ EQUAL_BUT_UKNOWN = enum.auto() + # known, same subgraph_relationship set, but not the same type + # example: F.linear and toq.linear + RELATED_BUT_NOT_EQUAL = enum.auto() + # not related + NOT_RELATED = enum.auto() + +def _get_subgraph_relationship_type( + subgraph_a: NSSubgraph, + subgraph_b: NSSubgraph, + gm_a: GraphModule, + gm_b: GraphModule, + type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]], +) -> SubgraphTypeRelationship: + node_a = subgraph_a.base_op_node + node_b = subgraph_b.base_op_node + + # TODO(next): make this code handle matching by what is before the base op + if node_a.op != node_b.op: + if not ( + node_a.op in ('call_function', 'call_method') and + node_b.op in ('call_function', 'call_method') + ): + return SubgraphTypeRelationship.NOT_RELATED + + if node_a.op in ('call_function', 'call_method'): + key = (node_a.target, node_b.target) + + if key not in type_a_related_to_b: + if node_a.target == node_b.target: + return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN + else: + return SubgraphTypeRelationship.NOT_RELATED + # after this point, we are dealing with known types + + if node_a.target == node_b.target: + node_a_has_prev = subgraph_a.base_op_node == subgraph_a.start_node + node_b_has_prev = subgraph_b.base_op_node == subgraph_b.start_node + if node_a_has_prev and (not node_b_has_prev): + return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL + elif (not node_a_has_prev) and node_b_has_prev: + return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL + elif (not node_a_has_prev) and (not node_b_has_prev): + return SubgraphTypeRelationship.EQUAL + else: + # TODO(future PR): check for matches start_op_node and base_op_node + return SubgraphTypeRelationship.EQUAL + + if key in type_a_related_to_b: + return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL + else: + return SubgraphTypeRelationship.NOT_RELATED + elif node_a.op == 'call_module': + assert (subgraph_a.base_op_node == subgraph_a.start_node and + subgraph_b.base_op_node == subgraph_b.start_node), \ + "Matching call_module patterns where base_op_node != start_node is not supported yet" + # for call_module, we need to look up the modules to do the type check + assert isinstance(node_a.target, str) + mod_a = getattr_from_fqn(gm_a, node_a.target) + assert isinstance(node_b.target, str) + mod_b = getattr_from_fqn(gm_b, node_b.target) + + key = (type(mod_a), type(mod_b)) + + if key not in type_a_related_to_b: + if type(mod_a) == type(mod_b): + return SubgraphTypeRelationship.EQUAL_BUT_UKNOWN + else: + return SubgraphTypeRelationship.NOT_RELATED + elif type(mod_a) == type(mod_b): + return SubgraphTypeRelationship.EQUAL + else: + return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL + + return SubgraphTypeRelationship.NOT_RELATED + +def _get_name_for_subgraph( + subgraph_a: NSSubgraph, + gm_a: GraphModule, + base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]], + existing_names: Set[str], +) -> str: + """ + Returns a unique name for a subgraph. This name is based on two things: + 1. the name of the set containing the underlying type of the base op in the + subgraph (i.e. 'torch.nn.functional.linear' if this is related to a linear op) + 2. the number of previous subgraphs with related underlying type of the base op + + For example, in the graph + + linear0 -> relu0 -> linear1 -> relu1 + + The subgraphs are (linear0, relu0) and (linear1, relu1). 
If we iterate + from the output node backwards, the name given to (linear1, relu1) will be + `base_op_torch.nn.functional.linear_0`, and the name given to (linear0, relu0) + will be `base_op_torch.nn.functional.linear_1`. + + Why are we not just using the node name? Answer: because of two requirements: + A. fusions must be supported + B. some Numeric Suite APIs can be called without having all of the models in memory + + For example, let's say we need to match nodes of + + (1) ... -> linear0 -> relu0 -> ... + + And + + (2) ... -> linear_relu0 -> ... + + Without being able to inspect them together. With the current naming scheme, if + we iterate through both of these graphs in the same order, and assuming the rest + of the graphs match, both of these subgraphs will get the same name without + (1) and (2) knowing anything about each other. + """ + target_type = _get_node_target_type(subgraph_a.base_op_node, gm_a) + target_base_type = None + for base_name, sets_of_related_ops in base_name_to_sets_of_related_ops.items(): + if target_type in sets_of_related_ops: + target_base_type = base_name + target_base_name = 'base_op_' + str(target_base_type) + counter = 0 + proposed_name = target_base_name + '_' + str(counter) + while proposed_name in existing_names: + counter += 1 + proposed_name = target_base_name + '_' + str(counter) + existing_names.add(proposed_name) + return proposed_name + +def _get_node_target_type(node: Node, gm: GraphModule) -> Optional[NSNodeTargetType]: + if node.op in ('call_function', 'call_method'): + return node.target + elif node.op == 'call_module': + assert isinstance(node.target, str) + mod = getattr_from_fqn(gm, node.target) + return type(mod) + return None + +def get_matching_subgraph_pairs( + gm_a: GraphModule, + gm_b: GraphModule, + base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None, + unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> Dict[str, Tuple[NSSubgraph, NSSubgraph]]: + """ + Matches matchable subgraphs of graph_a to graph_b. + + For a node, "matchable" is defined as a node which is not an observer, + fake_quants, quant or dequant. + + A subgraph can contain one or more nodes. A subgraph is matchable if + at least one node inside of it is matchable. Currently, all nodes in + a subgraph must be matchable (because we assume no observers will be + inserted in the middle of a fusion). + + A subgraph is defined by (start_node, end_node). We assume that only + start_node and end_node are linked with the surrounding graph, all other + nodes in a subgraph are self-contained. + + A pair of nodes is "related" if both nodes represent the same mathematical + operation across different quantization flavors. For example, + `F.linear` and `torch.ops.quantized.linear` are related, and + `F.linear` and `torch.nn.Conv` are not related. + + For each matchable pair of nodes node_a and node_b, they will match + if node_a and node_b are related. + + For graphs A and B, they will match iff: + 1. the number of matchable subgraphs in A and B is equivalent + 2. when iterating through the matchable subgraphs of A and B in the same order, each + corresponding pair of base nodes is related. + + This enables us to find the corresponding subgraphs between + graphs of related models. 
For example, if we had two graphs such as: + + graph_a: x0 -> conv_0 (type: nn.Conv2d) -> obs_0 -> x1 + w -/ + b -/ + + graph_b: x0 -> quant_0 -> qconv_0 (type: nnq.Conv2d) -> dequant_0 -> x1 + packed_params_0 -/ + + This function will return the following result: + { + 'conv_0': ( # the name of the node in graph_b + (conv_0, conv_0), # (start_node_a, end_node_a) + (qconv_0, qconv_0), # (start_node_b, end_node_b) + ), + } + + Or, if we have a fusion pattern, + + graph_a: x0 -> linear_0 -> relu_0 -> obs_0 -> x1 + w -/ + b -/ + + graph_b: x0 -> quant_0 -> linear_relu_0 -> dequant_0 -> x1 + packed_params_0 -/ + + This function will return the following result: + { + 'linear_relu_0': ( # the name of the node in graph_b + (linear_0, relu_0), # (start_node_a, end_node_a) + (linear_relu_0, linear_relu_0), # (start_node_b, end_node_b) + ), + } + """ + if unmatchable_types_map is None: + unmatchable_types_map = get_unmatchable_types_map() + non_matchable_functions = unmatchable_types_map['funs_unmatchable'] + non_matchable_modules = unmatchable_types_map['mods_unmatchable'] + non_matchable_methods = unmatchable_types_map['meths_unmatchable'] + + graph_a_iterator = _NSGraphMatchableSubgraphsIterator( + gm_a, non_matchable_functions, non_matchable_modules, + non_matchable_methods) + graph_b_iterator = _NSGraphMatchableSubgraphsIterator( + gm_b, non_matchable_functions, non_matchable_modules, + non_matchable_methods) + results = collections.OrderedDict() + if base_name_to_sets_of_related_ops is None: + base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops() + type_a_related_to_b = \ + get_type_a_related_to_b(base_name_to_sets_of_related_ops) + + existing_names_a: Set[str] = set() + existing_names_b: Set[str] = set() + + while True: + # fetch the next subgraphs from a and b + cur_subgraph_a, cur_subgraph_b = None, None + try: + cur_subgraph_a = next(graph_a_iterator) + except StopIteration: + pass + try: + cur_subgraph_b = next(graph_b_iterator) + except StopIteration: + pass + + # look up types of a and b for useful error messages + type_start_a, type_start_b = None, None + if cur_subgraph_a is not None: + type_start_a = _get_node_target_type(cur_subgraph_a.start_node, gm_a) + if cur_subgraph_b is not None: + type_start_b = _get_node_target_type(cur_subgraph_b.start_node, gm_b) + + # check for results and determine what to do next + if cur_subgraph_a is not None and cur_subgraph_b is not None: + # both nodes were fetched, check for subgraph_relationship + # note: subgraph_relationship is checked on the start node, i.e. + # if a linear-relu pattern is checked, we would check for subgraph_relationship + # of the linear + subgraph_relationship = _get_subgraph_relationship_type( + cur_subgraph_a, cur_subgraph_b, + gm_a, gm_b, type_a_related_to_b) + if subgraph_relationship == SubgraphTypeRelationship.NOT_RELATED: + msg = f""" +The subgraphs +({cur_subgraph_a}, {type_start_a}) and +({cur_subgraph_b}, {type_start_b}) +are not related. 
Please ensure that the two models you pass in have the same number +of subgraphs, and each pair of subgraphs is related to each other.""" + raise GraphMatchingException(msg) + elif subgraph_relationship == SubgraphTypeRelationship.EQUAL_BUT_UKNOWN: + # skip matching but unknown types + continue + key_name_a = _get_name_for_subgraph( + cur_subgraph_a, gm_a, base_name_to_sets_of_related_ops, + existing_names_a) + key_name_b = _get_name_for_subgraph( + cur_subgraph_b, gm_b, base_name_to_sets_of_related_ops, + existing_names_b) + assert key_name_a == key_name_b, \ + f"Subgraph names {key_name_a} and {key_name_b} do not match" + results[key_name_a] = (cur_subgraph_a, cur_subgraph_b) + continue + elif cur_subgraph_a is None and cur_subgraph_b is None: + # we reached the end of both graphs + break + else: + # only one node was fetched, no match possible, throw error + msg = f""" +Attempting to match +({cur_subgraph_a}, {type_start_a}) and +({cur_subgraph_b}, {type_start_b}), +one of which is empty. Please ensure that the two models you pass in have the same number +of subgraphs.""" + raise GraphMatchingException(msg) + + # The subgraph pairs are originally created by traversing the two graphs + # from the outputs to the inputs. Reverse the results to return the + # subgraphs in their order of execution. + results = collections.OrderedDict(reversed(list(results.items()))) + + return results diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py new file mode 100644 index 0000000000000000000000000000000000000000..edd5284cf6eb6f0187762cd53153b06e2ce82589 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/graph_passes.py @@ -0,0 +1,950 @@ +import torch +from torch.fx import GraphModule, map_arg +from torch.fx.graph import Graph, Node +from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix + +from .utils import ( + get_node_first_input_and_output_type, + getattr_from_fqn, + NodeInputOrOutputType, + return_first_non_observer_node, + get_number_of_non_param_args, + get_target_type_str, + get_arg_indices_of_inputs_to_log, + get_node_input_qparams, + op_type_supports_shadowing, + get_normalized_nth_input, +) + +from .ns_types import ( + NSSingleResultValuesType, + NSSubgraph, + NSNodeTargetType, +) +from torch.ao.ns.fx.mappings import ( + get_node_type_to_io_type_map, +) +from torch.ao.quantization.observer import _is_activation_post_process + +from typing import Dict, Tuple, Callable, List, Any, Union, Optional, Set + +def _maybe_get_fqn(node: Node, gm: GraphModule) -> Optional[str]: + fqn = None + if hasattr(gm, '_node_name_to_scope'): + # fqn on observers is not present, because they do not + # exist when the fqns are created during tracing. If this is + # an observer, get the fqn of the node being observed. 
+ node_to_use_for_fqn = node + if node.op == 'call_module': + assert isinstance(node.target, str) + module = getattr_from_fqn(gm, node.target) + if _is_activation_post_process(module): + node_to_use_for_fqn = get_normalized_nth_input(node, gm, 0) + fqn = gm._node_name_to_scope[node_to_use_for_fqn.name][0] # type: ignore[index] + return fqn # type: ignore[return-value] + +def _insert_logger_after_node( + node: Node, + gm: GraphModule, + logger_cls: Callable, + logger_node_name_suffix: str, + ref_node_name: str, + model_name: str, + ref_name: str, + ref_node_target_type: str, + results_type: str, + index_within_arg: int, + index_of_arg: int, + fqn: Optional[str], +) -> Node: + """ + Given a starting graph of + + prev_node -> node -> next_node + + This function creates a new logger_cls obj and adds it + after node, resulting in + + prev_node -> node -> logger_obj -> next_node + """ + # create new name + logger_node_name = \ + get_new_attr_name_with_prefix(node.name + logger_node_name_suffix)(gm) + target_type = get_target_type_str(node, gm) + # create the logger object + logger_obj = logger_cls( + ref_node_name, node.name, model_name, ref_name, target_type, + ref_node_target_type, + results_type, index_within_arg, index_of_arg, fqn) + # attach the logger object to the parent module + setattr(gm, logger_node_name, logger_obj) + logger_node = node.graph.create_node( + 'call_module', logger_node_name, (node,), {}) + return logger_node + +def add_loggers_to_model( + gm: GraphModule, + node_to_instrument_inputs_to_ref_node_name: Dict[Node, Tuple[str, str]], + node_to_instrument_outputs_to_ref_node_name: Dict[Node, Tuple[str, str]], + logger_cls: Callable, + model_name: str, +) -> GraphModule: + """ + Takes the graph of gm, adds loggers to the output + of each node in nodes_to_instrument. Returns a GraphModule with the new + graph. + """ + + new_graph = Graph() + env: Dict[str, Any] = {} + modules = dict(gm.named_modules()) + + def load_arg(a): + return map_arg(a, lambda node: env[node.name]) + + for node in gm.graph.nodes: + if node.op == 'output': + new_graph.output(map_arg(get_normalized_nth_input(node, gm, 0), load_arg)) + continue + + if ( + (node in node_to_instrument_inputs_to_ref_node_name) or + (node in node_to_instrument_outputs_to_ref_node_name) + ): + fqn = _maybe_get_fqn(node, gm) + + if node in node_to_instrument_inputs_to_ref_node_name: + ref_name, ref_node_type = node_to_instrument_inputs_to_ref_node_name[node] + # Ops such add and mul are special because either + # one or two of the first two arguments can be tensors, + # and if one argument is a tensor it can be first or + # second (x + 1 versus 1 + x). 
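                # e.g. for `torch.add(x, y)` both of the first two args may
                # be logged, while for `x + 1` the scalar arg is skipped
                # below because it is not a Node.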
+ arg_indices_to_log = get_arg_indices_of_inputs_to_log(node) + for node_arg_idx in arg_indices_to_log: + node_arg = get_normalized_nth_input(node, gm, node_arg_idx) + if type(node_arg) == Node: + # create a single input logger + prev_node = env[node_arg.name] + env[node_arg.name] = _insert_logger_after_node( + prev_node, gm, logger_cls, '_ns_logger_', node.name, + model_name, ref_name, ref_node_type, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=0, index_of_arg=node_arg_idx, + fqn=fqn) + elif type(node_arg) == torch.fx.immutable_collections.immutable_list: + # create N input loggers, one for each node + for arg_idx, arg in enumerate(node_arg): # type: ignore[var-annotated, arg-type] + prev_node = env[arg.name] + env[prev_node.name] = _insert_logger_after_node( + prev_node, gm, logger_cls, '_ns_logger_', node.name, + model_name, ref_name, ref_node_type, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=arg_idx, index_of_arg=node_arg_idx, + fqn=fqn) + else: + pass + + # ensure env is populated with base node + # Note: runs for both inputs and outputs + env[node.name] = new_graph.node_copy(node, load_arg) + + if node in node_to_instrument_outputs_to_ref_node_name: + ref_name, ref_node_type = node_to_instrument_outputs_to_ref_node_name[node] + # add the logger after the base node + env[node.name] = _insert_logger_after_node( + env[node.name], gm, logger_cls, '_ns_logger_', node.name, + model_name, ref_name, ref_node_type, + NSSingleResultValuesType.NODE_OUTPUT.value, + index_within_arg=0, index_of_arg=0, fqn=fqn) + + else: + env[node.name] = new_graph.node_copy(node, load_arg) + + new_gm = GraphModule(gm, new_graph) + return new_gm + +def _insert_quantize_per_tensor_node( + prev_node_c: Node, + node_a: Node, + gm_b: GraphModule, + graph_c: Graph, + scale: Union[torch.Tensor, float], + zero_point: Union[torch.Tensor, int], + dtype_cast_name: str, +) -> Node: + # copy scale + scale_node_name = \ + get_new_attr_name_with_prefix( + node_a.name + '_input_scale_')(gm_b) + setattr(gm_b, scale_node_name, scale) + scale_node = graph_c.create_node( + 'get_attr', scale_node_name, (), {}, scale_node_name) + # copy zero_point + zero_point_node_name = \ + get_new_attr_name_with_prefix( + node_a.name + '_input_zero_point_')(gm_b) + setattr(gm_b, zero_point_node_name, zero_point) + zero_point_node = graph_c.create_node( + 'get_attr', zero_point_node_name, (), {}, zero_point_node_name) + # create the quantize_per_tensor call + return graph_c.create_node( + 'call_function', torch.quantize_per_tensor, + (prev_node_c, scale_node, zero_point_node, torch.quint8), {}, + dtype_cast_name) + +def _insert_dtype_cast_after_node( + node_a: Node, + node_c: Node, + prev_node_c: Union[Node, List[Node]], + gm_a: GraphModule, + gm_b: GraphModule, + graph_c: Graph, + node_name_prefix: str, + logger_cls: Callable, + node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]], +) -> Union[Node, List[Node]]: + """ + Given a starting graph C (derived from graph B) of + + ... -> prev_node_c -> node_c -> ... + + And a corresponding related node_a, inserts the correct dtype + cast node after prev_node_c to cast into the dtype expected + by node_a, resulting in: + + dtype_cast + / + ... -> prev_node_c -> node_c -> ... + + For example, if node_c is an int8 op and node_a is an fp32 op, this function + will insert a dequant. 
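    A sketch of the cast selection implemented below::

        # node_c int8,  node_a fp32  -> torch.dequantize
        # dtypes equal and known     -> nn.Identity module
        # node_c fp32,  node_a int8  -> torch.quantize_per_tensor, using
        #                               node_a's input qparams (if known)
        # node_c fp32,  node_a fp16  -> `.to(torch.float16)`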
+ """ + dtype_cast_op = None + dtype_cast_mod_cls = None + dtype_cast_method = None + dtype_cast_method_dtype = None + dtype_cast_scale = None + dtype_cast_zero_point = None + node_input_type_a, _node_output_type_a = \ + get_node_first_input_and_output_type( + node_a, gm_a, logger_cls, node_type_to_io_type_map) + node_input_type_c, _node_output_type_c = \ + get_node_first_input_and_output_type( + node_c, gm_b, logger_cls, node_type_to_io_type_map) + + if ( + (node_input_type_a == NodeInputOrOutputType.FP32 and + node_input_type_c == NodeInputOrOutputType.INT8) or + (node_input_type_a == NodeInputOrOutputType.FP32 and + node_input_type_c == NodeInputOrOutputType.FP16) or + # TODO(future PR): determine the actual dtype of node_c, + # the current code only works because dequantize works with + # multiple input dtypes. + (node_input_type_a == NodeInputOrOutputType.FP32 and + node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8) + ): + dtype_cast_op = torch.dequantize + elif ( + node_input_type_a == node_input_type_c and + node_input_type_a != NodeInputOrOutputType.UNKNOWN + ): + dtype_cast_mod_cls = torch.nn.Identity + elif ( + node_input_type_a == NodeInputOrOutputType.INT8 and + node_input_type_c == NodeInputOrOutputType.FP32 + ): + # int8 shadows fp32, the dtype cast needs to quantize to int8 + # with the right qparams. + node_a_input_qparams = get_node_input_qparams( + node_a, gm_a, node_type_to_io_type_map) + if node_a_input_qparams is not None: + dtype_cast_op = torch.quantize_per_tensor # type: ignore[assignment] + dtype_cast_scale, dtype_cast_zero_point = node_a_input_qparams + elif ( + node_input_type_a == NodeInputOrOutputType.FP16 and + node_input_type_c == NodeInputOrOutputType.FP32 + ): + dtype_cast_method = 'to' + dtype_cast_method_dtype = torch.float16 + else: + raise AssertionError( + f"dtype cast from {node_input_type_c} {node_c.format_node()} to " + + f"{node_input_type_a} {node_a.format_node()} needs to be implemented") + + if isinstance(prev_node_c, Node): + new_dtype_cast_name = \ + get_new_attr_name_with_prefix(node_name_prefix)(gm_b) + if dtype_cast_op: + if dtype_cast_scale is not None and dtype_cast_zero_point is not None: + return _insert_quantize_per_tensor_node( + prev_node_c, node_a, gm_b, graph_c, dtype_cast_scale, + dtype_cast_zero_point, new_dtype_cast_name) + else: + return graph_c.create_node( + 'call_function', dtype_cast_op, (prev_node_c,), {}, + new_dtype_cast_name) + elif dtype_cast_method: + return graph_c.create_node( + 'call_method', dtype_cast_method, + (prev_node_c, dtype_cast_method_dtype), {}, new_dtype_cast_name) + else: + assert dtype_cast_mod_cls + dtype_cast_mod = dtype_cast_mod_cls() + setattr(gm_b, new_dtype_cast_name, dtype_cast_mod) + return graph_c.create_node( + 'call_module', new_dtype_cast_name, (prev_node_c,), {}, + new_dtype_cast_name) + elif isinstance(prev_node_c, list): + results = [] + for prev_node_c_inner in prev_node_c: + new_dtype_cast_name = \ + get_new_attr_name_with_prefix(node_name_prefix)(gm_b) + if dtype_cast_op: + # TODO(future PR): add handling for quantize_per_tensor + new_dtype_cast_node = graph_c.create_node( + 'call_function', dtype_cast_op, (prev_node_c_inner,), {}, + new_dtype_cast_name) + results.append(new_dtype_cast_node) + else: + assert dtype_cast_mod_cls + dtype_cast_mod = dtype_cast_mod_cls() + setattr(gm_b, new_dtype_cast_name, dtype_cast_mod) + new_dtype_cast_node = graph_c.create_node( + 'call_module', new_dtype_cast_name, (prev_node_c_inner,), {}, + new_dtype_cast_name) + 
results.append(new_dtype_cast_node) + return results + else: + raise AssertionError(f"type f{type(prev_node_c)} is not handled") + +# TODO(future PR): look into using copy_node API instead +def _copy_node_from_a_to_c( + node_a: Node, + gm_a: GraphModule, + gm_b: GraphModule, + graph_c: Graph, +) -> Node: + """ + Simple copy of node_a to graph_c. + """ + if node_a.op == 'get_attr': + node_a_copy_name = \ + get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b) + node_a_obj = getattr_from_fqn(gm_a, node_a.target) # type: ignore[arg-type] + if torch.is_tensor(node_a_obj): + node_a_obj = node_a_obj.detach() + setattr(gm_b, node_a_copy_name, node_a_obj) + node_a_copy = graph_c.create_node( + node_a.op, node_a_copy_name, (), {}, node_a_copy_name) + return node_a_copy + elif node_a.op == 'call_method': + assert node_a.target in ('dequantize', 'to'), \ + f"target {node_a.target} is not implemented" + if node_a.target == 'dequantize': + arg_copy = _copy_node_from_a_to_c( + get_normalized_nth_input(node_a, gm_a, 0), + gm_a, gm_b, graph_c) # type: ignore[arg-type] + node_a_copy_name = \ + get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b) + node_a_copy = graph_c.create_node( + node_a.op, node_a.target, (arg_copy,), {}, node_a_copy_name) + return node_a_copy + else: # to + arg_copy = _copy_node_from_a_to_c( + get_normalized_nth_input(node_a, gm_a, 0), gm_a, gm_b, graph_c) # type: ignore[arg-type] + node_a_copy_name = \ + get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b) + node_a_copy = graph_c.create_node( + node_a.op, node_a.target, + (arg_copy, get_normalized_nth_input(node_a, gm_a, 1)), + {}, node_a_copy_name) + return node_a_copy + + else: + raise AssertionError( + f"handling of node {node_a.format_node()} with op {node_a.op} is not implemented") + +def _can_insert_copy_of_subgraph_a( + subgraph_a: NSSubgraph, + gm_a: GraphModule, + num_non_param_args_node_a: int, +) -> bool: + """ + This function returns `False` if the input subgraph cannot be copied by + `_insert_copy_of_subgraph_a_after_input_node_c`. This usually means + that there is a corner case logic for which copy is not yet implemented. + """ + # populate the list of nodes we need to check + nodes = [] + cur_node = subgraph_a.end_node + while cur_node != subgraph_a.start_node: + nodes.append(cur_node) + cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment] + nodes.append(cur_node) + nodes.reverse() + + def _can_insert(node_a_arg, gm_a): + if isinstance(node_a_arg, Node): + arg_a = return_first_non_observer_node(node_a_arg, gm_a) + if arg_a.op == 'call_method': + return arg_a.target in ('dequantize', 'to') + elif arg_a.op == 'get_attr': + return True + else: + return False + elif isinstance(node_a_arg, (list, tuple)): + for el in node_a_arg: + if not isinstance(el, Node): + return False + return True + + # For each node, check if we handle the copy behavior. This follows the + # logic in `_insert_copy_of_subgraph_a_after_input_node_c`. 
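    # Concretely: every arg beyond the non-param inputs must resolve (after
    # skipping observers) to a 'dequantize'/'to' call_method or a get_attr
    # (weights, biases, etc.); anything else is reported as not copyable,
    # and the caller then skips shadowing for that subgraph.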
+ for node_a in nodes: + + local_num_non_param_args_node_a = num_non_param_args_node_a \ + if node_a is nodes[0] else 1 + + norm_args_kwargs = node_a.normalized_arguments( + gm_a, normalize_to_only_use_kwargs=True) + if norm_args_kwargs is not None: + norm_args, norm_kwargs = norm_args_kwargs + else: + norm_args, norm_kwargs = node_a.args, node_a.kwargs + + cur_idx = 0 + + while cur_idx < len(norm_args): + if cur_idx == 0: + pass + elif cur_idx == 1 and local_num_non_param_args_node_a == 2: + pass + else: + if not _can_insert(norm_args[cur_idx], gm_a): + return False + cur_idx += 1 + + for kwarg_val in norm_kwargs.values(): + # stitch the inputs from base graph + if cur_idx == 0: + pass + elif cur_idx == 1 and local_num_non_param_args_node_a == 2: + pass + else: + if not _can_insert(kwarg_val, gm_a): + return False + cur_idx += 1 + + return True + +def _insert_copy_of_subgraph_a_after_input_node_c( + input_node_c: Union[Node, List[Node]], + input_node_c_2: Optional[Union[Node, List[Node]]], + subgraph_a: NSSubgraph, + gm_a: GraphModule, + gm_b: GraphModule, + node_name_prefix: str, +) -> Node: + """ + TODO(before land): real docblock + """ + if isinstance(input_node_c, Node): + graph_c = input_node_c.graph + else: + assert isinstance(input_node_c, list) + graph_c = input_node_c[0].graph + + # create a sequential list of the subgraphs' nodes from start to end, + # because we need to add the nodes to graph C in non-reverse order + nodes_of_a = [subgraph_a.end_node] + cur_node = subgraph_a.end_node + while cur_node != subgraph_a.start_node: + cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment] + nodes_of_a.insert(0, cur_node) + + # go through nodes of a in order, and insert them into the graph of c + # sequentially + cur_node_a = nodes_of_a[0] + cur_node_c = _insert_copy_of_node_a_after_input_node_c( + input_node_c, + input_node_c_2, + cur_node_a, + gm_a, + gm_b, + node_name_prefix) + for cur_idx_a in range(1, len(nodes_of_a)): + cur_node_a = nodes_of_a[cur_idx_a] + prev_node_c = cur_node_c # previous added node is the input to next node + cur_node_c = _insert_copy_of_node_a_after_input_node_c( + prev_node_c, + # TODO(future PR): enable multiple inputs for nodes which are not at start of subgraph + None, + cur_node_a, + gm_a, + gm_b, + node_name_prefix) + # return the last inserted node + return cur_node_c + + +def _insert_copy_of_node_a_after_input_node_c( + input_node_c: Union[Node, List[Node]], + input_node_c_2: Optional[Union[Node, List[Node]]], + node_a: Node, + gm_a: GraphModule, + gm_b: GraphModule, + node_name_prefix: str, +) -> Node: + """ + Assume that node_a from graph_a has + args (input, (input2)?, arg1, ...), and + kwargs {kw0: kwarg0, ...} + + Note: input2 is optional. If it equals to None, we assume that the op + has a single non-param input. If it is specified, we assume that the op + has two non-param inputs. + + Copies the underlying values of arg1..argn and kwarg0..kwargn into gm_b, + and creates the corresponding nodes in graph_c. Note: observers are ignored, + so if an arg is an observer we navigate up until we find a non-observer parent. + + If node_a is a call_module, points the module pointed to by node_a to gm_b. + + Creates the copy of node_a in graph_c, with input as the first arg, + and all other args and kwargs pointing to the copies of the objects + in gm_b created above. 
+ + An example in pictures: + + graph A: + ======== + + input -------------> node_a + / / / + (input_2)?----------/ / / + / / + weight -> weight_obs / + / + bias ---------------- + + graph C (derived from B): + ========================= + + input_node_c --> node_a_copy + / / / + (input_node_c_2)? / / + / / + weight_copy ----/ / + / + bias_copy ------/ + """ + if isinstance(input_node_c, Node): + graph_c = input_node_c.graph + else: + assert isinstance(input_node_c, list) + graph_c = input_node_c[0].graph + + norm_args_kwargs = node_a.normalized_arguments( + gm_a, normalize_to_only_use_kwargs=True) + if norm_args_kwargs is not None: + norm_args, norm_kwargs = norm_args_kwargs + else: + norm_args, norm_kwargs = node_a.args, node_a.kwargs + + new_args = [] + new_kwargs = {} + + def _copy_arg(arg): + # copy the other inputs from the other graph + if isinstance(arg, Node): + arg = return_first_non_observer_node(arg, gm_a) + arg = _copy_node_from_a_to_c(arg, gm_a, gm_b, graph_c) + return arg + elif isinstance(arg, (int, float, torch.dtype)): + return arg + elif isinstance(kwarg_val, (list, tuple)): + for el in kwarg_val: + assert not isinstance(el, Node), \ + "handling of Node inside list is not implemented" + return arg + else: + raise AssertionError( + f"handling for kwarg of type {type(kwarg_val)} is not implemented") + + cur_idx = 0 + + while cur_idx < len(norm_args): + if cur_idx == 0: + new_arg = input_node_c + elif cur_idx == 1 and input_node_c_2 is not None: + new_arg = input_node_c_2 + else: + new_arg = _copy_arg(norm_args[cur_idx]) + new_args.append(new_arg) + cur_idx += 1 + + for kwarg_name, kwarg_val in norm_kwargs.items(): + # stitch the inputs from base graph + if cur_idx == 0: + new_kwargs[kwarg_name] = input_node_c + elif cur_idx == 1 and input_node_c_2 is not None: + new_kwargs[kwarg_name] = input_node_c_2 + else: + new_kwargs[kwarg_name] = _copy_arg(kwarg_val) + cur_idx += 1 + + new_args = tuple(new_args) # type: ignore[assignment] + + node_a_shadows_c_name = \ + get_new_attr_name_with_prefix(node_name_prefix)(gm_b) + + if node_a.op == 'call_module': + # if target is a module, we point to the module from gm_b + new_mod_copy_name = \ + get_new_attr_name_with_prefix(node_name_prefix)(gm_b) + # fetch the corresponding module from gm_a + assert isinstance(node_a.target, str) + mod_a = getattr_from_fqn(gm_a, node_a.target) + setattr(gm_b, new_mod_copy_name, mod_a) + node_a_shadows_c = graph_c.create_node( + node_a.op, new_mod_copy_name, new_args, + new_kwargs, node_a_shadows_c_name) + return node_a_shadows_c + else: + assert node_a.op in ('call_function', 'call_method') + node_a_shadows_c = graph_c.create_node( + node_a.op, node_a.target, new_args, + new_kwargs, node_a_shadows_c_name) + return node_a_shadows_c + +def create_a_shadows_b( + name_a: str, + gm_a: GraphModule, + name_b: str, + gm_b: GraphModule, + matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]], + logger_cls: Callable, + should_log_inputs: bool, + node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None, +) -> GraphModule: + """ + Creates a new GraphModule consisting of the graph of C, with the meaningful + nodes of A shadowing the corresponding nodes of B. 
For example, + + Graph A: + a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2 + + Graph B: + b0 -> op0_int8 -> b1 -> op1_int8 -> b2 + + matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)} + + Graph C (A shadows B): + + / dequant0 -> op0_fp32 -> logger_a_0 / dequant_1 -> op1_fp32 -> logger_a_1 + / / + b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1 + + In a nutshell, this function does the following for each node pair: + * copies the necessary attributes and modules from gm_a to gm_b, + keeping names unique + * adds a dtype cast op (dequant, quant, etc) + * adds a copy of node_a in gm_b's graph + * adds loggers to the outputs of node_a and node_b + """ + + if node_type_to_io_type_map is None: + node_type_to_io_type_map = get_node_type_to_io_type_map() + + # graph_c is the graph created from copying the nodes of graph_b and inserting + # the shadows with the nodes copied from graph_a + graph_c = Graph() + env_c: Dict[str, Any] = {} + modules = dict(gm_b.named_modules()) + + def load_arg(a): + return map_arg(a, lambda node: env_c[node.name]) + + start_node_b_to_matched_subgraph_a_and_name = {} + end_node_b_to_matched_subgraph_a_and_name = {} + for match_name, match in matched_subgraph_pairs.items(): + subgraph_a, subgraph_b = match + ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a) + ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b) + start_node_b_to_matched_subgraph_a_and_name[subgraph_b.start_node] = \ + (subgraph_a, match_name, ref_node_type_a, ref_node_type_b) + end_node_b_to_matched_subgraph_a_and_name[subgraph_b.end_node] = \ + (subgraph_a, match_name, ref_node_type_a, ref_node_type_b) + + for node_b in gm_b.graph.nodes: + if node_b.op == 'output': + graph_c.output(map_arg(node_b.args[0], load_arg)) + continue + + # calculate the flags to determine what to do with this node + node_b_is_start_node = node_b in start_node_b_to_matched_subgraph_a_and_name + node_b_is_end_node = node_b in end_node_b_to_matched_subgraph_a_and_name + + if (node_b_is_start_node or node_b_is_end_node): + + if node_b_is_start_node: + subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \ + start_node_b_to_matched_subgraph_a_and_name[node_b] + else: + assert node_b_is_end_node + subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \ + end_node_b_to_matched_subgraph_a_and_name[node_b] + + all_op_types_support_shadowing = ( + op_type_supports_shadowing(subgraph_a.start_node) and + op_type_supports_shadowing(node_b) + ) + if not all_op_types_support_shadowing: + print( + f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' + + f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' + + ', unsupported') + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + continue + + # For both start_node and end_node verify that we know how to do + # the dtype cast. If we do not, skip. 
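                # e.g. a user-defined module whose dtype is not listed in
                # node_type_to_io_type_map resolves to UNKNOWN here, and the
                # pair is then copied through without shadow loggers.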
+ node_input_type_a, node_output_type_a = \ + get_node_first_input_and_output_type( + subgraph_a.start_node, gm_a, logger_cls, + node_type_to_io_type_map) + node_input_type_b, node_output_type_b = \ + get_node_first_input_and_output_type( + node_b, gm_b, logger_cls, + node_type_to_io_type_map) + node_io_types_known_a_and_b = ( + node_input_type_a != NodeInputOrOutputType.UNKNOWN and + node_output_type_a != NodeInputOrOutputType.UNKNOWN and + node_input_type_b != NodeInputOrOutputType.UNKNOWN and + node_output_type_b != NodeInputOrOutputType.UNKNOWN + ) + if not node_io_types_known_a_and_b: + print( + f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' + + f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' + + ', unknown dtype cast') + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + continue + + # If we are shadowing from fp32 to int8, we need to insert + # quantize_per_tensor call with qparams from the previous node. + # Only do this if we are able to infer these qparams from the graph. + if ( + node_input_type_a == NodeInputOrOutputType.INT8 and + node_input_type_b == NodeInputOrOutputType.FP32 + ): + node_a_input_qparams = get_node_input_qparams( + subgraph_a.start_node, gm_a, node_type_to_io_type_map) + if not node_a_input_qparams: + print( + f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' + + f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' + + ', unknown input qparams') + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + continue + + num_non_param_args_node_a = \ + get_number_of_non_param_args(subgraph_a.start_node, gm_a) + if not _can_insert_copy_of_subgraph_a(subgraph_a, gm_a, num_non_param_args_node_a): + print( + f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' + + f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' + + ', unhandled logic in subgraph copy') + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + continue + + fqn_base_a = _maybe_get_fqn(subgraph_a.base_op_node, gm_a) + fqn_base_b = _maybe_get_fqn(subgraph_b.base_op_node, gm_b) + + if node_b_is_start_node: + + # if necessary, log the input of node_c + if should_log_inputs: + prev_node_b = get_normalized_nth_input(node_b, gm_b, 0) + if isinstance(prev_node_b, Node): + prev_node_c = env_c[prev_node_b.name] + env_c[prev_node_c.name] = _insert_logger_after_node( + prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_', + node_b.name, name_b, ref_name, ref_node_type_b, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=0, index_of_arg=0, + fqn=fqn_base_b) + elif isinstance(prev_node_b, list): + # first, save the prev_node instances, because they + # will be overwritten in the env after the first logger + # is added + prev_node_c_list = [env_c[arg.name] for arg in prev_node_b] + + for arg_idx, arg in enumerate(prev_node_b): + prev_node_c = prev_node_c_list[arg_idx] + env_c[prev_node_c.name] = _insert_logger_after_node( + prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_', + node_b.name, name_b, ref_name, ref_node_type_b, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=arg_idx, index_of_arg=0, + fqn=fqn_base_b) + else: + # logging of inputs which are not lists is not supported yet + raise AssertionError(f"type {type(prev_node_b)} is not handled yet") + # subgraph so far: + # + # (prev_node_c)+ -> (logger_c_input)? + + # Note: this if statement is always True, spelling it out to clarify code + # intent. 
+ if node_b_is_start_node or node_b_is_end_node: + # ensure env_c is populated with base node + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + node_c = env_c[node_b.name] + + # after this point, + # + # node_a is the original node from graph_a, with parent module gm_a + # node_b is the original node from graph_b, with parent module gm_b + # node_c is the copy of node_b in graph_c + # + # subgraph so far: + # + # (prev_node_c)+ -> (logger_c_input)? -> node_start_c + + if node_b_is_start_node: + + # cast dtype from the dtype of node_c's input to the dtype of + # node_a's input (dequant, etc) + # prev_node_c = node_c.args[0] + prev_node_c = get_normalized_nth_input(node_c, gm_b, 0) + if should_log_inputs: + # skip the input logger when inserting a dtype cast + if isinstance(prev_node_c, Node): + prev_node_c = get_normalized_nth_input(node_c, gm_b, 0) + elif isinstance(prev_node_c, list): + prev_node_c = [get_normalized_nth_input(arg, gm_b, 0) for arg in prev_node_c] + dtype_cast_node = _insert_dtype_cast_after_node( + subgraph_a.start_node, node_c, prev_node_c, gm_a, gm_b, graph_c, + node_b.name + '_dtype_cast_', logger_cls, + node_type_to_io_type_map) + # note: not inserting to env_c because all nodes which use the dtype + # casts are copied from graph_a + # + # subgraph so far: + # + # (dtype_cast_node)+ + # / + # (prev_node_c)+ -> (logger_c_input)? -> node_start_c + + # if input logging is enabled, log the input to the subgraph + if should_log_inputs: + # TODO: explain this + ref_node_name = '' + if isinstance(dtype_cast_node, Node): + dtype_cast_node = _insert_logger_after_node( + dtype_cast_node, gm_b, logger_cls, '_ns_logger_a_inp_', + ref_node_name, name_a, ref_name, ref_node_type_a, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=0, index_of_arg=0, + fqn=fqn_base_a) + input_logger: Union[Node, List[Node]] = dtype_cast_node + else: + assert isinstance(dtype_cast_node, list) + new_loggers = [] + for dtype_cast_idx, dtype_cast_node_inner in enumerate(dtype_cast_node): + dtype_cast_logger = _insert_logger_after_node( + dtype_cast_node_inner, gm_b, logger_cls, '_ns_logger_a_inp_', + ref_node_name, name_a, ref_name, ref_node_type_a, + NSSingleResultValuesType.NODE_INPUT.value, + index_within_arg=dtype_cast_idx, + index_of_arg=0, + fqn=fqn_base_a) + new_loggers.append(dtype_cast_logger) + dtype_cast_node = new_loggers + input_logger = dtype_cast_node + # subgraph so far: + # + # (dtype_cast_node)+ -> (logger_a_input)? + # / + # prev_node_c -> (logger_c_input)? -> node_start_c + + # hook up the new mod_a copy to be in the graph, receiving the + # same inputs as mod_b does, with dtype cast to match a + # Some ops, such as LSTMs, have two non-param inputs. If we have + # such an op, pass the second param as well. Note: dtype casting + # for the second param is not implemented yet, it can be added + # later if there is a use case. + node_c_second_non_param_arg = None + num_non_param_args_node_a = get_number_of_non_param_args(subgraph_a.start_node, gm_a) + if num_non_param_args_node_a == 2: + # node_c_second_non_param_arg = node_c.args[1] + node_c_second_non_param_arg = get_normalized_nth_input(node_c, gm_b, 1) + node_a_shadows_c = _insert_copy_of_subgraph_a_after_input_node_c( + dtype_cast_node, node_c_second_non_param_arg, + subgraph_a, gm_a, gm_b, node_c.name + '_shadow_copy_') + env_c[node_a_shadows_c.name] = node_a_shadows_c + # subgraph so far: + # + # dtype_cast_node -> (logger_a_input)? 
-> subgraph_a_copy(args/kwargs not shown) + # / + # (prev_node_c)+ -> (logger_c_input)? -> node_start_c + + if should_log_inputs: + # When we created the input logger, we left the ref_node_name + # as an empty string, because the subgraph copy did not exist + # yet. Now that the subgraph copy exists, we modify this name + # to its true value. + # Note: the alternative to this is to create the input logger + # after creating the subgraph, which is slightly more + # complicated. This is the lesser of two evils. + # input_logger = env_c[dtype_cast_node.name] + # Find the first node in the subgraph + cur_node = node_a_shadows_c + while get_normalized_nth_input(cur_node, gm_b, 0) != input_logger: + cur_node = get_normalized_nth_input(cur_node, gm_b, 0) # type: ignore[assignment] + if isinstance(input_logger, Node): + input_logger_mod = getattr(gm_b, input_logger.name) + input_logger_mod.ref_node_name = cur_node.name + else: + assert isinstance(input_logger, list) + for input_logger_inner in input_logger: + input_logger_mod = getattr(gm_b, input_logger_inner.name) + input_logger_mod.ref_node_name = cur_node.name + + # hook up a logger to the mod_a copy + env_c[node_a_shadows_c.name] = _insert_logger_after_node( + env_c[node_a_shadows_c.name], gm_b, logger_cls, '_ns_logger_a_', + node_a_shadows_c.name, name_a, ref_name, ref_node_type_a, + NSSingleResultValuesType.NODE_OUTPUT.value, + index_within_arg=0, index_of_arg=0, + fqn=fqn_base_a) + # subgraph so far: + # + # dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a + # / + # (prev_node_c)+ -> (logger_c_input)? -> node_start_c + + if node_b_is_end_node: + + # hook up a logger to the mod_b copy + env_c[node_b.name] = _insert_logger_after_node( + env_c[node_b.name], gm_b, logger_cls, '_ns_logger_b_', + node_b.name, name_b, ref_name, ref_node_type_b, + NSSingleResultValuesType.NODE_OUTPUT.value, + index_within_arg=0, index_of_arg=0, + fqn=fqn_base_b) + # subgraph so far: + # + # dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a + # / + # (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c + # + # Note: node_start_c may be the same node as node_end_c, or they + # may have nodes inbetween. 
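        # Nodes of graph_b which are not the start or end of any matched
        # subgraph are copied into graph_c unchanged (no shadowing, no
        # loggers).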
+ + else: + env_c[node_b.name] = graph_c.node_copy(node_b, load_arg) + + gm_c = GraphModule(gm_b, graph_c) + return gm_c diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a872056d16cf502e0e0c6943dec56a0c6bc4df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/mappings.py @@ -0,0 +1,761 @@ +import operator + +import torch +import torch.nn as nn +import torch.nn.functional as F +toq = torch.ops.quantized + +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.intrinsic.quantized as nniq +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.intrinsic.qat as nniqat +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.qat as nnqat +import torch.ao.nn.qat.dynamic as nnqatd +from torch.ao.quantization.backend_config import get_native_backend_config +import torch.ao.quantization.fx._lower_to_native_backend as \ + _lower_to_native_backend +import torch.ao.quantization.quantization_mappings as quantization_mappings + +from .ns_types import NSNodeTargetType + +from typing import Callable, Dict, List, Optional, Set, Tuple + + +def get_base_name_to_sets_of_related_ops() -> Dict[str, Set[NSNodeTargetType]]: + # note: this set is modified below by items from backend_config + sets_of_related_ops: List[Set[NSNodeTargetType]] = [ + # conv modules + { + nn.Conv1d, + }, + { + nn.Conv2d, + }, + { + nn.Conv3d, + }, + # conv functionals + { + F.conv1d, + }, + { + F.conv2d, + }, + { + F.conv3d, + }, + # linear modules + { + nn.Linear, + }, + # linear functionals + { + F.linear, + }, + # average pool + { + nn.AvgPool1d, + torch.avg_pool1d, + }, + { + nn.AvgPool2d, + torch._C._nn.avg_pool2d, + }, + { + nn.AvgPool3d, + torch._C._nn.avg_pool3d, + }, + # adaptive average pool + { + nn.AdaptiveAvgPool1d, + F.adaptive_avg_pool1d, + }, + { + nn.AdaptiveAvgPool2d, + F.adaptive_avg_pool2d, + }, + { + nn.AdaptiveAvgPool3d, + F.adaptive_avg_pool3d, + }, + # LSTM + { + nn.LSTM, + }, + # add + { + torch.add, + operator.add, # x + y + }, + # cat + { + torch.cat, + }, + # mul + { + torch.mul, + operator.mul, + }, + # relu + { + F.relu, + nn.ReLU, + 'relu', + 'relu_', + torch.relu, + }, + # maxpool + { + nn.MaxPool1d, + F.max_pool1d, + }, + { + nn.MaxPool2d, + F.max_pool2d, + }, + { + nn.MaxPool3d, + F.max_pool3d, + }, + # sigmoid + { + torch.sigmoid, + 'sigmoid', + 'sigmoid_', + nn.Sigmoid, + F.sigmoid, + }, + # BatchNorm + { + nn.BatchNorm2d, + }, + { + nn.BatchNorm3d, + }, + # ConvTranspose + { + nn.ConvTranspose1d, + }, + { + nn.ConvTranspose2d, + }, + { + nn.ConvTranspose3d, + }, + # functional transposed conv + { + F.conv_transpose1d, + }, + { + F.conv_transpose2d, + }, + { + F.conv_transpose3d, + }, + # ELU + { + nn.ELU, + }, + # Embedding + { + nn.Embedding, + }, + # EmbeddingBag + { + nn.EmbeddingBag, + }, + # GroupNorm + { + nn.GroupNorm, + }, + # Hardswish + { + nn.Hardswish, + }, + # InstanceNorm + { + nn.InstanceNorm1d, + }, + { + nn.InstanceNorm2d, + }, + { + nn.InstanceNorm3d, + }, + # LayerNorm + { + nn.LayerNorm, + }, + # LeakyReLU + { + nn.LeakyReLU, + }, + # ReLU6 + { + nn.ReLU6, + F.relu6, + }, + # F.elu + { + F.elu, + }, + # F.hardswish + { + F.hardswish, + }, + # F.group_norm + { + F.group_norm, + }, + # F.instance_norm + { + F.instance_norm, + }, + # F.layer_norm + { + F.layer_norm, + }, + # F.leaky_relu + { + F.leaky_relu, + }, + # 
F.silu + { + nn.SiLU, + F.silu, + }, + # F.mish + { + nn.Mish, + F.mish, + }, + # F.tanh + { + nn.Tanh, + F.tanh, + torch.tanh, + 'tanh_', + 'tanh', + }, + # F.hardsigmoid + { + 'hardsigmoid_', + 'hardsigmoid', + F.hardsigmoid, + nn.Hardsigmoid, + }, + # F.hardtanh + { + nn.Hardtanh, + F.hardtanh, + F.hardtanh_, + }, + # floordiv + { + operator.floordiv, + }, + # unsqueeze + { + torch.unsqueeze, + }, + # stack + { + torch.stack, + }, + # squeeze + { + torch.squeeze, + }, + # sort + { + torch.sort, + }, + # repeat_interleave + { + torch.repeat_interleave, + }, + # min + { + torch.min, + }, + # mean + { + torch.mean, + }, + # max + { + torch.max, + }, + # transpose + { + torch.transpose, + }, + # flatten + { + torch.flatten, + }, + # clamp + { + torch.clamp, + }, + # chunk + { + torch.chunk, + }, + # interpolate + { + torch.nn.functional.interpolate, + }, + # dropout + { + nn.Dropout, + }, + # F.dropout + { + F.dropout, + }, + # matmul + { + torch.matmul, + }, + # Softmax + { + nn.Softmax, + }, + # PReLU + { + nn.PReLU, + nnq.PReLU, + }, + # F.prelu + { + F.prelu, + toq.prelu, + }, + # pixel shuffle + { + nn.PixelShuffle, + }, + { + F.pixel_shuffle, + }, + # pixel unshuffle + { + nn.PixelUnshuffle, + }, + { + F.pixel_unshuffle, + }, + # narrow + { + torch.narrow, + }, + ] + + # for each floating point op, add versions of the op added by + # backend_config + backend_config = get_native_backend_config() + + new_connections: List[Tuple[Callable, Callable]] = [ + # technical debt edge case + (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear), + ] + + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + + # pattern format: (c, (b, a)) + first_element = pattern + # look from the end, because pattern is in reverse order + while isinstance(first_element, (list, tuple)): + first_element = first_element[-1] + + if config.fused_module is not None: + # case 1: pattern fuses a pattern of ops into an op + # example: nn.Conv1d, nn.ReLU fused into nni.ConvReLU1d + new_connections.append((first_element, config.fused_module)) + + if config.qat_module is not None: + # case 2: pattern swaps a module into a QAT module + # example: nni.ConvReLU1d swapped into nniqat.ConvReLU1d + new_connections.append((first_element, config.qat_module)) + + if config.reference_quantized_module is not None: + # case 3: reference version of floating point module, such as + # nn.Conv2d and nnqr.Conv2d + new_connections.append((first_element, config.reference_quantized_module)) + + # + # Add reference module swaps from default lowering path + # + + for source_to_target in ( + _lower_to_native_backend.STATIC_LOWER_MODULE_MAP, + _lower_to_native_backend.DYNAMIC_LOWER_MODULE_MAP, + _lower_to_native_backend.WEIGHT_ONLY_LOWER_MODULE_MAP, + _lower_to_native_backend.SPECIAL_PATTERN_LOWER_MODULE_MAP, + ): + for source, target in source_to_target.items(): # type: ignore[attr-defined] + new_connections.append((source, target)) + + for source_to_double_target in ( + _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_MAP, + _lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP, + _lower_to_native_backend.DYNAMIC_LOWER_FUSED_MODULE_MAP, + ): + for source, (target1, target2) in source_to_double_target.items(): # type: ignore[attr-defined] + new_connections.append((source, target1)) + new_connections.append((source, target2)) + + # + # Add function swaps from default lowering path + # + + for source, (target1, target2) in \ + _lower_to_native_backend.STATIC_LOWER_FUNCTIONAL_MAP.items(): + 
new_connections.append((source, target1)) + new_connections.append((source, target2)) + + for source_to_target in ( + _lower_to_native_backend.QBIN_OP_MAPPING, + _lower_to_native_backend.QBIN_RELU_OP_MAPPING, + quantization_mappings.DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS, + ): + for source, target in source_to_target.items(): + new_connections.append((source, target)) + + # + # Add other swaps, ideally in the future this could be removed + # after the lowering code stops using these. + # + for source_to_target in ( + quantization_mappings.DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, + ): + for source, target in source_to_target.items(): + new_connections.append((source, target)) + + + # add the new connections from backend_config + for item1, item2 in new_connections: + for set_of_related_ops in sets_of_related_ops: + if item1 in set_of_related_ops or item2 in set_of_related_ops: + set_of_related_ops.add(item1) + set_of_related_ops.add(item2) + break + + base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]] = {} + + counter = 0 + for set_of_related_ops in sets_of_related_ops: + base_name = str(counter) + counter += 1 + base_name_to_sets_of_related_ops[base_name] = set_of_related_ops + + return base_name_to_sets_of_related_ops + + +def get_base_name_for_op( + base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]], + op: NSNodeTargetType, +) -> Optional[str]: + for base_name, set_of_related_ops in base_name_to_sets_of_related_ops.items(): + if op in set_of_related_ops: + return base_name + return None + + +def add_op_to_sets_of_related_ops( + base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]], + op: NSNodeTargetType, + related_op: Optional[NSNodeTargetType], +) -> None: + if related_op is not None: + for set_of_related_ops in base_name_to_sets_of_related_ops.values(): + if related_op in set_of_related_ops: + set_of_related_ops.add(op) + return + # if we got here, related_op was not found + raise AssertionError(f"{related_op} was not found") + else: + counter = 0 + while str(counter) in base_name_to_sets_of_related_ops: + counter += 1 + base_name_to_sets_of_related_ops[str(counter)] = {op} + + +# TODO(future PR): clean this up +def get_node_type_to_io_type_map() -> Dict[str, Set[NSNodeTargetType]]: + FUNS_IO_TYPE_FP32: Set[NSNodeTargetType] = { + F.linear, + F.conv1d, + F.conv2d, + F.conv3d, + torch.cat, + F.elu, + F.hardswish, + F.instance_norm, + F.layer_norm, + F.leaky_relu, + F.dropout, + F.silu, + F.mish, + operator.add, + torch.add, + operator.mul, + torch.mul, + torch.sum, + F.prelu, + } + + FUNS_IO_TYPE_FP16: Set[NSNodeTargetType] = set() + + FUNS_IO_TYPE_INT8: Set[NSNodeTargetType] = { + toq.linear, + toq.linear_relu, + toq.conv1d, + toq.conv1d_relu, + toq.conv2d, + toq.conv2d_relu, + toq.conv3d, + toq.conv3d_relu, + toq.cat, + toq.elu, + toq.hardswish, + toq.instance_norm, + toq.layer_norm, + toq.leaky_relu, + toq.dropout, + toq.prelu, + # TODO(future PR): implement shadowing for binary ops and + # uncomment below + # toq.add, + # toq.mul, + } + + FUNS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = { + F.relu, + F.tanh, + torch.tanh, + F.sigmoid, + torch.sigmoid, + F.hardsigmoid, + operator.floordiv, + torch.adaptive_avg_pool1d, + F.adaptive_avg_pool2d, + F.adaptive_avg_pool3d, + F.dropout, + F.hardtanh, + F.hardtanh_, + F.interpolate, + F.max_pool1d, + F.max_pool2d, + F.max_pool3d, + F.relu6, + F.pixel_shuffle, + F.pixel_unshuffle, + torch.avg_pool1d, + torch._C._nn.avg_pool2d, + torch._C._nn.avg_pool3d, + torch.cat, + torch.chunk, + 
torch.clamp, + torch.flatten, + torch.transpose, + torch.max, + torch.mean, + torch.min, + torch.narrow, + torch.repeat_interleave, + torch.sort, + torch.squeeze, + torch.stack, + torch.unsqueeze, + operator.add, + } + + MODS_IO_TYPE_FP32: Set[NSNodeTargetType] = { + nn.Linear, + nnqat.Linear, + nnqatd.Linear, + nnqd.Linear, + torch.nn.modules.linear.NonDynamicallyQuantizableLinear, + nn.Conv1d, + nn.Conv2d, + nn.Conv3d, + nnqat.Conv1d, + nnqat.Conv2d, + nnqat.Conv3d, + nnqat.Embedding, + nnqat.EmbeddingBag, + nn.LSTM, + # note: nnqd.Linear is an instance of nnq.Linear, so this + # check has to happen before the int8 module check + nnqd.LSTM, + nn.BatchNorm2d, + nn.BatchNorm3d, + nn.Dropout, + nn.ConvTranspose1d, + nn.ConvTranspose2d, + nn.ConvTranspose3d, + nn.ELU, + nn.GroupNorm, + nn.InstanceNorm1d, + nn.InstanceNorm2d, + nn.InstanceNorm3d, + nn.LayerNorm, + nn.Hardswish, + nn.LeakyReLU, + nn.ReLU6, + nn.SiLU, + nn.Mish, + nn.Softmax, + nn.PReLU, + nni.BNReLU2d, + nni.BNReLU3d, + nni.ConvReLU1d, + nni.ConvReLU2d, + nni.ConvReLU3d, + nni.LinearReLU, + nni.LinearBn1d, + nni.ConvBn1d, + nni.ConvBn2d, + nni.ConvBn3d, + nniqat.ConvBn1d, + nniqat.ConvBn2d, + nniqat.ConvBn3d, + nniqat.ConvBnReLU1d, + nniqat.ConvBnReLU2d, + nniqat.ConvBnReLU3d, + nniqat.ConvReLU1d, + nniqat.ConvReLU2d, + nniqat.ConvReLU3d, + nniqat.LinearReLU, + nniqat.LinearBn1d, + nniqd.LinearReLU, + nni.LinearLeakyReLU, + nni.LinearTanh, + nni.ConvAdd2d, + nni.ConvAddReLU2d, + } + + MODS_IO_TYPE_INT8: Set[NSNodeTargetType] = { + nnq.Linear, + nnq.Conv1d, + nnq.Conv2d, + nnq.Conv3d, + nnq.BatchNorm2d, + nnq.BatchNorm3d, + nnq.Dropout, + nnq.ConvTranspose1d, + nnq.ConvTranspose2d, + nnq.ELU, + nnq.InstanceNorm1d, + nnq.InstanceNorm2d, + nnq.InstanceNorm3d, + nnq.LayerNorm, + nnq.Hardswish, + nnq.LeakyReLU, + nnq.Embedding, + nnq.EmbeddingBag, + nnq.Dropout, + nnq.Softmax, + nnq.PReLU, + nniq.BNReLU2d, + nniq.BNReLU3d, + nniq.ConvReLU1d, + nniq.ConvReLU2d, + nniq.ConvReLU3d, + nniq.LinearReLU, + nniq.LinearLeakyReLU, + nniq.LinearTanh, + nniq.ConvAdd2d, + nniq.ConvAddReLU2d, + } + + MODS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = { + nn.ReLU, + nn.Tanh, + nn.Sigmoid, + nn.Hardsigmoid, + nn.AdaptiveAvgPool1d, + nn.AdaptiveAvgPool2d, + nn.AdaptiveAvgPool3d, + nn.AvgPool1d, + nn.AvgPool2d, + nn.AvgPool3d, + nn.Dropout, + nn.Hardtanh, + nn.Identity, + nn.MaxPool1d, + nn.MaxPool2d, + nn.MaxPool3d, + nn.PixelShuffle, + nn.PixelUnshuffle, + nn.ReLU6, + } + + METHS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = { + 'sigmoid_', + 'sigmoid', + 'tanh_', + 'tanh', + 'hardsigmoid_', + 'hardsigmoid', + 'relu_', + 'relu', + } + + return { + 'funs_io_type_fp32': FUNS_IO_TYPE_FP32, + 'funs_io_type_fp16': FUNS_IO_TYPE_FP16, + 'funs_io_type_int8': FUNS_IO_TYPE_INT8, + 'funs_io_type_fp32_or_int8': FUNS_IO_TYPE_FP32_OR_INT8, + 'mods_io_type_fp32': MODS_IO_TYPE_FP32, + 'mods_io_type_int8': MODS_IO_TYPE_INT8, + 'mods_io_type_fp32_or_int8': MODS_IO_TYPE_FP32_OR_INT8, + 'meths_io_type_fp32_or_int8': METHS_IO_TYPE_FP32_OR_INT8, + } + + +def get_unmatchable_types_map() -> Dict[str, Set[NSNodeTargetType]]: + + FUNS_UNMATCHABLE: Set[NSNodeTargetType] = { + torch.quantize_per_tensor, + operator.getitem, + } + + MODS_UNMATCHABLE: Set[NSNodeTargetType] = { + nn.Identity, + } + + METHS_UNMATCHABLE: Set[NSNodeTargetType] = { + 'to', + 'dequantize', + 'reshape', + 'view', + 'unsqueeze_', + 'unsqueeze', + 'transpose', + 'squeeze_', + 'squeeze', + 'size', + 'shape', + 'resize_', + 'repeat_interleave', + 'repeat', + 'permute', + 'numel', + 'mean', + 'detach_', + 
'detach', + 'contiguous', + 'clamp', + 'chunk', + } + + return { + 'funs_unmatchable': FUNS_UNMATCHABLE, + 'mods_unmatchable': MODS_UNMATCHABLE, + 'meths_unmatchable': METHS_UNMATCHABLE, + } diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c29da777836d2ad53c6e67ea99689427d72506c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/n_shadows_utils.py @@ -0,0 +1,1312 @@ +import torch +import torch.fx +from torch.fx import ( + Node, + GraphModule, + Graph, +) + +from torch.ao.ns.fx.utils import ( + # TODO(future PR): make this work correctly for methods + get_target_type_str, + get_normalized_nth_input, +) +from torch.ao.ns.fx.ns_types import ( + NSSingleResultValuesType, + NSResultsType, +) +from torch.ao.ns.fx.graph_passes import _maybe_get_fqn +from torch.ao.quantization import QConfigMapping +from torch.ao.quantization.qconfig import QConfigAny +from torch.ao.quantization.utils import getattr_from_fqn +from torch.ao.quantization.fx.match_utils import _MatchResult +from torch.utils._pytree import tree_map + +import collections +import copy +from typing import List, Dict, Set, Tuple, Callable, Any, Optional +import operator + +SHADOW_NODE_NAME_PREFIX = 'shadow' +SHADOW_WRAPPER_NODE_NAME_PREFIX = 'shadow_wrapper' + +# TODO(future PR): reuse existing mapping instead of creating a new one +BINARY_FUNCTIONS = { + torch.add, + torch.Tensor.add, + operator.add, + torch.mul, + torch.Tensor.mul, + operator.mul, +} + +def _get_attr_name(subgraph_idx, subgraph_candidate_idx): + return f"{SHADOW_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}" + +def _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx): + return f"{SHADOW_WRAPPER_NODE_NAME_PREFIX}_{subgraph_idx}_{subgraph_candidate_idx}" + + +class OutputProp: + """ + Output propagation (modeled from shape propagation). + + Given a GraphModule and an example input, saves the output flowing + through each node on `node.traced_result`. 
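+
+    Example usage (illustrative sketch; assumes `gm` is a symbolically traced
+    GraphModule and `x` is an example input tensor)::
+
+        OutputProp(gm).propagate(x)
+        # nodes whose output is a Tensor now carry `node.traced_result`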
+ + Code based on the example from + https://pytorch.org/docs/stable/fx.html#the-interpreter-pattern + """ + def __init__(self, mod): + self.mod = mod + self.graph = mod.graph + self.modules = dict(self.mod.named_modules()) + + def propagate(self, *args): + args_iter = iter(args) + env : Dict[str, Node] = {} + + def load_arg(a): + return torch.fx.graph.map_arg(a, lambda n: env[n.name]) + + def fetch_attr(target : str): + target_atoms = target.split('.') + attr_itr = self.mod + for i, atom in enumerate(target_atoms): + if not hasattr(attr_itr, atom): + raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}") + attr_itr = getattr(attr_itr, atom) + return attr_itr + + for node in self.graph.nodes: + if node.op == 'placeholder': + result = next(args_iter) + elif node.op == 'get_attr': + result = fetch_attr(node.target) + elif node.op == 'call_function': + result = node.target(*load_arg(node.args), **load_arg(node.kwargs)) + elif node.op == 'call_method': + self_obj, *args = load_arg(node.args) + kwargs = load_arg(node.kwargs) + result = getattr(self_obj, node.target)(*args, **kwargs) + elif node.op == 'call_module': + result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs)) + + if isinstance(result, torch.Tensor): + node.traced_result = result + + env[node.name] = result + + return None + +def _get_dedup_subgraphs( + matches: Dict[str, _MatchResult] +) -> Dict[str, List[Node]]: + # the original matches variable is unique by node, make it unique by subgraph + # instead + seen_nodes = set() + subgraphs_dedup = {} + + # Dict items are not reversible until Python 3.8, so we hack it + # to be compatible with previous Python versions + # TODO(future PR): try reversed(list(matches.items())) + matches_items_reversed: List[Tuple[str, _MatchResult]] = [] + for name, cur_match in matches.items(): + matches_items_reversed.insert(0, (name, cur_match)) + + # Note: the order is important. `matches` currently provides the matches + # in reverse order. We would like to process the matches in non-reverse + # order, so that we can create an intuitive naming scheme, such as + # naming the first op's submodules `shadow_0_0` through `shadow_0_(n-1)` + for name, cur_match in matches_items_reversed: # type: ignore[call-overload] + was_seen = False + for node_or_tuple in cur_match[1]: + + # Cur_match[1] has an unusual type. It says that it's a `List[Node]`, + # but it is really not. Furthermore, the contents of this field + # can change from match results of multiple nodes of the same pattern + # + # For example, for conv -> bn -> relu, we see + # match_results = { + # 'conv': (relu, [(bn, conv), relu], ...), + # 'bn': (relu, [(bn, conv), relu], ...), + # 'relu': (relu, [(bn, conv), relu], ...), + # } + # + # Ideally we should clean up the `find_matches` function to make + # this more intuitive. For the purposes of this prototype, we hack + # around it. 
+ + if isinstance(node_or_tuple, Node): + if node_or_tuple in seen_nodes: + was_seen = True + seen_nodes.add(node_or_tuple) + + else: + assert isinstance(node_or_tuple, tuple) + for node in node_or_tuple: + assert isinstance(node, Node) + if node in seen_nodes: + was_seen = True + seen_nodes.add(node) + + if was_seen: + continue + + # Start with the unusual type, convert it to [op_0, ..., op_n] + list_of_nodes = [] + + if len(cur_match[1]) == 1: + list_of_nodes = cur_match[1] + else: + assert len(cur_match[1]) == 2 + # either (a, b), or ((a, b), c) or (c, (a, b)) + # cannot make any assumptions on order, not clear what the + # _find_matches function is doing to populate this + # TODO(future PR): make this code less confusing, see discussion + # in https://github.com/pytorch/pytorch/pull/80521/files#r975918836 + + def _order_nodes(node_a, node_b, node_c) -> List[Node]: + nodes = [node_a, node_b, node_c] + first_node = None + mid_node = None + last_node = None + for n in nodes: + prev_n = n.args[0] + next_n = next(iter(n.users)) + if prev_n not in nodes: + first_node = n + elif next_n not in nodes: + last_node = n + else: + mid_node = n + assert first_node is not None and mid_node is not None and \ + last_node is not None + assert mid_node.args[0] is first_node + assert last_node.args[0] is mid_node + return [last_node, mid_node, first_node] + + if isinstance(cur_match[1][0], Node) and isinstance(cur_match[1][1], Node): + # (a, b) + list_of_nodes = cur_match[1] + elif isinstance(cur_match[1][0], tuple): + # ((a, b), c) + node_a, node_b = cur_match[1][0] + node_c = cur_match[1][1] + list_of_nodes = _order_nodes(node_a, node_b, node_c) + elif isinstance(cur_match[1][1], tuple): + # (a, (b, c)) + node_a, node_b = cur_match[1][1] + node_c = cur_match[1][0] + list_of_nodes = _order_nodes(node_a, node_b, node_c) + + # [node_n, ..., node_0], note that the order is reversed + # to make it chronological for simple subgraphs + list_of_nodes.reverse() + subgraphs_dedup[name] = list_of_nodes + + return subgraphs_dedup + +def _get_logger_for_subgraph( + model: GraphModule, + first_node: Node, + last_node: Node, + subgraph_idx: int, + subgraph_candidate_idx: int, + qconfig_str: str, + logger_cls: Callable, + fqn: Optional[str], +) -> torch.nn.Module: + """ + Given a model and a linear subgraph starting from `first_node` and + ending with `last_node`, creates a logger for the end of this + subgraph. + """ + if fqn is None: + fqn = '' + logger_mod_orig = logger_cls( + first_node.name, # ref_node_name + last_node.name, # prev_node_name + f'subgraph_{subgraph_idx}_{subgraph_candidate_idx}', # model_name + 'model', # ref_name + get_target_type_str(last_node, model), # prev_node_target_type + get_target_type_str(first_node, model), # ref_node_target_type + NSSingleResultValuesType.NODE_OUTPUT.value, # results_type + 0, # index_within_arg + 0, # index_of_arg + fqn, # fqn + qconfig_str, + ) + # Usually we expect the user to add loggers, then calibrate, then convert, + # and then populate loggers. This is why the loggers start disabled. + # TODO(future PR): reconsider the design to make this more intuitive. + logger_mod_orig.enabled = False + return logger_mod_orig + +def create_submodule_from_subgraph( + model: torch.nn.Module, + first_node: Node, + last_node: Node, +) -> GraphModule: + """ + Input: a model, and a linear subgraph within the model from first_node to + last_node. 
+ + Output: a new submodule containing a copy of the subgraph, with the inputs + to the first node becoming the inputs to the submodule, and all other + nodes in the subgraph being copied. + + Example inputs: + + `model`: a module with graph + + x0 -> op1 -> x1 -> op2 -> x2 + | + arg1 + + `first_node`: op1 + `last_node`: op2 + + Example output: a new module with graph + + input1 -> op1_copy -> x1 -> op2_copy -> output1 + | + arg1 + """ + + # + # create a blank GraphModule with an empty graph + # + + class M(torch.nn.Module): + def forward(self, x): + pass + + m = M() + gm = torch.fx.symbolic_trace(m) + g = gm.graph + for node in reversed(gm.graph.nodes): + g.erase_node(node) + + # + # modify the graph to have a copy of our subgraph + # + + cur_node_orig = first_node + cur_args_orig = cur_node_orig.args + cur_kwargs_orig = cur_node_orig.kwargs + + cur_name_idx = 0 + + iteration_limit = 100 + cur_iteration = 0 + + while True: + if cur_node_orig is first_node: + # we are at the first node, we need to set up graph inputs + # TODO(future): some graphs could have placeholders which are unrelated + # to the first node, need to handle this + cur_args_copy = [] + cur_kwargs_copy = {} + seen_names: Set[str] = set() + old_name_to_new_node: Dict[str, Node] = {} + + def _add_placeholder( + g: Graph, node: Node, seen_names, old_name_to_new_node + ): + # note: for graphs starting with patterns such as `y = x + x`, we + # need to ensure we do not add multiple placeholders with the + # same name + counter = 0 + while node.name + '_' + str(counter) in seen_names: + counter += 1 + cur_name = node.name + '_' + str(counter) + seen_names.add(cur_name) + placeholder = g.placeholder(cur_name) + old_name_to_new_node[node.name] = placeholder + return placeholder + + for arg in cur_node_orig.args: + if isinstance(arg, Node): + p = _add_placeholder( + g, arg, seen_names, old_name_to_new_node) + cur_args_copy.append(p) + elif isinstance(arg, (list, tuple)): + new_arg = [] + for inner_arg in arg: + if isinstance(inner_arg, Node): + new_arg.append(_add_placeholder( + g, inner_arg, seen_names, old_name_to_new_node)) + else: + new_arg.append(inner_arg) + cur_args_copy.append(new_arg) + else: + cur_args_copy.append(arg) + + # TODO(future PR): handle non-normalized kwargs + for kwarg_name, kwarg in cur_node_orig.kwargs.items(): + if isinstance(kwarg, Node): + cur_kwargs_copy[kwarg_name] = _add_placeholder( + g, kwarg, seen_names, old_name_to_new_node) + elif isinstance(kwarg, (list, tuple)): + new_kwarg = [] + for inner_kwarg in kwarg: + p = _add_placeholder( + g, inner_kwarg, seen_names, old_name_to_new_node) + new_kwarg.append(p) + cur_kwargs_copy[kwarg_name] = new_kwarg + else: + cur_kwargs_copy[kwarg_name] = kwarg + + cur_args_copy = tuple(cur_args_copy) # type: ignore[assignment] + else: + # we are not at first node, first arg is from the previous node, + # and all other args are copied + + # the current implementation is simplistic and cannot handle + # ops with two or more arguments which need to be passed from + # the previous op, so we assert them out + assert cur_node_orig.target not in BINARY_FUNCTIONS + + # at this point in the code, cur_node_copy is pointing to the copy + # of the previous node + # TODO(future PR): this is not handling complicated graphs correctly, need to + # look at actual relationships instead of assuming sequential graph + # TODO(future PR): this is ignoring kwargs, will need to support kwargs + # for any fusion pattern which has them for a node that is not the + # first node. 
+ cur_args_copy = [cur_node_copy] # type: ignore[has-type] + + if len(cur_node_orig.args) > 1: + for arg in cur_node_orig.args[1:]: + if isinstance(arg, torch.nn.Parameter): + new_arg = arg.clone().detach() # type: ignore[assignment] + mod_name = f"mod_{cur_name_idx}" + cur_name_idx += 1 + setattr(gm, mod_name, new_arg) + new_arg_placeholder = gm.placeholder(mod_name) + cur_args_copy.append(new_arg_placeholder) + elif isinstance(arg, (float, int, torch.dtype)): + cur_args_copy.append(arg) + else: + raise AssertionError(f'arg of type {type(arg)} not handled yet') + cur_args_copy = tuple(cur_args_copy) # type: ignore[assignment] + + # copy the node + if cur_node_orig.op == 'call_module': + orig_mod = getattr_from_fqn(model, cur_node_orig.target) # type: ignore[arg-type] + orig_mod_copy = copy.deepcopy(orig_mod) + mod_name = f"mod_{cur_name_idx}" + setattr(gm, mod_name, orig_mod_copy) + cur_name_idx += 1 + cur_node_copy = g.call_module(mod_name, cur_args_copy, cur_kwargs_copy) + + elif cur_node_orig.op == 'call_function': + cur_node_copy = g.call_function( + cur_node_orig.target, cur_args_copy, cur_kwargs_copy) + + elif cur_node_orig.op == 'call_method': + cur_node_copy = g.call_method( + cur_node_orig.target, cur_args_copy, cur_kwargs_copy) + + else: + raise AssertionError(f'{cur_node_orig.op} not supported yet') + + if cur_node_orig is last_node: + break + + # go to next node + assert len(cur_node_orig.users.keys()) == 1, \ + f'{cur_node_orig} has more than 1 users, not supported yet' + cur_node_orig = next(iter(cur_node_orig.users.keys())) + cur_args_orig = cur_node_orig.args + cur_kwargs_orig = cur_node_orig.kwargs + + cur_iteration += 1 + if cur_iteration > iteration_limit: + raise AssertionError('iteration limit exceeded') + + # set up outputs + g.output(cur_node_copy) + + gm.recompile() + return gm + +def create_one_transformed_and_logged_copy_of_subgraph( + mt: GraphModule, + subgraph_idx: int, + subgraph_candidate_idx: int, + first_node: Node, + last_node: Node, + fqn: Optional[str], + list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]], + example_inputs: Any, + last_added_shadow_node_list: List[Optional[Node]], + custom_prepare_fn: Optional[Callable] = None, + custom_prepare_kwargs: Optional[Dict[str, Any]] = None, +) -> None: + """ + Given a subgraph in `mt` and a subgraph candidate idx, inserts the + subgraph candidate copy and instruments it with loggers. + + If subgraph_candidate_idx is 0, this is the baseline fp32 subgraph and we just + add a logger to the end. + + If subgraph_candidate_idx is not 0, we create a copy of the subgraph and + prepare it with `prepare_fx`. 
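+
+    For example (illustrative only): with two candidate QConfigMappings the
+    candidate indices are 0 (the unquantized baseline), 1 and 2, and candidate
+    ``i`` (for ``i > 0``) uses ``list_of_node_name_to_qconfig[i - 1]``.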
+ """ + + # TODO(future PR): move logger classes to utils to remove circular dependency + from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger + + if subgraph_candidate_idx == 0: + # idx = 0 is the floating point (original) version of the subgraph + # We keep the subgraph as is, and add a logger at the end + + qconfig_str = '' + logger_mod_orig = _get_logger_for_subgraph( + mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx, + qconfig_str, OutputLogger, fqn) + + attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx) + assert not hasattr(mt, attr_name) + setattr(mt, attr_name, logger_mod_orig) + with mt.graph.inserting_after(last_node): + new_node = mt.graph.call_module(attr_name, args=(last_node,), kwargs={}) + last_added_shadow_node_list[0] = new_node + + else: + # idx > 0 means we have a candidate qconfig to try, so we need + # to make a copy of the subgraph, feed it with the right inputs, + # and add a logger at the end + + # get the qconfig + # subtract one because the first candidate is the floating point + # version of the subgraph + node_name_to_qconfig = \ + list_of_node_name_to_qconfig[subgraph_candidate_idx - 1] + qconfig = node_name_to_qconfig[first_node.name] + + # if no quantization is requested, skip + # TODO(future PR): deduplicate equivalent qconfigs that come from + # different qconfig mapping objects + if qconfig is None: + return + + qconfig_mapping = QConfigMapping().set_global(qconfig) + + # create a copy of the submodule, wrapped in a separate module + orig_mod_copy_wrapped = create_submodule_from_subgraph( + mt, first_node, last_node) + + # add a call to prepare_fx on the wrapper module + if custom_prepare_fn is None: + orig_mod_copy_wrapped = torch.ao.quantization.quantize_fx.prepare_fx( + orig_mod_copy_wrapped, qconfig_mapping, example_inputs=example_inputs) + else: + if custom_prepare_kwargs is None: + custom_prepare_kwargs = {} + for kwarg_name in ["example_inputs", "prepare_custom_config", "qconfig_mapping"]: + assert kwarg_name not in custom_prepare_kwargs, f"cannot specify {kwarg_name} in custom_prepare_kwargs" + prepare_kwargs: Dict[str, Any] = { + "example_inputs": example_inputs, + "qconfig_mapping": qconfig_mapping + } + prepare_kwargs.update(custom_prepare_kwargs) + orig_mod_copy_wrapped = custom_prepare_fn( + orig_mod_copy_wrapped, + **prepare_kwargs) + + # attach the wrapper to the model + attr_name = _get_attr_wrapper_name(subgraph_idx, subgraph_candidate_idx) + assert not hasattr(mt, attr_name) + setattr(mt, attr_name, orig_mod_copy_wrapped) + + # add a call to the wrapper module from the parent graph + insert_after_node = last_added_shadow_node_list[0] + with mt.graph.inserting_after(insert_after_node): + # TODO(future PR): handle fusion patterns where non-first nodes + # need inputs + + # pass in all node args and kwargs + + new_args = [] + for arg in first_node.args: + if isinstance(arg, Node): + new_args.append(arg) + elif isinstance(arg, (list, tuple)) and len(arg) and isinstance(arg[0], Node): + for inner_arg in arg: + if isinstance(inner_arg, Node): + new_args.append(inner_arg) + + new_kwargs = {} + for name, old_kwarg in first_node.kwargs.items(): + if isinstance(old_kwarg, Node): + new_kwargs[name] = old_kwarg + elif isinstance(old_kwarg, (list, tuple)) and len(old_kwarg): + for inner_old_kwarg in old_kwarg: + # TODO(future PR): clarify why we are adding kwargs to args + new_args.append(inner_old_kwarg) + + new_args = tuple(new_args) # type: ignore[assignment] + + new_node = 
mt.graph.call_module( + attr_name, args=new_args, kwargs=new_kwargs) + + # add a logger to parent graph to observe the shadow wrapper + logger_mod_orig = _get_logger_for_subgraph( + mt, first_node, last_node, subgraph_idx, subgraph_candidate_idx, + str(qconfig), OutputComparisonLogger, fqn) + + attr_name = _get_attr_name(subgraph_idx, subgraph_candidate_idx) + assert not hasattr(mt, attr_name) + setattr(mt, attr_name, logger_mod_orig) + with mt.graph.inserting_after(new_node): + logger = mt.graph.call_module(attr_name, args=(new_node, last_node), kwargs={}) + last_added_shadow_node_list[0] = logger + + mt.recompile() + +def create_n_transformed_and_logged_copies_of_subgraph( + mt: GraphModule, + subgraph_idx: int, + match_name: str, + nodes_in_this_subgraph: List[Any], + qconfig_mappings: List[QConfigMapping], + list_of_node_name_to_qconfig: List[Dict[str, QConfigAny]], + custom_prepare_fn: Optional[Callable] = None, + custom_prepare_kwargs: Optional[Dict[str, Any]] = None, +) -> None: + """ + Given a model `mt` and a subgraph_idx, creates the needed copies + of the subgraph for all qconfigs, and instruments them with loggers. + """ + # for now, assume that + # 1. the first node has one input + # 2. the last node has one output + + # for now, ignore all subgraphs that contain non-nodes (tuples, etc) + # TODO(future PR): implement this + if any( + not isinstance(node, Node) + for node in nodes_in_this_subgraph + ): + return + + first_node = nodes_in_this_subgraph[0] + last_node = nodes_in_this_subgraph[-1] + # We used output propagation to populate example values on each + # node. Use the example values from the previous node as the input + # to the current node. + prev_node = get_normalized_nth_input(first_node, mt, 0) + if isinstance(prev_node, list): + example_inputs = [x.traced_result for x in prev_node] + elif isinstance(prev_node, tuple): + example_inputs = (x.traced_result for x in prev_node) # type: ignore[assignment] + else: + # currently some customer models do not have a traced_result in + # every node, so we have to guard for this case since we cannot + # quantize without an example input + # TODO(future PR): add a test case for this once we have an easy + # repro, see https://github.com/pytorch/pytorch/pull/80521/files#r975940489 + # for additional context + if hasattr(prev_node, 'traced_result'): + example_inputs = (prev_node.traced_result,) # type: ignore[attr-defined, assignment] + else: + print( + 'unable to get example input for node ' + + f'{first_node.format_node()}, skipping') + return + + # If there are no quantization configs for this subgraph, skip adding + # loggers. This reduces memory usage for models where not all layers are + # quantized. + # TODO(future): consider making this configurable + found_at_least_one_qconfig = False + for subgraph_candidate_idx in range(len(qconfig_mappings) + 1): + + if subgraph_candidate_idx == 0: + # fp32 baseline does not need a qconfig + continue + + # a. we have N shadows, so len(qconfig_mappings) is N + # b. we will have the fp32 layer + N shadows, so overall number of + # (original_op) + (*shadows) will be N+1 + # c. 
since `subgraph_candidate_idx` represents (b), we need + # to subtract 1 to query from (a) + node_name_to_qconfig = \ + list_of_node_name_to_qconfig[subgraph_candidate_idx - 1] + qconfig = node_name_to_qconfig[first_node.name] + if qconfig is not None: + found_at_least_one_qconfig = True + break + if not found_at_least_one_qconfig: + print('unable to find at least one qconfig for node ' + + f'{first_node.format_node()}, skipping') + return + + fqn = _maybe_get_fqn(first_node, mt) + + # We want the results to contain the subgraphs in natural order, + # and the graph to also contain shadow wrappers and shadow loggers + # in natural order. + # If we just iterate in reverse, the graph will be in natural + # order but the eventual results will be in reverse order. + # So, we keep track of the last shadow logger we added and + # always insert after it. + last_added_shadow_node_list: List[Optional[Node]] = [None] + for subgraph_candidate_idx in range(len(qconfig_mappings) + 1): + + create_one_transformed_and_logged_copy_of_subgraph( + mt, subgraph_idx, subgraph_candidate_idx, first_node, + last_node, fqn, list_of_node_name_to_qconfig, + example_inputs, last_added_shadow_node_list, custom_prepare_fn, + custom_prepare_kwargs) + +def create_add_loggers_graph( + model: GraphModule, + subgraphs_dedup: Dict[str, List[Node]], + qconfig_mapping: QConfigMapping, + node_name_to_qconfig: Dict[str, QConfigAny], +) -> None: + r""" + Given a model, a model graph partition (currently a set of matched + subgraphs) and instructions how to transform each subgraph + (currently quantizing it according to qconfig_mapping), modifies + the model graph to create an alternate path through the original graph, + with each of the subgraphs quantized. This is useful to compare + propagation error of a transformation such as quantization. + + For example, given layer op0 and op1, there are four cases when handling op1: + 1. op0 and op1 quantized + 2. op0 and op1 unquantized + 3. op0 quantized, op1 unquantized + 4. op0 unquantized, op1 quantized + + Example input, case 1: + + .. code:: + + x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log + \ \ \ \ # noqa: W605 + ---> op0_1 -> x1_1 ----> clog op1_1 -> x2_1 ----> clog + + Example output, case 1: + + .. code:: + + x0_0 -> op0_0 -> x1_0 -> log -----> op1_0 -> x2_0 -> log + \ \ \ # noqa: W605 + ---> op0_1 -> x1_1 ----> clog -> op1_1 -> x2_1 ----> clog + + """ + # TODO(future PR): move logger classes to utils to remove circular dependency + from torch.ao.ns._numeric_suite_fx import OutputLogger, OutputComparisonLogger + + def _get_subgraph_containing_node(node, subgraphs_dedup): + for subgraph in subgraphs_dedup.values(): + if node in subgraph: + return subgraph + return None + + # First, we need to create shadow branches, going from + # + # x0 -> op0 -> x1 -> ... + # + # + # to + # + # x0 -> op0_0 -> x1_0 -> log -> ... + # \ \ + # -> op0_1 -> x1_1 -> clog + # + # Later, the outputs of each shadow will be rerouted to calculate + # propagation error. + + # Note: we cannot iterate over matched subgraphs because some nodes + # may not be matched. So, we iterate over nodes in the graph, and + # associate them to matched subgraphs if possible. 
+ + nodes_to_skip = set() + # for each subgraph, save a mapping from first node of subgraph + # to first and last node of the shadow of this subgraph + orig_first_node_to_shadow_in_node = {} + orig_first_node_to_shadow_out_node = {} + # need to record original list because we will mutate the graph as we go + orig_nodes = list(model.graph.nodes) # type: ignore[union-attr, arg-type] + cur_subgraph_idx = 0 + for n in orig_nodes: + if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip: + continue + + maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup) + insert_submodule_copy = False + if maybe_subgraph is not None: + first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1] + for node_to_skip in maybe_subgraph: + nodes_to_skip.add(node_to_skip) + qconfig = node_name_to_qconfig[first_node.name] + if qconfig is not None: + insert_submodule_copy = True + else: + first_node, last_node = n, n + + if insert_submodule_copy: + match_name = first_node.name + create_n_transformed_and_logged_copies_of_subgraph( + model, cur_subgraph_idx, match_name, maybe_subgraph, + [qconfig_mapping], [node_name_to_qconfig], + None, None # type: ignore[arg-type] + ) + # find the created shadow module and record it so we + # can find it easily in step 2 + expected_shadow_target = f"shadow_wrapper_{cur_subgraph_idx}_1" + new_shadow_mod = None + for maybe_shadow_mod in model.graph.nodes: + if maybe_shadow_mod.op == 'call_module' and \ + maybe_shadow_mod.target == expected_shadow_target: + new_shadow_mod = maybe_shadow_mod + break + assert new_shadow_mod is not None + orig_first_node_to_shadow_in_node[first_node] = new_shadow_mod + orig_first_node_to_shadow_out_node[first_node] = new_shadow_mod + + else: + # create a copy of the subgraph by only copying FX nodes + # but not copying any parameters, to minimize memory usage + subgraph_to_use = maybe_subgraph if maybe_subgraph is not None \ + else [first_node] + + # add a regular logger after last_node + qconfig_str = '' + subgraph_candidate_idx = 0 + fqn = _maybe_get_fqn(first_node, model) + logger_mod_orig = _get_logger_for_subgraph( + model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx, + qconfig_str, OutputLogger, fqn) + attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx) + assert not hasattr(model, attr_name) + setattr(model, attr_name, logger_mod_orig) + insertion_point = last_node + with model.graph.inserting_after(insertion_point): + logger = model.graph.call_module( + attr_name, args=(last_node,), kwargs={}) + insertion_point = logger + + # create a copy of the subgraph + cur_node_orig = first_node + cur_node_copy = None + first_node_copy = None + while cur_node_orig in subgraph_to_use: + # TODO(future PR): make this support all possible args/kwargs + if cur_node_orig is first_node: + new_args = cur_node_orig.args + new_kwargs = cur_node_orig.kwargs + else: + first_arg_for_copy = cur_node_copy + new_args = tuple([first_arg_for_copy, *cur_node_orig.args[1:]]) # noqa: C409 + new_kwargs = cur_node_orig.kwargs + # make a copy of cur_node_orig + with model.graph.inserting_after(insertion_point): + cur_node_copy = model.graph.create_node( + cur_node_orig.op, + cur_node_orig.target, + new_args, + new_kwargs, + # cur_node_orig.name, # TODO(future PR): set name explicitly + ) + if first_node_copy is None: + first_node_copy = cur_node_copy + # since now only linear subgraphs are supported, all nodes + # except the last one must have only one user + if cur_node_orig != last_node: + assert 
len(cur_node_orig.users.keys()) == 1 + cur_node_orig = next(iter(cur_node_orig.users.keys())) + assert not cur_node_orig.name.startswith(SHADOW_NODE_NAME_PREFIX) + insertion_point = cur_node_copy + + # add a comparison logger after last_node's copy + subgraph_candidate_idx = 1 + logger_mod_orig = _get_logger_for_subgraph( + model, first_node, last_node, cur_subgraph_idx, subgraph_candidate_idx, + qconfig_str, OutputComparisonLogger, fqn) + attr_name = _get_attr_name(cur_subgraph_idx, subgraph_candidate_idx) + assert not hasattr(model, attr_name) + setattr(model, attr_name, logger_mod_orig) + with model.graph.inserting_after(insertion_point): + logger = model.graph.call_module( + attr_name, args=(cur_node_copy, last_node), kwargs={}) + + # save the final node so we can use it in step 2 + orig_first_node_to_shadow_in_node[first_node] = first_node_copy + orig_first_node_to_shadow_out_node[first_node] = cur_node_copy + + cur_subgraph_idx += 1 + + model.recompile() + + # Now, we go from + # + # x0 -> op0_0 -> x1_0 -> log -> x1 -> op1_0 -> ... + # \ \ \ + # -> op0_1 -> x1_1 -> clog -> op1_1 -> ... + # + # to + # + # x0 -> op0_0 -> x1_0 -> log --> x1_0 -> op1_0 -> ... + # \ \ + # -> op0_1 -> x1_1 -> clog -> x1_1 -> op1_1 -> ... + # + # sample values of key internal variables for the example above: + # + # orig_first_node_to_shadow_in_node = {op0_0: op0_1, op1_0: op1_1} + # orig_first_node_to_shadow_out_node = {op0_0: op0_1, op1_0: op1_1} + # + # note: for subgraphs with more than one node, in_node will be different + # compared to out_node + + + nodes_to_skip = set() + for n in orig_nodes: + if n.op in ('placeholder', 'get_attr', 'output') or n in nodes_to_skip: + continue + + maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup) + if maybe_subgraph is not None: + first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1] + for node_to_skip in maybe_subgraph: + nodes_to_skip.add(node_to_skip) + else: + first_node, last_node = n, n + + def maybe_remap_node_to_shadow(node): + """ + If unshadowed `node` has a shadow version, return that. If not, + return `node`. + """ + if not isinstance(node, Node): + # handle scalars + return node + + if node.op in ('placeholder', 'get_attr'): + return node + + # Find the shadowed version of this arg from the previous + # subgraph. For this, we need to: + # 1. navigate to the first node of the previous subgraph + # 2. get the output of the shadow wrapper which has (1) as an input + + # For now, assume the arg is in matched subgraphs. In the + # future we may have to handle the case where this is not true. + prev_subgraph = _get_subgraph_containing_node( + node, subgraphs_dedup) + if prev_subgraph is None: + prev_subgraph = [node] + prev_first_node = prev_subgraph[0] + prev_shadow_output = \ + orig_first_node_to_shadow_out_node[prev_first_node] + return prev_shadow_output + + cur_shadow_input = \ + orig_first_node_to_shadow_in_node[first_node] + assert cur_shadow_input is not None + cur_shadow_input.args = tree_map( + maybe_remap_node_to_shadow, cur_shadow_input.args) + cur_shadow_input.kwargs = tree_map( + maybe_remap_node_to_shadow, cur_shadow_input.kwargs) + + model.recompile() + +def _get_weight_info_from_shadow_wrapper(shadow_wrapper: torch.nn.Module): + # input: shadow wrapper module + # output if shadow wrapper module has a weighted op: + # (quantize_fn, (quantize_fn_args)) + # output if shadow wrapper module doesn't have a weighted op: + # None + + # For now, assume that the weight is the second input + # to the shadow module. 
If that changes, we can fix it later. + placeholders_seen = 0 + for shadow_n in shadow_wrapper.graph.nodes: # type: ignore[union-attr] + if shadow_n.op != 'placeholder': + continue + + placeholders_seen += 1 + if placeholders_seen != 2: + continue + + # the subgraph looks like + # + # _input_scale_1 = self._input_scale_1 + # _input_zero_point_1 = self._input_zero_point_1 + # quantize_per_channel = torch.quantize_per_channel( + # w2_0, _input_scale_1, _input_zero_point_1, + # 0, torch.qint8) + # + # we have `w2_0`, and are navigating this subgraph + # to get `_input_scale_1` and `_input_zero_point_1` + + assert len(shadow_n.users) == 1 + quant_node = next(iter(shadow_n.users.keys())) + new_args: Any = None + if quant_node.target == torch.quantize_per_channel: + _weight, scale_node, zp_node, axis, dtype = quant_node.args + scale_val = getattr_from_fqn( + shadow_wrapper, scale_node.target) + zp_val = getattr_from_fqn( + shadow_wrapper, zp_node.target) + new_args = (scale_val, zp_val, axis, dtype) + else: + assert quant_node.target == torch.quantize_per_tensor + _weight, scale_node, zp_node, dtype = quant_node.args + scale_val = getattr_from_fqn( + shadow_wrapper, scale_node.target) + zp_val = getattr_from_fqn( + shadow_wrapper, zp_node.target) + new_args = (scale_val, zp_val, dtype) + return (quant_node.target, new_args) + + return None + + +def extract_weight_comparison(m: GraphModule) -> NSResultsType: + + # example graph: + # + # w1 = self.w1 + # b1 = self.b1 + # linear = torch._C._nn.linear(x, w1, b1) + # shadow_0_0 = self.shadow_0_0(linear) + # shadow_wrapper_0_1 = self.shadow_wrapper_0_1(x, w1, b1) + # shadow_0_1 = self.shadow_0_1(shadow_wrapper_0_1, linear) + # + # algorithm: + # 1. for each call_function node matching our allowlist: + # 2. if corresponding shadow wrapper exists, extract the weight pair + # + # Note: this is not super robust, but that's ok because this is + # just for legacy customers who depend on the previous two-model version + # of this API. TBD if we need to make this robust. + # Note: modules are not supported, since existing customers only + # use functions. 
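+    #
+    # Output shape (illustrative): for a matched linear in subgraph n, results
+    # are stored under results['model']['weight']['subgraph_n_0'] (fp32 weight)
+    # and results['model']['weight']['subgraph_n_1'] (quantized weight), each
+    # carrying a precomputed sqnr comparison.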
+ + # TODO(future PR): move this to config + weighted_ops = { + torch.nn.functional.linear, + } + + results: NSResultsType = { + 'model': {NSSingleResultValuesType.WEIGHT.value: {}} + } + + for n in m.graph.nodes: # type: ignore[union-attr] + if not (n.op == 'call_function' and n.target in weighted_ops): + continue + + # Check if we have a corresponding shadow wrapper + # TODO(future PR, if needed): support kwargs + # TODO(future PR, if needed): support multiple shadow users + first_arg = n.args[0] + shadow_wrapper_node = None + for user in first_arg.users: + # TODO(before land): fix string match + if user.op == 'call_module' and \ + user.target.startswith('shadow_wrapper'): + shadow_wrapper_node = user + break + + if shadow_wrapper_node is None: + continue + + shadow_wrapper = getattr_from_fqn( + m, shadow_wrapper_node.target) # type: ignore[arg-type] + weight_info = _get_weight_info_from_shadow_wrapper( + shadow_wrapper) + if weight_info is None: + continue + + # get weight + w_node = n.args[1] + w_obj = getattr_from_fqn(m, w_node.target).detach() + + # get a quantized version of weight + quant_fn, quant_fn_args_except_first = weight_info + new_args = (w_obj, *quant_fn_args_except_first) + w_obj_q = quant_fn(*new_args) + + # add a comparison + ref_node_name = n.name + prev_node_name = n.name + ref_node_type = get_target_type_str(n, m) + prev_node_type = ref_node_type + fqn = None + if hasattr(m, '_node_name_to_scope'): + fqn = m._node_name_to_scope[n.name][0] # type: ignore[index] + comparison = torch.ao.ns.fx.utils.compute_sqnr(w_obj, w_obj_q) + result_fp32 = { + 'res_type': NSSingleResultValuesType.WEIGHT.value, + 'values': [w_obj], + 'prev_node_name': prev_node_name, + 'prev_node_target_type': prev_node_type, + 'ref_node_name': ref_node_name, + 'ref_node_target_type': ref_node_type, + 'index_within_arg': 0, + 'index_of_arg': 0, + 'fqn': fqn, + 'qconfig_str': '', + 'comparisons': [comparison], + 'comparison_fn_name': 'sqnr', + } + result_q = { + 'res_type': NSSingleResultValuesType.WEIGHT.value, + 'values': [w_obj_q], + 'prev_node_name': prev_node_name, + 'prev_node_target_type': prev_node_type, + 'ref_node_name': ref_node_name, + 'ref_node_target_type': ref_node_type, + 'index_within_arg': 0, + 'index_of_arg': 0, + 'fqn': fqn, + 'qconfig_str': '', + 'comparisons': [comparison], + 'comparison_fn_name': 'sqnr', + } + + # go from subgraph_n_1 to subgraph_n_0 + _1, _2, node_idx, _3 = shadow_wrapper_node.target.split('_') + name_fp32 = f"subgraph_{node_idx}_0" + name_q = f"subgraph_{node_idx}_1" + + results['model'][NSSingleResultValuesType.WEIGHT.value][name_fp32] = \ + [result_fp32] + results['model'][NSSingleResultValuesType.WEIGHT.value][name_q] = \ + [result_q] + + return results + +# TODO(future PR): redesign this to make it easier to consume outputs +def group_results_by_subgraph(results: NSResultsType) -> Any: + """ + Creates a comparison of results + + Input: + + { + 'model': { + 'node_output': { + 'subgraph_0_0': [ + 'values': [torch.tensor(...), ...], ... + 'ref_node_name': ..., + 'ref_node_target_type': ..., + 'qconfig_str': ..., + 'comparisons': [], ... + 'comparison_fn_name': '', + 'fqn': '...', + ], + 'subgraph_0_1': [ + 'values': [torch.tensor(...), ...], ... + 'ref_node_name': ..., + 'ref_node_target_type': ..., + 'qconfig_str': ..., + 'comparisons': [torch.tensor(...), ...], ... + 'comparison_fn_name': '...', + 'fqn': '...', + ], + ... 
+ }, + }, + } + + Output: + { + 'subgraph_0': { + '0': { + 'ref_node_name': '...', + 'ref_node_target_type': ..., + 'values': [torch.tensor(...), ...], + 'qconfig_str': None, + 'comparisons': [torch.tensor(...), ...], ... + 'comparison_fn_name': '...', + 'fqn': '...', + }, + '1': { + 'ref_node_name': '...', + 'ref_node_target_type': ..., + 'values': [torch.tensor(...), ...], + 'qconfig_str': '...', + 'comparisons': [torch.tensor(...), ...], ... + 'comparison_fn_name': '...', + 'fqn': '...', + }, + }, + } + + """ + subgraph_name_to_subgraph_results: Any = collections.defaultdict(dict) + + # node_output or weight + key_to_use = next(iter(results['model'].keys())) + + for subgraph_name_with_idx, subgraph_candidate_results in \ + results['model'][key_to_use].items(): + + # convert from `subgraph_m_n` to `subgraph_m` and `n` + subgraph_str, subgraph_idx, subgraph_candidate_idx = \ + subgraph_name_with_idx.split('_') + subgraph_name = f'{subgraph_str}_{subgraph_idx}' + + subgraph_results = { + 'ref_node_name': subgraph_candidate_results[0]['ref_node_name'], + 'ref_node_target_type': subgraph_candidate_results[0]['ref_node_target_type'], + 'fqn': subgraph_candidate_results[0]['fqn'], + 'values': subgraph_candidate_results[0]['values'], + 'qconfig_str': subgraph_candidate_results[0]['qconfig_str'], + 'comparisons': subgraph_candidate_results[0]['comparisons'], + 'comparison_fn_name': subgraph_candidate_results[0]['comparison_fn_name'], + } + + subgraph_name_to_subgraph_results[subgraph_name][subgraph_candidate_idx] = \ + subgraph_results + + return dict(subgraph_name_to_subgraph_results) + +# TODO(future PR): redesign this to make it easier to consume outputs +def create_results_comparison( + results_grouped, +) -> Any: + """ + Input: + + { + 'subgraph_0': { + '0': { + 'ref_node_name': '...', + 'ref_node_target_type': ..., + 'values': [torch.tensor(...), ...], + 'qconfig_str': '', + 'comparisons': [], + 'comparison_fn_name': '', + 'fqn': '...', + }, + '1': { + 'ref_node_name': '...', + 'ref_node_target_type': ..., + 'values': [torch.tensor(...), ...], + 'qconfig_str': '...', + 'comparisons': [torch.tensor(...), ...], + 'comparison_fn_name': 'sqnr', + 'fqn': '...', + }, + }, + } + + Output: + { + 'subgraph_0': { + 'ref_node_name': '...', + 'ref_node_target_type': '...', + 'fqn': '...', + 'candidates': { + '1': { + 'qconfig_str': ..., + 'comparison_fn_name': 'sqnr', + 'cmp_raw': [..., ...], + 'cmp_mean': ..., + }, + ..., + }, + }, + } + """ + + results_comparison = {} + + for subgraph_name, subgraph_results in results_grouped.items(): + + candidates = {} + for subgraph_inner_name, subgraph_inner_result in subgraph_results.items(): + # skip comparing baseline to baseline + if subgraph_inner_name == '0': + continue + + # we expect the comparisons to be precalculated from + # calibration, so we just fetch them here + cmp_raw = subgraph_inner_result['comparisons'] + cmp_raw_tensor = torch.stack(cmp_raw) + + candidates[subgraph_inner_name] = { + 'qconfig_str': subgraph_inner_result['qconfig_str'], + 'comparison_fn_name': subgraph_inner_result['comparison_fn_name'], + 'cmp_raw': cmp_raw_tensor, + 'cmp_mean': torch.mean(cmp_raw_tensor), + } + + results_comparison[subgraph_name] = { + 'ref_node_name': subgraph_results['0']['ref_node_name'], + 'ref_node_target_type': subgraph_results['0']['ref_node_target_type'], + 'fqn': subgraph_results['0']['fqn'], + 'candidates': candidates, + } + + return results_comparison + +# TODO(future PR): redesign this to make it easier to consume outputs +def 
print_n_shadows_summary( + results_comparison, +) -> None: + """ + Input: + + { + 'subgraph_0': { + 'ref_node_name': 'linear1', + 'ref_node_target_type': '...', + 'fqn': '...', + 'candidates': { + '1': { + 'qconfig_str': ..., + 'comparison_fn_name': ..., + 'cmp_raw': [45.0, 55.0], + 'cmp_mean': 50.0, + }, + ..., + }, + }, + } + + Prints: + + node_name | node_type | fqn | 0 | 1 | ... + linear1 | ... | ... | 45.0 | 50.0 | ... + """ + + try: + from tabulate import tabulate + except ImportError: + print("`print_tabular` relies on the library `tabulate`, " + "which could not be found on this machine. Run `pip " + "install tabulate` to install the library.") + return + + results = [] + for subgraph_data in results_comparison.values(): + mean_all_candidates = [ + candidate['cmp_mean'] + for candidate_name, candidate in subgraph_data['candidates'].items() + ] + + data_row = [ + subgraph_data['ref_node_name'], + subgraph_data['ref_node_target_type'], + subgraph_data['fqn'], + *mean_all_candidates, + ] + results.append(data_row) + + max_candidate_idx_len = -1 + for data_row in results: + max_candidate_idx_len = max(max_candidate_idx_len, len(data_row[1])) + candidate_idx_headers = [str(x) for x in range(max_candidate_idx_len)] + + headers = ['node_name', 'node_type', 'fqn', *candidate_idx_headers] + print(tabulate(results, headers=headers)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3c422dd4ae9d698645615f285efdae24dc278c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/ns_types.py @@ -0,0 +1,64 @@ +import enum +from typing import NamedTuple + +from torch.fx.graph import Node + +from typing import Dict, Any, List, Union, Callable + +class NSSingleResultValuesType(str, enum.Enum): + WEIGHT = 'weight' + NODE_OUTPUT = 'node_output' + NODE_INPUT = 'node_input' + +class NSSubgraph(NamedTuple): + start_node: Node + end_node: Node + base_op_node: Node + +# TODO(future PR): see if we can use typing_extensions's TypedDict instead +# to properly type the various keys +# { +# # one of NSSingleResultValuesType +# 'type': 'weight', +# # the values of type specified above +# 'values': [torch.tensor(...), ...], +# # name of the node directly before the logger +# 'prev_node_name': 'linear1', +# # type of the underlying function or module +# 'prev_node_target_type': torch.nn.functional.linear # or torch.nn.Linear, etc +# # name of the node responsible for adding this logger +# # Note: this may differ from prev_node_name if we are logging inputs +# 'ref_node_name': 'linear1', +# # index of this node within the arg of the input/output node +# # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1 +# 'index_within_arg': 0, +# # index of this node within the args of the input/output node +# # for example, in add(x1, x2), x2 would have index_of_arg == 1 +# 'index_of_arg': 0, +# # precomputed comparisons of logger values to reference values +# 'comparisons': [torch.tensor(...), ...] 
+# # name of function used for precomputed comparisons +# 'comparison_fn_name': 'sqnr', +# # string representation of qconfig responsible for creating this logger +# 'qconfig_str': 'QConfig(...)', +# } +NSSingleResultType = Dict[str, Any] + +# { +# 'layer_name_1': { # subgraph name +# 'node_output': { # results type (node_output, node_input, weight) +# 'model_name_a': # model name +# [NSSingleResultType, ...], # results, ordered by index_within_arg +# 'model_name_b': +# [NSSingleResultType, ...], +# }, +# }, +# } +# +NSResultsType = Dict[str, Dict[str, Dict[str, List[NSSingleResultType]]]] + +# Defines the underlying target type of a node, for example: +# `F.conv1d` for a `call_function` conv node +# `nn.Conv1d` for a `call_module` node calling the forward of a `nn.Conv1d` module +# `'sigmoid'` for a `call_method` node calling `x.sigmoid()` +NSNodeTargetType = Union[Callable, str] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2925dfe012125f3428d156602199e1e9d840e926 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/pattern_utils.py @@ -0,0 +1,200 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +toq = torch.ops.quantized + +from torch.fx import GraphModule +from torch.fx.graph import Node + +from torch.ao.quantization.backend_config import get_native_backend_config +from torch.ao.quantization.fx.quantize_handler import _get_pattern_to_quantize_handlers +from torch.ao.quantization.utils import getattr_from_fqn +from .ns_types import NSNodeTargetType +from torch.ao.quantization import ( + ObserverBase, + FakeQuantizeBase, +) + +from typing import Dict, Tuple, Set, Callable, Any, Union, List + + +def get_type_a_related_to_b( + base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]], +) -> Set[Tuple[NSNodeTargetType, NSNodeTargetType]]: + # TODO(future PR): allow customizations + # TODO(future PR): reuse existing quantization mappings + # TODO(future PR): add the rest of modules and ops here + type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]] = set() + + for s in base_name_to_sets_of_related_ops.values(): + s_list = list(s) + # add every bidirectional pair + for idx_0 in range(0, len(s_list)): + for idx_1 in range(idx_0, len(s_list)): + type_a_related_to_b.add((s_list[idx_0], s_list[idx_1])) + type_a_related_to_b.add((s_list[idx_1], s_list[idx_0])) + + return type_a_related_to_b + + +NSFusionElType = Union[ + Callable, # call_function or call_module type, example: F.linear or nn.Conv2d + str, # call_method name, example: "dequantize" + Tuple[str, Any], # call_method name and first argument, example: ("to", torch.float16) +] +NSFusionType = Union[ + Tuple[NSFusionElType, NSFusionElType], + Tuple[NSFusionElType, NSFusionElType, NSFusionElType, NSFusionElType], +] + +def get_reversed_fusions() -> List[Tuple[NSFusionType, int]]: + """ + Set of potential fusions, in reverse order. The order is reversed + to match how fusion patterns are defined in quantization code. + + Fusion format: + ((fusion_op_0, fusion_op_1), base_op_idx) + + Where base_op_idx is the idx of the op we should use to match other related + ops. Note: base_op_idx is specified in non-reverse order, i.e. a base_op_idx + of 0 represents the first op in regular (non-reverse) order, 1 represents the + second op, etc. 
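+
+    Example (illustrative)::
+
+        # matched ending at ReLU; Conv2d (the first op in execution order)
+        # is the base op used to look up related ops
+        ((nn.ReLU, nn.Conv2d), 0)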
+ """ + results: List[Tuple[NSFusionType, int]] = [] + + # Possible syntaxes: + # * single op: torch.nn.Conv2d + # * multiple ops: (torch.nn.ReLU, torch.nn.Conv2d) + # For fusions, we only care about patterns composed of multiple ops. + # TODO(future PR): allow customizations from default patterns. + all_quant_patterns = _get_pattern_to_quantize_handlers(get_native_backend_config()) + + default_base_op_idx = 0 + for quant_pattern in all_quant_patterns.keys(): + # TODO: this is a temporary hack to flatten the patterns from quantization so + # that it works with the ns matcher function, maybe we should use `_is_match` + # in torch.ao.quantization.fx.match_utils to match the patterns + if isinstance(quant_pattern, tuple) and len(quant_pattern) == 2 and \ + isinstance(quant_pattern[1], tuple) and len(quant_pattern[1]) == 2: + # flatten the pattern with form (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)) + quant_pattern = (quant_pattern[0], quant_pattern[1][0], quant_pattern[1][1]) + + # Only patterns of multiple ops are fusions, ignore + # patterns which contain a single ops (they get matched + # without caring about fusions). + if isinstance(quant_pattern, tuple): + results.append((quant_pattern, default_base_op_idx)) # type: ignore[arg-type] + + # For each pattern, add additional patterns with observers and + # fake quants at the end. + # TODO(future PR): if needed, implement matching for a node + # having multiple output observers. + for cls in (ObserverBase, FakeQuantizeBase): + if isinstance(quant_pattern, tuple): + new_pattern = (cls, *quant_pattern) + else: + new_pattern = (cls, quant_pattern) + results.append((new_pattern, default_base_op_idx)) # type: ignore[arg-type] + + + # After this point, results contains values such as + # [..., ((torch.nn.Relu, torch.nn.Conv2d), 0), ...] + + # Patterns for matching fp16 emulation are not specified in the quantization + # fusion mappings. For now, define them here. + fp16_em_base_op_idx = 1 + patterns_to_add = [ + # linear-relu fp16 emulation: + # fp16_to_fp32 -> linear -> relu -> fp32_to_fp16 + ((("to", torch.float16), F.relu, F.linear, "dequantize"), fp16_em_base_op_idx,), + # Conv-BN fusion (this happens outside of quantization patterns, + # which is why it is defined separately here). + ((nn.BatchNorm1d, nn.Conv1d), default_base_op_idx), + ((nn.BatchNorm2d, nn.Conv2d), default_base_op_idx), + ((nn.BatchNorm3d, nn.Conv3d), default_base_op_idx), + ((nn.ReLU, nn.BatchNorm1d, nn.Conv1d), default_base_op_idx), + ((nn.ReLU, nn.BatchNorm2d, nn.Conv2d), default_base_op_idx), + ((nn.ReLU, nn.BatchNorm3d, nn.Conv3d), default_base_op_idx), + ] + for p in patterns_to_add: + results.append(p) # type: ignore[arg-type] + results.append(((ObserverBase, *p[0]), p[1])) # type: ignore[arg-type] + results.append(((FakeQuantizeBase, *p[0]), p[1])) # type: ignore[arg-type] + + return results + + +def end_node_matches_reversed_fusion( + end_node: Node, + reversed_fusion: NSFusionType, + gm: GraphModule, + seen_nodes: Set[Node], +) -> bool: + """ + Returns true if a pattern ending with `end_node` matches + the fusion pattern. 
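+
+    For example (illustrative): if the graph computes ``conv -> bn -> relu``
+    and `end_node` is the relu node, the reversed fusion
+    ``(nn.ReLU, nn.BatchNorm2d, nn.Conv2d)`` matches, since the check walks
+    backwards from `end_node` through ``args[0]``.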
+ """ + cur_node = end_node + for fusion_idx in range(len(reversed_fusion)): + # each node can only belong to one matched pattern + if cur_node in seen_nodes: + return False + + cur_fusion_el = reversed_fusion[fusion_idx] + + if cur_node.op == 'call_function': + fusion_el_is_fun = (not isinstance(cur_fusion_el, str)) and \ + (not isinstance(cur_fusion_el, type)) + if fusion_el_is_fun: + if cur_node.target != cur_fusion_el: + return False + if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node): + cur_node = cur_node.args[0] + else: + return False + else: + return False + + elif cur_node.op == 'call_module': + fusion_el_is_mod = isinstance(cur_fusion_el, type) + if fusion_el_is_mod: + assert isinstance(cur_node.target, str) + target_mod = getattr_from_fqn(gm, cur_node.target) + if not isinstance(cur_fusion_el, type): + return False + if not isinstance(target_mod, cur_fusion_el): + return False + if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node): + cur_node = cur_node.args[0] + else: + return False + else: + return False + + elif cur_node.op == 'call_method': + fusion_el_is_meth_with_second_arg = \ + isinstance(cur_fusion_el, tuple) and len(cur_fusion_el) == 2 + fusion_el_is_meth_without_args = isinstance(cur_fusion_el, str) + if fusion_el_is_meth_without_args or fusion_el_is_meth_with_second_arg: + if fusion_el_is_meth_without_args: + if cur_node.target != cur_fusion_el: + return False + else: + assert isinstance(cur_fusion_el, tuple) + if cur_node.target != cur_fusion_el[0]: + return False + elif len(cur_node.args) < 2: + return False + elif cur_node.args[1] != cur_fusion_el[1]: + return False + + if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node): + cur_node = cur_node.args[0] + else: + return False + else: + return False + else: + return False + + return True diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..20a005d0c8bf9441554113e9a1bb49754b415ee1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/qconfig_multi_mapping.py @@ -0,0 +1,243 @@ +from __future__ import annotations + +import copy +from typing import Any, Callable, Dict, List, Union + +import torch +from torch.ao.quantization import QConfigMapping +from torch.ao.quantization.qconfig_mapping import _QCONFIG_STYLE_ORDER +from torch.ao.quantization.qconfig import QConfigAny + +__all__ = ["QConfigMultiMapping"] + +_QCONFIG_STYLE_TO_METHOD: Dict[str, str] = { + "global_qconfig": "set_global", + "object_type_qconfigs": "set_object_type", + "module_name_regex_qconfigs": "set_module_name_regex", + "module_name_qconfigs": "set_module_name", + "module_name_object_type_order_qconfigs": "set_module_name_object_type_order", +} + +def _remove_duplicates_and_none(qconfig_list: List[QConfigAny]) -> None: + to_remove = [] + for index, cur_qconfig in enumerate(qconfig_list): + if cur_qconfig is None: + to_remove.append(index) + break + for checked_qconfig in qconfig_list[:index]: + if torch.ao.quantization.qconfig_equals(cur_qconfig, checked_qconfig): + to_remove.append(index) + break + for index in to_remove[::-1]: + qconfig_list.pop(index) + +class QConfigMultiMapping: + """ + This class, used with the prepare_n_shadows_model API, stores a list of :class:`torch.ao.quantization.QConfigMapping`s + so that multiple QConfigs can be specified for each QConfig matching style. 
+ + The user can specify QConfigs using the following methods (in increasing match priority): + + ``set_global`` : sets the global (default) QConfigs + + ``set_object_type`` : sets the QConfigs for a given module type, function, or method name + + ``set_module_name_regex`` : sets the QConfigs for modules matching the given regex string + + ``set_module_name`` : sets the QConfigs for modules matching the given module name + + ``set_module_name_object_type_order`` : sets the QConfigs for modules matching a combination + of the given module name, object type, and the index at which the module appears + + Note: Usage of set methods is the same as in QConfigMapping except with a passed in list of QConfigs rather than a + single QConfig. + + Example usage:: + + qconfig_mapping = QConfigMultiMapping() + .set_global([qconfig1, qconfig2]) + .set_object_type(torch.nn.Linear, [qconfig2, qconfig3]) + .set_object_type(torch.nn.ReLU, [qconfig1]) + .set_module_name_regex("foo.*bar.*conv[0-9]+", [qconfig2]) + .set_module_name_regex("foo.*", [qconfig1, qconfig2, qconfig3]) + .set_module_name("module1", [None]) + .set_module_name("module2", [qconfig2]) + .set_module_name_object_type_order("foo.bar", torch.nn.functional.linear, 0, [qconfig3]) + + """ + + def __init__(self): + # initialize this with 1 QConfigMapping to avoid corner cases + self.qconfig_mappings_list: List[QConfigMapping] = [QConfigMapping()] + + def _handle_list_size_mismatch( + self, qconfig_list: List[QConfigAny], style: str + ) -> None: + # this method handles cases where the size of qconfig_list does not match + # the size of qconfig_mappings_list. + # Issue: Consider a user inserting global_qconfig A and B first, then inserting + # qconfig C as an object_type_qconfig for conv ops. If we internally store + # 1 QConfigMapping with A and C and another with just B, then the + # second QConfigMapping will match B to conv ops (which is not wanted), since B is global. + + # we avoid this by maintaining the invariant that if any QConfigMapping + # has a qconfig style+key with a qconfig in it, all QConfigMappings must + # have either a qconfig or None for that same style+key. 
In the above + # example, a None qconfig would prevent the unwanted match in the + # second QConfigMapping + + if len(qconfig_list) > len(self.qconfig_mappings_list): + # Case: we have more qconfigs (in qconfig_list) than QConfigMappings + + # Add new QConfigMappings (initialized so we maintain the `invariant`) + + new_qconfig_mapping = QConfigMapping() + # searches other QConfigMappings for qconfig style+keys + # that need to be inserted as `None` into the new QConfigMapping + for qconfig_mapping in self.qconfig_mappings_list: + + # global_qconfig has None by default + for check_style in _QCONFIG_STYLE_ORDER[1:]: + qconfigs_dict = getattr(qconfig_mapping, check_style) + target_qconfigs_dict = getattr(new_qconfig_mapping, check_style) + for key in qconfigs_dict: + target_qconfigs_dict[key] = None + break + + # insert copies of this new QConfigMapping until all entires + # in qconfig_list can fit among the QConfigMappings + while len(qconfig_list) > len(self.qconfig_mappings_list): + self.qconfig_mappings_list.append(copy.deepcopy(new_qconfig_mapping)) + else: + # Case: we have fewer qconfigs in qconfig_list than QConfigMappings + + # pad qconfig_list with `None` until length is same + while len(qconfig_list) < len(self.qconfig_mappings_list): + qconfig_list.append(None) + + # this function applies the insertion method across each QConfigMapping + def _insert_qconfig_list( + self, + style: str, + args: List[Union[str, int, Callable]], + qconfig_list: List[QConfigAny], + ) -> None: + + # we remove duplicates and None to make the ordering of qconfigs + # deterministic upon insertion. + _remove_duplicates_and_none(qconfig_list) + + self._handle_list_size_mismatch(qconfig_list, style) + method_name = _QCONFIG_STYLE_TO_METHOD[style] + for qconfig_mapping, qconfig in zip(self.qconfig_mappings_list, qconfig_list): + # uses QConfigMapping set method to insert qconfig + set_method = getattr(qconfig_mapping, method_name) + set_method(*args, qconfig) + + def set_global(self, global_qconfig_list: List[QConfigAny]) -> QConfigMultiMapping: + """ + Set global QConfigs + see :func:`~torch.ao.quantization.QConfigMapping.set_global()` for more info + """ + self._insert_qconfig_list("global_qconfig", [], global_qconfig_list) + return self + + def set_object_type( + self, object_type: Union[Callable, str], qconfig_list: List[QConfigAny] + ) -> QConfigMultiMapping: + """ + Set object type QConfigs + see :func:`~torch.ao.quantization.QConfigMapping.set_object_type()` for more info + """ + self._insert_qconfig_list("object_type_qconfigs", [object_type], qconfig_list) + return self + + def set_module_name_regex( + self, module_name_regex: str, qconfig_list: List[QConfigAny] + ) -> QConfigMultiMapping: + """ + Set module_name_regex QConfigs + see :func:`~torch.ao.quantization.QConfigMapping.set_module_name_regex()` for more info + """ + self._insert_qconfig_list( + "module_name_regex_qconfigs", [module_name_regex], qconfig_list + ) + return self + + def set_module_name( + self, module_name: str, qconfig_list: List[QConfigAny] + ) -> QConfigMultiMapping: + """ + Set module_name QConfigs + see :func:`~torch.ao.quantization.QConfigMapping.set_module_name()` for more info + """ + self._insert_qconfig_list("module_name_qconfigs", [module_name], qconfig_list) + return self + + def set_module_name_object_type_order( + self, + module_name: str, + object_type: Callable, + index: int, + qconfig_list: List[QConfigAny], + ) -> QConfigMultiMapping: + """ + Set module_name QConfigs + see 
:func:`~torch.ao.quantization.QConfigMapping.set_module_name_object_type_order()` for more info + """ + self._insert_qconfig_list( + "module_name_object_type_order_qconfigs", + [module_name, object_type, index], + qconfig_list, + ) + return self + + def __repr__(self): + return ( + self.__class__.__name__ + + " [" + + "".join(f"\n{qconfig_mapping.__repr__()}," for qconfig_mapping in self.qconfig_mappings_list) + + "\n]" + ) + + @classmethod + def from_list_qconfig_mapping( + cls, qconfig_mapping_list: List[QConfigMapping] + ) -> QConfigMultiMapping: + """ + Creates a QConfigMultiMapping from a list of QConfigMappings + """ + new_qconfig_multi_mapping = cls() + + new_qconfig_multi_mapping.qconfig_mappings_list = copy.deepcopy( + qconfig_mapping_list + ) + + # we need to avoid the issue described in _handle_list_size_mismatch, + # so we reinsert all the qconfigs using the QConfigMultiMapping + # set methods + + # go through all qconfig styles + # note: global can be ignored since it is None by default + for style in _QCONFIG_STYLE_ORDER[1:]: + + # gather all key+qconfigs for current style + # into qconfig_dict_list + qconfig_dict_list: Dict[Any, List[QConfigAny]] = {} + for qconfig_mapping in qconfig_mapping_list: + qconfig_dict = getattr(qconfig_mapping, style) + for key, qconfig in qconfig_dict.items(): + if key not in qconfig_dict_list: + qconfig_dict_list[key] = [] + qconfig_dict_list[key].append(qconfig) + + # reinsert all gathered key+qconfigs + set_method_name = _QCONFIG_STYLE_TO_METHOD[style] + set_method = getattr(new_qconfig_multi_mapping, set_method_name) + for key, qconfig_list in qconfig_dict_list.items(): + if isinstance(key, tuple): + set_method(*key, qconfig_list) + else: + set_method(key, qconfig_list) + + return new_qconfig_multi_mapping diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf35a7e531e1abc1aa60be4a3ec4a2430a01a21a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/utils.py @@ -0,0 +1,533 @@ +import enum +import operator + +import torch +import torch.nn as nn +import torch.ao.nn.intrinsic.quantized as nniq +import torch.ao.nn.quantized as nnq + +toq = torch.ops.quantized +from typing import Tuple, Callable, Dict, Set, List, Optional, Union + +from torch.fx import GraphModule +from torch.fx.graph import Node +from torch.ao.quantization import ( + ObserverBase, + FakeQuantizeBase, +) +from torch.ao.quantization.utils import getattr_from_fqn +from torch.ao.quantization.observer import _is_activation_post_process + +from .ns_types import NSNodeTargetType, NSResultsType + +# TODO(future PR): consider deleting this enum and using the torch types +# directly. This might be tricky because it is not a one to one mapping. +class NodeInputOrOutputType(enum.Enum): + FP32 = enum.auto() # torch.float + INT8 = enum.auto() # torch.qint8 or torch.quint8 + FP16 = enum.auto() # torch.float16 + UNKNOWN = enum.auto() # we cannot determine input/output dtype + # TODO(future PR): while these functions can support multiple dtypes, + # for the purposes of numerical debugging we want to get the actual + # dtype used in the model. We will likely need some kind of dtype + # propagation to estimate this. 
+ FP32_OR_INT8 = enum.auto() # either torch.float or torch.quint8 or torch.qint8 + # TODO(future PRs): dynamic quant, fake quant, etc + + +def get_node_first_input_and_output_type( + node: Node, + gm: GraphModule, + logger_cls: Callable, + node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]], +) -> Tuple[NodeInputOrOutputType, NodeInputOrOutputType]: + + # TODO(future PR): clean this up + FUNS_IO_TYPE_FP32 = node_type_to_io_type_map["funs_io_type_fp32"] + FUNS_IO_TYPE_FP16 = node_type_to_io_type_map["funs_io_type_fp16"] + FUNS_IO_TYPE_INT8 = node_type_to_io_type_map["funs_io_type_int8"] + FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["funs_io_type_fp32_or_int8"] + MODS_IO_TYPE_FP32 = node_type_to_io_type_map["mods_io_type_fp32"] + MODS_IO_TYPE_INT8 = node_type_to_io_type_map["mods_io_type_int8"] + MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"] + METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["meths_io_type_fp32_or_int8"] + + if node.op == "call_function": + if node.target in FUNS_IO_TYPE_FP32: + return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32) + if node.target in FUNS_IO_TYPE_FP16: + return (NodeInputOrOutputType.FP16, NodeInputOrOutputType.FP16) + elif node.target in FUNS_IO_TYPE_INT8: + return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8) + elif node.target in FUNS_IO_TYPE_FP32_OR_INT8: + first_arg = get_normalized_nth_input(node, gm, 0) + assert isinstance(first_arg, Node) + ( + _prev_node_input_type, + prev_node_output_type, + ) = get_node_first_input_and_output_type( + first_arg, gm, logger_cls, node_type_to_io_type_map + ) + return (prev_node_output_type, prev_node_output_type) + else: + return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN) + + elif node.op == "call_module": + assert node.op == "call_module" + assert isinstance(node.target, str) + mod = getattr_from_fqn(gm, node.target) + is_known_fp32_or_int8_input_module = any( + isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type] + ) + if ( + isinstance(mod, (logger_cls, ObserverBase, FakeQuantizeBase)) # type: ignore[arg-type] + or is_known_fp32_or_int8_input_module + ): + # A logger or observer's input and output type is the output + # type of the preceding node. + first_arg = get_normalized_nth_input(node, gm, 0) + assert isinstance(first_arg, Node) + ( + _prev_node_input_type, + prev_node_output_type, + ) = get_node_first_input_and_output_type( + first_arg, gm, logger_cls, node_type_to_io_type_map + ) + return (prev_node_output_type, prev_node_output_type) + is_known_fp32_input_module = any( + isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32 # type: ignore[arg-type] + ) + is_known_int8_input_module = any( + isinstance(mod, target_type) for target_type in MODS_IO_TYPE_INT8 # type: ignore[arg-type] + ) + if is_known_fp32_input_module: + return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32) + elif is_known_int8_input_module: + return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8) + else: + return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN) + + elif node.op == "call_method": + if node.target == "dequantize": + # Dequantize is a special node because it allows multiple input types. + # So, we look up the output type of the previous node and return that + # as the input type of this node instance. 
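+            # For example, for `x.dequantize()` where `x` was produced by a
+            # quantized conv module, the inferred types are (INT8, FP32): the
+            # input type comes from the producer of `x`, and the output of
+            # dequantize is always fp32.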
+ prev_node = get_normalized_nth_input(node, gm, 0) + assert isinstance(prev_node, Node) + ( + _prev_node_input_type, + prev_node_output_type, + ) = get_node_first_input_and_output_type( + prev_node, gm, logger_cls, node_type_to_io_type_map + ) + return (prev_node_output_type, NodeInputOrOutputType.FP32) + + elif node.target == "to": + # to is a special node because it allows multiple input types. + # So, we look up the output type of the previous node and return that + # as the input type of this node instance. We also look up the target + # of to and return the correct output type. + prev_node = get_normalized_nth_input(node, gm, 0) + assert isinstance(prev_node, Node) + ( + _prev_node_input_type, + prev_node_output_type, + ) = get_node_first_input_and_output_type( + prev_node, gm, logger_cls, node_type_to_io_type_map + ) + + cur_node_dtype_target = get_normalized_nth_input(node, gm, 1) + assert ( + cur_node_dtype_target is torch.float16 + ), f"{cur_node_dtype_target} handling needs to be added" + + return (prev_node_output_type, NodeInputOrOutputType.FP16) + + elif node.target in METHS_IO_TYPE_FP32_OR_INT8: + first_arg = get_normalized_nth_input(node, gm, 0) + assert isinstance(first_arg, Node) + ( + _prev_node_input_type, + prev_node_output_type, + ) = get_node_first_input_and_output_type( + first_arg, gm, logger_cls, node_type_to_io_type_map + ) + return (prev_node_output_type, prev_node_output_type) + + return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN) + else: + return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN) + + +def get_node_input_qparams( + node: Node, + gm: GraphModule, + node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]], +) -> Optional[Tuple[Union[torch.Tensor, float], Union[torch.Tensor, int]]]: + """ + Returns the qparams (scale, zero_point) of the first input to `node`, + if they can be inferred from the graph. 
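+    Example usage (a minimal sketch; assumes `gm` is a converted quantized
+    GraphModule, `node` is one of its nodes, and `node_type_to_io_type_map`
+    was built elsewhere, e.g. by the NS mapping utilities)::
+
+        qparams = get_node_input_qparams(node, gm, node_type_to_io_type_map)
+        if qparams is not None:
+            scale, zero_point = qparams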
+ """ + prev_node = get_normalized_nth_input(node, gm, 0) + + if not isinstance(prev_node, Node): + return None + + MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"] + + def _get_scale_zp_from_function_args(node, gm, scale_arg_idx, zp_arg_idx): + scale_node = get_normalized_nth_input(node, gm, scale_arg_idx) + zp_node = get_normalized_nth_input(node, gm, zp_arg_idx) + assert isinstance(scale_node, Node) and isinstance(scale_node.target, str) + assert isinstance(zp_node, Node) and isinstance(zp_node.target, str) + scale_obj = getattr_from_fqn(gm, scale_node.target) + zp_obj = getattr_from_fqn(gm, zp_node.target) + return (scale_obj, zp_obj) + + if prev_node.op == "call_function": + + # quantize - read the args directly + if prev_node.target == torch.quantize_per_tensor: + return _get_scale_zp_from_function_args(prev_node, gm, 1, 2) + elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu): + return _get_scale_zp_from_function_args(prev_node, gm, 2, 3) + + return None + # TODO(future PR): handle more functionals + # TODO(future PR): handle functional ops which inherit qparams from input + + elif prev_node.op == "call_module": + + # get type of the module + assert isinstance(prev_node.target, str) + module_obj = getattr_from_fqn(gm, prev_node.target) + if isinstance( + module_obj, + ( + nnq.Linear, + nnq.Conv1d, + nnq.Conv2d, + nniq.ConvReLU2d, + nnq.Conv3d, + nnq.BatchNorm2d, + nnq.BatchNorm3d, + nnq.ConvTranspose1d, + nnq.ConvTranspose2d, + nnq.ELU, + nnq.GroupNorm, + nnq.InstanceNorm1d, + nnq.InstanceNorm2d, + nnq.InstanceNorm3d, + nnq.LayerNorm, + nnq.Hardswish, + nnq.LeakyReLU, + nnq.ReLU6, + nniq.BNReLU2d, + nniq.BNReLU3d, + nniq.ConvReLU1d, + nniq.ConvReLU2d, + nniq.ConvReLU3d, + nniq.LinearReLU, + ), + ): + return (module_obj.scale, module_obj.zero_point) # type: ignore[return-value] + + is_known_fp32_or_int8_input_module = any( + isinstance(module_obj, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type] + ) + if is_known_fp32_or_int8_input_module: + return get_node_input_qparams(prev_node, gm, node_type_to_io_type_map) + + return None + + +def return_first_non_observer_node( + node: Node, + gm: GraphModule, +) -> Node: + """ + If node is not an observer, returns it. If node is an observer, + navigates up the graph and returns the first parent which is not an + observer. For example, + + graph: (node_non_obs), node = node_non_obs : returns node_non_obs + graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs + graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs + """ + if node.op == "call_module": + node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type] + if _is_activation_post_process(node_obj): + assert len(node.args) == 1 + assert isinstance(node.args[0], Node) + node = node.args[0] + # code duplication intended, not worth refactoring + assert isinstance(node.target, str) + node_obj = getattr_from_fqn(gm, node.target) + if _is_activation_post_process(node_obj): + assert len(node.args) == 1 + assert isinstance(node.args[0], Node) + node = node.args[0] + return node + + +def get_number_of_non_param_args( + node: Node, + gm: GraphModule, +) -> int: + """ + Assumes that all non-param args occur first. Returns the number of + non-param args expected for a node. For example, for + + F.linear(x, weight, bias) + + Returns 1, because x is a non-param arg and weight and bias are params. 
+ For + + lstm_mod(x, hid) + + Returns 2, because both x and hid are non-param args. + """ + if node.op == "call_module": + node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type] + if isinstance(node_obj, nn.LSTM): + return 2 + + # default is 1 + return 1 + + +def get_arg_indices_of_inputs_to_log(node: Node) -> List[int]: + """ + Returns the indices of args of the node which we should attach + loggers to, if input logging is enabled. + + For example, + * for (x + y), returns [0, 1] + * for (1 + y), returns [1] + * for (x + 1), returns [0] + * for (linear(x, w, b)) returns [0] + * by default, returns [0] + """ + if len(node.args) == 0: + return [] + if node.op == "call_function" and ( + # TODO(future PR): use relationship map instead of hardcoding + node.target in (torch.add, torch.ops.quantized.add, operator.add) + or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul) + ): + result = [] + for i in range(2): + if type(node.args[i]) == Node: + result.append(i) + return result + return [0] + + +def get_target_type_str(node: Node, gm: GraphModule) -> str: + """ + Returns a string representation of the type of the function or module + pointed to by this node, or '' for other node types. + """ + target_type = "" + if node.op in ("call_function", "call_method"): + target_type = torch.typename(node.target) + elif node.op == "call_module": + assert isinstance(node.target, str) + target_mod = getattr_from_fqn(gm, node.target) + target_type = torch.typename(target_mod) + return target_type + + +def rekey_logger_info_on_node_name_of_model( + results: NSResultsType, + model_name: str, +) -> NSResultsType: + """ + Rekeys the layer name of a results dictionary to use node names + from `model_name`. + + For example, transforms + + {'base_op_1_0': {'node_output': {'model_a': + [{'ref_node_name': 'linear1', ...}]}}} + + into + + {'linear1': {'node_output': {'model_a': + [{'ref_node_name': 'linear1', ...}]}}} + + Note: we cannot use these node names directly because they are not + guaranteed to be consistent across models. This is why we extract + the results first and rekey afterwards. + """ + new_results = {} + for old_layer_name, result_type_to_results in results.items(): + new_layer_name = None + for model_name_to_results in result_type_to_results.values(): + for cur_model_name, list_of_results in model_name_to_results.items(): + if cur_model_name == model_name: + assert len(list_of_results) + new_layer_name = list_of_results[0]["ref_node_name"] + else: + continue + if new_layer_name is not None: + new_results[new_layer_name] = result_type_to_results + else: + new_results[old_layer_name] = result_type_to_results + return new_results + + +def maybe_add_missing_fqns(results: NSResultsType) -> None: + """ + If `fqn` entries are filled in for one of the models in `results`, copies + them over to any models which do not have them filled out. + + A common use case benefitting from this is comparing a model prepared by + quantization to a quantized model. In this case, the model prepared by + quantization would have `fqn` entries, and the quantized model would not. + """ + + # Check in the first result to find any model with fqn entries defined. 
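+    # For example (hypothetical fqn), if the prepared model's first result has
+    # fqn == 'features.0.conv' and the quantized model's result has fqn == None,
+    # the prepared model's fqn is copied over below.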
+ model_name_with_fqns = None + for result_type_to_results in results.values(): + for model_name_to_results in result_type_to_results.values(): + for model_name, model_results in model_name_to_results.items(): + if len(model_results) > 0: + if model_results[0]["fqn"] is not None: + model_name_with_fqns = model_name + break + break + break + + if model_name_with_fqns: + for result_type_to_results in results.values(): + for model_name_to_results in result_type_to_results.values(): + ref_model_results = model_name_to_results[model_name_with_fqns] + for model_name, model_results in model_name_to_results.items(): + if model_name == model_name_with_fqns: + continue + for i in range(len(model_results)): + fqn = ref_model_results[i]["fqn"] + model_results[i]["fqn"] = fqn + + +def maybe_dequantize_first_two_tensor_args_and_handle_tuples(f): + def inner(*args, **kwargs): + a0, a1, *a_other = args + + if (isinstance(a0, tuple) and isinstance(a1, tuple)) or ( + isinstance(a0, list) and isinstance(a1, list) + ): + results = [] + for el0, el1 in zip(a0, a1): + new_args = (el0, el1, *a_other) + results.append(inner(*new_args, **kwargs)) + return results + + elif isinstance(a0, torch.Tensor) and isinstance(a1, torch.Tensor): + if a0.is_quantized: + a0 = a0.dequantize() + if a1.is_quantized: + a1 = a1.dequantize() + + # for the purposes of this util, only handle floats + if a0.dtype != torch.float or a1.dtype != torch.float: + return None + + new_args = (a0, a1, *a_other) + return f(*new_args, **kwargs) + + return inner + + +@maybe_dequantize_first_two_tensor_args_and_handle_tuples +def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + Computes the SQNR between `x` and `y`. + + Args: + x: Tensor or tuple of tensors + y: Tensor or tuple of tensors + + Return: + float or tuple of floats + """ + Ps = torch.norm(x) + Pn = torch.norm(x - y) + return 20 * torch.log10(Ps / Pn) + + +@maybe_dequantize_first_two_tensor_args_and_handle_tuples +def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + Computes the normalized L2 error between `x` and `y`. + + Args: + x: Tensor or tuple of tensors + y: Tensor or tuple of tensors + + Return: + float or tuple of floats + """ + return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum()) + + +@maybe_dequantize_first_two_tensor_args_and_handle_tuples +def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + Computes the cosine similarity between `x` and `y`. + + Args: + x: Tensor or tuple of tensors + y: Tensor or tuple of tensors + + Return: + float or tuple of floats + """ + # For convolutions, the shape of the quantized weight has one additional + # dimension compared to the shape of the fp32 weight. Match the shapes + # to enable cosine similarity comparison. + x = x.reshape(1, -1) + y = y.reshape(1, -1) + return torch.nn.functional.cosine_similarity(x, y) + +def op_type_supports_shadowing(node: Node) -> bool: + if node.op == 'call_function': + if node.target in (torch.add, torch.mul, operator.add, operator.mul, torch.cat, torch.stack): + # shadowing for ops with multiple tensor inputs is not implemented yet + return False + return True + +def get_normalized_nth_input(node: Node, gm: GraphModule, idx: int) -> Node: + """ + Given a node, gets the n'th input to that node, normalizing + args and kwargs to the best of its ability. 
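+    For example (illustrative), for a node representing `torch.add(x, other=y)`,
+    idx 0 resolves to the node for `x` and idx 1 resolves to the node for `y`,
+    whether the second argument was passed positionally or as a keyword.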
+ """ + try: + norm_args_and_kwargs = node.normalized_arguments( + gm, normalize_to_only_use_kwargs=True) + if norm_args_and_kwargs is not None: + norm_args, norm_kwargs = norm_args_and_kwargs + assert len(norm_args) + len(norm_kwargs) > idx + if idx < len(norm_args): + return norm_args[idx] + else: + # note: in Python 3.7+ dicts are ordered + return list(norm_kwargs.values())[idx] + else: + assert len(node.args) + len(node.kwargs) > idx + if idx < len(node.args): + return node.args[idx] # type: ignore[return-value] + else: + kwargs_idx = idx + len(node.args) + return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value] + except RuntimeError: + # this RuntimeError happens when node argument normalization + # requires typehints to proceed, such as for torch.add where + # either the first, second or both arguments could be tensors + assert len(node.args) + len(node.kwargs) > idx + if idx < len(node.args): + return node.args[idx] # type: ignore[return-value] + else: + kwargs_idx = idx + len(node.args) + return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d375694b88b300cab05154e9cdf0f0088f824575 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/weight_utils.py @@ -0,0 +1,275 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.quantized as nnq +import torch.ao.nn.intrinsic.qat as nniqat +import torch.ao.nn.qat as nnqat +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.intrinsic.quantized as nniq +toq = torch.ops.quantized +from torch.fx import GraphModule +from torch.fx.graph import Node + +from .utils import ( + get_target_type_str, + getattr_from_fqn, + return_first_non_observer_node, +) + +from .ns_types import ( + NSSingleResultValuesType, + NSSingleResultType, +) + +from typing import List, Optional, Dict, Callable + +def mod_weight_detach(mod: nn.Module) -> torch.Tensor: + return mod.weight.detach() # type: ignore[operator] + +def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor: + return mod[0].weight.detach() # type: ignore[index] + +def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor: + return mod._weight_bias()[0] # type: ignore[operator] + +def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]: + res = [] + for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type] + if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name: + param_value = mod._flat_weights[idx].detach() # type: ignore[index] + res.append(param_value) + return res + +def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]: + res = [] + for weight_value in mod._all_weight_values: # type: ignore[union-attr] + res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0]) + res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0]) + return res + +def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor: + if ( + isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d)) + ): + return mod.weight.detach() + elif ( + isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d)) + ): + return mod[0].weight.detach() + else: + return mod._weight_bias()[0] # type: ignore[operator] + +def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor: + if isinstance(mod, nn.Linear): + 
return mod.weight.detach() + elif isinstance(mod, nni.LinearReLU): + return mod[0].weight.detach() + else: + return mod._weight_bias()[0] # type: ignore[operator] + +def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]: + # TODO(future PR): make more generic, handle everything + if isinstance(mod, nn.LSTM): + res = [] + for idx, param_name in enumerate(mod._flat_weights_names): + if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name: + param_value = mod._flat_weights[idx].detach() + res.append(param_value) + return res + else: + assert isinstance(mod, nnqd.LSTM), f"type {type(mod)} not handled yet" + res = [] + for weight_value in mod._all_weight_values: + res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0]) + res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0]) + return res + +def get_conv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor: + # traverse backwards from the weight arg, accounting for any observers + weight_arg_node = node.args[1] + assert isinstance(weight_arg_node, Node) + weight_node = return_first_non_observer_node(weight_arg_node, gm) + assert isinstance(weight_node, Node) + assert weight_node.op == 'get_attr' + weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type] + return weight.detach() + +def get_qconv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor: + # qconv state is arg 1 + qconv_state_node = node.args[1] + assert isinstance(qconv_state_node, Node) + assert qconv_state_node.op == 'get_attr' + qconv_state_obj = getattr_from_fqn(gm, qconv_state_node.target) # type: ignore[arg-type] + return qconv_state_obj.weight() + +def get_linear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor: + # traverse backwards from the weight arg, accounting for any observers + # supported patterns: + # weight -> obs -> linear + # weight -> to(torch.float16) -> dequantize -> linear + linear_second_arg = node.args[1] + assert isinstance(linear_second_arg, Node) + + if linear_second_arg.op == 'call_module': + # weight -> obs -> linear + weight_arg_node = node.args[1] + assert isinstance(weight_arg_node, Node) + weight_node = weight_arg_node.args[0] + assert isinstance(weight_node, Node) + assert weight_node.op == 'get_attr' + weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type] + return weight.detach() + elif linear_second_arg.op == 'call_method': + # weight -> to(torch.float16) -> dequantize -> linear + assert linear_second_arg.op == 'call_method' + dequant_node = node.args[1] + assert isinstance(dequant_node, Node) + to_fp16_node = dequant_node.args[0] + assert isinstance(to_fp16_node, Node) + # extract the dtype, so we can cast to it before returning + target_dtype = to_fp16_node.args[1] + weight_node = to_fp16_node.args[0] + assert isinstance(weight_node, Node) + assert weight_node.op == 'get_attr' + weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type] + # return the weight with fp16 cast + return weight.detach().to(target_dtype) + else: + assert linear_second_arg.op == 'get_attr' + weight = getattr_from_fqn(gm, linear_second_arg.target) # type: ignore[arg-type] + return weight.detach() + +def get_qlinear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor: + # packed weight is arg 1 + packed_weight_node = node.args[1] + assert isinstance(packed_weight_node, Node) + assert packed_weight_node.op == 'get_attr' + packed_weight = getattr_from_fqn(gm, packed_weight_node.target) # type: ignore[arg-type] + # TODO(future PR): why does 
packed_weight.unpack() not work? + (weight, _bias), _name = packed_weight.__getstate__() + return weight + +def get_op_to_type_to_weight_extraction_fn() -> Dict[str, Dict[Callable, Callable]]: + + op_to_type_to_weight_extraction_fn: Dict[str, Dict[Callable, Callable]] = { + 'call_module': { + # Conv1d + nn.Conv1d: mod_weight_detach, + nni.ConvReLU1d: mod_0_weight_detach, + nnq.Conv1d: mod_weight_bias_0, + nnqat.Conv1d: mod_weight_detach, + nniqat.ConvBn1d: mod_weight_detach, + nniqat.ConvBnReLU1d: mod_weight_detach, + nniqat.ConvReLU1d: mod_weight_detach, + nniq.ConvReLU1d: mod_weight_bias_0, + # Conv2d + nn.Conv2d: mod_weight_detach, + nni.ConvReLU2d: mod_0_weight_detach, + nnq.Conv2d: mod_weight_bias_0, + nnqat.Conv2d: mod_weight_detach, + nniqat.ConvBn2d: mod_weight_detach, + nniqat.ConvBnReLU2d: mod_weight_detach, + nniqat.ConvReLU2d: mod_weight_detach, + nniq.ConvReLU2d: mod_weight_bias_0, + # Conv3d + nn.Conv3d: mod_weight_detach, + nni.ConvReLU3d: mod_0_weight_detach, + nnq.Conv3d: mod_weight_bias_0, + nnqat.Conv3d: mod_weight_detach, + nniqat.ConvBn3d: mod_weight_detach, + nniqat.ConvBnReLU3d: mod_weight_detach, + nniqat.ConvReLU3d: mod_weight_detach, + nniq.ConvReLU3d: mod_weight_bias_0, + # Linear + nn.Linear: mod_weight_detach, + nnq.Linear: mod_weight_bias_0, + nni.LinearReLU: mod_0_weight_detach, + nniq.LinearReLU: mod_weight_bias_0, + nnqat.Linear: mod_weight_detach, + nnqd.Linear: mod_weight_bias_0, + nniqat.LinearReLU: mod_weight_detach, + nniqat.LinearBn1d: mod_weight_detach, + nn.modules.linear.NonDynamicallyQuantizableLinear: mod_weight_detach, + # LSTM + nn.LSTM: get_lstm_weight, + nnqd.LSTM: get_qlstm_weight, + }, + 'call_function': { + # Conv + F.conv1d: get_conv_fun_weight, + F.conv2d: get_conv_fun_weight, + F.conv3d: get_conv_fun_weight, + toq.conv1d: get_qconv_fun_weight, + toq.conv2d: get_qconv_fun_weight, + toq.conv3d: get_qconv_fun_weight, + toq.conv1d_relu: get_qconv_fun_weight, + toq.conv2d_relu: get_qconv_fun_weight, + toq.conv3d_relu: get_qconv_fun_weight, + # Linear + F.linear: get_linear_fun_weight, + toq.linear: get_qlinear_fun_weight, + toq.linear_relu: get_qlinear_fun_weight, + }, + } + + return op_to_type_to_weight_extraction_fn + +def extract_weight_from_node( + node: Node, + gm: GraphModule, + op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None, +) -> Optional[NSSingleResultType]: + res_type = NSSingleResultValuesType.WEIGHT.value + + # Not all graphmodules have _node_name_to_scope, so only fill it + # out if it exists. 
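+    # For example, for a `call_module` node whose module is nn.Conv2d, the
+    # 'call_module' mapping above selects mod_weight_detach, so the extracted
+    # value is the module's weight tensor (detached).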
+ fqn = None + if hasattr(gm, '_node_name_to_scope'): + fqn = gm._node_name_to_scope[node.name][0] # type: ignore[index] + + if op_to_type_to_weight_extraction_fn is None: + op_to_type_to_weight_extraction_fn = get_op_to_type_to_weight_extraction_fn() + + ref_node_type = get_target_type_str(node, gm) + # for extracting weights, these are always the same + prev_node_type = ref_node_type + + if node.op == 'call_function': + function_mapping = op_to_type_to_weight_extraction_fn['call_function'] + for target_fn_type, weight_extraction_fn in function_mapping.items(): + if node.target == target_fn_type: + weight = weight_extraction_fn(node, gm) + return { + 'type': res_type, + 'values': [weight], + 'prev_node_name': node.name, + 'prev_node_target_type': prev_node_type, + 'ref_node_name': node.name, + 'ref_node_target_type': ref_node_type, + 'index_within_arg': 0, + 'index_of_arg': 0, + 'fqn': fqn, + } + + elif node.op == 'call_module': + # for call_module, we need to look up the modules to do the type check + assert isinstance(node.target, str) + mod = getattr_from_fqn(gm, node.target) + module_mapping = op_to_type_to_weight_extraction_fn['call_module'] + for target_mod_type, weight_extraction_fn in module_mapping.items(): + if type(mod) == target_mod_type: + weight = weight_extraction_fn(mod) + return { + 'type': res_type, + 'values': [weight], + 'prev_node_name': node.name, + 'prev_node_target_type': prev_node_type, + 'ref_node_name': node.name, + 'ref_node_target_type': ref_node_type, + 'index_within_arg': 0, + 'index_of_arg': 0, + 'fqn': fqn, + } + + return None diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0eab9c9521644825141f0684c7ad5bac6176d1e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py @@ -0,0 +1,23 @@ +from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, DTypeWithConstraints, ObservationType +from .fbgemm import get_fbgemm_backend_config +from .native import get_native_backend_config, get_native_backend_config_dict +from .qnnpack import get_qnnpack_backend_config +from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict +from .executorch import get_executorch_backend_config +from .onednn import get_onednn_backend_config + +__all__ = [ + "get_fbgemm_backend_config", + "get_native_backend_config", + "get_native_backend_config_dict", + "get_qnnpack_backend_config", + "get_tensorrt_backend_config", + "get_tensorrt_backend_config_dict", + "get_executorch_backend_config", + "BackendConfig", + "BackendPatternConfig", + "DTypeConfig", + "DTypeWithConstraints", + "ObservationType", + "get_onednn_backend_config", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a40e553916e4a60e76ffebae3d91701fa238a04 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..563de62689830dceab1a6661f3b6bab1f832758e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_common_operator_config_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..798091c72edd6324bef074db980cef67760039d2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/_qnnpack_pt2e.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aaa78678ec6734575d3e8c865a21a0f15e5c53d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/backend_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92d7ba5a6633c4af2b236ebad26657fe1a53de75 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/executorch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd533c8db3be9c829143a7d19e855bfcaed386fa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/fbgemm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..541c6ead3f780367129bf1780815380893614450 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/native.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e1b990bdf568886b3d2c29da0230cfe3853a67f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/observation_type.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aef4dc707ed77975d01c802f4a5a36c0c24f2a2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/onednn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd233c78b96161015ad07f0bc48b64af7fb3ff35 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/qnnpack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a076d5cde235916373d50e6156ef053b2f0ac77e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/tensorrt.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e9c98de1b7d390b7c1147f6bfbf87b3aaee9365 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..150e9c5a43a6c4b49a154d633cccbbcf195983b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4e946a25ffbbf003d39a020ea75fea185551ce46 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_common_operator_config_utils.py @@ -0,0 +1,637 @@ +import copy +import operator +import torch +import torch.nn.functional as F +import torch.nn as nn +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.intrinsic.qat as nniqat +import torch.ao.nn.qat as nnqat +import torch.ao.nn.quantized.reference as nnqr +from collections import namedtuple +from typing import Callable, Dict, List, Union +from .backend_config import ( + BackendPatternConfig, + DTypeConfig, + DTypeWithConstraints, + ObservationType, +) +from ..fuser_method_mappings import ( + _sequential_wrapper2, + fuse_conv_bn, + fuse_conv_bn_relu, + 
fuse_linear_bn, + fuse_convtranspose_bn, +) + +__all__: List[str] = [] + +# TODO: rename to be more explicit, e.g. qat_conv_relu +_ConvMetadata = namedtuple( + "_ConvMetadata", + ["root", "transpose", "bn", "reference", "transpose_reference", + "fused_conv_relu", "fused_conv_bn", "fused_conv_bn_relu", + "qat", "relu_qat", "bn_qat", "bn_relu_qat", + "func", "func_transpose"]) +_Conv1dMetadata = _ConvMetadata( + nn.Conv1d, nn.ConvTranspose1d, nn.BatchNorm1d, nnqr.Conv1d, nnqr.ConvTranspose1d, + nni.ConvReLU1d, nni.ConvBn1d, nni.ConvBnReLU1d, + nnqat.Conv1d, nniqat.ConvReLU1d, nniqat.ConvBn1d, nniqat.ConvBnReLU1d, + F.conv1d, F.conv_transpose1d) +_Conv2dMetadata = _ConvMetadata( + nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d, nnqr.Conv2d, nnqr.ConvTranspose2d, + nni.ConvReLU2d, nni.ConvBn2d, nni.ConvBnReLU2d, + nnqat.Conv2d, nniqat.ConvReLU2d, nniqat.ConvBn2d, nniqat.ConvBnReLU2d, + F.conv2d, F.conv_transpose2d) +_Conv3dMetadata = _ConvMetadata( + nn.Conv3d, nn.ConvTranspose3d, nn.BatchNorm3d, nnqr.Conv3d, nnqr.ConvTranspose3d, + nni.ConvReLU3d, nni.ConvBn3d, nni.ConvBnReLU3d, + nnqat.Conv3d, nniqat.ConvReLU3d, nniqat.ConvBn3d, nniqat.ConvBnReLU3d, + F.conv3d, F.conv_transpose3d) + +# Add constraints for fixed qparams ops like sigmoid and tanh to ensure values +# fall within the proper ranges, e.g. [0, 1] for sigmoid, [-1, 1] for tanh +_FIXED_QPARAM_OP_0TO1_CONSTRAINTS = DTypeWithConstraints( + dtype=torch.quint8, + quant_min_lower_bound=0, + quant_max_upper_bound=255, + scale_exact_match=1.0 / 256.0, + zero_point_exact_match=0, +) +_FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS = DTypeWithConstraints( + dtype=torch.quint8, + quant_min_lower_bound=0, + quant_max_upper_bound=255, + scale_exact_match=2.0 / 256.0, + zero_point_exact_match=128, +) +_FIXED_QPARAMS_OP_TO_CONSTRAINTS: Dict[Union[Callable, str], DTypeWithConstraints] = { + torch.nn.Hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + torch.nn.functional.hardsigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + "hardsigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + "hardsigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + torch.nn.Sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + torch.sigmoid: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + "sigmoid": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + "sigmoid_": _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + torch.nn.Softmax: _FIXED_QPARAM_OP_0TO1_CONSTRAINTS, + torch.nn.Tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS, + torch.tanh: _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS, + "tanh": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS, + "tanh_": _FIXED_QPARAM_OP_NEG1TO1_CONSTRAINTS, +} + +def _get_binary_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + binary_op_configs: List[BackendPatternConfig] = [] + num_tensor_args_to_observation_type_mapping = { + # TODO: this is not used right now since we have extra check in prepare + # will need to change this to NO_OBSERVER later after we implemented + # Tensor dtype inference properly + 0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT, + 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + } + for op_with_quantized_bop_scalar_variant in [operator.add, torch.add, operator.mul, torch.mul]: + bop_patterns = [ + (op_with_quantized_bop_scalar_variant, nn.ReLU), + (op_with_quantized_bop_scalar_variant, F.relu), + (op_with_quantized_bop_scalar_variant, torch.relu), + op_with_quantized_bop_scalar_variant + ] + for bop_pattern in bop_patterns: + binary_op_configs.append( + BackendPatternConfig(bop_pattern) + 
.set_dtype_configs(dtype_configs) # noqa: E131 + ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping)) + # matmul + binary_op_configs.append( + BackendPatternConfig(torch.matmul) + .set_dtype_configs(dtype_configs) # noqa: E131 + ) + return binary_op_configs + +def _get_linear_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + """ + Return all configs related to linear modules and ops. + """ + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + linear_configs: List[BackendPatternConfig] = [] + + # (1) Single linear modules/functions + # ------------------------------------- + # linear module + linear_configs.append( + BackendPatternConfig(torch.nn.Linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear) + .set_qat_module(nnqat.Linear)) + # linear qat module + linear_configs.append( + BackendPatternConfig(nnqat.Linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear)) + # functional linear + linear_configs.append( + BackendPatternConfig(torch.nn.functional.linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2})) + + # (2) Linear + relu + # ------------------- + # 2.1 linear module + relu fusion config + # linear relu, linear module + relu module + linear_configs.append( + BackendPatternConfig((torch.nn.Linear, torch.nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(nni.LinearReLU)) + .set_fused_module(nni.LinearReLU)) + # linear relu, linear module + functional relu + linear_configs.append( + BackendPatternConfig((torch.nn.Linear, torch.nn.functional.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(nni.LinearReLU)) + .set_fused_module(nni.LinearReLU)) + + # 2.2 linear module + relu, fused module configs + # linear relu, fused module + linear_configs.append( + BackendPatternConfig(nni.LinearReLU) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear) + .set_qat_module(nniqat.LinearReLU)) + # linear relu, qat fused module + linear_configs.append( + BackendPatternConfig(nniqat.LinearReLU) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear)) + # 2.3 functional linear + relu configs + # linear relu, functional linear + relu module + linear_configs.append( + BackendPatternConfig((F.linear, torch.nn.ReLU)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + # linear relu, functional linear + functional relu + linear_configs.append( + BackendPatternConfig((F.linear, F.relu)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + + # (3) Linear + batchnorm + # ------------------------ + # 3.1 linear bn fusion + linear_configs.append( + BackendPatternConfig((nn.Linear, nn.BatchNorm1d)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_linear_bn) + .set_fused_module(nni.LinearBn1d)) + + # 3.2 linear bn fused + # linear bn, fused module + 
linear_configs.append( + BackendPatternConfig(nni.LinearBn1d) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear) + .set_qat_module(nniqat.LinearBn1d)) + # linear bn, qat fused module + linear_configs.append( + BackendPatternConfig(nniqat.LinearBn1d) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear)) + return linear_configs + +def _get_conv_configs(dtype_configs): + """ + Return all configs related to conv modules and ops. + """ + conv_configs = [] + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + for convs in [_Conv1dMetadata, _Conv2dMetadata, _Conv3dMetadata]: + + # (1) Single conv modules/functions + # ----------------------------------- + # conv module + conv_configs.append( + BackendPatternConfig(convs.root) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + .set_qat_module(convs.qat)) + # conv qat module + conv_configs.append( + BackendPatternConfig(convs.qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference)) + # functional conv + conv_configs.append( + BackendPatternConfig(convs.func) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2})) + + # (2) Conv + relu + # ----------------- + # 2.1 conv module + relu fusion configs + # conv relu fusion, conv module + relu module + conv_configs.append( + BackendPatternConfig((convs.root, torch.nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu)) + .set_fused_module(convs.fused_conv_relu)) + # conv relu fusion, conv module + functional relu + conv_configs.append( + BackendPatternConfig((convs.root, F.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu)) + .set_fused_module(convs.fused_conv_relu)) + # 2.2 conv module + relu fused module configs + # conv relu, fused module + conv_configs.append( + BackendPatternConfig(convs.fused_conv_relu) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + .set_qat_module(convs.relu_qat)) + # conv relu, qat fused module + conv_configs.append( + BackendPatternConfig(convs.relu_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference)) + # 2.3 functional conv + relu configs + # conv relu, functional conv + relu module + conv_configs.append( + BackendPatternConfig((convs.func, torch.nn.ReLU)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + # conv relu, functional conv + functional relu + conv_configs.append( + BackendPatternConfig((convs.func, F.relu)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + + # fused conv relu + conv_configs.append( + BackendPatternConfig(convs.fused_conv_relu) + .set_dtype_configs(dtype_configs) # noqa: E131 + 
.set_qat_module(convs.relu_qat)) + + conv_configs.append( + BackendPatternConfig(convs.relu_qat) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference)) + + # (3) Conv + batchnorm (+ relu) + # ------------------------------- + # 3.1 conv bn fusion configs + # conv + bn fusion + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_conv_bn) + .set_fused_module(convs.fused_conv_bn)) + # conv + bn + relu module fusion + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn, nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_conv_bn_relu) + .set_fused_module(convs.fused_conv_bn_relu)) + # conv + bn + relu functional fusion + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn, F.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_root_module(convs.root) + .set_fuser_method(fuse_conv_bn_relu) + .set_fused_module(convs.fused_conv_bn_relu)) + # TODO: we can add fusion for torch.relu as well + + # 3.2 conv + bn (+ relu) fused module configs + # fused conv bn + conv_configs.append( + BackendPatternConfig(convs.fused_conv_bn) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_qat_module(convs.bn_qat)) + + # fused conv bn relu + conv_configs.append( + BackendPatternConfig(convs.fused_conv_bn_relu) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_qat_module(convs.bn_relu_qat)) + + # conv bn, qat fused module + conv_configs.append( + BackendPatternConfig(convs.bn_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference)) + # conv bn relu, qat fused module + conv_configs.append( + BackendPatternConfig(convs.bn_relu_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference)) + + # (4) conv transpose and its fusion + # 4.1 conv transpose config + conv_configs.append( + BackendPatternConfig(convs.transpose) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_root_module(convs.transpose) + .set_reference_quantized_module(convs.transpose_reference)) + + # 4.2 conv transpose + bn fusion + conv_configs.append( + BackendPatternConfig((convs.transpose, convs.bn)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_convtranspose_bn) + .set_root_module(convs.transpose) + .set_reference_quantized_module(convs.transpose_reference)) + + # 4.3 functional conv transpose + conv_configs.append( + BackendPatternConfig(convs.func_transpose) + .set_dtype_configs(dtype_configs) # noqa: E131 + ._set_input_type_to_index({"weight": 1, "bias": 2})) + + return conv_configs + +def _get_cat_config(dtype_configs: List[DTypeConfig]) -> BackendPatternConfig: + return BackendPatternConfig(torch.cat) \ + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \ + .set_dtype_configs(dtype_configs) + +def _get_ln_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + ln_configs = [] + ln_configs.append( + BackendPatternConfig(torch.nn.LayerNorm) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + ) + ln_configs.append( + BackendPatternConfig(torch.nn.functional.layer_norm) + 
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 2, "bias": 3}) + ) + return ln_configs + +def _get_default_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + configs = [] + default_ops = [ + torch.nn.ELU, + torch.nn.LeakyReLU, + torch.nn.Hardswish, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.Dropout, + torch.nn.PReLU, + torch.nn.functional.elu, + torch.nn.functional.hardswish, + torch.nn.functional.leaky_relu, + torch.nn.functional.dropout, + ] + for op in default_ops: + configs.append( + BackendPatternConfig(op) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs)) + + configs.append( + BackendPatternConfig(torch.nn.functional.group_norm) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 2, "bias": 3}) + ) + + configs.append( + BackendPatternConfig(torch.nn.functional.instance_norm) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 3, "bias": 4}) + ) + return configs + +def _add_fixed_qparams_to_dtype_configs( + dtype_configs: List[DTypeConfig], + constraints: DTypeWithConstraints, +) -> List[DTypeConfig]: + """ + Return a copy of the list of DTypeConfigs where activations are subject to the specified + constraints required for fixed qparams ops. + + If the data type doesn't match the one in the constraints, simply leave the corresponding + DTypeConfig unchanged. + + If `scale_min_lower_bound` or `scale_max_upper_bound` is specified in the activations, + throw an exception since these settings are incompatible with fixed qparams ops. 
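+
+    Example (illustrative sketch; ``sigmoid_constraints`` stands in for an entry of
+    ``_FIXED_QPARAMS_OP_TO_CONSTRAINTS`` and is not defined here)::
+
+        base = DTypeConfig(input_dtype=torch.quint8, output_dtype=torch.quint8)
+        (constrained,) = _add_fixed_qparams_to_dtype_configs([base], sigmoid_constraints)
+        # `constrained` is a deep copy of `base`; its quint8 input/output activation
+        # constraints now carry the quant_min/quant_max bounds and the exact
+        # scale/zero_point required by the fixed qparams op.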
+ """ + new_dtype_configs = [] + for dtype_config in dtype_configs: + dc = copy.deepcopy(dtype_config) + for orig_constraints in [dc.input_dtype_with_constraints, dc.output_dtype_with_constraints]: + if orig_constraints.dtype != constraints.dtype: + continue + if orig_constraints.scale_min_lower_bound is not None: + raise ValueError(f"scale_min_lower_bound is invalid for fixed qparams ops: {dtype_config}") + if orig_constraints.scale_max_upper_bound is not None: + raise ValueError(f"scale_max_upper_bound is invalid for fixed qparams ops: {dtype_config}") + orig_constraints.quant_min_lower_bound = constraints.quant_min_lower_bound + orig_constraints.quant_max_upper_bound = constraints.quant_max_upper_bound + orig_constraints.scale_exact_match = constraints.scale_exact_match + orig_constraints.zero_point_exact_match = constraints.zero_point_exact_match + new_dtype_configs.append(dc) + return new_dtype_configs + +def _get_fixed_qparams_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + fixed_qparams_op_configs = [] + for fixed_qparam_op, constraints in _FIXED_QPARAMS_OP_TO_CONSTRAINTS.items(): + new_dtype_configs = _add_fixed_qparams_to_dtype_configs(dtype_configs, constraints) + fixed_qparams_op_configs.append( + BackendPatternConfig(fixed_qparam_op) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(new_dtype_configs)) + return fixed_qparams_op_configs + +def _get_share_qparams_op_configs(dtype_configs): + """ Get the operator config for the operators that works for both float and quantized input + if input is quantized, the output Tensor shares the same quantization parameter + with input. + Example operator: avgpool2d, reshape, transpose, maxpool2d + Example observed operator: + observer_0 - avgpool2d - observer_0 (same observer instance as input) + """ + + def _get_share_qprams_op_backend_config(op): + return BackendPatternConfig(op) \ + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \ + .set_dtype_configs(dtype_configs) + + share_qparams_ops = [ + torch.nn.AdaptiveAvgPool1d, + torch.nn.AdaptiveAvgPool2d, + torch.nn.AdaptiveAvgPool3d, + torch.nn.AvgPool1d, + torch.nn.AvgPool2d, + torch.nn.AvgPool3d, + torch.nn.Hardtanh, + torch.nn.Identity, + torch.nn.MaxPool1d, + torch.nn.MaxPool2d, + torch.nn.MaxPool3d, + torch.nn.PixelShuffle, + torch.nn.PixelUnshuffle, + torch.nn.ReLU, + torch.nn.ReLU6, + torch.adaptive_avg_pool1d, + torch.nn.functional.adaptive_avg_pool2d, + torch.nn.functional.adaptive_avg_pool3d, + torch.nn.functional.hardtanh, + torch.nn.functional.hardtanh_, + torch.nn.functional.interpolate, + torch.nn.functional.max_pool1d, + torch.nn.functional.max_pool2d, + torch.nn.functional.max_pool3d, + torch.nn.functional.pixel_shuffle, + torch.nn.functional.pixel_unshuffle, + torch.nn.functional.relu, + torch.nn.functional.relu6, + torch.avg_pool1d, + torch._C._nn.avg_pool2d, + torch._C._nn.avg_pool3d, + torch.clamp, + torch.flatten, + torch.mean, + torch.narrow, + torch.repeat_interleave, + torch.transpose, + torch.squeeze, + torch.stack, + torch.unsqueeze, + operator.floordiv, + "contiguous", + "clamp", + "detach", + "detach_", + "mean", + "permute", + "repeat", + "repeat_interleave", + "reshape", + "resize_", + "relu", + "relu_", + "squeeze", + "squeeze_", + "transpose", + "unsqueeze", + "unsqueeze_", + "view" + ] + return [_get_share_qprams_op_backend_config(op) for op in share_qparams_ops] + +def _get_bn_configs(dtype_configs: List[DTypeConfig]) -> 
List[BackendPatternConfig]: + """ Get configs related to batchnorm. """ + bn_configs = [] + bn_to_fused_bn = { + torch.nn.BatchNorm2d: nni.BNReLU2d, + torch.nn.BatchNorm3d: nni.BNReLU3d, + } + for bn in bn_to_fused_bn.keys(): + fused_bn = bn_to_fused_bn[bn] + # bn module + relu module fusion config + bn_configs.append( + BackendPatternConfig((bn, nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(fused_bn)) + .set_fused_module(fused_bn)) + # bn module + F.relu fusion config + bn_configs.append( + BackendPatternConfig((bn, F.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(fused_bn)) + .set_fused_module(fused_bn)) + bn_configs.append( + BackendPatternConfig(bn) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs)) + + # fused bn configs + for fused_bn in bn_to_fused_bn.values(): + bn_configs.append( + BackendPatternConfig(fused_bn) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs)) + return bn_configs + +def _get_rnn_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + rnn_op_configs = [] + for rnn_op, ref_rnn_op in [ + (nn.GRUCell, nnqr.GRUCell), + (nn.LSTMCell, nnqr.LSTMCell), + (nn.RNNCell, nnqr.RNNCell), + (nn.LSTM, nnqr.LSTM), + (nn.GRU, nnqr.GRU) + ]: + rnn_op_configs.append( + BackendPatternConfig(rnn_op) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(rnn_op) + .set_reference_quantized_module(ref_rnn_op)) + return rnn_op_configs + +def _get_embedding_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]: + embedding_op_configs = [] + for embedding_op, qat_embedding_op, ref_embedding_op in [ + (nn.Embedding, nnqat.Embedding, nnqr.Embedding), + (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag), + ]: + embedding_op_configs.append( + BackendPatternConfig(embedding_op) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_qat_module(qat_embedding_op) + .set_root_module(embedding_op) + .set_reference_quantized_module(ref_embedding_op)) + + # config for qat op + embedding_op_configs.append( + BackendPatternConfig(qat_embedding_op) + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(embedding_op) + .set_reference_quantized_module(ref_embedding_op)) + return embedding_op_configs + +def _get_tensor_info_op_configs(dtype_configs): + """ + These ops work on tensors of different dtypes but return non-tensors + containing information about the input tensor. 
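+
+    For example (illustrative)::
+
+        x = torch.randn(2, 3)
+        x.shape    # torch.Size([2, 3]) -- not a Tensor, so there is nothing to observe
+        x.size()   # the same information via the method form
+
+    Accordingly, these patterns use ``ObservationType.INPUT_OUTPUT_NOT_OBSERVED``.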
+ """ + + def _get_config(op): + return BackendPatternConfig(op) \ + .set_observation_type(ObservationType.INPUT_OUTPUT_NOT_OBSERVED) \ + .set_dtype_configs(dtype_configs) + + return [_get_config(op) for op in ("shape", "size")] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py new file mode 100644 index 0000000000000000000000000000000000000000..01e112b688c0428cadb5e31502f18387bcd9282f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py @@ -0,0 +1,160 @@ +import operator +import torch +from torch.ao.quantization.backend_config import ( + BackendConfig, + DTypeConfig, + ObservationType, + BackendPatternConfig, +) + +weighted_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) +from typing import List + +def get_linear_configs(): + linear_configs = [] + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + dtype_configs = [weighted_op_quint8_dtype_config] + + # TODO: need to fix the way we insert observers for this pattern + # should be solved in the new fusion API + # reason that this doesn't work: the pattern is a bit complicated and we don't + # have a way to specify which input of the pattern we would like to observe + # pattern: + # bias input weight + # \ | / + # \ | t + # \ | / + # addmm + # we want to observe "weight" as weight, but there is not way to convey this + # information with current pattern language + # + # right now: + # original: + # weight - t \ + # input - addmm + # observed (no hack): + # weight - t - observer \ + # input - observer - addmm + # target: + # weight - observer - t \ + # input - observer - addmm + + # def root_node_getter(node_pattern): + # addmm, bias, act, weight = node_pattern + # return addmm + + # linear_configs.append( + # BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.aten.t.default)) + # .set_observation_type(observation_type) # noqa: E131 + # .set_dtype_configs(dtype_configs) + # ._set_root_node_getter(root_node_getter)) + + linear_configs.append( + BackendPatternConfig(torch.ops.aten.addmm.default) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 2, "bias": 0}) + ) + # linear is decomposed to `t - mm` if bias is not present + linear_configs.append( + BackendPatternConfig(torch.ops.aten.mm.default) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1}) + ) + return linear_configs + +def get_conv_configs(): + conv_configs = [] + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + dtype_configs = [weighted_op_quint8_dtype_config] + conv_configs.append( + BackendPatternConfig(torch.ops.aten.convolution.default) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2}) + ) + conv_configs.append( + BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu.default)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2}) + ) + # TODO: remove when functionalization is supported in PT2 mode + 
conv_configs.append( + BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu_.default)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2}) + ) + return conv_configs + +def get_pooling_configs(): + backend_pattern_configs = [] + observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT + dtype_configs = [weighted_op_quint8_dtype_config] + + def root_node_getter(node_pattern): + getitem, maxpool, index = node_pattern + return maxpool + + backend_pattern_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_root_node_getter(root_node_getter) + ) + + return backend_pattern_configs + +def get_relu_configs(): + backend_pattern_configs = [] + observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT + dtype_configs = [weighted_op_quint8_dtype_config] + backend_pattern_configs.append( + BackendPatternConfig(torch.ops.aten.relu.default) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + return backend_pattern_configs + +def get_binary_op_configs(): + binary_op_configs: List[BackendPatternConfig] = [] + dtype_configs = [weighted_op_quint8_dtype_config] + num_tensor_args_to_observation_type_mapping = { + # TODO: this is not used right now since we have extra check in prepare + # will need to change this to NO_OBSERVER later after we implemented + # Tensor dtype inference properly + 0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT, + 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + } + for op_with_quantized_bop_scalar_variant in [torch.ops.aten.add.Tensor, torch.ops.aten.add_.Tensor]: + bop_patterns = [ + (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu.default), + op_with_quantized_bop_scalar_variant, + # TODO: remove when functionalization is supported in pt2_mode + (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default), + ] + for bop_pattern in bop_patterns: + binary_op_configs.append( + BackendPatternConfig(bop_pattern) + .set_dtype_configs(dtype_configs) # noqa: E131 + ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping)) + + return binary_op_configs + +def get_qnnpack_pt2e_backend_config(): + return ( + BackendConfig("qnnpack_pytorch_2.0_export") + .set_backend_pattern_configs(get_linear_configs()) + .set_backend_pattern_configs(get_binary_op_configs()) + .set_backend_pattern_configs(get_conv_configs()) + .set_backend_pattern_configs(get_pooling_configs()) + .set_backend_pattern_configs(get_relu_configs()) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a4d2f3afa349688365fe19e498cd3bedcb08e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py @@ -0,0 +1,659 @@ +from __future__ import annotations +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Type, Union + +import torch +from torch.ao.quantization.utils import Pattern +from enum import Enum + + +__all__ = [ + 
"BackendConfig", + "BackendPatternConfig", + "DTypeConfig", + "DTypeWithConstraints", + "ObservationType", +] + + +# DTypeConfig dict keys +INPUT_DTYPE_DICT_KEY = "input_dtype" +OUTPUT_DTYPE_DICT_KEY = "output_dtype" +WEIGHT_DTYPE_DICT_KEY = "weight_dtype" +BIAS_DTYPE_DICT_KEY = "bias_dtype" +IS_DYNAMIC_DICT_KEY = "is_dynamic" + +# BackendConfig dict keys +NAME_DICT_KEY = "name" +CONFIGS_DICT_KEY = "configs" + +# BackendPatternConfig dict keys +PATTERN_DICT_KEY = "pattern" +PATTERN_COMPLEX_FORMAT_DICT_KEY = "pattern_complex_format" +OBSERVATION_TYPE_DICT_KEY = "observation_type" +DTYPE_CONFIGS_DICT_KEY = "dtype_configs" +ROOT_MODULE_DICT_KEY = "root_module" +QAT_MODULE_DICT_KEY = "qat_module" +REFERENCE_QUANTIZED_MODULE_DICT_KEY = "reference_quantized_module_for_root" +FUSED_MODULE_DICT_KEY = "fused_module" +FUSER_METHOD_DICT_KEY = "fuser_method" +ROOT_NODE_GETTER_DICT_KEY = "root_node_getter" +EXTRA_INPUTS_GETTER_DICT_KEY = "extra_inputs_getter" +NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY = "num_tensor_args_to_observation_type" +INPUT_TYPE_TO_INDEX_DICT_KEY = "input_type_to_index" + + +# TODO: maybe rename this to something that's not related to observer +# e.g. QParamsType +class ObservationType(Enum): + """ An enum that represents different ways of how an operator/operator pattern + should be observed + """ + + OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0 + """this means input and output are observed with different observers, based + on qconfig.activation + example: conv, linear, softmax + """ + + OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1 + """this means the output will use the same observer instance as input, based + on qconfig.activation + example: torch.cat, maxpool + """ + + INPUT_OUTPUT_NOT_OBSERVED = 2 + """this means the input and output are never observed + example: x.shape, x.size + """ + + +@dataclass +class DTypeWithConstraints: + """ + Config for specifying additional constraints for a given dtype, such as quantization + value ranges, scale value ranges, and fixed quantization params, to be used in + :class:`~torch.ao.quantization.backend_config.DTypeConfig`. + + The constraints currently supported are: + + * `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper + bounds for the minimum and maximum quantized values respectively. If + the QConfig’s `quant_min` and `quant_max` fall outside this range, + then the QConfig will be ignored. + + * `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper + bounds for the minimum and maximum scale values respectively. If the + QConfig’s minimum scale value (currently exposed as `eps`) falls below + the lower bound, then the QConfig will be ignored. Note that the upper + bound is currently not enforced. + + * `scale_exact_match` and `zero_point_exact_match`: Exact match requirements + for scale and zero point, to be used for operators with fixed quantization + parameters such as sigmoid and tanh. If the observer specified in the QConfig + is neither `FixedQParamsObserver` nor `FixedQParamsFakeQuantize`, or if + the quantization parameters don't match, then the QConfig will be ignored. 
+ """ + dtype: Optional[torch.dtype] = None + quant_min_lower_bound: Union[int, float, None] = None + quant_max_upper_bound: Union[int, float, None] = None + scale_min_lower_bound: Union[int, float, None] = None + scale_max_upper_bound: Union[int, float, None] = None + scale_exact_match: Optional[float] = None + zero_point_exact_match: Optional[int] = None + + +@dataclass +class DTypeConfig: + """ + Config object that specifies the supported data types passed as arguments to + quantize ops in the reference model spec, for input and output activations, + weights, and biases. + + For example, consider the following reference model: + + quant1 - [dequant1 - fp32_linear - quant2] - dequant2 + + The pattern in the square brackets refers to the reference pattern of + statically quantized linear. Setting the input dtype as `torch.quint8` + in the DTypeConfig means we pass in `torch.quint8` as the dtype argument + to the first quantize op (quant1). Similarly, setting the output dtype as + `torch.quint8` means we pass in `torch.quint8` as the dtype argument to + the second quantize op (quant2). + + Note that the dtype here does not refer to the interface dtypes of the + op. For example, the "input dtype" here is not the dtype of the input + tensor passed to the quantized linear op. Though it can still be the + same as the interface dtype, this is not always the case, e.g. the + interface dtype is fp32 in dynamic quantization but the "input dtype" + specified in the DTypeConfig would still be quint8. The semantics of + dtypes here are the same as the semantics of the dtypes specified in + the observers. + + These dtypes are matched against the ones specified in the user’s + QConfig. If there is a match, and the QConfig satisfies the constraints + specified in the DTypeConfig (if any), then we will quantize the given + pattern using this DTypeConfig. Otherwise, the QConfig is ignored and + the pattern will not be quantized. + + Example usage:: + + >>> # xdoctest: +SKIP(failing) + >>> dtype_config1 = DTypeConfig( + ... input_dtype=torch.quint8, + ... output_dtype=torch.quint8, + ... weight_dtype=torch.qint8, + ... bias_dtype=torch.float) + + >>> dtype_config2 = DTypeConfig( + ... input_dtype=DTypeWithConstraints( + ... dtype=torch.quint8, + ... quant_min_lower_bound=0, + ... quant_max_upper_bound=255, + ... ), + ... output_dtype=DTypeWithConstraints( + ... dtype=torch.quint8, + ... quant_min_lower_bound=0, + ... quant_max_upper_bound=255, + ... ), + ... weight_dtype=DTypeWithConstraints( + ... dtype=torch.qint8, + ... quant_min_lower_bound=-128, + ... quant_max_upper_bound=127, + ... ), + ... 
bias_dtype=torch.float) + + >>> dtype_config1.input_dtype + torch.quint8 + + >>> dtype_config2.input_dtype + torch.quint8 + + >>> dtype_config2.input_dtype_with_constraints + DTypeWithConstraints(dtype=torch.quint8, quant_min_lower_bound=0, quant_max_upper_bound=255, \ +scale_min_lower_bound=None, scale_max_upper_bound=None) + """ + input_dtype_with_constraints: DTypeWithConstraints + output_dtype_with_constraints: DTypeWithConstraints + weight_dtype_with_constraints: DTypeWithConstraints + bias_dtype: Optional[torch.dtype] + is_dynamic: Optional[bool] + + def __init__( + self, + input_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None, + output_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None, + weight_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None, + bias_dtype: Optional[torch.dtype] = None, + is_dynamic: Optional[bool] = None, + ): + if isinstance(input_dtype, DTypeWithConstraints): + self.input_dtype_with_constraints = input_dtype + else: + self.input_dtype_with_constraints = DTypeWithConstraints(dtype=input_dtype) + + if isinstance(output_dtype, DTypeWithConstraints): + self.output_dtype_with_constraints = output_dtype + else: + self.output_dtype_with_constraints = DTypeWithConstraints(dtype=output_dtype) + + if isinstance(weight_dtype, DTypeWithConstraints): + self.weight_dtype_with_constraints = weight_dtype + else: + self.weight_dtype_with_constraints = DTypeWithConstraints(dtype=weight_dtype) + + self.bias_dtype = bias_dtype + self.is_dynamic = is_dynamic + + @property + def input_dtype(self) -> Optional[torch.dtype]: + return self.input_dtype_with_constraints.dtype + + @property + def output_dtype(self) -> Optional[torch.dtype]: + return self.output_dtype_with_constraints.dtype + + @property + def weight_dtype(self) -> Optional[torch.dtype]: + return self.weight_dtype_with_constraints.dtype + + @classmethod + def from_dict(cls, dtype_config_dict: Dict[str, Any]) -> DTypeConfig: + """ + Create a ``DTypeConfig`` from a dictionary with the following items (all optional): + "input_dtype": torch.dtype or ``DTypeWithConstraints`` + "output_dtype": torch.dtype or ``DTypeWithConstraints`` + "weight_dtype": torch.dtype or ``DTypeWithConstraints`` + "bias_type": torch.dtype + "is_dynamic": bool + """ + input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None) + if input_dtype is not None and not isinstance(input_dtype, (torch.dtype, DTypeWithConstraints)): + raise ValueError("Expected input_dtype to be a torch.dtype or DTypeWithConstraints") + output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None) + if output_dtype is not None and not isinstance(output_dtype, (torch.dtype, DTypeWithConstraints)): + raise ValueError("Expected output_dtype to be a torch.dtype or DTypeWithConstraints") + weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None) + if weight_dtype is not None and not isinstance(weight_dtype, (torch.dtype, DTypeWithConstraints)): + raise ValueError("Expected weight_dtype to be a torch.dtype or DTypeWithConstraints") + bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None) + is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None) + return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic) + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``DTypeConfig`` to a dictionary with the items described in + :func:`~torch.ao.quantization.backend_config.DTypeConfig.from_dict`. 
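+
+        Example (illustrative)::
+
+            cfg = DTypeConfig.from_dict({"input_dtype": torch.quint8,
+                                         "weight_dtype": torch.qint8})
+            cfg.to_dict()
+            # {"input_dtype": DTypeWithConstraints(dtype=torch.quint8, ...),
+            #  "weight_dtype": DTypeWithConstraints(dtype=torch.qint8, ...)}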
+ """ + dtype_config_dict: Dict[str, Any] = {} + if self.input_dtype is not None: + dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype_with_constraints + if self.output_dtype is not None: + dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype_with_constraints + if self.weight_dtype is not None: + dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype_with_constraints + if self.bias_dtype is not None: + dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype + if self.is_dynamic is not None: + dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic + return dtype_config_dict + + +class BackendConfig: + # TODO: refer to NativeBackendConfig once that is implemented + """Config that defines the set of patterns that can be quantized on a given backend, and how reference + quantized models can be produced from these patterns. + + A pattern in this context refers to a module, a functional, an operator, or a directed acyclic graph + of the above. Each pattern supported on the target backend can be individually configured through + :class:`~torch.ao.quantization.backend_config.BackendPatternConfig` in terms of: + + (1) The supported input/output activation, weight, and bias data types + + (2) How observers and quant/dequant ops are inserted in order to construct the reference pattern, and + + (3) (Optionally) Fusion, QAT, and reference module mappings. + + The format of the patterns is described in: + https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md + + Example usage:: + + import torch + from torch.ao.quantization.backend_config import ( + BackendConfig, + BackendPatternConfig, + DTypeConfig, + ObservationType, + ) + + weighted_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float) + + def fuse_conv2d_relu(is_qat, conv, relu): + return torch.ao.nn.intrinsic.ConvReLU2d(conv, relu) + + # For quantizing Linear + linear_config = BackendPatternConfig(torch.nn.Linear) \ + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \ + .add_dtype_config(weighted_int8_dtype_config) \ + .set_root_module(torch.nn.Linear) \ + .set_qat_module(torch.ao.nn.qat.Linear) \ + .set_reference_quantized_module(torch.ao.nn.quantized.reference.Linear) + + # For fusing Conv2d + ReLU into ConvReLU2d + conv_relu_config = BackendPatternConfig((torch.nn.Conv2d, torch.nn.ReLU)) \ + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \ + .add_dtype_config(weighted_int8_dtype_config) \ + .set_fused_module(torch.ao.nn.intrinsic.ConvReLU2d) \ + .set_fuser_method(fuse_conv2d_relu) + + # For quantizing ConvReLU2d + fused_conv_relu_config = BackendPatternConfig(torch.ao.nn.intrinsic.ConvReLU2d) \ + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \ + .add_dtype_config(weighted_int8_dtype_config) \ + .set_root_module(torch.nn.Conv2d) \ + .set_qat_module(torch.ao.nn.intrinsic.qat.ConvReLU2d) \ + .set_reference_quantized_module(torch.ao.nn.quantized.reference.Conv2d) + + backend_config = BackendConfig("my_backend") \ + .set_backend_pattern_config(linear_config) \ + .set_backend_pattern_config(conv_relu_config) \ + .set_backend_pattern_config(fused_conv_relu_config) + + """ + def __init__(self, name: str = ""): + self.name = name + # Store all BackendPatternConfigs in a map to handle duplicates + # Note: the key in this map uses the complex reversed tuple format. 
+ # This is intended only for internal use; users who wish to access + # the original patterns should go through `self.configs` instead. + self._pattern_complex_format_to_config: Dict[Pattern, BackendPatternConfig] = {} + + def __repr__(self): + return f"BackendConfig({self.__dict__})" + + def set_name(self, name: str) -> BackendConfig: + """ + Set the name of the target backend. + """ + self.name = name + return self + + def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig: + """ + Set the config for an pattern that can be run on the target backend. + This overrides any existing config for the given pattern. + """ + # Avoid circular dependencies + pattern_complex_format = torch.ao.quantization.backend_config.utils \ + ._get_pattern_in_reversed_nested_tuple_format(config) # type: ignore[attr-defined] + self._pattern_complex_format_to_config[pattern_complex_format] = config + return self + + def set_backend_pattern_configs(self, configs: List[BackendPatternConfig]) -> BackendConfig: + """ + Set the configs for patterns that can be run on the target backend. + This overrides any existing config for a given pattern if it was previously registered already. + """ + for conf in configs: + self.set_backend_pattern_config(conf) + return self + + @property + def configs(self) -> List[BackendPatternConfig]: + """ + Return a copy of the list of configs set in this `BackendConfig`. + """ + return list(self._pattern_complex_format_to_config.values()) + + @classmethod + def from_dict(cls, backend_config_dict: Dict[str, Any]) -> BackendConfig: + """ + Create a ``BackendConfig`` from a dictionary with the following items: + + "name": the name of the target backend + + "configs": a list of dictionaries that each represents a `BackendPatternConfig` + + """ + conf = cls(backend_config_dict.get(NAME_DICT_KEY, "")) + for d in backend_config_dict.get(CONFIGS_DICT_KEY, []): + if isinstance(d, BackendPatternConfig): + conf.set_backend_pattern_config(d) + elif isinstance(d, Dict): + conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d)) + else: + raise ValueError(f"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary") + return conf + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``BackendConfig`` to a dictionary with the items described in + :func:`~torch.ao.quantization.backend_config.BackendConfig.from_dict`. + """ + return { + NAME_DICT_KEY: self.name, + CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs], + } + + +class BackendPatternConfig: + """ + Config object that specifies quantization behavior for a given operator pattern. + For a detailed example usage, see :class:`~torch.ao.quantization.backend_config.BackendConfig`. 
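+
+    Example (minimal sketch; the dtype values below are placeholders rather than a
+    recommendation for any particular backend)::
+
+        import torch
+        from torch.ao.quantization.backend_config import (
+            BackendPatternConfig,
+            DTypeConfig,
+            ObservationType,
+        )
+
+        config = (
+            BackendPatternConfig(torch.nn.Linear)
+            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
+            .add_dtype_config(DTypeConfig(input_dtype=torch.quint8,
+                                          output_dtype=torch.quint8,
+                                          weight_dtype=torch.qint8,
+                                          bias_dtype=torch.float))
+            .set_root_module(torch.nn.Linear)
+            .set_reference_quantized_module(torch.ao.nn.quantized.reference.Linear)
+        )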
+ """ + def __init__(self, pattern: Optional[Pattern] = None): + self.pattern: Optional[Pattern] = pattern + self.observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + self.dtype_configs: List[DTypeConfig] = [] + self.root_module: Optional[Type[torch.nn.Module]] = None + self.qat_module: Optional[Type[torch.nn.Module]] = None + self.reference_quantized_module: Optional[Type[torch.nn.Module]] = None + self.fused_module: Optional[Type[torch.nn.Module]] = None + self.fuser_method: Optional[Callable] = None + + # Temporary/internal configs + self._root_node_getter: Optional[Callable] = None + self._extra_inputs_getter: Optional[Callable] = None + self._num_tensor_args_to_observation_type: Dict[int, ObservationType] = {} + self._input_type_to_index: Dict[str, int] = {} + self._pattern_complex_format: Optional[Pattern] = None + + def __repr__(self): + dict_nonempty = { + k: v for k, v in self.__dict__.items() + if ( + (not isinstance(v, (list, dict)) and v is not None) + or (isinstance(v, (list, dict)) and len(v) > 0) + ) + } + return f"BackendPatternConfig({dict_nonempty})" + + def set_pattern(self, pattern: Pattern) -> BackendPatternConfig: + """ + Set the pattern to configure. + + The pattern can be a float module, functional operator, pytorch operator, or a tuple + combination of the above. Tuple patterns are treated as sequential patterns, and + currently only tuples of 2 or 3 elements are supported. + """ + if self._pattern_complex_format is not None: + raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set") + self.pattern = pattern + return self + + def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig: + """ + Set how observers should be inserted in the graph for this pattern. + + Observation type here refers to how observers (or quant-dequant ops) will be placed + in the graph. This is used to produce the desired reference patterns understood by + the backend. Weighted ops such as linear and conv require different observers + (or quantization parameters passed to quantize ops in the reference model) for the + input and the output. + + There are two observation types: + + `OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT` (default): the output observer instance + will be different from the input. This is the most common observation type. + + `OUTPUT_SHARE_OBSERVER_WITH_INPUT`: the output observer instance will be the + same as the input. This is useful for operators like `cat`. + + Note: This will be renamed in the near future, since we will soon insert QuantDeQuantStubs + with observers (and fake quantizes) attached instead of observers themselves. + """ + self.observation_type = observation_type + return self + + def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig: + """ + Add a set of supported data types passed as arguments to quantize ops in the + reference model spec. + """ + self.dtype_configs.append(dtype_config) + return self + + def set_dtype_configs(self, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig: + """ + Set the supported data types passed as arguments to quantize ops in the + reference model spec, overriding all previously registered data types. + """ + self.dtype_configs = dtype_configs + return self + + def set_root_module(self, root_module: Type[torch.nn.Module]) -> BackendPatternConfig: + """ + Set the module that represents the root for this pattern. + + When we construct the reference quantized model during the convert phase, + the root modules (e.g. 
torch.nn.Linear for torch.ao.nn.intrinsic.LinearReLU) + will be swapped to the corresponding reference quantized modules (e.g. + torch.ao.nn.reference.quantized.Linear). This allows custom backends to + specify custom reference quantized module implementations to match the + numerics of their lowered operators. Since this is a one-to-one mapping, + both the root module and the reference quantized module must be specified + in the same BackendPatternConfig in order for the conversion to take place. + """ + self.root_module = root_module + return self + + def set_qat_module(self, qat_module: Type[torch.nn.Module]) -> BackendPatternConfig: + """ + Set the module that represents the QAT implementation for this pattern. + """ + self.qat_module = qat_module + return self + + def set_reference_quantized_module(self, reference_quantized_module: Type[torch.nn.Module]) -> BackendPatternConfig: + """ + Set the module that represents the reference quantized implementation for + this pattern's root module. + + For more detail, see :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.set_root_module`. + """ + self.reference_quantized_module = reference_quantized_module + return self + + def set_fused_module(self, fused_module: Type[torch.nn.Module]) -> BackendPatternConfig: + """ + Set the module that represents the fused implementation for this pattern. + """ + self.fused_module = fused_module + return self + + def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig: + """ + Set the function that specifies how to fuse this BackendPatternConfig's pattern. + + The first argument of this function should be `is_qat`, and the rest of the arguments + should be the items in the tuple pattern. The return value of this function should be + the resulting fused module. + + For example, the fuser method for the pattern `(torch.nn.Linear, torch.nn.ReLU)` can be: + + def fuse_linear_relu(is_qat, linear, relu): + return torch.ao.nn.intrinsic.LinearReLU(linear, relu) + + For a more complicated example, see https://gist.github.com/jerryzh168/8bea7180a8ba3c279f2c9b050f2a69a6. + """ + self.fuser_method = fuser_method + return self + + def _set_root_node_getter(self, root_node_getter: Callable) -> BackendPatternConfig: + self._root_node_getter = root_node_getter + return self + + def _set_extra_inputs_getter(self, extra_inputs_getter: Callable) -> BackendPatternConfig: + self._extra_inputs_getter = extra_inputs_getter + return self + + def _set_num_tensor_args_to_observation_type( + self, num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> BackendPatternConfig: + self._num_tensor_args_to_observation_type = num_tensor_args_to_observation_type + return self + + def _set_input_type_to_index(self, input_type_to_index: Dict[str, int]) -> BackendPatternConfig: + self._input_type_to_index = input_type_to_index + return self + + def _set_pattern_complex_format(self, pattern: Pattern) -> BackendPatternConfig: + """ + Set the pattern to configure, using the reversed nested tuple format. 
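+        For example (illustrative), the sequential pattern conv -> bn -> relu is
+        expressed in this format as ``(nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))``.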
+ + See the BackendConfig README for more detail: + https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md#advanced-pattern-specification + """ + if self.pattern is not None: + raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set") + self._pattern_complex_format = pattern + return self + + @classmethod + def from_dict(cls, backend_pattern_config_dict: Dict[str, Any]) -> BackendPatternConfig: + """ + Create a ``BackendPatternConfig`` from a dictionary with the following items: + + "pattern": the pattern being configured + "observation_type": the :class:`~torch.ao.quantization.backend_config.ObservationType` that specifies how + observers should be inserted for this pattern + "dtype_configs": a list of dictionaries that represents :class:`~torch.ao.quantization.backend_config.DTypeConfig` s + "root_module": a :class:`torch.nn.Module` that represents the root for this pattern + "qat_module": a :class:`torch.nn.Module` that represents the QAT implementation for this pattern + "reference_quantized_module": a :class:`torch.nn.Module` that represents the reference quantized + implementation for this pattern's root module. + "fused_module": a :class:`torch.nn.Module` that represents the fused implementation for this pattern + "fuser_method": a function that specifies how to fuse the pattern for this pattern + "pattern_complex_format": the pattern specified in the reversed nested tuple format (deprecated) + + """ + def _get_dtype_config(obj: Any) -> DTypeConfig: + """ + Convert the given object into a ``DTypeConfig`` if possible, else throw an exception. + """ + if isinstance(obj, DTypeConfig): + return obj + if isinstance(obj, Dict): + return DTypeConfig.from_dict(obj) + raise ValueError( + f"Expected a list of DTypeConfigs in " + f"backend_pattern_config_dict[\"{DTYPE_CONFIGS_DICT_KEY}\"], got '{type(obj)}'" + ) + + conf = cls() + if PATTERN_DICT_KEY in backend_pattern_config_dict: + conf.set_pattern(backend_pattern_config_dict[PATTERN_DICT_KEY]) + if OBSERVATION_TYPE_DICT_KEY in backend_pattern_config_dict: + conf.set_observation_type(backend_pattern_config_dict[OBSERVATION_TYPE_DICT_KEY]) + for d in backend_pattern_config_dict.get(DTYPE_CONFIGS_DICT_KEY, []): + conf.add_dtype_config(_get_dtype_config(d)) + conf.set_root_module(backend_pattern_config_dict.get(ROOT_MODULE_DICT_KEY, None)) + conf.set_qat_module(backend_pattern_config_dict.get(QAT_MODULE_DICT_KEY, None)) + conf.set_reference_quantized_module(backend_pattern_config_dict.get(REFERENCE_QUANTIZED_MODULE_DICT_KEY, None)) + conf.set_fused_module(backend_pattern_config_dict.get(FUSED_MODULE_DICT_KEY, None)) + conf.set_fuser_method(backend_pattern_config_dict.get(FUSER_METHOD_DICT_KEY, None)) + conf._set_root_node_getter(backend_pattern_config_dict.get(ROOT_NODE_GETTER_DICT_KEY, None)) + conf._set_extra_inputs_getter(backend_pattern_config_dict.get(EXTRA_INPUTS_GETTER_DICT_KEY, None)) + conf._set_num_tensor_args_to_observation_type( + backend_pattern_config_dict.get(NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY, {})) + conf._set_input_type_to_index(backend_pattern_config_dict.get(INPUT_TYPE_TO_INDEX_DICT_KEY, {})) + if PATTERN_COMPLEX_FORMAT_DICT_KEY in backend_pattern_config_dict: + conf._set_pattern_complex_format(backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY]) + return conf + + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``BackendPatternConfig`` to a dictionary with the items described in + 
:func:`~torch.ao.quantization.backend_config.BackendPatternConfig.from_dict`. + """ + backend_pattern_config_dict: Dict[str, Any] = { + OBSERVATION_TYPE_DICT_KEY: self.observation_type, + DTYPE_CONFIGS_DICT_KEY: [c.to_dict() for c in self.dtype_configs], + } + if self.pattern is not None: + backend_pattern_config_dict[PATTERN_DICT_KEY] = self.pattern + if self.root_module is not None: + backend_pattern_config_dict[ROOT_MODULE_DICT_KEY] = self.root_module + if self.qat_module is not None: + backend_pattern_config_dict[QAT_MODULE_DICT_KEY] = self.qat_module + if self.reference_quantized_module is not None: + backend_pattern_config_dict[REFERENCE_QUANTIZED_MODULE_DICT_KEY] = self.reference_quantized_module + if self.fused_module is not None: + backend_pattern_config_dict[FUSED_MODULE_DICT_KEY] = self.fused_module + if self.fuser_method is not None: + backend_pattern_config_dict[FUSER_METHOD_DICT_KEY] = self.fuser_method + if self._root_node_getter is not None: + backend_pattern_config_dict[ROOT_NODE_GETTER_DICT_KEY] = self._root_node_getter + if self._extra_inputs_getter is not None: + backend_pattern_config_dict[EXTRA_INPUTS_GETTER_DICT_KEY] = self._extra_inputs_getter + if len(self._num_tensor_args_to_observation_type) > 0: + backend_pattern_config_dict[NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY] = self._num_tensor_args_to_observation_type + if len(self._input_type_to_index) > 0: + backend_pattern_config_dict[INPUT_TYPE_TO_INDEX_DICT_KEY] = self._input_type_to_index + if self._pattern_complex_format is not None: + backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY] = self._pattern_complex_format + return backend_pattern_config_dict diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py new file mode 100644 index 0000000000000000000000000000000000000000..86a2d13e19ff1a2dc2e9bdc5e5920bd1b207ab42 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/executorch.py @@ -0,0 +1,494 @@ +# TODO: rename executorch to qnnpack_executorch since executorch is a general runtime +# not a specific backend + +import operator +from typing import List + +import torch +import torch.ao.nn.qat as nnqat +import torch.ao.nn.quantized.reference as nnqr +import torch.nn as nn +import torch.nn.functional as F + +from ..fuser_method_mappings import ( + _sequential_wrapper2, + fuse_conv_bn, + fuse_conv_bn_relu, +) +from ._common_operator_config_utils import _Conv2dMetadata +from .backend_config import ( + BackendConfig, + BackendPatternConfig, + DTypeConfig, + DTypeWithConstraints, + ObservationType, +) +from .qnnpack import ( + qnnpack_default_op_qint8_symmetric_dtype_config, + qnnpack_weighted_op_qint8_symmetric_dtype_config, +) + + +__all__ = [ + "get_executorch_backend_config", +] + + +# =================== +# | DTYPE CONFIGS | +# =================== + +executorch_weighted_op_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +executorch_default_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + +executorch_default_dynamic_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + is_dynamic=True, +) + +executorch_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints( + dtype=torch.qint8, + 
scale_min_lower_bound=2**-12, +) + +executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints( + dtype=torch.qint8, + quant_min_lower_bound=-127, + quant_max_upper_bound=127, + scale_min_lower_bound=2**-12, +) + +executorch_default_dynamic_qint8_dtype_config = DTypeConfig( + input_dtype=executorch_act_qint8_scale_min_2_neg_12, + output_dtype=torch.float, + weight_dtype=executorch_weight_qint8_neg_127_to_127_scale_min_2_neg_12, + bias_dtype=torch.float, + is_dynamic=True, +) + +executorch_default_dynamic_float16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float, + weight_dtype=torch.float16, + bias_dtype=torch.float, + is_dynamic=True, +) + +executorch_weight_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint8, +) + + +# ============================= +# | BACKEND PATTERN CONFIGS | +# ============================= + + +def _get_linear_configs() -> List[BackendPatternConfig]: + """ + Return all configs related to linear modules and ops. + """ + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + dtype_configs = [ + qnnpack_weighted_op_qint8_symmetric_dtype_config, + executorch_weighted_op_int8_dtype_config, + executorch_default_dynamic_quint8_dtype_config, + executorch_default_dynamic_qint8_dtype_config, + executorch_default_dynamic_float16_dtype_config, + ] + linear_configs: List[BackendPatternConfig] = [] + # linear module + linear_configs.append( + BackendPatternConfig(torch.nn.Linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear) + .set_qat_module(nnqat.Linear) + ) + # linear qat module + linear_configs.append( + BackendPatternConfig(nnqat.Linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(torch.nn.Linear) + .set_reference_quantized_module(nnqr.Linear) + ) + # functional linear + linear_configs.append( + BackendPatternConfig(torch.nn.functional.linear) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2}) + ) + return linear_configs + + +def _get_conv_configs() -> List[BackendPatternConfig]: + """ + Return all configs related to conv modules and ops. 
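+
+    Note: only the 2d variants are configured here; the loop below iterates over
+    ``_Conv2dMetadata`` only.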
+ """ + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + dtype_configs = [ + qnnpack_weighted_op_qint8_symmetric_dtype_config, + executorch_weighted_op_int8_dtype_config, + ] + conv_configs = [] + for convs in [_Conv2dMetadata]: + # (1) Single conv modules/functions + # ----------------------------------- + # conv module + conv_configs.append( + BackendPatternConfig(convs.root) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + .set_qat_module(convs.qat) + ) + # conv qat module + conv_configs.append( + BackendPatternConfig(convs.qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + ) + # functional conv + conv_configs.append( + BackendPatternConfig(convs.func) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1, "bias": 2}) + ) + + # (2) Conv + relu + # ----------------------------------- + # conv module + relu module + conv_configs.append( + BackendPatternConfig((convs.root, nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu)) + .set_fused_module(convs.fused_conv_relu) + ) + # conv module + functional relu + conv_configs.append( + BackendPatternConfig((convs.root, F.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(_sequential_wrapper2(convs.fused_conv_relu)) + .set_fused_module(convs.fused_conv_relu) + ) + # fused conv relu module + conv_configs.append( + BackendPatternConfig(convs.fused_conv_relu) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + .set_qat_module(convs.relu_qat) + ) + # conv relu, qat fused module + conv_configs.append( + BackendPatternConfig(convs.relu_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + ) + # functional conv + relu module + conv_configs.append( + BackendPatternConfig((convs.func, nn.ReLU)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ) + # functional conv + functional relu + conv_configs.append( + BackendPatternConfig((convs.func, F.relu)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ) + # fused conv relu + conv_configs.append( + BackendPatternConfig(convs.fused_conv_relu) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_qat_module(convs.relu_qat) + ) + + conv_configs.append( + BackendPatternConfig(convs.relu_qat) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + ) + + # (3) Conv + batchnorm (+ relu) + # ------------------------------- + # conv + batchnorm (+ relu) + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_conv_bn) + .set_fused_module(convs.fused_conv_bn) + ) + # conv + bn + relu module fusion + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn, nn.ReLU)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuse_conv_bn_relu) + 
.set_fused_module(convs.fused_conv_bn_relu) + ) + # conv + bn + relu functional fusion + conv_configs.append( + BackendPatternConfig((convs.root, convs.bn, F.relu)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_root_module(convs.root) + .set_fuser_method(fuse_conv_bn_relu) + .set_fused_module(convs.fused_conv_bn_relu) + ) + # TODO: we can add fusion for torch.relu as well + # 3.2 conv + bn (+ relu) fused module configs + # fused conv bn + conv_configs.append( + BackendPatternConfig(convs.fused_conv_bn) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_qat_module(convs.bn_qat) + ) + + # fused conv bn relu + conv_configs.append( + BackendPatternConfig(convs.fused_conv_bn_relu) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_qat_module(convs.bn_relu_qat) + ) + + # conv bn, qat fused module + conv_configs.append( + BackendPatternConfig(convs.bn_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + ) + # conv bn relu, qat fused module + conv_configs.append( + BackendPatternConfig(convs.bn_relu_qat) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(convs.root) + .set_reference_quantized_module(convs.reference) + ) + return conv_configs + + +def _get_binary_ops_configs() -> List[BackendPatternConfig]: + """ + Return all configs related to binary ops. + """ + dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + executorch_weighted_op_int8_dtype_config, + ] + num_tensor_args_to_observation_type_mapping = { + # TODO: this is not used right now since we have extra check in prepare + # will need to change this to NO_OBSERVER later after we implemented + # Tensor dtype inference properly + 0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT, + 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, + } + binary_op_configs: List[BackendPatternConfig] = [] + for op in [operator.add, torch.add, operator.sub, torch.sub, operator.mul, torch.mul]: + bop_patterns = [ + (op, torch.nn.ReLU), + (op, torch.nn.functional.relu), + (op, torch.relu), + op + ] + for bop_pattern in bop_patterns: + binary_op_configs.append( + BackendPatternConfig(bop_pattern) + .set_dtype_configs(dtype_configs) # noqa: E131 + ._set_num_tensor_args_to_observation_type( + num_tensor_args_to_observation_type_mapping + ) + ) + return binary_op_configs + + +def _get_share_qparams_ops_configs() -> List[BackendPatternConfig]: + """ + Return the operator configs for the operators that works for both float and quantized + input if input is quantized, the output Tensor shares the same quantization parameter + with input. 
+ + Example operator: avgpool2d, reshape, transpose, maxpool2d + Example observed operator: + observer_0 - avgpool2d - observer_0 (same observer instance as input) + """ + observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT + dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + executorch_default_op_quint8_dtype_config, + ] + share_qparams_ops = [ + torch.nn.Flatten, + F.adaptive_avg_pool2d, + F.elu, + F.hardtanh, + F.max_pool2d, + F.pad, + F.relu, + F.relu6, + F.leaky_relu, + F.leaky_relu_, + torch.nn.AdaptiveAvgPool2d, + torch.nn.ConstantPad2d, + torch.nn.ELU, + torch.nn.MaxPool2d, + torch.nn.ReLU6, + torch.nn.Hardtanh, + torch.nn.LeakyReLU, + torch.clamp, + torch.flatten, + torch.mean, + torch.permute, + torch.permute_copy, + torch.squeeze, + "clamp", + "mean", + "permute", + "reshape", + "relu", + "relu_", + "squeeze", + "squeeze_", + "leaky_relu", + ] + share_qparams_op_configs: List[BackendPatternConfig] = [] + for op in share_qparams_ops: + share_qparams_op_configs.append( + BackendPatternConfig(op) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ) + return share_qparams_op_configs + + +def _get_bn_configs() -> List[BackendPatternConfig]: + """ + Return all configs related to batchnorm. + """ + observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + executorch_default_op_quint8_dtype_config, + ] + bn_configs = [] + bn_configs.append( + BackendPatternConfig(nn.BatchNorm2d) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + ) + return bn_configs + + +def _get_cat_configs() -> List[BackendPatternConfig]: + dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + executorch_default_op_quint8_dtype_config, + ] + cat_configs = [] + cat_configs.append( + BackendPatternConfig(torch.cat) + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) + .set_dtype_configs(dtype_configs) + ) + cat_configs.append( + BackendPatternConfig(torch.concat) + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) + .set_dtype_configs(dtype_configs) + ) + cat_configs.append( + BackendPatternConfig(torch.concatenate) + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) + .set_dtype_configs(dtype_configs) + ) + return cat_configs + + +def _get_embedding_op_configs() -> List[BackendPatternConfig]: + dtype_configs = [ + executorch_weight_only_quint8_dtype_config, + ] + embedding_op_configs = [] + for embedding_op, qat_embedding_op, ref_embedding_op in [ + (nn.Embedding, nnqat.Embedding, nnqr.Embedding), + (nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag), + ]: + embedding_op_configs.append( + BackendPatternConfig(embedding_op) + .set_observation_type( + ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + ) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_qat_module(qat_embedding_op) + .set_root_module(embedding_op) + .set_reference_quantized_module(ref_embedding_op) + ) + # config for qat op + embedding_op_configs.append( + BackendPatternConfig(qat_embedding_op) + .set_observation_type( + ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + ) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(embedding_op) + .set_reference_quantized_module(ref_embedding_op) + ) + + # config for functional embedding + embedding_op_configs.append( + BackendPatternConfig(torch.nn.functional.embedding) + 
.set_observation_type( + ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + ) # noqa: E131 + .set_dtype_configs(dtype_configs) + ._set_input_type_to_index({"weight": 1}) + ) + return embedding_op_configs + + + +# ===================== +# | BACKEND CONFIGS | +# ===================== + + +def get_executorch_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for backends PyTorch lowers to through the Executorch stack. + """ + return ( + BackendConfig("executorch") + .set_backend_pattern_configs(_get_linear_configs()) + .set_backend_pattern_configs(_get_conv_configs()) + .set_backend_pattern_configs(_get_binary_ops_configs()) + .set_backend_pattern_configs(_get_share_qparams_ops_configs()) + .set_backend_pattern_configs(_get_bn_configs()) + .set_backend_pattern_configs(_get_cat_configs()) + .set_backend_pattern_configs(_get_embedding_op_configs()) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py new file mode 100644 index 0000000000000000000000000000000000000000..74759fa73580c2ab8abe9352887bf11c1f029f62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/fbgemm.py @@ -0,0 +1,116 @@ +import torch +from ._common_operator_config_utils import ( + _get_binary_op_configs, + _get_bn_configs, + _get_cat_config, + _get_conv_configs, + _get_default_op_configs, + _get_embedding_op_configs, + _get_fixed_qparams_op_configs, + _get_linear_configs, + _get_rnn_op_configs, + _get_share_qparams_op_configs, + _get_tensor_info_op_configs, +) +from .backend_config import BackendConfig, DTypeConfig + +__all__ = [ + "get_fbgemm_backend_config", +] + +# =================== +# | DTYPE CONFIGS | +# =================== + +# TODO: For now, these DTypeConfigs are identical to the ones defined in native.py +# In the future, once we support specifying quant_min/quant_max and scale_min/scale_max, +# these will diverge. In particular, for FBGEMM, we will restrict the activation quantized +# values to within [0, 127]. + +fbgemm_weighted_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +fbgemm_default_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + +fbgemm_default_op_fp16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float16, + weight_dtype=torch.float16, + bias_dtype=torch.float16, +) + +fbgemm_default_dynamic_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + is_dynamic=True, +) + +fbgemm_default_dynamic_float16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float, + weight_dtype=torch.float16, + bias_dtype=torch.float, + is_dynamic=True, +) + +fbgemm_weight_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint8, +) + +fbgemm_weight_only_quint4x2_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint4x2, +) + + +# ===================== +# | BACKEND CONFIGS | +# ===================== + +def get_fbgemm_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch's native FBGEMM backend. 
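+
+    Example usage, a minimal sketch (``float_model`` and ``example_inputs`` are
+    assumed to be supplied by the caller)::
+
+        from torch.ao.quantization import get_default_qconfig_mapping
+        from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
+
+        backend_config = get_fbgemm_backend_config()
+        qconfig_mapping = get_default_qconfig_mapping("fbgemm")
+        prepared = prepare_fx(float_model, qconfig_mapping, example_inputs,
+                              backend_config=backend_config)
+        # ... run representative calibration data through `prepared` ...
+        quantized = convert_fx(prepared, backend_config=backend_config)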
+ """ + conv_dtype_configs = [fbgemm_weighted_op_quint8_dtype_config] + linear_dtype_configs = [ + fbgemm_weighted_op_quint8_dtype_config, + fbgemm_default_dynamic_int8_dtype_config, + fbgemm_default_dynamic_float16_dtype_config, + ] + binary_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config] + default_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config] + fixed_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config] + share_qparams_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config] + tensor_info_op_dtype_configs = [fbgemm_default_op_quint8_dtype_config] + rnn_op_dtype_configs = [ + fbgemm_default_dynamic_int8_dtype_config, + fbgemm_default_dynamic_float16_dtype_config, + ] + embedding_op_dtype_configs = [ + fbgemm_weight_only_quint8_dtype_config, + fbgemm_weight_only_quint4x2_dtype_config, + ] + return BackendConfig("fbgemm") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py new file mode 100644 index 0000000000000000000000000000000000000000..81cfc928adb5b127a09691c5841b4cfd1d564800 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py @@ -0,0 +1,204 @@ +import torch +from ._common_operator_config_utils import ( + _get_binary_op_configs, + _get_bn_configs, + _get_cat_config, + _get_conv_configs, + _get_default_op_configs, + _get_embedding_op_configs, + _get_fixed_qparams_op_configs, + _get_linear_configs, + _get_ln_configs, + _get_rnn_op_configs, + _get_share_qparams_op_configs, + _get_tensor_info_op_configs, +) +from .backend_config import BackendConfig, DTypeConfig + +__all__ = [ + "get_test_only_legacy_native_backend_config", + "default_op_quint8_dtype_config", + "default_op_fp16_dtype_config", + "default_dynamic_int8_dtype_config", + "default_dynamic_float16_dtype_config", + "input_output_only_quint8_dtype_config", + "weight_only_quint8_dtype_config", + "weight_only_quint4x2_dtype_config", + "get_native_backend_config", + "get_native_backend_config_dict", + "get_test_only_legacy_native_backend_config_dict", +] + +# =================== +# | DTYPE CONFIGS | +# =================== + +# weighted op int8 dtype config +# this is config for ops that has quantized weights, like linear, conv +weighted_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +default_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + 
+default_op_fp16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float16, + weight_dtype=torch.float16, + bias_dtype=torch.float16, +) + +default_dynamic_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + # currently the dtype check is not yet enabled, so we provided the dtype_configs but + # it is not really used yet, + # we will enable it a bit later after we moved everything to backend_config_dict + is_dynamic=True, +) + +default_dynamic_float16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float, + weight_dtype=torch.float16, + bias_dtype=torch.float, + # currently the dtype check is not yet enabled, so we provided the dtype_configs but + # it is not really used yet, + # we will enable it a bit later after we moved everything to backend_config_dict + is_dynamic=True, +) + +# Needed for LayerNorm and f.layer_norm, since currently the kernel only supports float weights +input_output_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.float, + bias_dtype=torch.float, +) + +weight_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint8, +) + +weight_only_quint4x2_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint4x2, +) + + +# ===================== +# | BACKEND CONFIGS | +# ===================== + +def get_test_only_legacy_native_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops. + """ + conv_dtype_configs = [weighted_op_quint8_dtype_config] + linear_dtype_configs = [ + weighted_op_quint8_dtype_config, + default_dynamic_int8_dtype_config, + default_dynamic_float16_dtype_config, + default_op_fp16_dtype_config, + ] + binary_op_dtype_configs = [ + default_op_quint8_dtype_config, + default_op_fp16_dtype_config, + ] + default_op_dtype_configs = [default_op_quint8_dtype_config] + fixed_qparams_op_dtype_configs = [ + default_op_quint8_dtype_config, + default_op_fp16_dtype_config, + ] + share_qparams_op_dtype_configs = [ + default_op_quint8_dtype_config, + default_op_fp16_dtype_config + ] + tensor_info_op_dtype_configs = [ + default_op_quint8_dtype_config, + ] + rnn_op_dtype_configs = [ + default_dynamic_int8_dtype_config, + default_dynamic_float16_dtype_config, + ] + embedding_op_dtype_configs = [ + weight_only_quint8_dtype_config, + weight_only_quint4x2_dtype_config, + ] + layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config] + return BackendConfig("_native_and_fp16") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + 
.set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) + +def get_native_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack). + """ + # TODO: express this BackendConfig as a union of the FBGEMM and QNNPACK BackendConfigs + conv_dtype_configs = [weighted_op_quint8_dtype_config] + linear_dtype_configs = [ + weighted_op_quint8_dtype_config, + default_dynamic_int8_dtype_config, + default_dynamic_float16_dtype_config, + ] + binary_op_dtype_configs = [default_op_quint8_dtype_config] + default_op_dtype_configs = [default_op_quint8_dtype_config] + fixed_qparams_op_dtype_configs = [default_op_quint8_dtype_config] + share_qparams_op_dtype_configs = [default_op_quint8_dtype_config] + tensor_info_op_dtype_configs = [default_op_quint8_dtype_config] + rnn_op_dtype_configs = [ + default_dynamic_int8_dtype_config, + default_dynamic_float16_dtype_config, + ] + embedding_op_dtype_configs = [ + weight_only_quint8_dtype_config, + weight_only_quint4x2_dtype_config, + ] + layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config] + return BackendConfig("native") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) + +def get_native_backend_config_dict(): + """ + Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) in dictionary form. + """ + return get_native_backend_config().to_dict() + +def get_test_only_legacy_native_backend_config_dict(): + """ + Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional + fp16 ops in dictionary form. 
+ """ + return get_test_only_legacy_native_backend_config().to_dict() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/observation_type.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py new file mode 100644 index 0000000000000000000000000000000000000000..8c14637ae3d3f72b0cb7d84c39d74af3ccb4bfca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/onednn.py @@ -0,0 +1,542 @@ +import torch +import torch.nn as nn +import torch.ao.nn.intrinsic as nni +import torch.nn.functional as F +import torch.ao.nn.quantized.reference as nnqr +from ._common_operator_config_utils import ( + _get_conv_configs, + _get_linear_configs, + _get_binary_op_configs, + _get_bn_configs, + _get_cat_config, + _get_default_op_configs, + _get_embedding_op_configs, + _get_fixed_qparams_op_configs, + _get_ln_configs, + _get_rnn_op_configs, + _get_share_qparams_op_configs, +) +from .backend_config import ( + BackendPatternConfig, + BackendConfig, + DTypeConfig, + ObservationType, +) +from ..fuser_method_mappings import ( + _sequential_wrapper2, +) +import operator +from torch.ao.quantization.utils import MatchAllNode +import itertools + +# =================== +# | DTYPE CONFIGS | +# =================== + +onednn_weighted_op_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +onednn_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + +onednn_dynamic_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + is_dynamic=True, +) + +onednn_weight_only_qint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.qint8, +) + +onednn_input_output_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.float, + bias_dtype=torch.float, +) + +# =================== +# | FUSER METHODS | +# =================== + +def _fuse_linear_bn_leaky_relu(is_qat, linear, bn, leaky_relu): + r"""Given the linear, bn and leaky_relu modules, fuses them and returns the fused module + Args: + is_qat: a flag for whether we are using quantization aware training fusion + or post training quantization fusion + linear: Module instance of type Linear + bn: BatchNorm1d instance that needs to be fused with the linear layer + leaky_relu: LeakyReLU instance that needs to be fused with the linear layer + Examples:: + >>> # xdoctest: +SKIP(failing) + >>> m1 = nn.Linear(20, 10) + >>> b1 = nn.BatchNorm1d(10) + >>> lr = nn.LeakyReLU(0.01) + >>> m2 = _fuse_linear_bn_leaky_relu(m1, b1, lr) + """ + assert(linear.training == bn.training and bn.training == leaky_relu.training),\ + "Linear, BN and LeakyReLU all must be in the same mode (train or eval)." 
+ + if is_qat: + raise NotImplementedError(f"Cannot fuse train modules: {(linear, bn, leaky_relu)}") + else: + map_to_fused_module_eval = { + nn.Linear: nni.LinearLeakyReLU, + } + fused_module = map_to_fused_module_eval.get(type(linear), None) + if fused_module is not None: + fused_linear = nn.utils.fusion.fuse_linear_bn_eval(linear, bn) + fm = fused_module(fused_linear, leaky_relu) + return fm + else: + raise NotImplementedError(f"Cannot fuse eval modules: {(linear, bn, leaky_relu)}") + +# ====================== +# | CONFIGS FOR CONV | +# ====================== +observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT + +conv_dtype_configs = [onednn_weighted_op_int8_dtype_config] +conv_configs = _get_conv_configs(conv_dtype_configs) + +# (1) Conv2d + Add + +# conv2d Y +# \ / +# add + +# include: +# conv2d conv2d +# \ / +# add + +def _fuse_conv_add_left(is_qat, add, conv, _): + return nni.ConvAdd2d(conv, add) + +def _conv_add_root_node_getter_left(pattern): + _, conv, _ = pattern + return conv + +def _conv_add_extra_inputs_getter_left(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + _, conv, extra_input = pattern + return [extra_input] + +# conv2d +# \ +# bn Y +# \ / +# add + +def _fuse_conv_bn_add_left(is_qat, add, bn_conv, _): + bn, conv = bn_conv + if is_qat: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}") + else: + fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn) + return nni.ConvAdd2d(fused_conv, add) + +def _conv_bn_add_root_node_getter_left(add_pattern): + _, bn_conv, _ = add_pattern + bn, conv = bn_conv + return conv + +def _conv_bn_add_extra_inputs_getter_left(add_pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + _, bn_conv, extra_input = add_pattern + bn, conv = bn_conv + return [extra_input] + +conv_add_left_optioins = itertools.product( + [True, False], # with_bn + [torch.add, operator.add], # add_op +) + +for with_bn, add_op in conv_add_left_optioins: + if with_bn: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode)) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_bn_add_left) + ._set_root_node_getter(_conv_bn_add_root_node_getter_left) + ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_left) + .set_fused_module(nni.ConvAdd2d)) + else: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((add_op, nn.Conv2d, MatchAllNode)) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_add_left) + ._set_root_node_getter(_conv_add_root_node_getter_left) + ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_left) + .set_fused_module(nni.ConvAdd2d)) + +# Y conv2d +# \ / +# add + +def _fuse_conv_add_right(is_qat, add, _, conv): + return nni.ConvAdd2d(conv, add) + +def _conv_add_root_node_getter_right(pattern): + add, _, conv = pattern + return conv + +def _conv_add_extra_inputs_getter_right(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + _, extra_input, conv = pattern + return [extra_input] + +# conv2d +# / +# Y bn +# \ / +# add + +def _fuse_conv_bn_add_right(is_qat, add, _, bn_conv): 
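+    # Shape of the matched pattern handled by this fuser and the getters below
+    # (an illustrative sketch): for the complex pattern
+    #     (add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))
+    # the matcher yields a nested tuple of the form
+    #     (add, extra_input, (bn, conv))
+    # so the root node getter returns `conv` and the extra inputs getter
+    # returns `[extra_input]`.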
+ bn, conv = bn_conv + if is_qat: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add)}") + else: + fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn) + return nni.ConvAdd2d(fused_conv, add) + +def _conv_bn_add_root_node_getter_right(pattern): + add, _, bn_conv = pattern + bn, conv = bn_conv + return conv + +def _conv_bn_add_extra_inputs_getter_right(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + _, extra_input, bn_conv = pattern + bn, conv = bn_conv + return [extra_input] + +conv_add_optioins = itertools.product( + [True, False], # with_bn + [torch.add, operator.add], # add_op +) + +for with_bn, add_op in conv_add_optioins: + if with_bn: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_bn_add_right) + ._set_root_node_getter(_conv_bn_add_root_node_getter_right) + ._set_extra_inputs_getter(_conv_bn_add_extra_inputs_getter_right) + .set_fused_module(nni.ConvAdd2d)) + else: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((add_op, MatchAllNode, nn.Conv2d)) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_add_right) + ._set_root_node_getter(_conv_add_root_node_getter_right) + ._set_extra_inputs_getter(_conv_add_extra_inputs_getter_right) + .set_fused_module(nni.ConvAdd2d)) + +conv_configs.append( + BackendPatternConfig(nni.ConvAdd2d) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(conv_dtype_configs) + .set_root_module(nn.Conv2d) + .set_reference_quantized_module(nnqr.Conv2d)) + +# (2) Conv2d + Add + Relu + +# conv2d Y +# \ / +# add +# \ +# relu + +def _fuse_conv_add_relu_left(is_qat, relu, add_pattern): + add, conv, _ = add_pattern + return nni.ConvAddReLU2d(conv, add, relu) + +def _conv_add_relu_root_node_getter_left(pattern): + relu, add_pattern = pattern + _, conv, _ = add_pattern + return conv + +def _conv_add_relu_extra_inputs_getter_left(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + relu, add_pattern = pattern + _, conv, extra_input = add_pattern + return [extra_input] + +# conv2d +# \ +# bn Y +# \ / +# add +# \ +# relu + +def _fuse_conv_bn_add_relu_left(is_qat, relu, add_pattern): + add, bn_conv, _ = add_pattern + bn, conv = bn_conv + if is_qat: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}") + else: + fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn) + return nni.ConvAddReLU2d(fused_conv, add, relu) + +def _conv_bn_add_relu_root_node_getter_left(pattern): + relu, add_pattern = pattern + _, bn_conv, _ = add_pattern + bn, conv = bn_conv + return conv + +def _conv_bn_add_relu_extra_inputs_getter_left(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + relu, add_pattern = pattern + _, bn_conv, extra_input = add_pattern + bn, conv = bn_conv + return [extra_input] + +conv_add_relu_left_optioins = itertools.product( + [True, False], # with_bn + [torch.add, operator.add], # add_op +) + +for with_bn, add_op in conv_add_relu_left_optioins: + if with_bn: + conv_configs.append( + 
BackendPatternConfig() + ._set_pattern_complex_format((nn.ReLU, (add_op, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_bn_add_relu_left) + ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_left) + ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_left) + .set_fused_module(nni.ConvAddReLU2d)) + else: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((nn.ReLU, (add_op, nn.Conv2d, MatchAllNode))) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_add_relu_left) + ._set_root_node_getter(_conv_add_relu_root_node_getter_left) + ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_left) + .set_fused_module(nni.ConvAddReLU2d)) + +# Y conv2d +# \ / +# add +# \ +# relu + +def _fuse_conv_add_relu_right(is_qat, relu, add_pattern): + add, _, conv = add_pattern + return nni.ConvAddReLU2d(conv, add, relu) + +def _conv_add_relu_root_node_getter_right(pattern): + relu, add_pattern = pattern + _, _, conv = add_pattern + return conv + +def _conv_add_relu_extra_inputs_getter_right(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + relu, add_pattern = pattern + _, extra_input, conv = add_pattern + return [extra_input] + +# conv2d +# / +# Y bn +# \ / +# add +# \ +# relu + +def _fuse_conv_bn_add_relu_right(is_qat, relu, add_pattern): + add, _, bn_conv = add_pattern + bn, conv = bn_conv + if is_qat: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, add, relu)}") + else: + fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn) + return nni.ConvAddReLU2d(fused_conv, add, relu) + +def _conv_bn_add_relu_root_node_getter_right(pattern): + relu, add_pattern = pattern + _, _, bn_conv = add_pattern + bn, conv = bn_conv + return conv + +def _conv_bn_add_relu_extra_inputs_getter_right(pattern): + """ get inputs pattern for extra inputs, inputs for root node + are assumed to be copied over from root node to the fused node + """ + relu, add_pattern = pattern + _, extra_input, bn_conv = add_pattern + bn, conv = bn_conv + return [extra_input] + +conv_add_relu_optioins = itertools.product( + [True, False], # with_bn + [torch.add, operator.add], # add_op +) + +for with_bn, add_op in conv_add_relu_optioins: + if with_bn: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d)))) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_bn_add_relu_right) + ._set_root_node_getter(_conv_bn_add_relu_root_node_getter_right) + ._set_extra_inputs_getter(_conv_bn_add_relu_extra_inputs_getter_right) + .set_fused_module(nni.ConvAddReLU2d)) + else: + conv_configs.append( + BackendPatternConfig() + ._set_pattern_complex_format((nn.ReLU, (add_op, MatchAllNode, nn.Conv2d))) # noqa: E131 + .set_observation_type(observation_type) + .set_dtype_configs(conv_dtype_configs) + .set_fuser_method(_fuse_conv_add_relu_right) + ._set_root_node_getter(_conv_add_relu_root_node_getter_right) + ._set_extra_inputs_getter(_conv_add_relu_extra_inputs_getter_right) + .set_fused_module(nni.ConvAddReLU2d)) + +conv_configs.append( + BackendPatternConfig(nni.ConvAddReLU2d) + .set_observation_type(observation_type) # noqa: E131 + 
.set_dtype_configs(conv_dtype_configs) + .set_root_module(nn.Conv2d) + .set_reference_quantized_module(nnqr.Conv2d)) + +# ======================== +# | CONFIGS FOR LINEAR | +# ======================== + +linear_dtype_configs = [ + onednn_weighted_op_int8_dtype_config, + onednn_dynamic_int8_dtype_config, +] +linear_configs = _get_linear_configs(linear_dtype_configs) + +def _add_eltwise_fusion_configs(configs, root_module, root_op, post_module, post_op, + dtype_configs, fuser_method, fused_module, observation_type, + ref_quant_module): + # 1 base module + op module fusion config + configs.append( + BackendPatternConfig((root_module, post_module)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuser_method) + .set_fused_module(fused_module)) + # base module + functional post op + configs.append( + BackendPatternConfig((root_module, post_op)) + .set_dtype_configs(dtype_configs) # noqa: E131 + .set_fuser_method(fuser_method) + .set_fused_module(fused_module)) + + # 2 fused module configs + configs.append( + BackendPatternConfig(fused_module) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs) + .set_root_module(root_module) + .set_reference_quantized_module(ref_quant_module)) + + # 3 functional base op + post op configs + configs.append( + BackendPatternConfig((root_op, post_module)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + configs.append( + BackendPatternConfig((root_op, post_op)) + .set_observation_type(observation_type) # noqa: E131 + .set_dtype_configs(dtype_configs)) + +# Configs for linear + leaky_relu fusion +_add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear, + nn.LeakyReLU, F.leaky_relu, linear_dtype_configs, + _sequential_wrapper2(nni.LinearLeakyReLU), + nni.LinearLeakyReLU, observation_type, nnqr.Linear) + +# Configs for linear module + batchnorm + leaky_relu +linear_configs.append( + BackendPatternConfig((nn.Linear, nn.BatchNorm1d, nn.LeakyReLU)) + .set_dtype_configs(linear_dtype_configs) # noqa: E131 + .set_fuser_method(_fuse_linear_bn_leaky_relu) + .set_fused_module(nni.LinearLeakyReLU)) + +# Configs for linear + tanh fusion +_add_eltwise_fusion_configs(linear_configs, nn.Linear, F.linear, + nn.Tanh, torch.tanh, linear_dtype_configs, + _sequential_wrapper2(nni.LinearTanh), + nni.LinearTanh, observation_type, nnqr.Linear) + +# =========================== +# | CONFIGS FOR OTHER OPS | +# =========================== + +binary_op_dtype_configs = [onednn_op_quint8_dtype_config] +default_op_dtype_configs = [onednn_op_quint8_dtype_config] +fixed_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config] +share_qparams_op_dtype_configs = [onednn_op_quint8_dtype_config] +rnn_op_dtype_configs = [onednn_dynamic_int8_dtype_config] +embedding_op_dtype_configs = [onednn_weight_only_qint8_dtype_config] +layer_norm_op_dtype_configs = [onednn_input_output_only_quint8_dtype_config] + +# ===================== +# | BACKEND CONFIGS | +# ===================== + +def get_onednn_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch's native ONEDNN backend. 
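+
+    Example usage, a minimal sketch (``model`` and ``example_inputs`` are assumed
+    to be defined by the caller)::
+
+        from torch.ao.quantization import get_default_qconfig_mapping
+        from torch.ao.quantization.quantize_fx import prepare_fx
+
+        prepared = prepare_fx(model,
+                              get_default_qconfig_mapping("onednn"),
+                              example_inputs,
+                              backend_config=get_onednn_backend_config())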
+ """ + return BackendConfig("onednn") \ + .set_backend_pattern_configs(conv_configs) \ + .set_backend_pattern_configs(linear_configs) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) + +__all__ = [ + "get_onednn_backend_config", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py new file mode 100644 index 0000000000000000000000000000000000000000..772a25c65574481d70186e9d968039756b2fa0ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py @@ -0,0 +1,160 @@ +import torch +from ._common_operator_config_utils import ( + _get_binary_op_configs, + _get_bn_configs, + _get_cat_config, + _get_conv_configs, + _get_default_op_configs, + _get_embedding_op_configs, + _get_fixed_qparams_op_configs, + _get_linear_configs, + _get_rnn_op_configs, + _get_share_qparams_op_configs, +) +from .backend_config import BackendConfig, DTypeConfig, DTypeWithConstraints + +__all__ = [ + "get_qnnpack_backend_config", +] + +# =================== +# | DTYPE CONFIGS | +# =================== + +qnnpack_weighted_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +qnnpack_default_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + +qnnpack_default_op_fp16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float16, + weight_dtype=torch.float16, + bias_dtype=torch.float16, +) + +qnnpack_default_dynamic_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + is_dynamic=True, +) + +qnnpack_default_dynamic_float16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float, + weight_dtype=torch.float16, + bias_dtype=torch.float, + is_dynamic=True, +) + +qnnpack_weight_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint8, +) + +qnnpack_weight_only_quint4x2_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint4x2, +) + +# xnnpack compatible dtype configs + +# We restrict scale values to be 2 ** -12 to ensure the +# requantization scale never falls below the xnnpack lower +# threshold. Additionally, for qint8 weight, we restrict +# the quantization values to [-127, +127], excluding -128. +# For more detail, refer to the description of +# `default_symmetric_qnnpack_qconfig`. 
+ +# TODO: add additional restriction on qscheme to ensure it +# is either per_tensor_symmetric or per_channel_symmetric + +qnnpack_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints( + dtype=torch.qint8, + scale_min_lower_bound=2 ** -12, +) + +qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints( + dtype=torch.qint8, + quant_min_lower_bound=-127, + quant_max_upper_bound=127, + scale_min_lower_bound=2 ** -12, +) + +qnnpack_weighted_op_qint8_symmetric_dtype_config = DTypeConfig( + input_dtype=qnnpack_act_qint8_scale_min_2_neg_12, + output_dtype=qnnpack_act_qint8_scale_min_2_neg_12, + weight_dtype=qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12, + bias_dtype=torch.float, +) + +qnnpack_default_op_qint8_symmetric_dtype_config = DTypeConfig( + input_dtype=qnnpack_act_qint8_scale_min_2_neg_12, + output_dtype=qnnpack_act_qint8_scale_min_2_neg_12, +) + + +# ===================== +# | BACKEND CONFIGS | +# ===================== + +def get_qnnpack_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch's native QNNPACK backend. + """ + conv_dtype_configs = [ + qnnpack_weighted_op_qint8_symmetric_dtype_config, + qnnpack_weighted_op_quint8_dtype_config, + ] + linear_dtype_configs = [ + qnnpack_weighted_op_qint8_symmetric_dtype_config, + qnnpack_weighted_op_quint8_dtype_config, + qnnpack_default_dynamic_int8_dtype_config, + qnnpack_default_dynamic_float16_dtype_config, + ] + binary_op_dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + qnnpack_default_op_quint8_dtype_config, + ] + default_op_dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + qnnpack_default_op_quint8_dtype_config, + ] + fixed_qparams_op_dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + qnnpack_default_op_quint8_dtype_config, + ] + share_qparams_op_dtype_configs = [ + qnnpack_default_op_qint8_symmetric_dtype_config, + qnnpack_default_op_quint8_dtype_config, + ] + rnn_op_dtype_configs = [ + qnnpack_default_dynamic_int8_dtype_config, + qnnpack_default_dynamic_float16_dtype_config, + ] + embedding_op_dtype_configs = [ + qnnpack_weight_only_quint8_dtype_config, + qnnpack_weight_only_quint4x2_dtype_config, + ] + return BackendConfig("qnnpack") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5f761508bbb9e95392bfe07d494f7fba61303d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py @@ -0,0 +1,81 @@ +import torch +from .backend_config import ( + 
BackendConfig, + BackendPatternConfig, + DTypeConfig, + ObservationType +) +from ._common_operator_config_utils import ( + _get_binary_op_configs, + _get_linear_configs, + _get_conv_configs, + _get_share_qparams_op_configs, + _get_tensor_info_op_configs, +) + +__all__ = [ + "get_tensorrt_backend_config", + "get_tensorrt_backend_config_dict", +] + +def get_tensorrt_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for the TensorRT backend. + NOTE: Current api will change in the future, it's just to unblock experimentation for + new backends, please don't use it right now. + TODO: add a README when it's more stable + """ + # dtype configs + weighted_op_qint8_dtype_config = DTypeConfig( + input_dtype=torch.qint8, + output_dtype=torch.qint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + ) + non_weighted_op_qint8_dtype_config = DTypeConfig( + input_dtype=torch.qint8, + output_dtype=torch.qint8, + ) + + addmm_config = BackendPatternConfig(torch.addmm) \ + .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \ + .add_dtype_config(weighted_op_qint8_dtype_config) \ + ._set_input_type_to_index({ + "bias": 0, + "input": 1, + "weight": 2, + }) + cat_config = BackendPatternConfig(torch.cat) \ + .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \ + .add_dtype_config(non_weighted_op_qint8_dtype_config) + conv_dtype_configs = [ + weighted_op_qint8_dtype_config, + ] + linear_dtype_configs = [ + weighted_op_qint8_dtype_config, + ] + binary_op_dtype_configs = [ + weighted_op_qint8_dtype_config, + ] + share_qparams_op_dtype_configs = [ + non_weighted_op_qint8_dtype_config, + ] + tensor_info_op_dtype_configs = [ + non_weighted_op_qint8_dtype_config, + ] + # there might be things not supported in fx2trt, but it will error out + # during fx2trt conversion and can support them after that + return BackendConfig("tensorrt") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_config(addmm_config) \ + .set_backend_pattern_config(cat_config) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) + +def get_tensorrt_backend_config_dict(): + """ + Return the `BackendConfig` for the TensorRT backend in dictionary form. 
+ """ + return get_tensorrt_backend_config().to_dict() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2e738227407907ef942786937eb082f41d9e02ef --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py @@ -0,0 +1,279 @@ +from typing import Dict, Any, List, Callable, Union, Tuple, Type + +import torch +import torch.nn as nn +import torch.nn.functional as F +from .backend_config import ( + BackendConfig, + BackendPatternConfig, + DTypeConfig, +) +from ..utils import Pattern +from ..fuser_method_mappings import ( + _reverse2, + _reverse3, +) + +__all__ = [ + "get_pattern_to_dtype_configs", + "get_qat_module_classes", + "get_fused_module_classes", + "get_pattern_to_input_type_to_index", + "get_root_module_to_quantized_reference_module", + "get_fuser_method_mapping", + "get_module_to_qat_module", + "get_fusion_pattern_to_root_node_getter", + "get_fusion_pattern_to_extra_inputs_getter", + "remove_boolean_dispatch_from_name", + "pattern_to_human_readable", + "entry_to_pretty_str", +] + +def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]: + pattern_to_dtype_configs: Dict[Pattern, List[DTypeConfig]] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + pattern_to_dtype_configs[pattern] = config.dtype_configs + return pattern_to_dtype_configs + +def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]: + qat_module_classes = [] + for config in backend_config.configs: + if config.qat_module is not None: + qat_module_classes.append(config.qat_module) + return tuple(set(qat_module_classes)) + +def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]: + fused_module_classes = [] + for config in backend_config.configs: + if config.fused_module is not None: + fused_module_classes.append(config.fused_module) + return tuple(set(fused_module_classes)) + +def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]: + pattern_to_input_type_to_index: Dict[Pattern, Dict[str, int]] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + pattern_to_input_type_to_index[pattern] = config._input_type_to_index + return pattern_to_input_type_to_index + +def get_root_module_to_quantized_reference_module( + backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]: + mapping: Dict[Type[torch.nn.Module], Type[torch.nn.Module]] = {} + for config in backend_config.configs: + if config.root_module is not None and config.reference_quantized_module is not None: + mapping[config.root_module] = config.reference_quantized_module + return mapping + +def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]: + fuser_method_mapping : Dict[Pattern, Union[nn.Sequential, Callable]] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + if config.fuser_method is not None: + # Note: both the fuser method and the pattern are specified in forward order in the + # BackendConfig, but the internal pattern matching code uses the reversed nested tuple + # format, so we need to convert both to the internal format + fuser_method = 
_get_fuser_method_in_reversed_nested_tuple_format(config) + fuser_method_mapping[pattern] = fuser_method + return fuser_method_mapping + +def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]: + module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + if config.qat_module is not None: + module_to_qat_module[pattern] = config.qat_module + return module_to_qat_module + +def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]: + """ Get a map from fusion pattern to a function that returns the root node + from the fusion pattern, e.g. the most common one is: + def get_root_node(node_pattern): + while not isinstance(node_pattern[-1], Node): + node_pattern = node_pattern[-1] + return node_pattern[-1] + This can work for all patterns whose root node is the "last node" in the pattern, + e.g. (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d)) + """ + root_node_getter_mapping: Dict[Pattern, Callable] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + if config._root_node_getter is not None: + root_node_getter_mapping[pattern] = config._root_node_getter + return root_node_getter_mapping + +def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]: + """ Get a map from fusion pattern to a function that returns extra input nodes + from the fusion pattern, in the order required by the root node. This is optional, + if not specified, we will not copy over any extra inputs for the root node. + Example: + # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d)) + # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra + # argument to the fused module, we can unpack the pattern and return the node at + # MatchAllNode here + # we can implement extra_inputs_getter as follows: + def extra_inputs_getter(pattern) -> List[Any]: + add, extra_input, conv_pattern = pattern + return [extra_input] + """ + extra_inputs_getter_mapping: Dict[Pattern, Callable] = {} + for pattern, config in backend_config._pattern_complex_format_to_config.items(): + if config._extra_inputs_getter is not None: + extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter + return extra_inputs_getter_mapping + +def remove_boolean_dispatch_from_name(p) -> Any: + """ + Some ops have a default string representation such as + '.fn at 0x7ff1106bf280>', + this function replaces them with the hardcoded function names. 
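+
+    Example::
+
+        >>> remove_boolean_dispatch_from_name(F.max_pool2d)
+        'torch.nn.functional.max_pool2d'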
+ """ + if p is F.fractional_max_pool2d: + return "torch.nn.functional.fractional_max_pool2d" + elif p is F.fractional_max_pool3d: + return "torch.nn.functional.fractional_max_pool3d" + elif p is F.max_pool1d: + return "torch.nn.functional.max_pool1d" + elif p is F.max_pool2d: + return "torch.nn.functional.max_pool2d" + elif p is F.max_pool3d: + return "torch.nn.functional.max_pool3d" + elif p is F.adaptive_max_pool1d: + return "torch.nn.functional.adaptive_max_pool1d" + elif p is F.adaptive_max_pool2d: + return "torch.nn.functional.adaptive_max_pool2d" + elif p is F.adaptive_max_pool3d: + return "torch.nn.functional.adaptive_max_pool3d" + assert "boolean_dispatch" not in str(p), \ + f"{p} does not have a human readable representation in " + \ + "quantization documentation" + return p + +def pattern_to_human_readable(p) -> Any: + if isinstance(p, tuple): + # nested patterns, recurse + return tuple(pattern_to_human_readable(inner_p) for inner_p in p) + elif isinstance(p, str): + # method names are already human readable + return p + else: + p = remove_boolean_dispatch_from_name(p) + return p + +# TODO(future PR): move backend_config_dict to use dataclass and move this logic to +# the corresponding __str__ function +def entry_to_pretty_str(entry) -> str: + """ + Given a backend_config_dict entry, returns a string with the human readable + representation of it. + """ + s = "{\n" + + # always output the pattern first + if "pattern" in entry: + pattern_str = pattern_to_human_readable(entry["pattern"]) + + s += f" 'pattern': {pattern_str},\n" + + # custom output for dtype_configs to make it look nice + if "dtype_configs" in entry: + s += " 'dtype_configs': [\n" + for dtype_config in entry["dtype_configs"]: + s += " {\n" + for k, v in dtype_config.items(): + s += f" '{k}': {v},\n" + s += " },\n" + s += " ],\n" + + # custom output for num_tensor_args_to_observation_type to make it look nice + if "num_tensor_args_to_observation_type" in entry: + s += " 'num_tensor_args_to_observation_type': {\n" + for k, v in entry["num_tensor_args_to_observation_type"].items(): + s += f" {k}: {v},\n" + s += " },\n" + + # output all the other fields + custom_handled_fields = [ + "pattern", + "dtype_configs", + "num_tensor_args_to_observation_type", + ] + for field_name in entry: + if field_name in custom_handled_fields: + continue + s += f" '{field_name}': {entry[field_name]},\n" + + s += "}" + return s + +def _get_pattern_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Pattern: + """ + Return the pattern specified in the given config in the reversed nested tuple format + used internally in the quantization pattern matching code. + + If the pattern is not a tuple, or the pattern is already specified in the reversed + nested tuple format, return the pattern as is. Otherwise: + + For 2-tuples (a, b), return (b, a). + For 3-tuples (a, b, c), return (c, (b, a)). + + For example: + * Given nn.Linear, return nn.Linear + * Given (nn.Linear, nn.ReLU), return (nn.ReLU, nn.Linear) + * Given (nn.Conv2d, nn.BatchNorm2d, nn.ReLU), return + (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)) + + For context, the reason why this is needed is the user-facing BackendConfig + API accepts the flat 2-or-3-tuple format in forward order. 
While this simple + format handles the vast majority of use cases, it does not handle the more + complex ones, and so the internal pattern matching code for quantization uses + the following, more general reversed nested tuple format instead: + + operator = module_type | functional | torch op | native op | MatchAllNode + Pattern = (operator, Pattern, Pattern, ...) | operator + + In the future, we expect to replace the above complex format with the one used + by the subgraph rewriter in torch.fx, so we don't have to maintain our own + complex pattern matching code. Then we won't need this helper function anymore. + """ + if config._pattern_complex_format is not None: + return config._pattern_complex_format + if config.pattern is None: + raise ValueError("Either 'pattern' or 'pattern_complex_format' must be specified") + if not isinstance(config.pattern, tuple): + return config.pattern + + # Pattern is specified in the simple tuple format, need to convert + if len(config.pattern) == 2: + (a, b) = config.pattern + return (b, a) + elif len(config.pattern) == 3: + (a, b, c) = config.pattern + return (c, (b, a)) + else: + raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern) + +def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable: + """ + Return the fuser method specified in the given config in the reversed nested + tuple format used internally in the quantization pattern matching code. + + If pattern is specified in the reversed nested tuple format, we assume the + fuser method is also specified in this format and simply return it as is. + Otherwise, we convert the fuser method as follows: + + * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv) + * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv), + where bn_conv is a 2-tuple (bn, conv) + + The first argument of a fuser method is always `is_qat` and is not affected + in the conversion. We currently only support functions with 3 or 4 arguments. 
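+
+    Illustrative sketch (a hypothetical fuser method, shown only to make the
+    argument reversal concrete)::
+
+        >>> def fuse(is_qat, conv, relu):
+        ...     return (conv, relu)
+        >>> config = BackendPatternConfig((nn.Conv2d, nn.ReLU)).set_fuser_method(fuse)
+        >>> reversed_fuse = _get_fuser_method_in_reversed_nested_tuple_format(config)
+        >>> reversed_fuse(False, "relu", "conv")
+        ('conv', 'relu')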
+ """ + assert config.fuser_method is not None + if config._pattern_complex_format is not None: + return config.fuser_method + if not isinstance(config.pattern, tuple): + raise ValueError("Expected pattern to be a tuple, got: ", config.pattern) + + # Pattern is specified in the simple tuple format, need to convert + if len(config.pattern) == 2: + return _reverse2(config.fuser_method) + elif len(config.pattern) == 3: + return _reverse3(config.fuser_method) + else: + raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py new file mode 100644 index 0000000000000000000000000000000000000000..b4f165958f2791d3e6e2f63eceecdcd9e6f6d50c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py @@ -0,0 +1,113 @@ +import torch +from ._common_operator_config_utils import ( + _get_binary_op_configs, + _get_bn_configs, + _get_cat_config, + _get_conv_configs, + _get_default_op_configs, + _get_embedding_op_configs, + _get_fixed_qparams_op_configs, + _get_linear_configs, + _get_rnn_op_configs, + _get_share_qparams_op_configs, + _get_tensor_info_op_configs, +) +from .backend_config import BackendConfig, DTypeConfig + +__all__ = [ + "get_x86_backend_config", +] + +# =================== +# | DTYPE CONFIGS | +# =================== + +# X86 aligns with FBGEMM for now + +x86_weighted_op_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, + weight_dtype=torch.qint8, + bias_dtype=torch.float, +) + +x86_default_op_quint8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.quint8, +) + +x86_default_op_fp16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float16, + weight_dtype=torch.float16, + bias_dtype=torch.float16, +) + +x86_default_dynamic_int8_dtype_config = DTypeConfig( + input_dtype=torch.quint8, + output_dtype=torch.float, + weight_dtype=torch.qint8, + bias_dtype=torch.float, + is_dynamic=True, +) + +x86_default_dynamic_float16_dtype_config = DTypeConfig( + input_dtype=torch.float16, + output_dtype=torch.float, + weight_dtype=torch.float16, + bias_dtype=torch.float, + is_dynamic=True, +) + +x86_weight_only_quint8_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint8, +) + +x86_weight_only_quint4x2_dtype_config = DTypeConfig( + input_dtype=torch.float, + output_dtype=torch.float, + weight_dtype=torch.quint4x2, +) + + +# ===================== +# | BACKEND CONFIGS | +# ===================== + +def get_x86_backend_config() -> BackendConfig: + """ + Return the `BackendConfig` for PyTorch's native x86 backend. 
+ """ + conv_dtype_configs = [x86_weighted_op_int8_dtype_config] + linear_dtype_configs = [ + x86_weighted_op_int8_dtype_config, + x86_default_dynamic_int8_dtype_config, + x86_default_dynamic_float16_dtype_config, + ] + binary_op_dtype_configs = [x86_weighted_op_int8_dtype_config] + default_op_dtype_configs = [x86_default_op_quint8_dtype_config] + fixed_qparams_op_dtype_configs = [x86_weighted_op_int8_dtype_config] + share_qparams_op_dtype_configs = [x86_default_op_quint8_dtype_config] + tensor_info_op_dtype_configs = [x86_default_op_quint8_dtype_config] + rnn_op_dtype_configs = [ + x86_default_dynamic_int8_dtype_config, + x86_default_dynamic_float16_dtype_config, + ] + embedding_op_dtype_configs = [ + x86_weight_only_quint8_dtype_config, + x86_weight_only_quint4x2_dtype_config, + ] + return BackendConfig("x86") \ + .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \ + .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \ + .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \ + .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \ + .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..817c4d63082dcf7b2b7b63808d59ac2c36d8d5ec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a7049d45d91034c556905992f38baa7da852cfa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/eval_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/eval_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c53b5f0ba244936abd7b45d7a69bb1a62c82b502 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/eval_utils.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..272e00e34983e6113f5a05c7e62836a424bf5e92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..769bf2ad914f60f6976b183ba0e73c9c67c2dff5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..943b4b9d1e69dc0bf4cd2bb2868d6aafe43005bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e31d02feb41f5233aaa1700a0be32463b57a9ebc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/qat_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/qat_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce09626ab37f465b8f788eae3980b1b58aeb373f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/qat_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4698c0397422188de4873f2e11bf50b0a4b5cc7d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/duplicate_dq_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/duplicate_dq_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..b6931a14088e10b5e7c04699df50af2f03171278 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/duplicate_dq_pass.py @@ -0,0 +1,59 @@ +import logging + +import torch +from torch._export.pass_base import _ExportPassBase + +from torch.ao.quantization.pt2e.utils import ( + _filter_sym_size_users, + _is_valid_annotation, +) 
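Editorial aside (not part of the diff): `DuplicateDQPass`, defined just below, gives every eligible consumer of a shared dequantize node its own copy so later subgraph rewrites stay self-contained; it is run as part of the PT2E convert flow. A minimal application sketch, where `gm` stands in for a captured GraphModule that already contains quantize/dequantize ops and quantization annotations:

result = DuplicateDQPass().call(gm)   # `call` is defined below and returns a torch.fx PassResult
gm = result.graph_module              # dequantize nodes with more than one valid user are now duplicated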
+ +from torch.fx.node import map_arg +from torch.fx.passes.infra.pass_base import PassResult + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.WARNING) + +__all__ = ["DuplicateDQPass"] + +_DEQUANTIZE_OPS = [ + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, + torch.ops.quantized_decomposed.dequantize_per_channel.default, +] + + +def _maybe_duplicate_dq( + gm: torch.fx.GraphModule, dq_node: torch.fx.Node, user: torch.fx.Node +): + annotation = user.meta.get("quantization_annotation", None) + if not _is_valid_annotation(annotation): + return + with gm.graph.inserting_after(dq_node): + new_node = gm.graph.node_copy(dq_node) + + def maybe_replace_node(n: torch.fx.Node) -> torch.fx.Node: + if n == dq_node: + return new_node + else: + return n + + new_args = map_arg(user.args, maybe_replace_node) + new_kwargs = map_arg(user.kwargs, maybe_replace_node) + user.args = new_args + user.kwargs = new_kwargs + + +class DuplicateDQPass(_ExportPassBase): + def call(self, graph_module: torch.fx.GraphModule) -> PassResult: + for node in graph_module.graph.nodes: + if node.op == "call_function" and node.target in _DEQUANTIZE_OPS: + dq_users = _filter_sym_size_users(node) + if len(dq_users) <= 1: + continue + for user in dq_users: + _maybe_duplicate_dq(graph_module, node, user) + graph_module.graph.eliminate_dead_code() + graph_module.recompile() + return PassResult(graph_module, True) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/eval_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7699e61ed6d5a0bef6e0f6becd6bb0519c19f50d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/eval_utils.py @@ -0,0 +1,112 @@ +import torch +import torch.nn.functional as F + + +def _replace_dropout_for_eval(m: torch.fx.GraphModule): + """ + Replace the aten training dropout pattern with a noop, intended for eval. + + For models with dropout torch ops (nn.Dropout, F.dropout), calling model.eval() + effectively turns these dropout ops into noops. For exported models, however, + this is not done automatically, since the aten dropout patterns previously generated + for training remain in the graph. Here we rewrite these dropout patterns with noops + to avoid incorrectly applying further dropout during eval. + + See https://github.com/pytorch/pytorch/issues/103681. + """ + # Avoid circular dependencies + from .utils import get_aten_graph_module + + # Needed to ensure subgraph matches are self-contained + m.graph.eliminate_dead_code() + m.recompile() + + for inplace in [False, True]: + + def dropout_train(x): + return F.dropout(x, p=0.5, training=True, inplace=inplace) + + def dropout_eval(x): + return F.dropout(x, p=0.5, training=False, inplace=inplace) + + example_inputs = (torch.randn(1),) + match_pattern = get_aten_graph_module(dropout_train, example_inputs) + replacement_pattern = get_aten_graph_module(dropout_eval, example_inputs) + + from torch.fx.subgraph_rewriter import replace_pattern_with_filters + + replace_pattern_with_filters( + m, + match_pattern, + replacement_pattern, + match_filters=[], + ignore_literals=True, + ) + m.recompile() + + +def _replace_batchnorm_for_eval(m: torch.fx.GraphModule): + # TODO(Leslie): This function still fails to support custom momentum and eps value. + # Enable this support in future updates. 
+ + # Avoid circular dependencies + from .utils import get_aten_graph_module + + # Needed to ensure subgraph matches are self-contained + m.graph.eliminate_dead_code() + m.recompile() + + def bn_train( + x: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + ): + return F.batch_norm( + x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True + ) + + def bn_eval( + x: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + ): + return F.batch_norm( + x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=False + ) + + example_inputs = ( + torch.randn(1, 1, 3, 3), # x + torch.randn(1), # bn_weight + torch.randn(1), # bn_bias + torch.randn(1), # bn_running_mean + torch.randn(1), # bn_running_var + ) + match_pattern = get_aten_graph_module(bn_train, example_inputs) + replacement_pattern = get_aten_graph_module(bn_eval, example_inputs) + from torch.fx.subgraph_rewriter import replace_pattern_with_filters + + replace_pattern_with_filters( + m, + match_pattern, + replacement_pattern, + match_filters=[], + ignore_literals=True, + ) + m.recompile() + + +# TODO: also support move_exported_model_to_train +def _move_exported_model_to_eval(model: torch.fx.GraphModule): + """ + Move an exported GraphModule to eval mode. + + This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm. + QAT users should call this before performing inference on the model. + """ + _replace_dropout_for_eval(model) + _replace_batchnorm_for_eval(model) + return model diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/generate_numeric_debug_handle.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/generate_numeric_debug_handle.py new file mode 100644 index 0000000000000000000000000000000000000000..a6ca1f71b7d20c3d230f0ccd20924d2c3ef02d7a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/generate_numeric_debug_handle.py @@ -0,0 +1,17 @@ +from torch.fx import GraphModule, Node + +__all__ = ["generate_numeric_debug_handle"] + + +def generate_numeric_debug_handle(graph_module: GraphModule) -> None: + unique_id = 0 + for node in graph_module.graph.nodes: + if node.op == "call_function": + node.meta["numeric_debug_handle"] = {} + for arg in node.args: + if isinstance(arg, Node): + node.meta["numeric_debug_handle"][arg] = unique_id + unique_id += 1 + + node.meta["numeric_debug_handle"]["output"] = unique_id + unique_id += 1 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2390bafed286814a6e86c640a2862ebfcfa69278 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py @@ -0,0 +1,109 @@ +import itertools +from typing import Any, List, OrderedDict, Set, Optional, Callable +import operator +from torch.fx import Node + +import torch + +from torch.fx.passes.utils.source_matcher_utils import ( + check_subgraphs_connected, + get_source_partitions, + SourcePartition, +) + +__all__ = [ + "find_sequential_partitions", + "get_equivalent_types", + "update_equivalent_types_dict", +] + +_EQUIVALENT_TYPES: List[Set] = [ + {torch.nn.Conv1d, torch.nn.functional.conv1d}, + {torch.nn.Conv2d, 
torch.nn.functional.conv2d}, + {torch.nn.AdaptiveAvgPool2d, torch.nn.functional.adaptive_avg_pool2d}, + {torch.nn.ReLU, torch.nn.functional.relu, torch.nn.functional.relu_}, + {torch.nn.BatchNorm2d, torch.nn.functional.batch_norm}, + {torch.nn.Hardtanh, torch.nn.functional.hardtanh, torch.nn.functional.hardtanh_}, + {torch.add, operator.add, operator.iadd, "add", "add_"}, + {torch.mul, operator.mul, operator.imul, "mul", "mul_"}, +] + + +def _create_equivalent_types_dict(): + _DICT = {} + for values in _EQUIVALENT_TYPES: + for v in values: + _DICT[v] = list(values) + return _DICT + + +_EQUIVALENT_TYPES_DICT = _create_equivalent_types_dict() + +def get_equivalent_types() -> List[Set]: + return _EQUIVALENT_TYPES + +def update_equivalent_types_dict(customized_equivalent_types=None): + """Help function for user who wants to customize the _EQUIVALENT_TYPES and _EQUIVALENT_TYPES_DICT. + When customized_equivalent_types passes in, + re-generate _EQUIVALENT_TYPES and _EQUIVALENT_TYPES_DICT. + """ + if customized_equivalent_types is None: + raise ValueError("customized_equivalent_types should not be None") + global _EQUIVALENT_TYPES + global _EQUIVALENT_TYPES_DICT + _EQUIVALENT_TYPES = customized_equivalent_types + _EQUIVALENT_TYPES_DICT = _create_equivalent_types_dict() + +def _partitions_sequential(partitions: List[SourcePartition]): + prev_partition = None + for partition in partitions: + if prev_partition is not None and not check_subgraphs_connected( + prev_partition, partition + ): + return False + prev_partition = partition + return True + + +def _get_matching_types(partition_type): + matching_types = [partition_type] + if partition_type in _EQUIVALENT_TYPES_DICT: + matching_types.extend(_EQUIVALENT_TYPES_DICT[partition_type]) + return matching_types + + +def _valid_type_sequence(partition_types: List[Any]): + partition_types_set = set() # type: ignore[var-annotated] + for partition_type in partition_types: + matching_types = _get_matching_types(partition_type) + matching_types_set = set(matching_types) + if len(partition_types_set & matching_types_set) > 0: + return False + partition_types_set |= matching_types_set + return True + + +def find_sequential_partitions( + gm: torch.fx.GraphModule, + partition_types: List[Any], + include_functional_equivalent=True, + filter_fn: Optional[Callable[[Node], bool]] = None, +): + if not _valid_type_sequence(partition_types): + raise ValueError( + f"Invalid partition types: {partition_types}. 
Each type in the sequence must be unique" + ) + + typed_partitions: OrderedDict[Any, List[SourcePartition]] = OrderedDict() + for partition_type in partition_types: + types_to_match = _get_matching_types(partition_type) + partitions = get_source_partitions(gm.graph, types_to_match, filter_fn) + typed_partitions[partition_type] = list(itertools.chain(*partitions.values())) + + typed_partitions_list = list(typed_partitions.values()) + fusion_candidates = itertools.product(*typed_partitions_list) + fused_partitions = [] + for candidate in fusion_candidates: + if _partitions_sequential(candidate): # type: ignore[arg-type] + fused_partitions.append(candidate) + return fused_partitions diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/port_metadata_pass.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/port_metadata_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6b0a50e16b18d6f6256698d2ee8e6aed32fe7e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/port_metadata_pass.py @@ -0,0 +1,199 @@ +import logging +from typing import Optional + +import torch +from torch._export.error import InternalError +from torch._export.pass_base import _ExportPassBase + +from torch.ao.quantization.pt2e.utils import ( + _filter_sym_size_users, + _find_q_dq_node_for_user, + _is_valid_annotation, +) + +from torch.ao.quantization.quantizer import QuantizationSpecBase + +from torch.fx.passes.infra.pass_base import PassResult + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.ERROR) + +__all__ = ["PortNodeMetaForQDQ"] + +_METADATA_TO_PORT = [ + "stack_trace", + "quantization_tag", +] + +_QUANTIZE_OPS = [ + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor, + torch.ops.quantized_decomposed.quantize_per_channel.default, +] + +_DEQUANTIZE_OPS = [ + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, + torch.ops.quantized_decomposed.dequantize_per_channel.default, +] + + +def _add_metadata(to_node: torch.fx.Node, from_node: torch.fx.Node) -> None: + from_meta = from_node.meta + for meta_name in _METADATA_TO_PORT: + if meta_name in from_meta: + to_node.meta[meta_name] = from_meta[meta_name] + + +def _has_quant_annotation(node: torch.fx.Node) -> bool: + return "quantization_annotation" in node.meta + + +def _find_choose_qparams_node(node: torch.fx.Node) -> Optional[torch.fx.Node]: + # BFS to look for choose qparams + from collections import deque + + queue = deque(list(node.users.keys())) + while len(queue): + n = queue.popleft() + if n.op == "output": + continue + if ( + n.op == "call_function" + and n.target == torch.ops.quantized_decomposed.choose_qparams.tensor + ): + return n + for k in n.users.keys(): + queue.append(k) + return None + + +def _port_metadata_for_input_quant_nodes( + input_node: torch.fx.Node, + node: torch.fx.Node, + qspec: Optional[QuantizationSpecBase], +): + if qspec is None: + return + + is_dynamic_quant = getattr(qspec, "is_dynamic", None) + if is_dynamic_quant is not None and is_dynamic_quant is True: + choose_qparams_node = _find_choose_qparams_node(input_node) + if choose_qparams_node is None: + raise ValueError(f"No chose qparams node found for {node}") + choose_qparam_users = _filter_sym_size_users(choose_qparams_node) + if len(choose_qparam_users) != 2: + raise InternalError(f"Expecting exactly two user for 
{choose_qparams_node}") + scale_node = choose_qparam_users.pop() + dynamic_q_node = next(iter(scale_node.users.keys())) + dynamic_q_node_users = _filter_sym_size_users(dynamic_q_node) + if len(dynamic_q_node_users) > 1: + raise InternalError(f"Expecting single user for {dynamic_q_node}") + dynamic_dq_node = dynamic_q_node_users.pop() + _add_metadata(choose_qparams_node, node) + _add_metadata(dynamic_q_node, node) + _add_metadata(dynamic_dq_node, node) + else: + q_node, dq_node = _find_q_dq_node_for_user(input_node, node) + if q_node is None or dq_node is None: + return + # add metadata for all the node between q_node and get_attr node + # if the q_node can be traced back to get_attr node + q_to_get_attr_nodes = [q_node] + q_node_input = q_node.args[0] + while isinstance(q_node_input, torch.fx.Node) and q_node_input.op not in [ + "placeholder", + "get_attr", + ]: + q_to_get_attr_nodes.append(q_node_input) + q_node_input = q_node_input.args[0] + if isinstance(q_node_input, torch.fx.Node) and q_node_input.op == "get_attr": + for n in q_to_get_attr_nodes: + _add_metadata(n, q_node_input) + _add_metadata(dq_node, node) + + +def _port_metadata_for_output_quant_nodes( + node: torch.fx.Node, qspec: Optional[QuantizationSpecBase] +): + if qspec is None: + return + + node_users = _filter_sym_size_users(node) + if len(node_users) != 1: + raise InternalError(f"Expecting {node} to have single user") + q_node = node_users.pop() + if q_node.op != "call_function" or q_node.target not in _QUANTIZE_OPS: + logger.warning( + f"Expecting {node} user to be a quantized op but got {q_node}" # noqa: G004 + ) # noqa: G004 + return + + _add_metadata(q_node, node) + + +class PortNodeMetaForQDQ(_ExportPassBase): + """ + Port metadata for nodes added by quantization flow. + For static quant these are: + - quantizer_per_tensor.default, dequantize_per_tensor.default + - quantizer_per_channel.default, dequantize_per_channel.default + For dynamic quant these are: + - choose_qparams.tensor + - quantizer_per_tensor.tensor, dequantize_per_tensor.tensor + - quantizer_per_channel.default, dequantize_per_channel.default + + Rules of porting metadata: + - Metadata to be ported: + - nn_module_stack + - stack_trace + - quantization_tag + - Metadata to NOT be ported: + - Everything else + - Rules: + - Statically quantized patterns: + - Dequantize nodes on the inputs to be quantized inherit metadata of the consumer node. + - Quantize nodes on the outputs inherit metadata of the producer node. + - Example 1: + - Original: [Conv -> AvgPool -> Linear] + - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ] + - Inner brackets specify which nodes Q/DQ inherit metdata from + - [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> [DQ -> Linear -> Q] -> DQ] + - Note first Q and last DQ do not inherit metadata from any nodes + - Example 2: + - Original: [Conv -> AvgPool -> Linear] + - AvgPool is not quantized + - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ] + - Inner brackets specify which nodes Q/DQ inherit metdata from + - [Q-> [DQ -> Conv -> Q] -> DQ -> [AvgPool] -> Q -> [DQ -> Linear -> Q] -> DQ] + - Note DQ and Q nodes around AvgPool do not inherit metadata from AvgPool because + AvgPool was not supposed to be quantized. Metadata porting relies on quantization_annotation + on the nodes (in this case AvgPool node) to conclude if the node or patter was + supposed to be quantized. And subsequntly decide if the preceding Q, if any, should + inherit metadata from AvgPool. 
+ - Dynamically quantized patterns: + - Input that are dynamically quantized have choose_qparams, quantize and dequantize nodes + - For example, below linear is dynamically quantized while rest statically: + - Original: [Conv -> AvgPool -> Linear] + - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> choose_params -> Q -> DQ -> Linear] + - Quantized [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> DQ -> [choose_params -> Q -> DQ -> Linear]] + - Note first Q does not inherit metadata from any nodes + NB: + - The best place for porting metadata is during observer conversion to q/dq. This is because it precisely + knows which quantization spec is converted to q/dq and thus from where the metadata should be ported. + However, since FX and PT2E quant workflow are on a common code-base, this hurts readability quite a bit. + Doing it via a separate pass, helps readability of the code. Once we are able to refactor PT2E quant + code, this pass should like to be integrated in the refactored variant of "convert" step. + """ + + def call(self, graph_module: torch.fx.GraphModule) -> PassResult: + for node in graph_module.graph.nodes: + annotation = node.meta.get("quantization_annotation", None) + if _is_valid_annotation(annotation): + input_qspec_map = node.meta["quantization_annotation"].input_qspec_map + output_qspec = node.meta["quantization_annotation"].output_qspec + for input_node, qspec in input_qspec_map.items(): + _port_metadata_for_input_quant_nodes(input_node, node, qspec) + _port_metadata_for_output_quant_nodes(node, output_qspec) + return PassResult(graph_module, True) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/prepare.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/prepare.py new file mode 100644 index 0000000000000000000000000000000000000000..ed7816d93c6b7bc78f0dc436b89bd348c80b60b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/prepare.py @@ -0,0 +1,476 @@ +import torch +from torch._subclasses import FakeTensor +from torch.ao.quantization.fx.prepare import ( + _insert_obs_or_fq, + _save_state, + _is_activation_post_process_node, + _create_obs_or_fq_from_qspec, +) +from torch.fx import ( + GraphModule, + Graph, + Node, +) +from torch.fx.node import Argument + +from torch.ao.quantization import QConfigMapping +from torch.ao.quantization.qconfig import QConfigAny +from torch.ao.quantization.fx.custom_config import PrepareCustomConfig +from typing import Dict, Tuple, Union, Any, Optional +from torch.ao.quantization.quantizer import ( + EdgeOrNode, + SharedQuantizationSpec, + QuantizationSpecBase, +) +from torch.ao.quantization import ObserverOrFakeQuantize + +# TODO: make pt2e folder private? 
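Editorial aside (not part of the diff): the helpers that follow (`_find_root_edge_or_node`, `_union`, `_get_edge_or_node_to_group_id`) implement a small union-find over edges and nodes so that everything that must share an observer lands in one group. A self-contained sketch of that grouping idea, using strings and tuples as stand-ins for real `EdgeOrNode` values:

def find_root(k, parent):
    # follow parents to the root, compressing the path as we go
    if parent[k] == k:
        return k
    parent[k] = find_root(parent[k], parent)
    return parent[k]

def union(parent_key, child_key, parent):
    # attach the child's root under the parent's root
    parent[find_root(child_key, parent)] = find_root(parent_key, parent)

keys = ["op1", "op2", ("op1", "cat"), ("op2", "cat"), "cat"]
parent = {k: k for k in keys}
union("op1", ("op1", "cat"), parent)    # implicit sharing: op1's output and its use are the same tensor
union(("op1", "cat"), "cat", parent)    # SharedQuantizationSpec: cat's output shares with its first input edge

group_id, next_id = {}, 0
for k in keys:
    root = find_root(k, parent)
    if root not in group_id:
        group_id[root] = next_id
        next_id += 1
    group_id[k] = group_id[root]
# op1, (op1, cat) and cat end up with the same id; op2 and (op2, cat) each keep their own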
+__all__ = [ + "prepare", +] + + +def _find_root_edge_or_node(edge_or_node: EdgeOrNode, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]) -> EdgeOrNode: + """Find the root node for the sharing tree + Args: + edge_or_node: edge/node that we want to find the root + shared_with_map: each edge/node points to the parent, the root node will points to itself + + Returns: + root edge/node + """ + parent = shared_with_map[edge_or_node] + if parent == edge_or_node: + return edge_or_node + root = _find_root_edge_or_node(parent, shared_with_map) + # path compression + shared_with_map[edge_or_node] = root + return root + +def _union(parent: EdgeOrNode, child: EdgeOrNode, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]) -> None: + """Merge the subtree for `child` with `parent`, the order is important here + """ + root_parent = _find_root_edge_or_node(parent, shared_with_map) + root_child = _find_root_edge_or_node(child, shared_with_map) + # union the two trees by pointing the root of child to root of parent + shared_with_map[root_child] = root_parent + +def _update_shared_with(child: EdgeOrNode, qspec: QuantizationSpecBase, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]): + """Update the `shared_with_map` based on the qspec, this applies the `SharedQuantizationSpec` + configuration and established the relationship between `edge_or_node` with the edge/node that it + is pointing to, we'll use this information in the end to get the group id + """ + if isinstance(qspec, SharedQuantizationSpec): + parent = qspec.edge_or_node + # we point from edge_or_node to the node that it is sharing_with, e.g. + # qspec for a = SharedQuantizationSpec(b) means `a` points to `b` + _union(parent, child, shared_with_map) + +def _unwrap_shared_qspec( + qspec: QuantizationSpecBase, + edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase], + shared_with_map: Dict[EdgeOrNode, EdgeOrNode] +) -> QuantizationSpecBase: + """Unwraps qspec to get the final root qspec (non SharedQuantizationSpec) + if qspec is SharedQuantizationSpec + (1). tries to find the root edge or node for the node that the qspec points to + (2). 
recursively find the root qspec based on the qspec for the root node + """ + if isinstance(qspec, SharedQuantizationSpec): + sharing_with = qspec.edge_or_node + root = _find_root_edge_or_node(sharing_with, shared_with_map) + qspec = edge_or_node_to_qspec[root] + return _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map) + return qspec + +def _has_same_dtype(qspec_a: QuantizationSpecBase, qspec_b: QuantizationSpecBase): + return ( + hasattr(qspec_a, "dtype") and + hasattr(qspec_b, "dtype") and + qspec_a.dtype == qspec_b.dtype + ) + +def _has_same_is_dynamic(qspec_a: QuantizationSpecBase, qspec_b: QuantizationSpecBase): + return ( + hasattr(qspec_a, "is_dynamic") and + hasattr(qspec_b, "is_dynamic") and + qspec_a.is_dynamic == qspec_b.is_dynamic + ) + +def _get_edge_or_node_to_qspec(model: torch.fx.GraphModule) -> Dict[EdgeOrNode, QuantizationSpecBase]: + """Get a map from EdgeOrNode to quantization spec based on annotations on the nodes + """ + edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase] = {} + for n in model.graph.nodes: + if hasattr(n, "meta") and "quantization_annotation" in n.meta: + qa = n.meta["quantization_annotation"] + for input_to_n, qspec in qa.input_qspec_map.items(): + input_edge = (input_to_n, n) + edge_or_node_to_qspec[input_edge] = qspec + if qa.output_qspec is not None: + output_node = n + qspec = qa.output_qspec + edge_or_node_to_qspec[output_node] = qspec + return edge_or_node_to_qspec + +def _union_input_edge_with(input_edge, input_edge_root_qspec, edge_or_node, edge_or_node_to_qspec, shared_with_map): + """Union input edge with another edge or node, used in implicit sharing to point the current input + edge to other user edges of the producer node, or the output of producer node since these are + referring to the same Tensor + """ + root_qspec = None + if edge_or_node in edge_or_node_to_qspec: + qspec = edge_or_node_to_qspec[edge_or_node] + root_qspec = _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map) + # TODO: add assertions for types of root qspecs + if ( + root_qspec is not None and + _has_same_dtype(root_qspec, input_edge_root_qspec) and + _has_same_is_dynamic(root_qspec, input_edge_root_qspec) + ): + # the input arg to the node should reuse the existing output observer for arg + # since dtype is the same (we may want to extend this to be a more strict check + # in the future) + # so we point from `input_edge` to `arg` (output of the argument) + _union(edge_or_node, input_edge, shared_with_map) + + +def _get_edge_or_node_to_group_id(edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase]) -> Dict[EdgeOrNode, int]: + """Map from edge/node to the group ID, generated from quantization annotations, + edge/node with the same group ID should use the same observer/fake_quant instance + + This is applying SharedQuantizationSpec configuration and map each edge/node to a group + There is another implicit sharing that's built in the quantization, when we have the following: + * op1 -> op2 + * output of op1: int8_qspec + * (op1 -> op2) input edge: int8_qspec + we'll assume sharing between the output of op1 and input of (op1 -> op2) since these are the same Tensor. 
+ + Figuring out the correct group ID for all edge/node is a standard union find problem: + https://www.geeksforgeeks.org/introduction-to-disjoint-set-data-structure-or-union-find-algorithm/ + + Args: + edge_or_node_to_qspec: Dictionary from edge_or_node to the qspec, derived from annotations + Returns: + edge_or_node_to_group_id: Dictionary from edge_or_node to group_id (int), all edge or node that + belongs to the same group should have the same id + + Example: + op2 -> cat1 -> cat2 + op1 / / + op3 + edge_or_node_to_qspec: { + op1: int8_qspec, + op2: int8_qspec, + (op1, cat1): int8_qspc, + (op2, cat1): SharedQuantizationSpec((op1, cat1)), + cat1: SharedQuantizationSpec((op1, cat1)), + (op3, cat2): int8_qspec, + (cat1, cat2): SharedQuantizationSpec((op3, cat2)), + cat2: SharedQuantizationSpec((op3, cat2)), + } + + edge_or_node_to_group_id = _get_edge_or_node_to_group_id(edge_or_node_to_qspec) + edge_or_node_to_group_id: { + op1: 1, + op2: 1, + (op1, cat1): 1, + (op2, cat1): 1, + cat1: 1, + (op3, cat2): 1, + (cat1, cat2): 1, + cat2: 1, + } + # everything are in the same group because (cat1) and (cat1, cat2) are implicitly shared, which + # connects the two sharing group around cat1 and cat2 op due to transitive sharing + """ + # means the observer of key should be shared with observer with value, by default it will + # be shared with itself + shared_with_map: Dict[EdgeOrNode, EdgeOrNode] = {k: k for k in edge_or_node_to_qspec.keys()} + for edge_or_node, qspec in edge_or_node_to_qspec.items(): + if isinstance(edge_or_node, torch.fx.Node): + output_node = edge_or_node + _update_shared_with(output_node, qspec, shared_with_map) + else: + input_edge = edge_or_node + input_edge_root_qspec = _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map) + + assert isinstance(input_edge, tuple) + arg, n = input_edge + if n.meta["quantization_annotation"].allow_implicit_sharing: + # NOTE: the order is important here, we first share with other users and then share with previous + # output because the reverse order could cause circular dependency + # e.g node1 -> node2 + # \ -> node3 + # when processing (node1, node2), if we first point (node1, node2) to node1 + # Step 1. shared_map = {(node1, node2): node1} + # Step 2. after that, we point the (node1, node2) to its other user (node1, node3) , + # which means shared_map = {(node1, node2): node1, node1: (node1, node3)} + # because we will point the root of (node1, node2) (in this case node1) to the root of (node1, node3) + # Step 3. 
and when we process (node1, node3), it can try to point to node1 as well, then we'll + # have a circular dependency + # the following order works around this issue, but this does not allow arbitrary configuration + # of sharing so it might break in a different case in the future, when it breaks + # quantizer writer can check the notes here to debug the issue + + # sharing with other users of the producer node + # (arg, user) + for user in arg.users: + if user is n: + continue + arg_to_user_edge = (arg, user) + _union_input_edge_with( + input_edge, + input_edge_root_qspec, + arg_to_user_edge, + edge_or_node_to_qspec, + shared_with_map + ) + + # sharing with output of producer node + _union_input_edge_with(input_edge, input_edge_root_qspec, arg, edge_or_node_to_qspec, shared_with_map) + + _update_shared_with(input_edge, qspec, shared_with_map) + + # now that we get the sharing relations between all edges and nodes, we can assingn group ids + cur_group_id = 0 + edge_or_node_to_group_id: Dict[EdgeOrNode, int] = {} + for edge_or_node in shared_with_map.keys(): + root = _find_root_edge_or_node(edge_or_node, shared_with_map) + if root not in edge_or_node_to_group_id: + edge_or_node_to_group_id[root] = cur_group_id + cur_group_id += 1 + edge_or_node_to_group_id[edge_or_node] = edge_or_node_to_group_id[root] + + return edge_or_node_to_group_id + +def _get_obs_or_fq_map( + edge_or_node_to_group_id: Dict[EdgeOrNode, int], + edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase], + is_qat: bool +) -> Dict[EdgeOrNode, ObserverOrFakeQuantize]: + """Generates the EdgeOrNode to observer/fake_quant instances + Makes sure that for EdgeOrNode that has the same group_id should have the same observer or fake quant + instances + """ + obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize] = {} + group_id_to_obs_or_fq: Dict[int, ObserverOrFakeQuantize] = {} + for edge_or_node, qspec in edge_or_node_to_qspec.items(): + group_id = edge_or_node_to_group_id[edge_or_node] + if group_id not in group_id_to_obs_or_fq: + # TODO: maybe edge_or_node_to_qspec should be edge_or_node_to_root_qspec, this will simplify + # the implementation for _create_obs_or_fq_from_qspec + group_id_to_obs_or_fq[group_id] = _create_obs_or_fq_from_qspec(qspec, obs_or_fq_map, is_qat) + obs_or_fq_map[edge_or_node] = group_id_to_obs_or_fq[group_id] + return obs_or_fq_map + +def _maybe_insert_input_observer_for_arg_or_kwarg( + node: Union[Node, Any], + arg: Argument, + qconfig: QConfigAny, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize], + is_qat: bool, +) -> Argument: + """ + Given a `node` and an `arg`, inserts an input observer between + `node` and `arg` if necessary. 
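Editorial aside (not part of the diff): the low-level torch.fx mechanics behind "insert an input observer between `node` and `arg`" look roughly like the self-contained sketch below. `Toy`, the attribute name, and the choice of `MinMaxObserver` are illustrative; only the edge from the producer to one consumer is rerouted through the new observer node.

import torch
from torch.fx import symbolic_trace
from torch.ao.quantization.observer import MinMaxObserver

class Toy(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0

gm = symbolic_trace(Toy())
producer = next(n for n in gm.graph.nodes if n.target == torch.relu)
consumer = next(iter(producer.users))            # the add node consuming relu's output

obs_name = "activation_post_process_0"           # hypothetical unique attribute name
setattr(gm, obs_name, MinMaxObserver())          # register the observer as a submodule
with gm.graph.inserting_after(producer):
    obs_node = gm.graph.create_node("call_module", obs_name, (producer,), {})
consumer.replace_input_with(producer, obs_node)  # reroute just this edge through the observer
gm.recompile()
gm(torch.randn(2, 3))                            # the observer now sees relu's output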
+ """ + # for ops such as torch.cat([x0, x1]), + # traverse through the list + if isinstance(arg, (list, tuple)): + new_arg_to_return = [] + for inner_arg in arg: + new_inner_arg = _maybe_insert_input_observer_for_arg_or_kwarg( + node, inner_arg, qconfig, model, named_modules, obs_or_fq_map, is_qat, + ) + new_arg_to_return.append(new_inner_arg) + return type(arg)(new_arg_to_return) + + if not isinstance(arg, Node): + return arg + assert isinstance(arg, Node) + # default (no observer) + new_arg = arg + + # find the original `arg` node to the current node, skipping inserted observer/fake_quant nodes + original_arg = arg + while _is_activation_post_process_node(original_arg, named_modules): + original_arg = original_arg.args[0] # type: ignore[assignment] + assert isinstance(original_arg, Node), f"expect original argument to be a Node, but got: {type(original_arg)}" + + input_edge = (original_arg, node) + if input_edge not in obs_or_fq_map: + return new_arg + # input_edge needs to be observed + input_edge_obs_or_fq = obs_or_fq_map[input_edge] + if input_edge_obs_or_fq is None: + return new_arg + + arg_as_output_obs_or_fq = obs_or_fq_map.get(original_arg, None) + # the arg is observed as the output and is using the same instance as the input_edge + # we'll reuse the inserted observer/fake_quant + if arg_as_output_obs_or_fq is not None and id(arg_as_output_obs_or_fq) == id(input_edge_obs_or_fq): + return new_arg + + # otherwise, we'll insert a new observer/fake_quant node + + existing_obs_node = None + # skip inserting new observers if the same observer instance is inserted before for another user + # Example: + # conv1 -> obs1 -> existing_obs -> conv2 + # \ -> conv3 + # + # instead of inserting new observers we will have: + # conv1 -> obs1 -> existing_obs -> conv2 + # \ -> conv3 + for maybe_obs_node in arg.users.keys(): + if not _is_activation_post_process_node(maybe_obs_node, named_modules): + continue + maybe_obs_mod = named_modules[maybe_obs_node.target] # type: ignore[index] + if id(maybe_obs_mod) == id(input_edge_obs_or_fq): + return maybe_obs_node + + new_arg = _insert_obs_or_fq(arg, input_edge_obs_or_fq, model, named_modules, model.graph) + return new_arg + +def _maybe_insert_input_observers_for_node( + node: Node, + qconfig: QConfigAny, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize], + is_qat: bool, +) -> None: + """ + If needed, inserts observers to the input args and kwargs of `node`. + Note: modifies `node` inplace. + + For example, if cur_node needs an observer after prev_node, we change from + + prev_node -> cur_node + + To + + prev_node -> obs -> cur_node + + """ + # Look through every input arg. If that arg's target dtype does not + # match the current node's target dtype, insert an observer. + new_args = [] + for arg in node.args: + new_arg = _maybe_insert_input_observer_for_arg_or_kwarg( + node, arg, qconfig, model, named_modules, obs_or_fq_map, is_qat, + ) + new_args.append(new_arg) + + # Clone has a memory_format kwarg and zeros_like has a pin_memory kwarg + # that persist in exported graph. This is just a work around for these. 
+ assert ( + node.target == torch.ops.aten.clone.default or + node.target == torch.ops.aten.zeros_like.default or + len(node.kwargs) == 0 + ), " expecting kwargs for aten op IR to be empty" + + # assign the new args to the node, inplace + node.args = tuple(new_args) + +def _maybe_insert_output_observer_for_node( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, + obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize], + is_qat: bool, +) -> Optional[Node]: + if node in obs_or_fq_map: + output_act_obs_or_fq = obs_or_fq_map[node] + return _insert_obs_or_fq(node, output_act_obs_or_fq, model, named_modules, graph) + return None + +def _maybe_insert_input_and_output_observers_for_node( + node: Node, + model: torch.fx.GraphModule, + obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize], + is_qat: bool, +): + this_node_quantization_annotation = node.meta["quantization_annotation"] if "quantization_annotation" in node.meta else None + if this_node_quantization_annotation is None: + return + + named_modules = dict(model.named_modules(remove_duplicate=False)) + _maybe_insert_input_observers_for_node( + node, + None, # qconfig + model, + named_modules, + obs_or_fq_map, + is_qat, + ) + + output_is_a_tensor = "val" in node.meta and isinstance(node.meta["val"], FakeTensor) + if not output_is_a_tensor: + return + + # this returns the new observer node if it was needed + maybe_output_obs_node = _maybe_insert_output_observer_for_node( + node, model, named_modules, model.graph, obs_or_fq_map, is_qat) + + if maybe_output_obs_node is None: + return + # Update users of original node to use the output observer + # instead. For example, change + # + # next_node + # / + # cur_node -> obs + # + # to + # + # next_node + # / + # cur_node -> obs + # + # We need to save orig users before updating uses because + # the list of users will change as we update uses + orig_users = list(node.users.keys()) + for user_node in orig_users: + if user_node is maybe_output_obs_node: + continue + user_node.replace_input_with(node, maybe_output_obs_node) + +def prepare( + model: GraphModule, + node_name_to_scope: Dict[str, Tuple[str, type]], + is_qat: bool, +) -> GraphModule: + # Since we are mutating the graph as we go, we iterate over the original + # nodes before observer insertion, instead of model.graph.nodes. 
+ nodes_before_observation = list(model.graph.nodes) + + # At the high level we construct a map from EdgeOrNode to a observer_or_fake_quant instance + # all edge/nodes that belongs to the same group will use the same instance + # and when we insert observers we'll just query this map to get the correct observer_or_fake_quant + # instance + edge_or_node_to_qspec = _get_edge_or_node_to_qspec(model) + edge_or_node_to_group_id = _get_edge_or_node_to_group_id(edge_or_node_to_qspec) + obs_or_fq_map = _get_obs_or_fq_map(edge_or_node_to_group_id, edge_or_node_to_qspec, is_qat) + + for node in nodes_before_observation: + # TODO: simplify logic for inserting observers + _maybe_insert_input_and_output_observers_for_node(node, model, obs_or_fq_map, is_qat) + + model = GraphModule(model, model.graph) + + _save_state( + model, + {}, # node_name_to_qconfig + node_name_to_scope, + PrepareCustomConfig(), + {}, # equalization_node_name_to_qconfig + QConfigMapping(), + is_qat, + set() # observed_node_names + ) + return model diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/qat_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/qat_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d84428a7c13d6a290d9a0b36b995150c5d639fa8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/qat_utils.py @@ -0,0 +1,778 @@ +import dataclasses +import itertools +import operator +from typing import Any, Callable, Dict, List, Tuple + +import torch +from torch.fx import Graph, GraphModule, Node +from torch.fx.subgraph_rewriter import ( + replace_pattern_with_filters, + ReplacedPatterns, +) +import torch.nn.functional as F +from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401 +from torch.ao.quantization.quantizer import ( + DerivedQuantizationSpec, + EdgeOrNode, + SharedQuantizationSpec, + QuantizationSpecBase, +) +from .utils import ( + _conv1d_bn_example_inputs, + _conv2d_bn_example_inputs, + _is_conv, + _is_supported_batch_norm_for_training, + fold_bn_weights_into_conv_node, + get_aten_graph_module, +) + + +__all__ = [] # type: ignore[var-annotated] + + +# Example inputs for quantized and folded conv-bn1d patterns used in convert +_quantized_conv1d_bn_example_inputs = ( + torch.randn(1, 1, 3), # x + torch.randn(1, 1, 1), # conv_weight + torch.randn(1), # bn_weight + torch.randn(1), # bn_bias + torch.randn(1), # bn_running_mean + torch.randn(1), # bn_running_var +) + +# Example inputs for quantized and folded conv-bn2d patterns used in convert +_quantized_conv2d_bn_example_inputs = ( + torch.randn(1, 1, 3, 3), # x + torch.randn(1, 1, 1, 1), # conv_weight + torch.randn(1), # bn_weight + torch.randn(1), # bn_bias + torch.randn(1), # bn_running_mean + torch.randn(1), # bn_running_var +) + +def _get_quantized_conv_bn_example_inputs_kwargs( + is_per_channel: bool, + has_bias: bool, + is_cuda: bool, +) -> Dict[str, Any]: + """ + Optional example inputs for quantized and folded conv-bn patterns + used in convert, expressed as kwargs. 
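Editorial aside (not part of the diff): the `prepare` function above is driven by the public PT2E entry points. A rough end-to-end sketch, assuming the PyTorch version vendored here; `MyModel` is a placeholder, and the XNNPACK quantizer is just one common choice of quantizer for this flow:

import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

model = MyModel().eval()                       # hypothetical float model
example_inputs = (torch.randn(1, 3, 224, 224),)
exported = capture_pre_autograd_graph(model, example_inputs)
quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
prepared = prepare_pt2e(exported, quantizer)   # annotates, then inserts observers via prepare()
# ... run calibration batches through `prepared` ...
quantized = convert_pt2e(prepared)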
+ """ + kwargs = {} + # Per tensor quantization uses literals to represent scale and zero + # point, so there is no need to include them here as kwargs + if is_per_channel: + kwargs["scale"] = torch.tensor([1], dtype=torch.float) + kwargs["zero_point"] = torch.tensor([0], dtype=torch.int) + if has_bias: + kwargs["conv_bias"] = torch.randn(1) + if is_cuda: + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + kwargs[k] = v.cuda() + return kwargs + +def _get_conv_bn_pattern(conv_fn: Callable) -> Callable: + def _conv_bn_pattern( + x: torch.Tensor, + conv_weight: torch.Tensor, + conv_bias: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + ) -> torch.Tensor: + x = conv_fn(x, conv_weight, conv_bias) + x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True) + return x + return _conv_bn_pattern + +# TODO: merge this with the `no_conv_bias` case +def _get_qat_conv_bn_pattern(conv_fn: Callable) -> Callable: + def _qat_conv_bn_pattern( + x: torch.Tensor, + conv_weight: torch.Tensor, + conv_bias: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + ) -> torch.Tensor: + """ + Approximated method to fuse conv and bn. It requires only one forward pass. + conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std. + This is based on `nniqat.ConvBn2d._forward_approximate`. + """ + # TODO: allow setting eps + bn_eps = 1e-5 + running_std = torch.sqrt(bn_running_var + bn_eps) + scale_factor = bn_weight / running_std + weight_shape = [1] * len(conv_weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(conv_weight.shape) + bias_shape[1] = -1 + scaled_weight = conv_weight * scale_factor.reshape(weight_shape) + zero_bias = torch.zeros_like(conv_bias, dtype=x.dtype) + x = conv_fn(x, scaled_weight, zero_bias) + x = x / scale_factor.reshape(bias_shape) + x = x + conv_bias.reshape(bias_shape) + x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps) + return x + return _qat_conv_bn_pattern + +def _get_qat_conv_bn_pattern_no_conv_bias(conv_fn: Callable) -> Callable: + def _qat_conv_bn_pattern_no_conv_bias( + x: torch.Tensor, + conv_weight: torch.Tensor, + # Not used, only for matching convenience + conv_bias: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + ) -> torch.Tensor: + """ + Same as `_get_qat_conv_bn_pattern`, but handles the case with no conv bias. + """ + # TODO: allow setting eps + bn_eps = 1e-5 + running_std = torch.sqrt(bn_running_var + bn_eps) + scale_factor = bn_weight / running_std + weight_shape = [1] * len(conv_weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(conv_weight.shape) + bias_shape[1] = -1 + scaled_weight = conv_weight * scale_factor.reshape(weight_shape) + x = conv_fn(x, scaled_weight, None) + x = x / scale_factor.reshape(bias_shape) + x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps) + return x + return _qat_conv_bn_pattern_no_conv_bias + +def _append_qdq(x, is_per_channel, kwargs): + """ + Helper function to append q-dq ops after `x`, using dummy values for the qparams + and qmin/qmax. We use dummy values here because we match with `ignore_literals=True` + and will manually replace these values after subgraph rewriting. + + Return the dq node. 
+ """ + # Dummy args to be passed into q-dq ops + per_channel_axis = 0 + scale = kwargs["scale"] if is_per_channel else 1.0 + zp = kwargs["zero_point"] if is_per_channel else 0 + qmin = -127 + qmax = 127 + dtype = torch.int8 + + qd = torch.ops.quantized_decomposed + if is_per_channel: + x = qd.quantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype) + x = qd.dequantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype) + else: + x = qd.quantize_per_tensor(x, scale, zp, qmin, qmax, dtype) + x = qd.dequantize_per_tensor(x, scale, zp, qmin, qmax, dtype) + return x + +def _get_quantized_qat_conv_bn_pattern( + is_per_channel: bool, + has_bias: bool, + bias_is_quantized: bool, + conv_fn: Callable, + bn_is_training: bool, +) -> Callable: + """ + Return the quantized version of QAT conv + BN pattern. + This is based on `nniqat.ConvBn2d._forward_approximate`, + used in QAT convert. We first match this pattern and replace + it with the normal [conv - bn] pattern, then fold the BN + weights into conv. + """ + # TODO: allow setting eps + bn_eps = 1e-5 + + def _quantized_qat_conv_bn_pattern( + x: torch.Tensor, + conv_weight: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + running_std = torch.sqrt(bn_running_var + bn_eps) + scale_factor = bn_weight / running_std + weight_shape = [1] * len(conv_weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(conv_weight.shape) + bias_shape[1] = -1 + scaled_weight = conv_weight * scale_factor.reshape(weight_shape) + scaled_weight = _append_qdq(scaled_weight, is_per_channel, kwargs) + if has_bias: + zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype) + if bias_is_quantized: + zero_bias = _append_qdq(zero_bias, is_per_channel, kwargs) + x = conv_fn(x, scaled_weight, zero_bias) + else: + x = conv_fn(x, scaled_weight, None) + x = x / scale_factor.reshape(bias_shape) + if has_bias: + x = x + kwargs["conv_bias"].reshape(bias_shape) + x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=bn_is_training, eps=bn_eps) + return x + return _quantized_qat_conv_bn_pattern + +def _get_folded_quantized_qat_conv_bn_pattern( + is_per_channel: bool, + has_bias: bool, + bias_is_quantized: bool, + conv_fn: Callable, + bn_is_training: bool, +) -> Callable: + """ + Quantized QAT conv - bn pattern with bn weights being folded into conv. + """ + # TODO: allow setting eps + bn_eps = 1e-5 + + def _folded_quantized_qat_conv_bn_pattern( + x: torch.Tensor, + conv_weight: torch.Tensor, + bn_weight: torch.Tensor, + bn_bias: torch.Tensor, + bn_running_mean: torch.Tensor, + bn_running_var: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + conv_weight = _append_qdq(conv_weight, is_per_channel, kwargs) + if has_bias: + bias = kwargs["conv_bias"] + if bias_is_quantized: + bias = _append_qdq(bias, is_per_channel, kwargs) + else: + bias = None + x = conv_fn(x, conv_weight, bias) + x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=bn_is_training, eps=bn_eps) + return x + return _folded_quantized_qat_conv_bn_pattern + +def _has_conv_bias_filter( + match: "InternalMatch", # type: ignore[name-defined] + original_graph: Graph, + pattern_graph: Graph, +) -> bool: + """ + Match filter for the subgraph rewriter that returns True if the conv node in + the original graph has bias. 
+ """ + for n in match.nodes_map.values(): + if _is_conv(n): + return len(n.args) > 2 and n.args[2] is not None + raise ValueError("Could not find conv node in matched conv + bn pattern") + +def _no_conv_bias_filter( + match: "InternalMatch", # type: ignore[name-defined] + original_graph: Graph, + pattern_graph: Graph, +) -> bool: + """ + Match filter for the subgraph rewriter that returns True if the conv node in + the original graph does NOT have bias. + """ + return not _has_conv_bias_filter(match, original_graph, pattern_graph) + +def _is_quantize(n: Node) -> bool: + return n.target in [ + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor, + torch.ops.quantized_decomposed.quantize_per_channel.default, + ] + +def _is_dequantize(n: Node) -> bool: + return n.target in [ + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, + torch.ops.quantized_decomposed.dequantize_per_channel.default, + ] + +def _get_conv_bn_pattern_nodes(r: ReplacedPatterns) -> Dict[str, Tuple[Node, Node]]: + """ + Helper function to extract the nodes in the conv-bn fusion pattern after + subgraph rewriting, in the form of a map: + + {name: (original_node, replacement_node)} + + The following names must exist in the map: + + "conv", "conv_weight", "conv_input", "bn", "getitem" + + The following names may exist in the map: + + "conv_weight_q", "conv_weight_dq", "conv_bias", + "conv_bias_q", "conv_bias_dq" + """ + def _get_nodes(nodes: List[Node]) -> Tuple[Node, Node, Node]: + """ + Return a 3-tuple of (conv_node, bn_node, getitem_node). + This asserts that the match contains exactly one of each node. + """ + conv_node, bn_node, getitem_node = None, None, None + for n in nodes: + if n.op != "call_function": + continue + if _is_conv(n): + assert conv_node is None + conv_node = n + if _is_supported_batch_norm_for_training(n) or n.target == torch.ops.aten._native_batch_norm_legit_no_training.default: + assert bn_node is None + bn_node = n + if n.target == operator.getitem: + assert getitem_node is None + getitem_node = n + assert conv_node is not None + assert bn_node is not None + assert getitem_node is not None + return (conv_node, bn_node, getitem_node) + + def _get_q_dq_nodes(n: Node) -> Tuple[Node, Node, Node]: + """ + Return a 3-tuple of (orig_node, q_node, dq_node). 
+ """ + assert _is_dequantize(n) + q_node = n.args[0] + assert isinstance(q_node, Node) + assert _is_quantize(q_node) + orig_node = q_node.args[0] + assert isinstance(orig_node, Node) + return (orig_node, q_node, n) + + original_nodes = list(_filter_nodes_map(r.nodes_map).values()) + o_conv, o_bn, o_getitem = _get_nodes(original_nodes) + r_conv, r_bn, r_getitem = _get_nodes(r.replacements) + + # Create the mapping from original node to replacement node + mapping = { + "conv": (o_conv, r_conv), + "bn": (o_bn, r_bn), + "getitem": (o_getitem, r_getitem), + } + + # Extract conv input and weight + # Note: here we extract the original nodes indirectly through the pattern nodes + # because the args of the original nodes are no longer available after replacement + (p_conv, _, _) = _get_nodes(list(r.nodes_map.keys())) + (p_conv_input, p_conv_weight, *_) = p_conv.args + (r_conv_input, r_conv_weight, *_) = r_conv.args + assert isinstance(p_conv_input, Node) + assert isinstance(p_conv_weight, Node) + assert isinstance(r_conv_input, Node) + assert isinstance(r_conv_weight, Node) + o_conv_input = r.nodes_map[p_conv_input] + o_conv_weight = r.nodes_map[p_conv_weight] + + # If conv weight is quantized, extract the q - dq nodes + if _is_dequantize(p_conv_weight): + p_conv_weight, p_conv_weight_q, p_conv_weight_dq = _get_q_dq_nodes(p_conv_weight) + r_conv_weight, r_conv_weight_q, r_conv_weight_dq = _get_q_dq_nodes(r_conv_weight) + o_conv_weight = r.nodes_map[p_conv_weight] + o_conv_weight_q = r.nodes_map[p_conv_weight_q] + o_conv_weight_dq = r.nodes_map[p_conv_weight_dq] + mapping["conv_weight_q"] = (o_conv_weight_q, r_conv_weight_q) + mapping["conv_weight_dq"] = (o_conv_weight_dq, r_conv_weight_dq) + mapping["conv_input"] = (o_conv_input, r_conv_input) + mapping["conv_weight"] = (o_conv_weight, r_conv_weight) + + # Extract conv bias + if len(p_conv.args) > 2 and len(r_conv.args) > 2: + p_conv_bias = p_conv.args[2] + r_conv_bias = r_conv.args[2] + assert isinstance(p_conv_bias, Node) + assert isinstance(r_conv_bias, Node) + o_conv_bias = r.nodes_map[p_conv_bias] + + # If conv bias is quantized, extract the q - dq nodes + if _is_dequantize(p_conv_bias): + p_conv_bias, p_conv_bias_q, p_conv_bias_dq = _get_q_dq_nodes(p_conv_bias) + r_conv_bias, r_conv_bias_q, r_conv_bias_dq = _get_q_dq_nodes(r_conv_bias) + o_conv_bias = r.nodes_map[p_conv_bias] + o_conv_bias_q = r.nodes_map[p_conv_bias_q] + o_conv_bias_dq = r.nodes_map[p_conv_bias_dq] + mapping["conv_bias_q"] = (o_conv_bias_q, r_conv_bias_q) + mapping["conv_bias_dq"] = (o_conv_bias_dq, r_conv_bias_dq) + mapping["conv_bias"] = (o_conv_bias, r_conv_bias) + return mapping + +def _filter_nodes_map(nodes_map: Dict[Node, Node]) -> Dict[Node, Node]: + """ + Return a filtered `nodes_map` returned from the subgraph rewriter. + The filtered `nodes_map` will contain only nodes that are actually + matched in the pattern, excluding None or placeholder nodes. + """ + new_nodes_map: Dict[Node, Node] = {} + for pattern_node, graph_node in nodes_map.items(): + # bias can be None + if graph_node is None: + continue + # skip pattern placeholder nodes + if pattern_node.op == "placeholder": + continue + new_nodes_map[pattern_node] = graph_node + return new_nodes_map + +# TODO: this is error prone, use the replace_literals_with_placeholders hack instead +def _copy_over_literal_conv_args(original_node: Node, new_node: Node): + """ + Copy over literal args in conv, such as stride and padding, from the matched node + in the original graph to its replacement in the new graph. 
+ + This is needed due to the following limitation in the subgraph rewriter when used + with dynamo export: literal (non-tensor) args are not supported in the match and + replacement patterns. This is because dynamo export automatically inlines these + literal args, making them dead placeholder nodes. In the future, we should check + if dynamo export can optionally disable this inlining, or if subgraph rewriter + can do the copying for us. See https://github.com/pytorch/pytorch/issues/100419. + + Note: Unlike other tensor args like conv weights and biases, literal args are + preserved in the original nodes after replacement, so we can access them here. + """ + assert _is_conv(original_node) + assert _is_conv(new_node) + # x, weight, bias, [stride, padding, dilation, transposed, output_padding, groups] + new_args = list(new_node.args) + if len(new_args) < 3: + # bias is optional, when it is not present, it means it is None + new_args.append(None) + new_node.args = tuple(new_args[:3]) + original_node.args[3:] + +def _update_conv_input_qspec_map_after_replacement(original_node: Node, replacement_node: Node): + """ + Update the `input_qspec_map` in the annotation after subgraph rewriting. + + The original annotation referred to the nodes in the original graph, + so the keys in the `input_qspec_map` will need to be updated to reflect + the corresponding nodes in the replacement graph. + """ + assert _is_conv(original_node) + assert _is_conv(replacement_node) + if "quantization_annotation" not in original_node.meta: + return + original_input_qspec_map = original_node.meta["quantization_annotation"].input_qspec_map + input_qspec_map = {} + # get the list of configs, it should be ordered as input, weight, bias + # note: this is really hacky, we need a better solution, hopefully + # in subgraph_rewriter, issue tracking the problem: https://github.com/pytorch/pytorch/issues/101820 + all_configs = list(original_input_qspec_map.items()) + # input activation + input_qspec_map[replacement_node.args[0]] = all_configs[0][1] + # weight + input_qspec_map[replacement_node.args[1]] = all_configs[1][1] + # bias + if len(replacement_node.args) > 2 and len(all_configs) > 2: + input_qspec_map[replacement_node.args[2]] = all_configs[2][1] + replacement_node.meta["quantization_annotation"].input_qspec_map = input_qspec_map + +def _update_special_qspecs_after_replacement( + node: Node, + original_to_replacement_node: Dict[Node, Node], +): + """ + Update the `SharedQuantizationSpec`s and `DerivedQuantizationSpec`s + used in `node`'s quantization annotation after subgraph rewriting. + + The original annotation referred to the nodes in the original graph, + so the nodes used in these special quantization specs will need to + be updated to the corresponding nodes in the replacement graph. 
+ """ + def _get_new_edge_or_node(edge_or_node: EdgeOrNode): + if isinstance(edge_or_node, Node): + _node = edge_or_node + return original_to_replacement_node.get(_node, _node) + elif isinstance(edge_or_node, tuple) and len(edge_or_node) == 2 and all(isinstance(x, Node) for x in edge_or_node): + src, dest = edge_or_node + return ( + original_to_replacement_node.get(src, src), + original_to_replacement_node.get(dest, dest), + ) + else: + raise ValueError("unexpected type for edge_or_node: ", type(edge_or_node)) + + def _get_new_qspec(qspec: QuantizationSpecBase): + if isinstance(qspec, SharedQuantizationSpec): + new_edge_or_node = _get_new_edge_or_node(qspec.edge_or_node) + return SharedQuantizationSpec(new_edge_or_node) + elif isinstance(qspec, DerivedQuantizationSpec): + new_derived_from = [_get_new_edge_or_node(x) for x in qspec.derived_from] + return dataclasses.replace(qspec, derived_from=new_derived_from) + else: + return qspec + + if "quantization_annotation" not in node.meta: + return + annotation = node.meta["quantization_annotation"] + for input_node, qspec in annotation.input_qspec_map.items(): + annotation.input_qspec_map[input_node] = _get_new_qspec(qspec) + annotation.output_qspec = _get_new_qspec(annotation.output_qspec) + +def _fuse_conv_bn_qat(m: GraphModule) -> GraphModule: + m = _fuse_conv_bn_qat_helper(m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=False) + m = _fuse_conv_bn_qat_helper(m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=False) + if torch.cuda.is_available(): + m = _fuse_conv_bn_qat_helper(m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=True) + m = _fuse_conv_bn_qat_helper(m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=True) + return m + +def _fuse_conv_bn_qat_helper( + m: GraphModule, + conv_fn: Callable, + example_inputs: Tuple[Any, ...], + is_cuda: bool, +) -> GraphModule: + """ + Given a graph of decomposed aten ops, replace the (conv + bn) pattern with + the fused QAT subgraph equivalent. The input graph should already be annotated. + The annotations in the original nodes will be preserved in the corresponding + nodes in the new subgraph. + + Note: This also handles the (conv + bn + relu) pattern. + """ + m.graph.eliminate_dead_code() + m.recompile() + conv_bn_pattern = _get_conv_bn_pattern(conv_fn) + match_pattern = get_aten_graph_module(conv_bn_pattern, example_inputs, is_cuda) + + # Step (1): Replace patterns with conv bias + # + # Here we do replacement separately for cases with and without conv bias, since + # the replacement patterns for these two cases are substantially different. 
+ # TODO: use the public replace_pattern API once it also returns replacement nodes + + qat_conv_bn_pattern = _get_qat_conv_bn_pattern(conv_fn) + replacement_pattern_with_conv_bias = get_aten_graph_module( + qat_conv_bn_pattern, + example_inputs, + is_cuda, + ) + replacements_with_conv_bias = replace_pattern_with_filters( + m, + match_pattern, + replacement_pattern_with_conv_bias, + match_filters=[_has_conv_bias_filter], + ignore_literals=True, + ) + m.recompile() + + # Step (2): Replace patterns without conv bias + + qat_conv_bn_pattern_no_conv_bias = _get_qat_conv_bn_pattern_no_conv_bias(conv_fn) + replacement_pattern_no_conv_bias = get_aten_graph_module( + qat_conv_bn_pattern_no_conv_bias, + example_inputs, + is_cuda, + ) + replacements_no_conv_bias = replace_pattern_with_filters( + m, + match_pattern, + replacement_pattern_no_conv_bias, + match_filters=[_no_conv_bias_filter], + ignore_literals=True, + ) + m.recompile() + + # Step (3): Post processing + # + # Due to limited functionality in the subgraph rewriter, here we manually + # update the replacement graph as follows: + # + # (a) Copy over metadata from original subgraph. This ensures the stack traces + # and annotations are preserved in the new subgraph + # + # (b) Copy over literal args for conv from the original subgraph + # TODO: do this for literal args for batchnorm as well + # + # (c) Update all references of the old nodes in the original subgraph to refer + # to the corresponding nodes in the new subgraph in the annotations + # + # In the future, we should try to push as much of this functionality into the + # subgraph rewriter as possible, so we don't have to manually copy anything over. + # For more detail, see https://github.com/pytorch/pytorch/issues/100419. + + all_original_to_replacement_nodes = {} + for r in replacements_with_conv_bias + replacements_no_conv_bias: + for original_node, replacement_node in _get_conv_bn_pattern_nodes(r).values(): + # Step (3a): Copy over metadata for all nodes in [conv - bn - getitem] + replacement_node.meta = original_node.meta + if _is_conv(original_node): + # Step (3b): Copy over conv literal args + _copy_over_literal_conv_args(original_node, replacement_node) + # Step (3c): Update old references in the conv node's input_qspec_map + _update_conv_input_qspec_map_after_replacement(original_node, replacement_node) + all_original_to_replacement_nodes[original_node] = replacement_node + + # Step (3c): Update old references in the special qspecs for all nodes in the graph + for n in m.graph.nodes: + _update_special_qspecs_after_replacement(n, all_original_to_replacement_nodes) + + return m + +def _duplicate_dequantize_node(m: GraphModule): + """ + Helper function to duplicate all dequantize nodes in the graph if the + node has more than one user. For example: + + Before: + quantize -> dequantize -> a + \\--> b + \\--> c + + After: + quantize -> dequantize_1 -> a + \\--> dequantize_2 -> b + \\--> dequantize_3 -> c + + This is useful for subgraph rewriting. E.g. if we wish to match the + pattern [dequantize - a] above, subgraph matching would fail because + the dequantize node has users outside the matched portion of the graph. + Instead, we match [dequantize_1 - a], which is safe. 
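In this flow it is paired with `_remove_extra_dequantize`, which merges the duplicates back into a single shared node after the rewrite; `_fold_conv_bn_qat_helper` below calls the two around the pattern replacement. Note that, as implemented below, only `quantized_decomposed.dequantize_per_tensor` nodes are duplicated.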
+ """ + dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor + for n in m.graph.nodes: + if n.op != "call_function" or n.target != dq_op or len(n.users) == 1: + continue + for user in list(n.users): + with m.graph.inserting_before(n): + new_node = m.graph.create_node("call_function", dq_op, n.args, n.kwargs) + user.replace_input_with(n, new_node) + m.graph.erase_node(n) + m.recompile() + +def _remove_extra_dequantize(m: GraphModule): + """ + Removes duplicate dequant nodes in the graph, for an operator that has + multiple dequant nodes as a user, replace them with a single dequant node + that can be shared across all the uses. This should be seen as the "reverse" + of `_duplicate_dequantize_node`. + """ + dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor + for n in m.graph.nodes: + dq_users = [user for user in n.users if user.op == "call_function" and user.target == dq_op] + if len(dq_users) > 1: + with m.graph.inserting_after(dq_users[0]): + new_node = m.graph.create_node("call_function", dq_op, dq_users[0].args, {}) + for dq_user in dq_users: + dq_user.replace_all_uses_with(new_node) + m.graph.erase_node(dq_user) + m.recompile() + +def _copy_over_q_dq_args(original_node: Node, replacement_node: Node): + """ + Given a pair of quantize or dequantize nodes, copy over all literal args + from the original node to the replacement node. + """ + # For quantize_per_tensor, scale and zp are literals and need to be copied + # For quantize_per_channel, scale and zp are get_attr nodes and should be skipped + assert original_node.target == replacement_node.target + if original_node.target in ( + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + ): + # Args: input, [scale, zp, qmin, qmax, dtype] + start_copy_arg_index = 1 + elif original_node.target in ( + torch.ops.quantized_decomposed.quantize_per_channel.default, + torch.ops.quantized_decomposed.dequantize_per_channel.default, + ): + # Args: input, scale, zp, [axis, qmin, qmax, dtype] + start_copy_arg_index = 3 + else: + raise ValueError("Expected quantize/dequantize nodes, got '%s'" % original_node.target) + replacement_node.args = ( + replacement_node.args[:start_copy_arg_index] + original_node.args[start_copy_arg_index:] + ) + +def _fold_conv_bn_qat(m: GraphModule) -> GraphModule: + m = _fold_conv_bn_qat_helper(m, F.conv1d, _quantized_conv1d_bn_example_inputs, is_cuda=False) + m = _fold_conv_bn_qat_helper(m, F.conv2d, _quantized_conv2d_bn_example_inputs, is_cuda=False) + if torch.cuda.is_available(): + m = _fold_conv_bn_qat_helper(m, F.conv1d, _quantized_conv1d_bn_example_inputs, is_cuda=True) + m = _fold_conv_bn_qat_helper(m, F.conv2d, _quantized_conv2d_bn_example_inputs, is_cuda=True) + return m + +def _fold_conv_bn_qat_helper( + m: GraphModule, + conv_fn: Callable, + example_inputs: Tuple[Any, ...], + is_cuda: bool, +) -> GraphModule: + """ + Replace the quantized (conv + bn) pattern with conv with bn weights folded into the weights of conv. 
+ """ + m.graph.eliminate_dead_code() + m.recompile() + _duplicate_dequantize_node(m) + + # Step (1): Replace QAT pattern with simple [conv - bn] pattern + replacements = [] + replacement_options = itertools.product( + [True, False], # is_per_channel + [True, False], # has_bias + [True, False], # bias_is_quantized + [True, False], # bn_is_training + ) + for is_per_channel, has_bias, bias_is_quantized, bn_is_training in replacement_options: + # For the cases without bias, `bias_is_quantized` is irrelevant, so here we arbitrarily + # filter out one of the values for this flag to avoid having duplicate patterns + if not has_bias and bias_is_quantized: + continue + kwargs = _get_quantized_conv_bn_example_inputs_kwargs(is_per_channel, has_bias, is_cuda) + match_pattern = _get_quantized_qat_conv_bn_pattern( + is_per_channel, has_bias, bias_is_quantized, conv_fn, bn_is_training + ) + match_pattern = get_aten_graph_module(match_pattern, example_inputs, is_cuda, **kwargs) + replacement_pattern = _get_folded_quantized_qat_conv_bn_pattern( + is_per_channel, has_bias, bias_is_quantized, conv_fn, bn_is_training + ) + replacement_pattern = get_aten_graph_module(replacement_pattern, example_inputs, is_cuda, **kwargs) + replacements.extend( + replace_pattern_with_filters( + m, + match_pattern, + replacement_pattern, + ignore_literals=True, + ) + ) + m.recompile() + _remove_extra_dequantize(m) + + for r in replacements: + node_map = _get_conv_bn_pattern_nodes(r) + + # Step (2): Copy over metadata from original subgraph + for original_node, replacement_node in node_map.values(): + replacement_node.meta = original_node.meta + + # Step (3): Copy over args for weight (and optionally bias) q - dq nodes + _copy_over_q_dq_args(*node_map["conv_weight_q"]) + _copy_over_q_dq_args(*node_map["conv_weight_dq"]) + if "conv_bias_q" in node_map: + assert "conv_bias_dq" in node_map + _copy_over_q_dq_args(*node_map["conv_bias_q"]) + _copy_over_q_dq_args(*node_map["conv_bias_dq"]) + + # Step (4): Fold BN weights into conv + conv_bias = None + (_, conv_node) = node_map["conv"] + (_, bn_node) = node_map["bn"] + (_, conv_weight) = node_map["conv_weight"] + if "conv_bias" in node_map: + (_, conv_bias) = node_map["conv_bias"] + fold_bn_weights_into_conv_node(conv_node, conv_weight, conv_bias, bn_node, m) + + # Copy over literal args for conv + for original_node in _filter_nodes_map(r.nodes_map).values(): + if _is_conv(original_node): + _copy_over_literal_conv_args(original_node, conv_node) + + m.graph.eliminate_dead_code() + m.recompile() + return m diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ddac64c04fa4bbc6a781540cbce9c6416ba0b52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py @@ -0,0 +1,5 @@ +from .rewrite import reference_representation_rewrite + +__all__ = [ + "reference_representation_rewrite", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a099b24faf02d7ea426897d7f631b53ca5a57f77 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/rewrite.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/rewrite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d444a59b7cad52942cfe044034324819b528b29c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__pycache__/rewrite.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py new file mode 100644 index 0000000000000000000000000000000000000000..42e3db762a5178fcd9af3fc748ae953f3fc770c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py @@ -0,0 +1,598 @@ +import torch +from torch.fx import GraphModule +from ..utils import ( + get_aten_graph_module, + remove_tensor_overload_for_qdq_ops, + _replace_literals_with_new_placeholders, + _replace_literals_with_existing_placeholders, +) +from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401 +from torch.fx.subgraph_rewriter import replace_pattern +from torch._higher_order_ops.out_dtype import out_dtype +from typing import Optional, Callable, Tuple, Any +from dataclasses import dataclass + +from functools import partial + +__all__ = [ + "reference_representation_rewrite", +] + + +_QUANTIZED_LINEAR_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (2, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randint(-128, 127, (5, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_linear( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.linear.default(x_fp32, weight_fp32, bias_fp32) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_linear( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. 
+ # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, x_quant_min, x_quant_max) + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.linear.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None) + # TODO: change to mul.Scalar + # Note: we are quantizing bias with these scales without signal from user, but it might be OK + bias_scale = x_scale * weight_scale + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + acc_i32 = acc_i32 + bias_i32 + # TODO: change to mul.Scalar when we make x_scale/weight_scale etc. Scalar values + acc_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, acc_i32, x_scale * weight_scale / out_scale) + out_zero_point + out_i8 = torch.ops.aten.clamp(acc_i32, out_quant_min, out_quant_max).to(torch.int8) + return out_i8 + + +_DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS = ( + torch.randn((2, 5), dtype=torch.float), + -128, + 127, + torch.finfo(torch.float32).eps, + torch.randint(-128, 127, (5, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), +) + + +def _qdq_dynamic_quantized_linear( + x_fp32, x_quant_min, x_quant_max, x_eps, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, +): + x_scale, x_zero_point = torch.ops.quantized_decomposed.choose_qparams(x_fp32, x_quant_min, x_quant_max, x_eps, torch.int8) + x_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + x_fp32, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.linear.default(x_fp32, weight_fp32, bias_fp32) + return out_fp32 + +def _reference_dynamic_quantized_linear( + x_fp32, x_quant_min, x_quant_max, x_eps, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, +): + x_scale, x_zero_point = torch.ops.quantized_decomposed.choose_qparams(x_fp32, x_quant_min, x_quant_max, x_eps, torch.int8) + # decomposed representation for quantize_per_tensor + # TODO: use out_dtype(mul, ...) 
here when the op is ready + x_fp32 = x_fp32 / x_scale # fp32 + # round modes might be different here + # pytorch is rounding to even, which is also common for most of the backends + x_fp32 = torch.round(x_fp32) # fp32 + x_i32 = x_fp32.to(dtype=torch.int32) # int32 + x_i32 = x_i32 + x_zero_point # int32 + # clamp works for fp32, int32 and int8 dtypes + x_i32 = torch.clamp(x_i32, x_quant_min, x_quant_max) # int32 + x_i8 = x_i32.to(dtype=torch.int8) + + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.linear.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None) + bias_scale = x_scale * weight_scale + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + acc_i32 = acc_i32 + bias_i32 + out_fp32 = acc_i32 * (x_scale * weight_scale) + return out_fp32 + + +_QUANTIZED_CONV2d_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_conv2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + stride = [1, 1] + padding = [0, 0] + dilation = [1, 1] + transposed = False + output_padding = [0, 0] + groups = 1 + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.convolution.default( + x_fp32, weight_fp32, bias_fp32, stride, padding, dilation, transposed, output_padding, groups) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_conv2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + stride = [1, 1] + padding = [0, 0] + dilation = [1, 1] + transposed = False + output_padding = [0, 0] + groups = 1 + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. 
+ # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, x_quant_min, x_quant_max) + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.convolution.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None, stride, padding, dilation, transposed, output_padding, groups) + # Note: we are quantizing bias with these scales without signal from user, but it might be OK + bias_scale = x_scale * weight_scale + # bias quantization to int32 uses bias_scale = x_scale * weight_scale due to: + # Take linear calculation for example + # Out_(i, j)_fp32 = Sum_(over k)[X_(i, k)_fp32 * W_(i, k)_fp32] + bias_(i)_fp32 + # Represent X, W fp32 as their dequant transforms + # A_fp32 = (A_q - A_zero_point) * A_scale + # Out_(i, j)_fp32 = Sum_(over k)[(X_(i, k)_q - X_zp) * X_scale * (W_(i, k)_q - W_zp) * W_scale] + bias_(i)_fp32 + # Factor out X_scale and W_scale + # Out_(i, j)_fp32 = ((X_scale * W_scale) * Sum_(over k)[(X_(i, k)_q - X_zp) * (W_(i, k)_q - W_zp)]) + bias_(i)_fp32 + # In order to move the addition of bias_(i)_fp32 inside the sum, we must do + # Out_(i, j)_fp32 = (X_scale * W_scale) * (Sum_(over k)[(X_(i, k)_q - X_zp) * (W_(i, k)_q - W_zp)] + (1 / (X_scale * W_scale)) * bias_(i)_fp32) # noqa: B950 + # Note we had to divide bias_fp32 by X_scale * W_scale = bias_scale + # Thus bias quantization to int32 must use bias_scale = X_scale * W_scale + + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + # Unsqueeze to match broadcast dims + # Unfortunately we cannot use bias_i32.unsqueeze(0) due to literal matching issues + # in graph pattern replacement + bias_i32 = bias_i32.unsqueeze(-1) + bias_i32 = bias_i32.unsqueeze(-1) + acc_i32 = acc_i32 + bias_i32 + # TODO: change to mul.Scalar when we make x_scale/weight_scale etc.
Scalar values + acc_i32 = out_dtype( + torch.ops.aten.mul.Tensor, torch.int32, acc_i32, x_scale * weight_scale / out_scale) + out_zero_point + out_i8 = torch.ops.aten.clamp(acc_i32, out_quant_min, out_quant_max).to(torch.int8) + return out_i8 + + +_QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_add_relu( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, quant_min, quant_max, torch.int8) + y_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(y_i8, y_scale, y_zero_point, quant_min, quant_max, torch.int8) + out_fp32 = x_fp32 + y_fp32 + out_fp32 = torch.ops.aten.relu(out_fp32) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantized_add_relu( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + """ + See comments for `_reference_quantized_add` for more information on + how to derive the formula for out_i8 based on x_i8 and y_i8 + """ + x_i32 = x_i8.to(torch.int32) + y_i32 = y_i8.to(torch.int32) + # TODO: change this to mul.Scalar? + x_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, (x_i32 - x_zero_point), (x_scale / out_scale)) + y_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, (y_i32 - y_zero_point), (y_scale / out_scale)) + out_i32 = x_i32 + y_i32 + out_zero_point + # out_i32 = torch.ops.aten.clamp(out_i32, out_zero_point) + out_i8 = torch.ops.aten.clamp(out_i32, out_zero_point, quant_max).to(torch.int8) + return out_i8 + +def _qdq_quantized_add(x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, out_scale, out_zero_point, quant_min, quant_max): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, quant_min, quant_max, torch.int8) + y_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(y_i8, y_scale, y_zero_point, quant_min, quant_max, torch.int8) + out_fp32 = x_fp32 + y_fp32 + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantized_add( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + """ + # How to Derive the formula for out_i8 based on x_i8 and y_i8 + # (since quantized add takes x_i8, y_i8 and their quantization parameters, and produce an out_i8) + + # out_i8 is quantized output, we can write down the formula for it first: +out_i8 = out_f32 / out_scale + out_zero_point (1) + + # then out_fp32 is computed from x_f32 + y_f32, and the x_fp32 and y_fp32 are the dequantized x_i8 and y_i8 + out_f32 = x_f32 + y_f32 (2) + x_fp32 = (x_i8 - x_zero_point) * x_scale (3) + y_fp32 = (y_i8 - y_zero_point) * y_scale (4) + + # applying the above fomula to the out_i8 equation we can get the following: + out_i8 = out_fp32 / out_scale + out_zero_point # (1) + = (x_f32 + 
y_f32) / out_scale + out_zero_point # applying (2) to substitute out_fp32 with x_fp32 + y_fp32 + = ((x_i8 - x_zero_point) * x_scale + (y_i8 - y_zero_point) * y_scale) / out_scale + out_zero_point # apply (3) and (4) + """ + x_i32 = x_i8.to(torch.int32) + y_i32 = y_i8.to(torch.int32) + # TODO: use out_dtype op + x_i32 = torch.round((x_scale / out_scale) * (x_i32 - x_zero_point)).to(torch.int32) + y_i32 = torch.round((y_scale / out_scale) * (y_i32 - y_zero_point)).to(torch.int32) + out_i32 = x_i32 + y_i32 + out_zero_point + quant_min = -128 + quant_max = 127 + out_i8 = torch.ops.aten.clamp(out_i32, quant_min, quant_max).to(torch.int8) + return out_i8 + +_QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_max_pool2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, out_scale, out_zero_point, out_quant_min, out_quant_max): + kernel_size = 1 + stride = 1 + padding = 0 + dilation = 1 + ceil_mode = False + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + out_fp32, _ = torch.ops.aten.max_pool2d_with_indices.default(x_fp32, kernel_size, stride, padding, dilation, ceil_mode) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_max_pool2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, out_scale, out_zero_point, out_quant_min, out_quant_max): + kernel_size = 1 + stride = 1 + padding = 0 + dilation = 1 + ceil_mode = False + # to preserve x_quant_min, x_quant_max in the graph for pattern matching + x_i8 = torch.clamp(x_i8, x_quant_min, x_quant_max) + x_i32 = x_i8.to(torch.int32) + out_i32, _ = torch.ops.aten.max_pool2d_with_indices.default( + x_i32 - x_zero_point, + kernel_size, + stride, + padding, + dilation, + ceil_mode + ) + out_fp32 = out_i32 * (x_scale / out_scale) + out_zero_point + out_fp32 = torch.clamp(out_fp32, out_quant_min, out_quant_max) + out_i8 = out_fp32.to(torch.int8) + return out_i8 + +_QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = ( + torch.randn(1, 3, 3, 3, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _quantize_per_tensor_int8(x_fp32, scale, zero_point, quant_min, quant_max): + x = torch.ops.quantized_decomposed.quantize_per_tensor(x_fp32, scale, zero_point, quant_min, quant_max, torch.int8) + return x + +def _reference_quantize_per_tensor_int8(x_fp32, scale, zero_point, quant_min, quant_max): + # TODO: use out_dtype(mul, ...) 
here when the op is ready + x = x_fp32 / scale # fp32 + # round modes might be different here + # pytorch is rounding to even, which is also common for most of the backends + x = torch.round(x) # fp32 + x = x.to(dtype=torch.int32) # int32 + x = x + zero_point # int32 + # clamp works for fp32, int32 and int8 dtypes + x = torch.clamp(x, quant_min, quant_max) # int32 + x = x.to(dtype=torch.int8) + return x + +_DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _dequantize_per_tensor_int8(x_i8, scale, zero_point, quant_min, quant_max): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max, torch.int8) + return x_fp32 + +def _reference_dequantize_per_tensor_int8(x_i8, scale, zero_point, quant_min, quant_max): + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. + # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, quant_min, quant_max) + # TODO: use out_dtype op + # note: x_i8.to(torch.int32) does not work here + # TODO: debug the implementation later when torchdynamo time out issue is resolved + return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32) + +_QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = ( + torch.randn(1, 3, 3, 3, dtype=torch.float), + torch.randn(3, dtype=torch.float), + torch.zeros(3, dtype=torch.int), + 1, + -128, + 127, +) + +def _quantize_per_channel_int8(x_fp32, scales, zero_points, ch_axis, quant_min, quant_max): + out_i8 = torch.ops.quantized_decomposed.quantize_per_channel( + x_fp32, scales, zero_points, ch_axis, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantize_per_channel_int8(x_fp32, scales, zero_points, ch_axis, quant_min, quant_max): + x_fp32 = torch.transpose(x_fp32, ch_axis, -1) + out_i32 = torch.ops.aten.clamp(torch.round(x_fp32 / scales).to(torch.int32) + zero_points, quant_min, quant_max) + out_i32 = torch.transpose(out_i32, ch_axis, -1) + return out_i32.to(torch.int8) + +_DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(3, dtype=torch.float), + torch.zeros(3, dtype=torch.int), + 1, + -128, + 127, +) + +def _dequantize_per_channel_int8(x_i8, scales, zero_points, ch_axis, quant_min, quant_max): + # the following will be replaced as placeholders + out_fp32 = torch.ops.quantized_decomposed.dequantize_per_channel( + x_i8, scales, zero_points, ch_axis, quant_min, quant_max, torch.int8 + ) + return out_fp32 + +def _reference_dequantize_per_channel_int8(x_i8, scales, zero_points, ch_axis, quant_min, quant_max): + # the following will be replaced as placeholders + # in order to preserve the quant_min/quant_max args for pattern matching (e.g. 
matching for int4 quantized ops) + # we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, quant_min, quant_max) + x_i8 = torch.transpose(x_i8, ch_axis, -1) + x_i32 = x_i8.to(torch.int32) + out_fp32 = (x_i32 - zero_points).to(torch.float) * scales + out_fp32 = torch.transpose(out_fp32, ch_axis, -1) + return out_fp32 + +def _replace_ph_qdq_per_channel_replacement(gm: torch.fx.GraphModule): + return _replace_literals_with_existing_placeholders( + gm, + exclude_literals=[-1], + literal_to_ph_idx={1: 3, -128: 4, 127: 5} + ) + + +@dataclass +class _RewriteInfo: + """Data needed for rewrite, this includes example inputs, pattern and replacement functions + and post transformation functions for the exported pattern and replacement GraphModule + """ + + # example inputs used for exporting the pattern into GraphModule + example_inputs: Tuple[Any, ...] + pattern: Callable + replacement: Callable + # post transformation on the exported pattern and replacement GraphModule + pattern_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None + replacement_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None + +_REWRITE_INFO_LIST = [ + _RewriteInfo( + _DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS, + _qdq_dynamic_quantized_linear, + _reference_dynamic_quantized_linear, + partial( + _replace_literals_with_existing_placeholders, + literal_to_ph_idx={ + -128: 1, + 127: 2, + torch.finfo(torch.float32).eps: 3 + } + ), + partial( + _replace_literals_with_existing_placeholders, + literal_to_ph_idx={ + -128: 1, + 127: 2, + torch.finfo(torch.float32).eps: 3 + } + ), + ), + _RewriteInfo( + _QUANTIZED_LINEAR_EXAMPLE_INPUTS, + _qdq_quantized_linear, + _reference_quantized_linear, + _replace_literals_with_new_placeholders, + _replace_literals_with_new_placeholders, + ), + _RewriteInfo( + _QUANTIZED_CONV2d_EXAMPLE_INPUTS, + _qdq_quantized_conv2d, + _reference_quantized_conv2d, + partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]), + partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]), + ), + _RewriteInfo( + _QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS, + _qdq_quantized_add_relu, + _reference_quantized_add_relu + ), + _RewriteInfo( + _QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS, + _qdq_quantized_add, + _reference_quantized_add + ), + _RewriteInfo( + _QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS, + _qdq_quantized_max_pool2d, + _reference_quantized_max_pool2d, + _replace_literals_with_new_placeholders, + _replace_literals_with_new_placeholders + ), + _RewriteInfo( + _QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS, + _quantize_per_tensor_int8, + _reference_quantize_per_tensor_int8), + _RewriteInfo( + _DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS, + _dequantize_per_tensor_int8, + _reference_dequantize_per_tensor_int8 + ), + _RewriteInfo( + _QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS, + _quantize_per_channel_int8, + _reference_quantize_per_channel_int8, + _replace_ph_qdq_per_channel_replacement, + _replace_ph_qdq_per_channel_replacement + ), + _RewriteInfo( + _DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS, + _dequantize_per_channel_int8, + _reference_dequantize_per_channel_int8, + _replace_ph_qdq_per_channel_replacement, + _replace_ph_qdq_per_channel_replacement + ), +] + +def reference_representation_rewrite(model: GraphModule) -> GraphModule: + remove_tensor_overload_for_qdq_ops(model) + for rewrite_info in _REWRITE_INFO_LIST: + example_inputs = rewrite_info.example_inputs + pattern = rewrite_info.pattern + replacement = rewrite_info.replacement + pattern_post_trans = 
rewrite_info.pattern_post_trans + replacement_post_trans = rewrite_info.replacement_post_trans + pattern = get_aten_graph_module(pattern, example_inputs) # type: ignore[arg-type, assignment] + remove_tensor_overload_for_qdq_ops(pattern) # type: ignore[arg-type] + replacement = get_aten_graph_module(replacement, example_inputs) # type: ignore[arg-type, assignment] + remove_tensor_overload_for_qdq_ops(replacement) # type: ignore[arg-type] + if pattern_post_trans: + pattern = pattern_post_trans(pattern) + if replacement_post_trans: + replacement = replacement_post_trans(replacement) + pattern.recompile() # type: ignore[attr-defined] + replacement.recompile() # type: ignore[attr-defined] + matches = replace_pattern(model, pattern, replacement) + return model diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/utils.py b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fcdfee42e9f449d80d2bab68c817e9a692ed2b75 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/pt2e/utils.py @@ -0,0 +1,507 @@ +import operator +import types + +import torch +from torch._export import capture_pre_autograd_graph +from torch.fx import ( + GraphModule, + Node, +) +from torch.nn.utils.fusion import fuse_conv_bn_weights +from typing import Any, Callable, Dict, Optional, Tuple, List, Union +from torch.utils._pytree import LeafSpec + +# Makes sure that quantized_decomposed ops are registered +from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401 + +from torch.ao.quantization.quantizer import QuantizationAnnotation + + +__all__ = [ + "fold_bn_weights_into_conv_node", + "get_aten_graph_module", + "remove_tensor_overload_for_qdq_ops", +] + +_QUANTIZE_OPS = [ + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor, + torch.ops.quantized_decomposed.quantize_per_channel.default, +] + + +_DEQUANTIZE_OPS = [ + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, + torch.ops.quantized_decomposed.dequantize_per_channel.default, +] + +# Example inputs for conv-bn1d patterns +_conv1d_bn_example_inputs = ( + torch.randn(1, 1, 3), # x + torch.randn(1, 1, 1), # conv_weight + torch.randn(1), # conv_bias + torch.randn(1), # bn_weight + torch.randn(1), # bn_bias + torch.randn(1), # bn_running_mean + torch.randn(1), # bn_running_var +) + +# Example inputs for conv-bn2d patterns +_conv2d_bn_example_inputs = ( + torch.randn(1, 1, 3, 3), # x + torch.randn(1, 1, 1, 1), # conv_weight + torch.randn(1), # conv_bias + torch.randn(1), # bn_weight + torch.randn(1), # bn_bias + torch.randn(1), # bn_running_mean + torch.randn(1), # bn_running_var +) + +def _is_connected(source: torch.fx.Node, dest: torch.fx.Node) -> bool: + """ + Assuming dest is one of the ops inserted by quant workflow, this function + finds if source and dest are connected. 
Assumption is that only quant workflow + inserted ops exist between source and dest + """ + quant_workflow_ops = _QUANTIZE_OPS + _DEQUANTIZE_OPS + quant_workflow_ops.append(torch.ops.quantized_decomposed.choose_qparams.tensor) + while dest.target in quant_workflow_ops: + if not isinstance(dest.args[0], torch.fx.Node): + raise ValueError(f"expected arg[0] of quant workflow ops to be a node but found {dest.args[0]}") + dest = dest.args[0] + return (dest == source) + + +def _find_q_dq_node_for_user( + produer: torch.fx.Node, user: torch.fx.Node +) -> Tuple[Any, Any]: + """ + Find q, dq pair corresponding to [producer -> q -> dq -> user] + Utils works by finding dq arg of user and ensuring it is connected to + producer + """ + dq_node = None + for n in user.args: + if isinstance(n, torch.fx.Node) and n.op == "call_function" and n.target in _DEQUANTIZE_OPS: + if _is_connected(produer, n): + dq_node = n + break + if dq_node is None: + for n in user.kwargs: + if isinstance(n, torch.fx.Node) and n.op == "call_function" and n.target in _DEQUANTIZE_OPS: + if _is_connected(produer, n): + dq_node = n + break + if dq_node is None: + return (None, None) + + q_node = None + if dq_node.args[0].op == "call_function" and dq_node.args[0].target in _QUANTIZE_OPS: + q_node = dq_node.args[0] + return (q_node, dq_node) + + + +def _is_sym_size_node(node: Node): + return ( + node.op == "call_function" + and node.target == torch.ops.aten.sym_size.default + or node.target == torch.ops.aten.sym_numel.default + or node.target == torch.ops.aten.sym_numel + or node.target == torch.ops.aten.sym_size + ) + + +def _filter_sym_size_users(node: torch.fx.Node) -> List[torch.fx.Node]: + node_users = list(filter((lambda x: (_is_sym_size_node(x) is False)), node.users)) + return node_users + + +def _is_valid_annotation(annotation: QuantizationAnnotation) -> bool: + if annotation is None: + return False + input_qspec_map = annotation.input_qspec_map + output_qspec = annotation.output_qspec + if len(input_qspec_map) == 0 and output_qspec is None: + return False + return True + + +def _get_tensor_constant_from_node(node, m): + if node is None: + return None + assert node.op == "get_attr" + return getattr(m, node.target) + +def _get_all_arguments(orig_args, orig_kwargs, args_schema): + all_args = [] + for i, schema in enumerate(args_schema): + if schema.name in orig_kwargs: + all_args.append(orig_kwargs[schema.name]) + elif not schema.kwarg_only and i < len(orig_args): + all_args.append(orig_args[i]) + else: + all_args.append(schema.default_value) + return all_args + +def _is_supported_batch_norm_for_training(node: Node): + """ + Return True if the given node refers to an aten batch norm op QAT supports. + """ + supported_ops = [ + torch.ops.aten._native_batch_norm_legit.default, + # Note: we won't need this op anymore after batch norm consolidation + # For now, we need to continue to support it because it gives better + # training numerics than `_native_batch_norm_legit` + torch.ops.aten.cudnn_batch_norm.default, + torch.ops.aten.miopen_batch_norm.default, + ] + return node.target in supported_ops + +def _is_conv(n: Node): + """ + Return whether the node refers to an aten conv op. + """ + return n.op == "call_function" and n.target in [ + torch.ops.aten.conv1d.default, + torch.ops.aten.conv2d.default, + ] + +def _is_conv_transpose(n: Node): + """ + Return whether the node refers to an aten conv_transpose op. 
+ """ + return n.op == "call_function" and n.target in [ + torch.ops.aten.conv_transpose1d, + torch.ops.aten.conv_transpose2d, + ] + +def fold_bn_weights_into_conv_node( + conv_node: Node, + conv_weight_node: Node, + conv_bias_node: Optional[Node], + bn_node: Node, + m: GraphModule +) -> None: + # conv args: input, weight, bias, stride, padding, dilation, ... + conv_w = _get_tensor_constant_from_node(conv_weight_node, m) + conv_b = _get_tensor_constant_from_node(conv_bias_node, m) + transpose = _is_conv_transpose(conv_node) + + # eval bn args: input, weight, bias, running mean, running var, momentum, eps + # train bn args: input, weight, bias, running mean, running var, training, momentum, eps + bn_args_schema = bn_node.target._schema.arguments # type: ignore[union-attr] + bn_args = _get_all_arguments(bn_node.args, bn_node.kwargs, bn_args_schema) + bn_w = _get_tensor_constant_from_node(bn_args[1], m) + bn_b = _get_tensor_constant_from_node(bn_args[2], m) + bn_rm = _get_tensor_constant_from_node(bn_args[3], m) + bn_rv = _get_tensor_constant_from_node(bn_args[4], m) + if bn_node.target == torch.ops.aten._native_batch_norm_legit_no_training.default: + eps_arg_index = 6 + elif _is_supported_batch_norm_for_training(bn_node): + eps_arg_index = 7 + else: + raise ValueError("BN node target is unexpected ", bn_node.target) + bn_eps = bn_args[eps_arg_index] + + fused_weight, fused_bias = fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=transpose) + + # update the weight and bias for conv + conv_args = list(conv_node.args) + # filling in the default bias argument + if len(conv_args) == 2: + conv_args.append(None) + + # calling data since the fused_weight and fused_bias are nn.Parameter + weight_attr_name = conv_weight_node.target + assert isinstance(weight_attr_name, str) + setattr(m, weight_attr_name, fused_weight) + if conv_bias_node is not None: + bias_attr_name = conv_bias_node.target + setattr(m, bias_attr_name, fused_bias) # type: ignore[arg-type] + else: + bias_attr_name = weight_attr_name + "_bias" + setattr(m, bias_attr_name, fused_bias) # type: ignore[arg-type] + with m.graph.inserting_before(conv_node): + get_bias_node = m.graph.get_attr(bias_attr_name) + # NOTE: here we assume the bias of conv is not quantized! 
+ conv_args[2] = get_bias_node + conv_node.args = tuple(conv_args) + + # native_batch_norm has 3 outputs, we expect getitem calls on the output + # and we want to replace the uses of getitem 0 with the output of conv + # + # Before: + # conv -> bn - (first output) -> users1 + # \ - (second output) -> users2 + # \ - (third output) -> users3 + # After: + # conv -> (first output) -> users1 + # bn - + # \ - (second output) -> users2 + # \ - (third output) -> users3 + # if users2 and users3 are empty then bn will be removed through dead code elimination + + for user in bn_node.users: + if user.op != "call_function" or user.target != operator.getitem or user.args[1] != 0: + continue + user.replace_all_uses_with(conv_node) + +# fuse conv bn weights, inplace modification of the graph_module and graph +def _fuse_conv_bn_(m: GraphModule) -> None: + for n in m.graph.nodes: + if n.op != "call_function" or n.target != torch.ops.aten._native_batch_norm_legit_no_training.default: + continue + bn_node = n + n = bn_node.args[0] + if not _is_conv(n): + continue + conv_node = n + conv_weight_node = conv_node.args[1] + conv_bias_node = conv_node.args[2] if len(conv_node.args) > 2 else None + fold_bn_weights_into_conv_node(conv_node, conv_weight_node, conv_bias_node, bn_node, m) + + m.graph.eliminate_dead_code() + m.recompile() + +def _get_node_name_to_scope(model: GraphModule) -> Dict[str, Tuple[str, type]]: + # TODO: move this information to fx node itself + node_name_to_scope: Dict[str, Tuple[str, type]] = {} + for n in model.graph.nodes: + nn_module_stack = n.meta.get("nn_module_stack", None) + current_scope = ("", type(None)) + if nn_module_stack: + bt = list(nn_module_stack.values())[-1] + current_scope = (bt[0].split(".")[-1], bt[1]) + node_name_to_scope[n.name] = current_scope + return node_name_to_scope + +def get_aten_graph_module( + pattern: Callable, + example_inputs: Tuple[Any, ...], + is_cuda: bool = False, + **kwargs, +) -> GraphModule: + """ + Convert the pattern to an FX graph with decomposed aten ops. 
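Example (an illustrative sketch; `_toy_pattern` and its example inputs are made up here)::

        def _toy_pattern(x, weight):
            return torch.nn.functional.conv2d(x, weight)

        pattern_gm = get_aten_graph_module(
            _toy_pattern,
            (torch.randn(1, 1, 3, 3), torch.randn(1, 1, 1, 1)),
        )
        # pattern_gm.graph now contains decomposed aten ops (e.g. aten.conv2d.default)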
+ """ + if is_cuda: + example_inputs = tuple([x.cuda() if isinstance(x, torch.Tensor) else x for x in example_inputs]) + aten_pattern = capture_pre_autograd_graph( + pattern, + example_inputs, + kwargs, + ) + aten_pattern.graph.eliminate_dead_code() + aten_pattern.recompile() + return aten_pattern + +def remove_tensor_overload_for_qdq_ops(match_pattern: GraphModule) -> None: + """ Remove .tensor overload for quantize/dequantize ops so that we can + use the match_pattern that we get from torchdynamo export to match the output of convert_pt2e + """ + _MAP = { + torch.ops.quantized_decomposed.quantize_per_tensor.default: torch.ops.quantized_decomposed.quantize_per_tensor, + torch.ops.quantized_decomposed.dequantize_per_tensor.default: torch.ops.quantized_decomposed.dequantize_per_tensor, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor, + torch.ops.quantized_decomposed.quantize_per_tensor.tensor2: torch.ops.quantized_decomposed.quantize_per_tensor, + torch.ops.quantized_decomposed.dequantize_per_tensor.tensor2: torch.ops.quantized_decomposed.dequantize_per_tensor, + torch.ops.quantized_decomposed.quantize_per_channel.default: torch.ops.quantized_decomposed.quantize_per_channel, + torch.ops.quantized_decomposed.dequantize_per_channel.default: torch.ops.quantized_decomposed.dequantize_per_channel, + torch.ops.aten.clamp.Tensor: torch.ops.aten.clamp, + } + for n in match_pattern.graph.nodes: + if n.op != "call_function": + continue + if n.target in _MAP: + n.target = _MAP[n.target] + +def _is_literal(arg): + if isinstance(arg, (int, float)): + return True + if isinstance(arg, (tuple, list)): + return all(map(_is_literal, arg)) + return False + +def _replace_literals_with_new_placeholders( + gm: torch.fx.GraphModule, + merge_dup: bool = False, + exclude_literals: Optional[List[Any]] = None +): + """Replace the literals in the graph with placeholder nodes that's created on the fly while we + traverse the graph, so that the literal arguments in the graph can be matched and replaced + + To use this, the pattern and replacement graph should have the exact same number of literal args + and they should be used in the exact same order in the pattern and replacement graph. + + If the literal arguments are not used in the same order in pattern and replacement graph, please + use `_replace_literals_with_existing_placeholders` instead + + Args: + `gm`: input GraphModule that we'll transform + `merge_dup`: boolean flag to indicate that if the same literal appears multiple times in + the graph, whether they should correspond to the same placeholder or not + `exclude_literals`: a list of literals that will not be replaced with placeholders + + Example: + + # 1. Original Graph + def pattern(self, x): + return x + 3 + + def replacement(self, x): + return x - 3 + + example_inputs = (torch.randn(1, 3, 3, 3),) + pattern_gm = get_aten_graph_module(pattern, example_inputs) + replacement_gm = get_aten_graph_module(pattern, example_inptus) + + # 2. Before calling replace literals we'll see the following graph: + def pattern(self, x): + return x + 3 + + def replacement(self, x): + return x - 3 + + pattern_gm = _replace_literals_with_new_placeholders(pattern_gm) + replacement_gm = _replace_literals_with_new_placeholders(replacement_gm) + + # 3. 
After replacing literals with new placeholder nodes + + def pattern(self, x, new_ph): + return x + new_ph + + def pattern(self, x, new_ph): + return x - new_ph + + """ + last_ph = None + cnt = 0 + literal_to_ph: Dict[Union[float, bool, int, torch.dtype], Node] = {} + if exclude_literals is None: + exclude_literals = [] + + for node in gm.graph.nodes: + if node.op == "placeholder": + last_ph = node + cnt += 1 + continue + with gm.graph.inserting_after(last_ph): + new_args = [] + for arg in node.args: + if _is_literal(arg) and arg not in exclude_literals: + if merge_dup and arg in literal_to_ph: + new_args.append(literal_to_ph[arg]) + else: + ph_node = gm.graph.placeholder("arg" + str(cnt)) + new_args.append(ph_node) + gm._in_spec.children_specs[0].children_specs.append(LeafSpec()) + cnt += 1 + if merge_dup: + literal_to_ph[arg] = ph_node + else: + new_args.append(arg) + new_args = tuple(new_args) + + node.args = new_args + return gm + + +def _replace_literals_with_existing_placeholders( + gm: torch.fx.GraphModule, + exclude_literals: Optional[List[Any]] = None, + literal_to_ph_idx: Optional[Dict[Union[float, int, bool, torch.dtype], int]] = None +): + """Replace the literals in the graph with **existing** placeholder nodes, so that the literal arguments + in the graph can be matched and replaced + + To use this, all literal args in the graph should be unique and each of them should correspond + to exactly one placeholder node + + # 1. Original Graph + def pattern(self, x_i8, scale, zero_point, quant_min, quant_max): + return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max) + + def replacement(x_i8, scale, zero_point, quant_min, quant_max): + x_i8 = torch.clamp(x_i8, quant_min, quant_max) + return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32) + + example_inputs = ( + torch.randn(1, 3, 3, 3), + 1.0, + 0, + -128, + 127, + ) + pattern_gm = get_aten_graph_module(pattern, example_inputs) + replacement_gm = get_aten_graph_module(pattern, example_inptus) + + # 2. Before calling replace literals we'll see the following graph: + def pattern(self, x_i8, scale, zero_point, quant_min, quant_max): + # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values + return torch.dequantize_per_tensor(x_i8, 1.0, 0, -128, 127) + + def replacement(x_i8, scale, zero_point, quant_min, quant_max): + # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values + x_i8 = torch.clamp(x_i8, -128, 127) + return ((x_i8.to(torch.float32) - 0) * 1.0).to(dtype=torch.float32) + + # Note that literal args appear in different order in pattern and replacement graph, so + # we can't use _replace_literals_with_new_placeholders + + literal_to_ph_idx = {1.0: 1, 0: 2, -128: 3, 127: 4} + pattern_gm = _replace_literals_with_existing_placeholders(pattern_gm, literal_to_ph_idx) + replacement_gm = _replace_literals_with_existing_placeholders(replacement_gm, literal_to_ph_idx) + + # 3. 
After replacing literals with existing placeholder nodes + + def pattern(self, x_i8, scale, zero_point, quant_min, quant_max): + # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values + return torch.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max) + + def replacement(x_i8, scale, zero_point, quant_min, quant_max): + # scale/zero_point/quant_min/quant_max are burnt in since they are scalar values + x_i8 = torch.clamp(x_i8, quant_min, quant_max) + return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32) + """ + if exclude_literals is None: + exclude_literals = [] + + if literal_to_ph_idx is None: + literal_to_ph_idx = {} + + phs = [node for node in gm.graph.nodes if node.op == "placeholder"] + + for node in gm.graph.nodes: + if node.op != "call_function": + continue + new_args = [] + for arg in node.args: + if _is_literal(arg) and arg not in exclude_literals and arg in literal_to_ph_idx: + ph_idx = literal_to_ph_idx[arg] + ph_node = phs[ph_idx] + new_args.append(ph_node) + else: + new_args.append(arg) + new_args = tuple(new_args) + node.args = new_args + return gm + +# TODO: Handle this in export itself and don't wrap the model in another GraphModule +# in prepare and convert +def _disallow_eval_train(model: GraphModule): + """ + Disallow calling `model.train()` or `model.eval()` on the given GraphModule. + This is useful for exported models, where these methods don't actually behave as expected. + """ + def _train(self, mode: bool = True): + raise NotImplementedError("Calling train() is not supported yet.") + + def _eval(self, mode: bool = True): + raise NotImplementedError("Calling eval() is not supported yet.") + + model.train = types.MethodType(_train, model) # type: ignore[method-assign] + model.eval = types.MethodType(_eval, model) # type: ignore[method-assign] + return model diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0522ea959444f5e446505d82913c96b10510de2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/composable_quantizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/composable_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd7b17a1a0070cb5d2cfe40aea9daf0f10d42fc8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/composable_quantizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/embedding_quantizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/embedding_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68923741350a502a15a3a1850a5a528e66573847 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/embedding_quantizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54545491f2aa3f9a36d359f8180c47351a2ca638 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1467dc9619e859f10b5137eafc7139b13586bf92 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/x86_inductor_quantizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/x86_inductor_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f061a35b250ee6a3af402bb089a194225056e2d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/x86_inductor_quantizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d614e1a84b0b4c65c03746bcab861761d2d0cd3f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bfacd9ec3b4f8e79620439e820132ce13778061 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc differ