diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e65652573b1b30bc755ab1861d4f7de0359bcedc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__init__.py @@ -0,0 +1,21 @@ +from .quantizer import ( + DerivedQuantizationSpec, + EdgeOrNode, + FixedQParamsQuantizationSpec, + QuantizationAnnotation, + QuantizationSpec, + QuantizationSpecBase, + Quantizer, + SharedQuantizationSpec, +) + +__all__ = [ + "EdgeOrNode", + "Quantizer", + "QuantizationSpecBase", + "QuantizationSpec", + "FixedQParamsQuantizationSpec", + "SharedQuantizationSpec", + "DerivedQuantizationSpec", + "QuantizationAnnotation", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fde96bd7bcc58ea65b5cba0887de2e681d431273 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58123f160457d58cd68ded5af83c49c52f5829f1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..626b62cf67a86e792f2f91ebc069fdb455d7777c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/composable_quantizer.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/composable_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..75f7d1ad5f1e9f15890970222d54a2ee492acc17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/composable_quantizer.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from typing import Dict, List + +import torch + +from torch.fx import Node + +from .quantizer import QuantizationAnnotation, Quantizer + +__all__ = [ + "ComposableQuantizer", +] + + +class ComposableQuantizer(Quantizer): + """ + ComposableQuantizer allows users to combine more than one quantizer into a single quantizer. + This allows users to quantize a model with multiple quantizers. E.g., embedding quantization + maybe supported by one quantizer while linear layers and other ops might be supported by another + quantizer. + + ComposableQuantizer is initialized with a list of `Quantizer` instances. 
+ The order of the composition matters since that is the order in which the quantizers will be + applies. + Example: + ``` + embedding_quantizer = EmbeddingQuantizer() + linear_quantizer = MyLinearQuantizer() + xnnpack_quantizer = XNNPackQuantizer() # to handle ops not quantized by previous two quantizers + composed_quantizer = ComposableQuantizer([embedding_quantizer, linear_quantizer, xnnpack_quantizer]) + prepared_m = prepare_pt2e(model, composed_quantizer) + ``` + """ + + def __init__(self, quantizers: List[Quantizer]): + super().__init__() + self.quantizers = quantizers + self._graph_annotations: Dict[Node, QuantizationAnnotation] = {} + + def _record_and_validate_annotations( + self, gm: torch.fx.GraphModule, quantizer: Quantizer + ) -> None: + for n in gm.graph.nodes: + if "quantization_annotation" in n.meta: + # check if the annotation has been changed by + # comparing QuantizationAnnotation object id + if n in self._graph_annotations and ( + id(self._graph_annotations[n]) + != id(n.meta["quantization_annotation"]) + ): + raise RuntimeError( + f"Quantizer {quantizer.__class__.__name__} has changed annotations on node {n}" + ) + else: + self._graph_annotations[n] = n.meta["quantization_annotation"] + else: + if n in self._graph_annotations: + raise RuntimeError( + f"Quantizer {quantizer.__class__.__name__} has removed annotations on node {n}" + ) + + def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: + """just handling global spec for now""" + for quantizer in self.quantizers: + quantizer.annotate(model) + self._record_and_validate_annotations(model, quantizer) + return model + + def transform_for_annotation( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + for quantizer in self.quantizers: + model = quantizer.transform_for_annotation(model) + return model + + def validate(self, model: torch.fx.GraphModule) -> None: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/embedding_quantizer.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/embedding_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffd2002e580db7cc6cae69161de1e88c788073c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/embedding_quantizer.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +import copy +from typing import List, Set + +import torch +import torch.nn.functional as F +from torch.ao.quantization.observer import PerChannelMinMaxObserver +from torch.ao.quantization.quantizer.quantizer import ( + QuantizationAnnotation, + QuantizationSpec, + Quantizer, +) +from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import ( + OperatorConfig, + OperatorPatternType, + QuantizationConfig, +) + +__all__ = [ + "get_embedding_operators_config", + "EmbeddingQuantizer", +] + + +def get_embedding_operators_config() -> OperatorConfig: + weight_quantization_spec = QuantizationSpec( + dtype=torch.uint8, + qscheme=torch.per_channel_affine_float_qparams, + ch_axis=0, + observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(eps=2**-12), + ) + quantization_config = QuantizationConfig(None, None, weight_quantization_spec, None) + ops: List[OperatorPatternType] = [[torch.nn.Embedding]] + ops.append([F.embedding]) + supported_config_and_operators = OperatorConfig( + config=quantization_config, operators=ops + ) + return copy.deepcopy(supported_config_and_operators) + + +class EmbeddingQuantizer(Quantizer): + def __init__(self): + 
super().__init__() + + @classmethod + def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: + op_configs: Set[QuantizationConfig] = set({}) + for spec, _ in cls.get_supported_operators(): + op_configs.add(spec) + return list(op_configs) + + @classmethod + def get_supported_operator_for_quantization_config( + cls, quantization_config: QuantizationConfig + ) -> List[OperatorPatternType]: + for config, ops in cls.get_supported_operators(): + # note: this assumes each entry in cls.supported_spec_and_operators + # corresponds to one spec, e.g. we don't have + # [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)] + # where the first and second entry have the same spec but did not + # merge the op list + if config == quantization_config: + return ops + return [] + + def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: + """just handling global spec for now""" + self._annotate_embedding_ops(model.graph) + return model + + def _annotate_embedding_ops(self, graph: torch.fx.Graph) -> None: + embedding_config: OperatorConfig = get_embedding_operators_config() + for node in graph.nodes: + # Keep node parsing based annotations instead of module partitioners + # just as an example of alternate ways of annotating + if ( + node.op == "call_function" + and node.target == torch.ops.aten.embedding.default + ): + if embedding_config.config.weight is None: + raise ValueError( + "Embedding config must have a valid weight quantization spec." + ) + node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map={ + node.args[0]: embedding_config.config.weight, + } + ) + + def validate(self, model: torch.fx.GraphModule) -> None: + pass + + @classmethod + def get_supported_operators(cls) -> List[OperatorConfig]: + return [get_embedding_operators_config()] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/quantizer.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..5fd1fd20b460310e270bc2a2ef973a4fcc584d66 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/quantizer.py @@ -0,0 +1,158 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.ao.quantization import ObserverOrFakeQuantize +from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor +from torch.fx import Node + +__all__ = [ + "Quantizer", + "QuantizationSpecBase", + "QuantizationSpec", + "FixedQParamsQuantizationSpec", + "EdgeOrNode", + "SharedQuantizationSpec", + "DerivedQuantizationSpec", + "QuantizationAnnotation", +] + + +class QuantizationSpecBase(ABC): # noqa: B024 + """Base class for different types of quantization specs that allows users to + specify how to quantize a Tensor (input/output of a Node) in the model + """ + + pass + + +@dataclass(eq=True, frozen=True) +class QuantizationSpec(QuantizationSpecBase): + """Quantization spec for common operators that allows user to specify how to + quantize a Tensor, this includes dtype, quant_min, quant_max etc. + """ + + dtype: torch.dtype + # observer or fake_quantize constructor such as + # MinMaxObserver, PerChannelHistogramObserver etc. + # or we can attach some custom args to them + # e.g. 
MinMaxObserver.with_args(eps=eps) + observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor + quant_min: Optional[int] = None + quant_max: Optional[int] = None + qscheme: Optional[torch.qscheme] = None + ch_axis: Optional[int] = None + is_dynamic: bool = False + + def __post_init__(self): + # quant_min must be less than quant_max + if ( + self.quant_min is not None + and self.quant_max is not None + and self.quant_min > self.quant_max + ): + raise ValueError( + f"quant_min {self.quant_min} must be <= quant_max {self.quant_max}." + ) + + # ch_axis must be less than the number of channels + # but no way to check here. Just check that it is not < 0. + if self.ch_axis is not None and self.ch_axis < 0: + raise ValueError("Ch_axis is < 0.") + + +@dataclass(eq=True, frozen=True) +class FixedQParamsQuantizationSpec(QuantizationSpecBase): + dtype: torch.dtype + scale: float + zero_point: int + quant_min: Optional[int] = None + quant_max: Optional[int] = None + qscheme: Optional[torch.qscheme] = None + + +""" +The way we refer to other points of quantization in the graph will be either +an input edge or an output value +input edge is the connection between input node and the node consuming the input, so it's a Tuple[Node, Node] +output value is an fx Node +""" +EdgeOrNode = Union[Tuple[Node, Node], Node] +EdgeOrNode.__module__ = "torch.ao.quantization.quantizer.quantizer" + + +@dataclass(eq=True, frozen=True) +class SharedQuantizationSpec(QuantizationSpecBase): + """ + Quantization spec for the Tensors whose quantization parameters are shared with other Tensors + """ + + # the edge or node to share observer or fake quant instances with + edge_or_node: EdgeOrNode + + +@dataclass(eq=True, frozen=True) +class DerivedQuantizationSpec(QuantizationSpecBase): + """Quantization spec for the Tensors whose quantization parameters are derived from other Tensors""" + + derived_from: List[EdgeOrNode] + derive_qparams_fn: Callable[[List[ObserverOrFakeQuantize]], Tuple[Tensor, Tensor]] + dtype: torch.dtype + quant_min: Optional[int] = None + quant_max: Optional[int] = None + qscheme: Optional[torch.qscheme] = None + ch_axis: Optional[int] = None + + +@dataclass +class QuantizationAnnotation: + """How are input arguemnt or output should be quantized, + expressed as QuantizationSpec, this corresponds to how a Tensor in the + operator Graph is observed (PTQ) or fake quantized (QAT) + """ + + # a map from torch.fx.Node to a type of QuantizationSpecBase + input_qspec_map: Dict[Node, Optional[QuantizationSpecBase]] = field( + default_factory=dict + ) + + # How the output of this node is quantized, expressed as QuantizationSpec + # TODO: change the value to QuantizationSpec in a separate PR + output_qspec: Optional[QuantizationSpecBase] = None + + # For a Node: node1 and edge: (node1, node2), since they are observing the same + # Tensor, we may want to implicitly share observers, this flag allows people to + # turn off this behavior for the output of the node + allow_implicit_sharing: bool = True + + # whether the node is annotated or not + _annotated: bool = False + + +class Quantizer(ABC): + def transform_for_annotation( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + """Allows for user defined transforms to run before annotating the graph. + This allows quantizer to allow quantizing part of the model that are otherwise not quantizable. 
+ For example quantizer can + a) decompose a compound operator like scaled dot product attention, + into bmm and softmax if quantizer knows how to quantize bmm/softmax but not sdpa + or b) transform scalars to tensor to allow quantizing scalares. + + Note: this is an optional method + """ + return model + + # annotate nodes in the graph with observer or fake quant constructors + # to convey the desired way of quantization + @abstractmethod + def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: + pass + + # validate the annotated graph is supported by the backend + @abstractmethod + def validate(self, model: torch.fx.GraphModule) -> None: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/x86_inductor_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..797753b7d940bc0358d976fd088993118da0452d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/x86_inductor_quantizer.py @@ -0,0 +1,1016 @@ +import copy +import functools +import itertools +import operator +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Sequence, Set, Tuple + +import torch +import torch.nn.functional as F +from torch.ao.quantization.fake_quantize import ( + FakeQuantize, + FusedMovingAvgObsFakeQuantize, +) +from torch.ao.quantization.observer import ( + HistogramObserver, + MovingAverageMinMaxObserver, + MovingAveragePerChannelMinMaxObserver, + PerChannelMinMaxObserver, + PlaceholderObserver, +) +from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions +from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor +from torch.ao.quantization.quantizer.quantizer import ( + QuantizationAnnotation, + QuantizationSpec, + Quantizer, + SharedQuantizationSpec, +) +from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import ( + _is_annotated, + get_bias_qspec, + get_input_act_qspec, + get_output_act_qspec, + get_weight_qspec, + OperatorConfig, + OperatorPatternType, + QuantizationConfig, +) +from torch.fx import Node +from torch.fx.passes.utils.source_matcher_utils import ( + get_source_partitions, + SourcePartition, +) + +__all__ = [ + "X86InductorQuantizer", + "get_default_x86_inductor_quantization_config", +] + + +@dataclass +class _X86InductorQuantizationAnnotation(QuantizationAnnotation): + # _is_output_of_quantized_pattern: + # * Node as output node of a fusion pattern. + # * The fusion pattern supports int8 data type. + # * The fusion pattern has inputs annotated to insert observer. + _is_output_of_quantized_pattern: bool = False + + +# Operations that: +# 1. Operations are optimized to run with int8 when int8 input provided. +# 2. Operations do not support int8 input and produce fp32 output. +int8_in_int8_out_ops_pt2e: Set = { + torch.ops.aten.max_pool2d.default, + torch.ops.aten.cat.default, + torch.ops.aten.avg_pool2d.default, + torch.ops.aten.adaptive_avg_pool2d.default, + torch.ops.aten.flatten.using_ints, +} + + +# Operations support the int8 data type and exclude operations such as conv and linear. +# A superset of int8_in_int8_out_ops_pt2e incorporating additional operators. 
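(Editorial aside, not part of the diffed files: quantizer.py above defines the full extension surface, namely Quantizer, QuantizationSpec, QuantizationAnnotation and SharedQuantizationSpec, so a backend quantizer can be written against it alone. The following is a minimal, hypothetical sketch of that; the class name MyInt8Quantizer and the int8 ranges are illustrative and do not come from this diff.)

```python
# Hypothetical sketch, not part of the diff: a tiny Quantizer built only from the
# classes defined in quantizer.py above.
import torch
from torch.ao.quantization.observer import MinMaxObserver
from torch.ao.quantization.quantizer import (
    QuantizationAnnotation,
    QuantizationSpec,
    Quantizer,
)

# A per-tensor int8 activation spec, built the same way the quantizers in this diff build theirs.
act_qspec = QuantizationSpec(
    dtype=torch.int8,
    quant_min=-128,
    quant_max=127,
    qscheme=torch.per_tensor_affine,
    observer_or_fake_quant_ctr=MinMaxObserver.with_args(eps=2**-12),
)


class MyInt8Quantizer(Quantizer):
    """Illustrative quantizer: annotate aten.linear input/output with act_qspec."""

    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        for node in model.graph.nodes:
            if (
                node.op == "call_function"
                and node.target == torch.ops.aten.linear.default
            ):
                # Attach the annotation in node.meta, as the quantizers in this diff do.
                node.meta["quantization_annotation"] = QuantizationAnnotation(
                    input_qspec_map={node.args[0]: act_qspec},
                    output_qspec=act_qspec,
                    _annotated=True,
                )
        return model

    def validate(self, model: torch.fx.GraphModule) -> None:
        pass
```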
+quantizable_ops_pt2e = copy.deepcopy(int8_in_int8_out_ops_pt2e) + +QUANT_ANNOTATION_KEY = "quantization_annotation" + + +def _mark_nodes_as_annotated(nodes: List[Node]): + for node in nodes: + if node is not None: + if QUANT_ANNOTATION_KEY not in node.meta: + node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation() + node.meta[QUANT_ANNOTATION_KEY]._annotated = True + + +def _is_node_annotated(_node): + """ + return True if the node is annotated, otherwise return False + """ + return ( + QUANT_ANNOTATION_KEY in _node.meta + and _node.meta[QUANT_ANNOTATION_KEY]._annotated + ) + + +def _is_any_annotated(nodes: List[Node]): + """ + Given a list of nodes (that represents an operator pattern), + check if any of the node is annotated, return True if any of the node + is annotated, otherwise return False. + """ + return any(_is_node_annotated(node) for node in nodes) + + +def _is_all_annotated(nodes: List[Node]): + """ + Given a list of nodes (that represents an operator pattern), + return True if all of the node is annotated, otherwise return False. + """ + return all(_is_node_annotated(node) for node in nodes) + + +def _is_quantized_op_pt2e(node: torch.fx.Node): + """ + Used for pt2e flow to check if the node is a quantized node: + Case1: the node has been annotated as output node of a fusion pattern. + Case2: the node has been annotated as single quantized node. + """ + if not _is_any_annotated([node]): + # The node has not been annotated, directly return False + return False + quantization_annotation = node.meta.get(QUANT_ANNOTATION_KEY, None) + assert isinstance(quantization_annotation, _X86InductorQuantizationAnnotation) + return quantization_annotation._is_output_of_quantized_pattern + + +def _supported_quantized_operators() -> Dict[str, List[OperatorPatternType]]: + # TODO: Add more supported operators here. 
+ supported_operators: Dict[str, List[OperatorPatternType]] = { + "conv2d": [ + [torch.nn.Conv2d], + [F.conv2d], + ], + } + + # Append Conv Optional(Add) Optioinal(ReLU) + conv_add_relu_options = itertools.product( + [torch.nn.Conv2d, F.conv2d], + [torch.add, operator.add, None], # add + [torch.nn.ReLU, F.relu, None], # relu + ) + for conv_op, add_op, relu_op in conv_add_relu_options: + if add_op is None: + # Append Conv ReLU + supported_operators["conv2d"].append([conv_op, relu_op]) # type: ignore[list-item] + elif relu_op is None: + # Append Conv Add + supported_operators["conv2d"].append([conv_op, add_op]) # type: ignore[list-item] + else: + # Append Conv Add ReLU + supported_operators["conv2d"].append([conv_op, add_op, relu_op]) # type: ignore[list-item] + + return copy.deepcopy(supported_operators) + + +def _get_supported_x86_inductor_config_and_operators() -> List[OperatorConfig]: + supported_config_and_operators: List[OperatorConfig] = [] + for quantization_config in [ + get_default_x86_inductor_quantization_config(), + ]: + ops = _supported_quantized_operators() + for pattern_list in ops.values(): + supported_config_and_operators.append( + OperatorConfig(quantization_config, pattern_list) + ) + return copy.deepcopy(supported_config_and_operators) + + +@functools.lru_cache +def get_default_x86_inductor_quantization_config( + is_qat: bool = False, + is_dynamic: bool = False, +): + extra_args: Dict[str, Any] = {"eps": 2**-12} + if is_qat: + if is_dynamic: + act_observer_or_fake_quant_ctr = FakeQuantize + dynamic_quant_observer = MovingAverageMinMaxObserver.with_args( + averaging_constant=1 + ) + extra_args["observer"] = dynamic_quant_observer + else: + act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize # type: ignore[assignment] + else: + if is_dynamic: + act_observer_or_fake_quant_ctr = PlaceholderObserver # type: ignore[assignment] + else: + act_observer_or_fake_quant_ctr = HistogramObserver # type: ignore[assignment] + + # Copy from x86 default qconfig from torch/ao/quantization/qconfig.py + act_quantization_spec = QuantizationSpec( + dtype=torch.uint8, + quant_min=0, + quant_max=255, # reduce_range=False + qscheme=torch.per_tensor_affine, + is_dynamic=is_dynamic, + observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args( + **extra_args + ), + ) + + weight_observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor = ( + FusedMovingAvgObsFakeQuantize if is_qat else PerChannelMinMaxObserver + ) + + if is_qat: + # Only support per channel quant for now + extra_args["observer"] = MovingAveragePerChannelMinMaxObserver # type: ignore[dict-item] + weight_quantization_spec = QuantizationSpec( + dtype=torch.int8, + quant_min=-128, + quant_max=127, + qscheme=torch.per_channel_symmetric, + ch_axis=0, # 0 corresponding to weight shape = (oc, ic, kh, kw) of conv + is_dynamic=False, + observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr.with_args( + **extra_args + ), + ) + bias_quantization_spec = None # will use placeholder observer by default + quantization_config = QuantizationConfig( + act_quantization_spec, + act_quantization_spec, + weight_quantization_spec, + bias_quantization_spec, + is_qat, + ) + return quantization_config + + +def _get_supported_config_and_operators() -> List[OperatorConfig]: + return _get_supported_x86_inductor_config_and_operators() + + +class X86InductorQuantizer(Quantizer): + supported_config_and_operators = _get_supported_config_and_operators() + + def __init__(self): + super().__init__() + self.global_config: QuantizationConfig 
= None # type: ignore[assignment] + self.operator_type_config: Dict[str, Optional[QuantizationConfig]] = {} + + @classmethod + def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: + op_configs: Set[QuantizationConfig] = set({}) + for spec, _ in cls.supported_config_and_operators: + op_configs.add(spec) + return list(op_configs) + + @classmethod + def get_supported_operator_for_quantization_config( + cls, quantization_config: Optional[QuantizationConfig] + ) -> List[OperatorPatternType]: + if quantization_config is None: + all_ops = [] + for _, ops in cls.supported_config_and_operators: + all_ops.extend(ops) + return all_ops + + for config, ops in cls.supported_config_and_operators: + if config == quantization_config: + return ops + return [] + + def set_global(self, quantization_config: QuantizationConfig): + self.global_config = quantization_config + return self + + def set_config_for_operator_type( + self, operator_type: str, quantization_config: QuantizationConfig + ): + self.operator_type_config[operator_type] = quantization_config + return self + + def _annotate_conv_node_helper( + self, + conv_node: torch.fx.Node, + annotate_output: bool, + quantization_config: QuantizationConfig, + ) -> None: + """Helper function to annotate the conv node""" + input_qspec_map = {} + input_node = conv_node.args[0] + assert isinstance(input_node, Node) + input_qspec_map[input_node] = get_input_act_qspec(quantization_config) + weight_node = conv_node.args[1] + assert isinstance(weight_node, Node) + input_qspec_map[weight_node] = get_weight_qspec(quantization_config) + bias_node = None if len(conv_node.args) == 2 else conv_node.args[2] + if isinstance(bias_node, Node): + input_qspec_map[bias_node] = get_bias_qspec(quantization_config) + if annotate_output: + conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + else: + conv_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + ) + + def _annotate_linear_node_helper( + self, + linear_node: torch.fx.Node, + annotate_output: bool, + quantization_config: QuantizationConfig, + ) -> None: + """Helper function to annotate the linear node""" + input_qspec_map = {} + assert linear_node.target in (torch.ops.aten.linear.default,) + has_bias = len(linear_node.args) == 3 + input_index = 0 + weight_index = 1 + bias_index = 2 + + input_node = linear_node.args[input_index] + assert isinstance(input_node, Node) + input_qspec_map[input_node] = get_input_act_qspec(quantization_config) + + weight_node = linear_node.args[weight_index] + assert isinstance(weight_node, Node) + input_qspec_map[weight_node] = get_weight_qspec(quantization_config) + + bias_node = linear_node.args[bias_index] if has_bias else None + if isinstance(bias_node, Node): + input_qspec_map[bias_node] = get_bias_qspec(quantization_config) + + if annotate_output: + linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + else: + linear_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, _annotated=True + ) + + def _get_output_nodes_of_partitions( + self, + partition_list: List[SourcePartition], + ) -> List[torch.fx.Node]: + """Helper function to get the output node list from partition list""" + output_node_list = [] + for 
partition in partition_list: + if len(partition.output_nodes) > 1: + raise ValueError("Input partition has more than one output node") + output_node = partition.output_nodes[0] + assert isinstance(output_node, Node) + output_node_list.append(output_node) + if len(output_node_list) != len(partition_list): + raise ValueError( + "length of output_node_list should equal to length of partition_list" + ) + return output_node_list + + def _get_input_idx_for_binary_node( + self, + conv_gemm_node: torch.fx.Node, + binary_node: torch.fx.Node, + ): + """Helper function to check conv_gemm and extra input node index + for binary node fused with conv_gemm. + """ + conv_gemm_node_idx = None + extra_input_node_idx = None + if (binary_node.args[0].op == "call_function") and ( # type: ignore[union-attr] + binary_node.args[0] == conv_gemm_node + ): + conv_gemm_node_idx = 0 + extra_input_node_idx = 1 + elif (binary_node.args[1].op == "call_function") and ( # type: ignore[union-attr] + binary_node.args[1] == conv_gemm_node + ): + conv_gemm_node_idx = 1 + extra_input_node_idx = 0 + extra_input_node = binary_node.args[extra_input_node_idx] # type: ignore[index] + assert isinstance(extra_input_node, Node) + return conv_gemm_node_idx, extra_input_node_idx + + def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: + """just handling global spec for now""" + if self.global_config and self.global_config.input_activation.is_dynamic: # type: ignore[union-attr] + model = self._annotate_for_dynamic_quantization_config(model) + else: + model = self._annotate_for_static_quantization_config(model) + return model + + def _annotate_for_static_quantization_config( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + r""" + High-level description of quantization recipe for X86 Inductor Backend: + Step 1: Apply quantization recipe for fusion patterns of conv/linear to enable int8 data type actively. + Step 2: Propagate quantization annotation for patterns besides conv/linear. Go through the pattern in model + from start to the end. If a pattern supports computation with int8 data type and inputs connected to + quantized patterns, annotate its inputs as quantized pattern. + Step 3: Since in step 2, we only annotate the inputs of quantized pattern. For some quantized patterns, + such as maxpool2d, which only supports output with int8 data type when the input is with int8 data type, + we need to annotate the output of this pattern. + """ + + config = self.global_config + + # Step1: Recipe of fusion patterns like conv/linear. + if config.is_qat: + # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat + self._annotate_qat_conv2d_fusion_pattern(model, config) + + self._annotate_conv2d_fusion_pattern(model, config) + + # Step2: Recipe to propagate annotation for patterns beside conv/linear. + # Go through all the nodes from start to end. + # Recipe refer to https://github.com/intel/intel-extension-for-pytorch/blob/ + # 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L538 + for node in model.graph.nodes: + self._annotation_propagation_quantizable_pattern(node, config) + + # Step3: For quantizable ops, such as maxpool2d, we need to quantize its output if it is quantized + # in inputs. So, we can fuse dq-operator-q into a quantized op. 
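(Editorial aside, not part of the diffed file: the three-step static recipe described above is what executes when X86InductorQuantizer is used in the PT2E flow. A hedged usage sketch follows, assuming capture_pre_autograd_graph as the export entry point for this PyTorch vintage (newer releases move capture under torch.export) and a toy Conv2d+ReLU model that is purely illustrative.)

```python
# Hypothetical sketch, not part of the diff.
import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from torch.ao.quantization.quantizer.x86_inductor_quantizer import (
    X86InductorQuantizer,
    get_default_x86_inductor_quantization_config,
)

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 3, 16, 16),)

quantizer = X86InductorQuantizer()
quantizer.set_global(get_default_x86_inductor_quantization_config())

exported = capture_pre_autograd_graph(model, example_inputs)
prepared = prepare_pt2e(exported, quantizer)  # annotations from the recipe drive observer insertion
prepared(*example_inputs)                     # calibration (a single batch here, for brevity)
quantized = convert_pt2e(prepared)            # lower annotations to quantize/dequantize ops
```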
+ # Refer to https://github.com/intel/intel-extension-for-pytorch/blob/ + # 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L487 + for node in model.graph.nodes: + self._annotate_output_for_int8_in_int8_out_pattern(node, config) + + return model + + def _annotate_for_dynamic_quantization_config( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + config = self.global_config + self._annotate_linear(model, config) + return model + + def _annotate_qat_conv2d_fusion_pattern( + self, model: torch.fx.GraphModule, config: QuantizationConfig + ): + # Annotate QAT Specific patterns + self._annotate_qat_conv2d_bn_binary_unary(model, config) + self._annotate_qat_conv2d_bn_binary(model, config) + self._annotate_qat_conv2d_bn_unary(model, config) + self._annotate_qat_conv2d_bn(model, config) + + def _annotate_qat_conv2d_bn_binary_unary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + fused_partitions = find_sequential_partitions( + gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d, operator.add, torch.nn.ReLU] + ) + for fused_partition in fused_partitions: + ( + conv_partition, + bn_partition, + binary_partition, + unary_partition, + ) = fused_partition + + ( + conv_node, + bn_output_node, + binary_node, + unary_node, + ) = self._get_output_nodes_of_partitions( + [conv_partition, bn_partition, binary_partition, unary_partition] + ) + if len(bn_output_node.users) != 1: + # Conv BN pattern should only has 1 user. + continue + ( + bn_output_node_idx, + extra_input_node_idx, + ) = self._get_input_idx_for_binary_node(bn_output_node, binary_node) + if (bn_output_node_idx is None) or (extra_input_node_idx is None): + continue + if bn_output_node != binary_node.args[bn_output_node_idx]: + raise ValueError(f"{bn_output_node} doesn't match input of binary node") + extra_input_node = binary_node.args[extra_input_node_idx] + + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + continue + + if _is_annotated([unary_node, binary_node, bn_output_node, conv_node]): + continue + + self._annotate_conv_node_helper(conv_node, False, quantization_config) + + binary_node_input_qspec_map = {} + binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec( + quantization_config + ) + binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=binary_node_input_qspec_map, + _annotated=True, + ) + unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + # TODO Remove the annotate of output in QAT when qat util support pattern matcher. 
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + nodes_to_mark_annotated = list(conv_partition.nodes) + nodes_to_mark_annotated.extend(list(bn_partition.nodes)) + nodes_to_mark_annotated.extend(list(binary_partition.nodes)) + nodes_to_mark_annotated.extend(list(unary_partition.nodes)) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + + def _annotate_qat_conv2d_bn_binary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + fused_partitions = find_sequential_partitions( + gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d, operator.add] + ) + for fused_partition in fused_partitions: + conv_partition, bn_partition, binary_partition = fused_partition + ( + conv_node, + bn_output_node, + binary_node, + ) = self._get_output_nodes_of_partitions( + [conv_partition, bn_partition, binary_partition] + ) + if len(bn_output_node.users) != 1: + # Conv BN pattern should only has 1 user. + continue + ( + bn_output_node_idx, + extra_input_node_idx, + ) = self._get_input_idx_for_binary_node(bn_output_node, binary_node) + if (bn_output_node_idx is None) or (extra_input_node_idx is None): + continue + if bn_output_node != binary_node.args[bn_output_node_idx]: + raise ValueError(f"{bn_output_node} doesn't match input of binary node") + + extra_input_node = binary_node.args[extra_input_node_idx] + + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + continue + + if _is_annotated([binary_node, bn_output_node, conv_node]): + continue + + self._annotate_conv_node_helper(conv_node, False, quantization_config) + + binary_node_input_qspec_map = {} + binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec( + quantization_config + ) + binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=binary_node_input_qspec_map, + # TODO Remove the annotate of output in QAT when qat util support pattern matcher. 
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + nodes_to_mark_annotated = list(conv_partition.nodes) + nodes_to_mark_annotated.extend(list(bn_partition.nodes)) + nodes_to_mark_annotated.extend(list(binary_partition.nodes)) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + + def _annotate_qat_conv2d_bn_unary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + fused_partitions = [] + unary_patterns = [ + [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU], + [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Hardtanh], + [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.Hardswish], + [torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU6], + ] + for unary_pattern in unary_patterns: + partitions = find_sequential_partitions(gm, unary_pattern) + if partitions: + # Extend the fused_partitions if partitions is not empty + fused_partitions.extend(partitions) + + for fused_partition in fused_partitions: + conv_partition, bn_partition, unary_partition = fused_partition + ( + conv_node, + bn_output_node, + unary_node, + ) = self._get_output_nodes_of_partitions( + [conv_partition, bn_partition, unary_partition] + ) + + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + continue + + if _is_annotated([unary_node, bn_output_node, conv_node]): + continue + + self._annotate_conv_node_helper(conv_node, False, quantization_config) + unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + # TODO Remove the annotate of output in QAT when qat util support pattern matcher. + output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + nodes_to_mark_annotated = list(conv_partition.nodes) + nodes_to_mark_annotated.extend(list(bn_partition.nodes)) + nodes_to_mark_annotated.extend(list(unary_partition.nodes)) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + + def _annotate_qat_conv2d_bn( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + fused_partitions = find_sequential_partitions( + gm, [torch.nn.Conv2d, torch.nn.BatchNorm2d] + ) + for fused_partition in fused_partitions: + conv_partition, bn_partition = fused_partition + conv_node, bn_output_node = self._get_output_nodes_of_partitions( + [conv_partition, bn_partition] + ) + + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + continue + + if _is_annotated([bn_output_node, conv_node]): + continue + + self._annotate_conv_node_helper(conv_node, False, quantization_config) + bn_output_node.meta[ + QUANT_ANNOTATION_KEY + ] = _X86InductorQuantizationAnnotation( + # TODO Remove the annotate of output in QAT when qat util support pattern matcher. 
+ output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + nodes_to_mark_annotated = list(conv_partition.nodes) + nodes_to_mark_annotated.extend(list(bn_partition.nodes)) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + + def _annotate_conv2d_fusion_pattern( + self, model: torch.fx.GraphModule, config: QuantizationConfig + ): + self._annotate_conv2d_binary_unary(model, config) + self._annotate_conv2d_binary(model, config) + self._annotate_conv2d_unary(model, config) + self._annotate_conv2d(model, config) + self._annotate_linear_unary(model, config) + self._annotate_linear(model, config) + + def _annotate_conv2d_binary_unary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + # Conv2d + add + unary op + fused_partitions = find_sequential_partitions( + gm, [torch.nn.Conv2d, operator.add, torch.nn.ReLU] + ) + for fused_partition in fused_partitions: + conv_partition, binary_partition, unary_partition = fused_partition + conv_node, binary_node, unary_node = self._get_output_nodes_of_partitions( + [conv_partition, binary_partition, unary_partition] + ) + if len(conv_node.users) != 1: + # Conv Node should only has 1 user node + continue + conv_node_idx, extra_input_node_idx = self._get_input_idx_for_binary_node( + conv_node, binary_node + ) + if (conv_node_idx is None) or (extra_input_node_idx is None): + continue + if conv_node != binary_node.args[conv_node_idx]: + raise ValueError(f"{conv_node} doesn't match input of binary node") + extra_input_node = binary_node.args[extra_input_node_idx] + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + # No conv node found to be fused with add + continue + if _is_annotated([unary_node, binary_node, conv_node]): + continue + self._annotate_conv_node_helper(conv_node, False, quantization_config) + binary_node_input_qspec_map = {} + binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec( + quantization_config + ) + binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=binary_node_input_qspec_map, + _annotated=True, + ) + unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def _annotate_conv2d_binary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + # Conv2d + add + fused_partitions = find_sequential_partitions( + gm, [torch.nn.Conv2d, operator.add] + ) + for fused_partition in fused_partitions: + conv_partition, binary_partition = fused_partition + conv_node, binary_node = self._get_output_nodes_of_partitions( + [conv_partition, binary_partition] + ) + if len(conv_node.users) != 1: + # Conv Node should only has 1 user node + continue + conv_node_idx, extra_input_node_idx = self._get_input_idx_for_binary_node( + conv_node, binary_node + ) + if (conv_node_idx is None) or (extra_input_node_idx is None): + continue + if conv_node != binary_node.args[conv_node_idx]: + raise ValueError(f"{conv_node} doesn't match input of binary node") + extra_input_node = binary_node.args[extra_input_node_idx] + assert isinstance(conv_node, Node) + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + # No conv node found to be fused with add + continue + if _is_annotated([binary_node, conv_node]): + continue + self._annotate_conv_node_helper(conv_node, False, 
quantization_config) + binary_node_input_qspec_map = {} + binary_node_input_qspec_map[extra_input_node] = get_input_act_qspec( + quantization_config + ) + binary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=binary_node_input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def _annotate_conv2d_unary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + fused_partitions = [] + unary_patterns = [ + [torch.nn.Conv2d, torch.nn.ReLU], + [torch.nn.Conv2d, torch.nn.Hardtanh], + [torch.nn.Conv2d, torch.nn.Hardswish], + [torch.nn.Conv2d, torch.nn.ReLU6], + ] + for unary_pattern in unary_patterns: + partitions = find_sequential_partitions(gm, unary_pattern) + if partitions: + # Extend the fused_partitions if partitions is not empty + fused_partitions.extend(partitions) + + for fused_partition in fused_partitions: + conv_partition, unary_partition = fused_partition + conv_node, unary_node = self._get_output_nodes_of_partitions( + [conv_partition, unary_partition] + ) + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + continue + if _is_annotated([unary_node, conv_node]): + continue + self._annotate_conv_node_helper(conv_node, False, quantization_config) + unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def _annotate_conv2d( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + conv_partitions = get_source_partitions( + gm.graph, [torch.nn.Conv2d, torch.nn.functional.conv2d] + ) + conv_partitions = list(itertools.chain.from_iterable(conv_partitions.values())) + for conv_partition in conv_partitions: + if len(conv_partition.output_nodes) > 1: + raise ValueError("conv partition has more than one output node") + conv_node = conv_partition.output_nodes[0] + if ( + conv_node.op != "call_function" + or conv_node.target != torch.ops.aten.conv2d.default + ): + raise ValueError(f"{conv_node} is not an aten conv2d operator") + # skip annotation if it is already annotated + if _is_annotated([conv_node]): + continue + self._annotate_conv_node_helper(conv_node, True, quantization_config) + + def _annotate_maxpool2d( + self, node: Node, quantization_config: QuantizationConfig + ) -> None: + if node.target is not torch.ops.aten.max_pool2d.default: + return + maxpool_node = node + if _is_any_annotated( + [ + maxpool_node, + ] + ): + return + input_node = maxpool_node.args[0] + assert isinstance(input_node, Node) + input_qspec_map = {} + input_qspec_map[input_node] = get_input_act_qspec(quantization_config) + maxpool_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def _annotate_cat( + self, node: Node, quantization_config: QuantizationConfig + ) -> None: + cat_node = node + input_nodes = cat_node.args[0] + assert isinstance(input_nodes, Sequence) + first_input_node = input_nodes[0] + input_qspec_map = {} + assert isinstance(first_input_node, Node) + assert isinstance(cat_node, Node) + input_qspec_map[first_input_node] = get_input_act_qspec(quantization_config) + share_qparams_with_input_act0_qspec = SharedQuantizationSpec( + (first_input_node, cat_node) + ) + + for input_node in input_nodes[1:]: + if input_node not in input_qspec_map: + # There has the case of cat same nodes: torch.cat([input0, input0], 1) + 
assert isinstance(input_node, Node) + input_qspec_map[input_node] = share_qparams_with_input_act0_qspec + + cat_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def _annotation_propagation_quantizable_pattern( + self, node: Node, quantization_config: QuantizationConfig + ) -> None: + # Propagate annotation to quantizable patterns. + if ( + (node.target in quantizable_ops_pt2e) + and (not _is_any_annotated([node])) + and (node.op == "call_function") + ): + + def is_all_inputs_connected_to_quantized_op(input_nodes): + # Ensure all the inputs connect to fusion pattern or quantized node + for input_node in input_nodes: + if not _is_quantized_op_pt2e(input_node): + return False + return True + + if node.target is torch.ops.aten.max_pool2d.default: + # Recipe of maxpool2d: check input arg[0] of maxpool2d is quantized or not + input_nodes_to_check = [node.all_input_nodes[0]] + if not is_all_inputs_connected_to_quantized_op(input_nodes_to_check): + return + self._annotate_maxpool2d(node, quantization_config) + return + elif node.target is torch.ops.aten.cat.default: + input_nodes_to_check = node.all_input_nodes + if not is_all_inputs_connected_to_quantized_op(input_nodes_to_check): + return + self._annotate_cat(node, quantization_config) + else: + input_node = node.all_input_nodes[0] + if not is_all_inputs_connected_to_quantized_op( + [ + input_node, + ] + ): + return + input_qspec_map = {} + input_qspec_map[input_node] = get_input_act_qspec(quantization_config) + node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + return + + def _annotate_output_share_observer_as_input( + self, input_node: Node, source_node: Node + ): + source_node_quantization_annotation = ( + source_node.meta[QUANT_ANNOTATION_KEY] + if QUANT_ANNOTATION_KEY in source_node.meta + else None + ) + if ( + source_node_quantization_annotation + and source_node_quantization_annotation._is_output_of_quantized_pattern + ): + edge_or_node = (input_node, source_node) + source_node_quantization_annotation.output_qspec = SharedQuantizationSpec( + edge_or_node + ) + return + + def _annotate_output_for_int8_in_int8_out_pattern( + self, node: Node, quantization_config: QuantizationConfig + ) -> None: + r""" + Check and insert observer at output of node in int8_in_int8_out_ops_pt2e if needed. 
+ Recipe refers to https://github.com/intel/intel-extension-for-pytorch/blob/ + 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_utils.py#L495 + """ + edge_or_node: Tuple[Node, Node] + if (node.target in int8_in_int8_out_ops_pt2e) and (_is_any_annotated([node])): + if node.target == torch.ops.aten.max_pool2d.default: + maxpool_node = node + if not _is_all_annotated( + [ + maxpool_node, + ] + ): + return + # Get the quantization_annotation from getitem_node + maxpool_node_quantization_annotation = ( + maxpool_node.meta[QUANT_ANNOTATION_KEY] + if QUANT_ANNOTATION_KEY in maxpool_node.meta + else None + ) + if ( + maxpool_node_quantization_annotation + and maxpool_node_quantization_annotation._is_output_of_quantized_pattern + ): + # Annotate the output_qspec of getitem_node + input_act = maxpool_node.args[0] + assert isinstance(input_act, Node) + assert isinstance(maxpool_node, Node) + edge_or_node = (input_act, maxpool_node) + maxpool_node_quantization_annotation.output_qspec = ( + SharedQuantizationSpec(edge_or_node) + ) + else: + input_node = node.all_input_nodes[0] + self._annotate_output_share_observer_as_input(input_node, node) + return + + def _annotate_linear( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + linear_partitions = get_source_partitions( + gm.graph, [torch.nn.Linear, torch.nn.functional.linear] + ) + linear_partitions = list( + itertools.chain.from_iterable(linear_partitions.values()) + ) + for partition in linear_partitions: + if len(partition.output_nodes) > 1: + raise ValueError( + "Linear partition cannot have more than one output node" + ) + linear_node = partition.output_nodes[0] + if linear_node.op != "call_function" or linear_node.target not in ( + torch.ops.aten.linear.default, + ): + raise ValueError(f"{linear_node} is not an aten linear operator") + # skip annotation if it is already annotated + if _is_annotated([linear_node]): + continue + self._annotate_linear_node_helper(linear_node, True, quantization_config) + + def _annotate_linear_unary( + self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig + ) -> None: + postop_list = [ + torch.nn.ReLU, + torch.nn.LeakyReLU, + torch.nn.Tanh, + ] + fused_partitions: List[tuple] = [] + for postop in postop_list: + fused_partitions = fused_partitions + find_sequential_partitions( + gm, [torch.nn.Linear, postop] + ) + for fused_partition in fused_partitions: + linear_partition, unary_partition = fused_partition + linear_node, unary_node = self._get_output_nodes_of_partitions( + [linear_partition, unary_partition] + ) + if linear_node.op != "call_function" or linear_node.target not in ( + torch.ops.aten.linear.default, + ): + continue + if _is_annotated([unary_node, linear_node]): + continue + self._annotate_linear_node_helper(linear_node, False, quantization_config) + unary_node.meta[QUANT_ANNOTATION_KEY] = _X86InductorQuantizationAnnotation( + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + + def validate(self, model: torch.fx.GraphModule) -> None: + pass + + @classmethod + def get_supported_operators(cls) -> List[OperatorConfig]: + return cls.supported_config_and_operators diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1062c62428975ac42c0c19c6fddf69904e0ebe19 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer.py @@ -0,0 +1,453 @@ +from __future__ import annotations + +import copy +import functools + +from typing import Any, Callable, Dict, List, Optional, Set + +import torch +import torch._dynamo as torchdynamo +import torch.nn.functional as F +from torch.ao.quantization.fake_quantize import ( + FakeQuantize, + FusedMovingAvgObsFakeQuantize, +) +from torch.ao.quantization.observer import ( + HistogramObserver, + MinMaxObserver, + MovingAverageMinMaxObserver, + MovingAveragePerChannelMinMaxObserver, + PerChannelMinMaxObserver, + PlaceholderObserver, +) + +from torch.ao.quantization.qconfig import _ObserverOrFakeQuantizeConstructor + +from torch.ao.quantization.quantizer import QuantizationSpec, Quantizer + +from torch.ao.quantization.quantizer.xnnpack_quantizer_utils import ( + _convert_scalars_to_attrs, + OP_TO_ANNOTATOR, + OperatorConfig, + OperatorPatternType, + propagate_annotation, + QuantizationConfig, +) + +from torch.fx import Node + + +__all__ = [ + "XNNPACKQuantizer", + "get_symmetric_quantization_config", +] + + +def _get_dynamo_graph(function: Callable, inputs) -> torch.fx.Graph: + gm, _ = torchdynamo.export(function, aten_graph=True)(*inputs) + gm.graph.eliminate_dead_code() + return gm.graph + + +def _get_linear_patterns(input_size: List[int]): + in_channels = input_size[-1] + out_channels = 8 # hard coding but this should not matter + weight = torch.ones((out_channels, in_channels)) + bias = torch.ones((out_channels,)) + act = torch.ones(input_size) + + def linear_op(act, weight, bias=None): + return F.linear(act, weight, bias) + + pattern_w_bias = _get_dynamo_graph(linear_op, (act, weight, bias)) + pattern_wo_bias = _get_dynamo_graph(linear_op, (act, weight)) + return [pattern_w_bias, pattern_wo_bias] + + +def _supported_symmetric_quantized_operators() -> Dict[str, List[OperatorPatternType]]: + supported_operators: Dict[str, List[OperatorPatternType]] = { + # Both conv and linear should be able to handle relu + hardtanh fusion since + # those are clamp ops + "conv2d": [ + [torch.nn.Conv2d, torch.nn.ReLU], + [torch.nn.Conv2d, F.relu], + [F.conv2d, torch.nn.ReLU], + [F.conv2d, F.relu], + ], + "linear": [[torch.nn.Linear], [F.linear]], + "add": [[torch.add]], + "max_pool2d": [[torch.nn.MaxPool2d], [F.max_pool2d]], + "adaptive_avg_pool2d": [ + [torch.nn.AdaptiveAvgPool2d], + [F.adaptive_avg_pool2d], + ], + } + return copy.deepcopy(supported_operators) + + +def _get_supported_symmetric_config_and_operators() -> List[OperatorConfig]: + supported_config_and_operators: List[OperatorConfig] = [] + for quantization_config in [ + get_symmetric_quantization_config(), + get_symmetric_quantization_config(is_qat=True), + get_symmetric_quantization_config(is_per_channel=True), + get_symmetric_quantization_config(is_per_channel=True, is_qat=True), + ]: + ops = _supported_symmetric_quantized_operators() + for pattern_list in ops.values(): + supported_config_and_operators.append( + OperatorConfig(quantization_config, pattern_list) + ) + return copy.deepcopy(supported_config_and_operators) + + +@functools.lru_cache +def get_symmetric_quantization_config( + is_per_channel: bool = False, + is_qat: bool = False, + is_dynamic: bool = False, + act_qmin: int = -128, + act_qmax: int = 127, + weight_qmin: int = -127, + weight_qmax: int = 127, +): + extra_args: Dict[str, Any] = {"eps": 2**-12} + if is_qat: + if is_dynamic: + act_observer_or_fake_quant_ctr = FakeQuantize + dynamic_quant_observer = 
MovingAverageMinMaxObserver.with_args( + averaging_constant=1 + ) + extra_args["observer"] = dynamic_quant_observer + else: + act_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize # type: ignore[assignment] + else: + if is_dynamic: + act_observer_or_fake_quant_ctr = PlaceholderObserver # type: ignore[assignment] + else: + act_observer_or_fake_quant_ctr = HistogramObserver # type: ignore[assignment] + + act_quantization_spec = QuantizationSpec( + dtype=torch.int8, + quant_min=act_qmin, + quant_max=act_qmax, + qscheme=torch.per_tensor_affine, + is_dynamic=is_dynamic, + observer_or_fake_quant_ctr=act_observer_or_fake_quant_ctr.with_args( + **extra_args, + ), + ) + weight_qscheme = ( + torch.per_channel_symmetric if is_per_channel else torch.per_tensor_symmetric + ) + weight_observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor = ( + MinMaxObserver + ) + if is_qat: + # TODO: qat + per channel? + weight_observer_or_fake_quant_ctr = FusedMovingAvgObsFakeQuantize + elif is_per_channel: + weight_observer_or_fake_quant_ctr = PerChannelMinMaxObserver + + extra_args: Dict[str, Any] = {"eps": 2**-12} + if is_qat: + if weight_qscheme == torch.per_tensor_symmetric: + extra_args["observer"] = MovingAverageMinMaxObserver + else: + extra_args["observer"] = MovingAveragePerChannelMinMaxObserver # type: ignore[dict-item] + weight_quantization_spec = QuantizationSpec( + dtype=torch.int8, + quant_min=weight_qmin, + quant_max=weight_qmax, + qscheme=weight_qscheme, + ch_axis=0, + is_dynamic=False, + observer_or_fake_quant_ctr=weight_observer_or_fake_quant_ctr.with_args( + **extra_args + ), + ) + + bias_quantization_spec = None + if is_dynamic: + quantization_config = QuantizationConfig( + act_quantization_spec, + None, + weight_quantization_spec, + bias_quantization_spec, + is_qat, + ) + else: + quantization_config = QuantizationConfig( + act_quantization_spec, + act_quantization_spec, + weight_quantization_spec, + bias_quantization_spec, + is_qat, + ) + return quantization_config + + +def _get_supported_config_and_operators() -> List[OperatorConfig]: + return _get_supported_symmetric_config_and_operators() + + +def _get_module_name_filter(module_name: str): + """Get the module_name_filter function for a given module name, the filter accepts + a node and checks if the node comes from a module that has certain module name + + For example: + node: linear_op = call_function[...](...) # comes from a module with name blocks.sub.linear1 + + + >> module_name_filter = _get_module_name_filter("blocks.sub") + >> print(module_name_filter(node)) + True # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1" + """ + + def module_name_filter(n: Node) -> bool: + # example: { + # 'L__self___sub': ("L['self'].sub", ), + # 'L__self___sub_linear': ("L['self'].sub.linear", ) + # } + # get_attr nodes doesn't have nn_module_stack? + nn_module_stack = n.meta.get("nn_module_stack", {}) + names = [n[len("L['self'].") :] for n, klass in nn_module_stack.values()] + return module_name in names + + return module_name_filter + + +def _get_module_type_filter(tp: Callable): + """Get the module_type_filter function for a given module type, the filter accepts + a node and checks if the node comes from a module that has certain module type + + For example: + node: linear_op = call_function[...](...) 
# comes from a module with type Block -> Sub -> Linear + + + >> module_type_filter = _get_module_type_filter(Sub) # submodule with type `Sub`, under the `Block` submodule + >> print(module_type_filter(node)) + True # the node is from the submodule `Sub` (same for `Block` and `Linear` as well) + """ + + def module_type_filter(n: Node) -> bool: + # example: { + # 'L__self___sub': ("L['self'].sub", ), + # 'L__self___sub_linear': ("L['self'].sub.linear", ) + # } + nn_module_stack = n.meta.get("nn_module_stack", {}) + types = [t for _, t in nn_module_stack.values()] + return tp in types + + return module_type_filter + + +def _get_not_module_type_or_name_filter( + tp_list: List[Callable], module_name_list: List[str] +) -> Callable[[Node], bool]: + module_type_filters = [_get_module_type_filter(tp) for tp in tp_list] + module_name_list_filters = [_get_module_name_filter(m) for m in module_name_list] + + def not_module_type_or_name_filter(n: Node) -> bool: + return not any(f(n) for f in module_type_filters + module_name_list_filters) + + return not_module_type_or_name_filter + + +class XNNPACKQuantizer(Quantizer): + supported_config_and_operators = _get_supported_config_and_operators() + STATIC_QAT_ONLY_OPS = [ + "conv_bn_relu", + "conv_bn", + ] + + # static quantization ops (both PTQ and QAT) + # Preserve the order that fusions come before singular ops + STATIC_OPS = [ + "linear_relu", + "linear", + "conv_relu", + "conv", + "adaptive_avg_pool2d", + # TODO: move this to BoltNNQuantizer? + "gru_io_only", + "max_pool2d", + "add_relu", + "add", + "mul_relu", + "mul", + "cat", + ] + + DYNAMIC_OPS = [ + "linear", + ] + + def __init__(self): + super().__init__() + self.global_config: Optional[QuantizationConfig] = None + self.operator_type_config: Dict[ + torch._ops.OpOverloadPacket, Optional[QuantizationConfig] + ] = {} + self.module_type_config: Dict[Callable, Optional[QuantizationConfig]] = {} + self.module_name_config: Dict[str, Optional[QuantizationConfig]] = {} + + @classmethod + def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: + op_configs: Set[QuantizationConfig] = set({}) + for spec, _ in cls.supported_config_and_operators: + op_configs.add(spec) + return list(op_configs) + + @classmethod + def get_supported_operator_for_quantization_config( + cls, quantization_config: Optional[QuantizationConfig] + ) -> List[OperatorPatternType]: + if quantization_config is None: + all_ops = [] + for _, ops in cls.supported_config_and_operators: + all_ops.extend(ops) + return all_ops + + for config, ops in cls.supported_config_and_operators: + # note: this assumes each entry in cls.supported_spec_and_operators + # corresponds to one spec, e.g. 
we don't have + # [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)] + # where the first and second entry have the same spec but did not + # merge the op list + if config == quantization_config: + return ops + return [] + + def set_global(self, quantization_config: QuantizationConfig) -> XNNPACKQuantizer: + self.global_config = quantization_config + return self + + def set_operator_type( + self, + operator_type: torch._ops.OpOverloadPacket, + quantization_config: QuantizationConfig, + ) -> XNNPACKQuantizer: + self.operator_type_config[operator_type] = quantization_config + return self + + def set_module_type( + self, module_type: Callable, quantization_config: QuantizationConfig + ): + """Set quantization_config for a submodule with type: `module_type`, for example: + quantizer.set_module_name(Sub) or quantizer.set_module_name(nn.Linear), it will quantize all supported operator/operator + patterns in the submodule with this module type with the given `quantization_config` + """ + self.module_type_config[module_type] = quantization_config + return self + + def set_module_name( + self, module_name: str, quantization_config: Optional[QuantizationConfig] + ): + """Set quantization_config for a submodule with name: `module_name`, for example: + quantizer.set_module_name("blocks.sub"), it will quantize all supported operator/operator + patterns in the submodule with this module name with the given `quantization_config` + """ + assert ( + quantization_config is not None + ), " quantization_config == None is not supported yet" + self.module_name_config[module_name] = quantization_config + return self + + def transform_for_annotation( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + """Transforms scalar values to tensor attributes""" + return _convert_scalars_to_attrs(model) + + def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: + """just handling global spec for now""" + # hacked for handling dynamic linear quant. will fix later. 
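        # Editor's note (illustrative, not part of the upstream file): the
        # branch below chooses dynamic vs. static annotation solely from the
        # global config's input-activation spec. A minimal sketch of how the
        # dynamic path is typically reached, assuming the usual PT2E prepare
        # entry point (prepare_pt2e) and an already-exported GraphModule `m`
        # (both are assumptions, not defined in this file):
        #
        #   quantizer = XNNPACKQuantizer().set_global(
        #       get_symmetric_quantization_config(is_dynamic=True)
        #   )
        #   prepared = prepare_pt2e(m, quantizer)   # invokes annotate() on `m`
        #
        # With is_dynamic=True the input-activation spec is dynamic, so the
        # dynamic branch runs; otherwise the static annotators run. Afterwards
        # propagate_annotation() shares quantization specs across
        # view/permute-like ops (see _is_share_obs_or_fq_op in
        # xnnpack_quantizer_utils).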
+ if self.global_config and self.global_config.input_activation.is_dynamic: # type: ignore[union-attr] + model = self._annotate_for_dynamic_quantization_config(model) + else: + model = self._annotate_for_static_quantization_config(model) + propagate_annotation(model) + return model + + def _annotate_all_static_patterns( + self, + model: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, + ) -> torch.fx.GraphModule: + # TODO: implement the support for None to be canceling out previous annotations + if quantization_config is None: + return model + + if quantization_config.is_qat: + for op in self.STATIC_QAT_ONLY_OPS: + OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) + for op in self.STATIC_OPS: + OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) + return model + + def _annotate_all_dynamic_patterns( + self, + model: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, + ) -> torch.fx.GraphModule: + # TODO: implement the support for None to be canceling out previous annotations + if quantization_config is None: + return model + + for op in self.DYNAMIC_OPS: + OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) + return model + + def _annotate_for_static_quantization_config( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + module_name_list = list(self.module_name_config.keys()) + for module_name, config in self.module_name_config.items(): + self._annotate_all_static_patterns( + model, config, _get_module_name_filter(module_name) + ) + + tp_list = list(self.module_type_config.keys()) + for module_type, config in self.module_type_config.items(): + self._annotate_all_static_patterns( + model, config, _get_module_type_filter(module_type) + ) + + self._annotate_all_static_patterns( + model, + self.global_config, + _get_not_module_type_or_name_filter(tp_list, module_name_list), + ) + return model + + def _annotate_for_dynamic_quantization_config( + self, model: torch.fx.GraphModule + ) -> torch.fx.GraphModule: + module_name_list = list(self.module_name_config.keys()) + for module_name, config in self.module_name_config.items(): + self._annotate_all_dynamic_patterns( + model, config, _get_module_name_filter(module_name) + ) + + tp_list = list(self.module_type_config.keys()) + for module_type, config in self.module_type_config.items(): + self._annotate_all_dynamic_patterns( + model, config, _get_module_type_filter(module_type) + ) + + self._annotate_all_dynamic_patterns( + model, + self.global_config, + _get_not_module_type_or_name_filter(tp_list, module_name_list), + ) + return model + + def validate(self, model: torch.fx.GraphModule) -> None: + pass + + @classmethod + def get_supported_operators(cls) -> List[OperatorConfig]: + return cls.supported_config_and_operators diff --git a/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..042163705a0b93d5ffdef6562b55581182a8852a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py @@ -0,0 +1,1032 @@ +import itertools +import operator +from dataclasses import dataclass +from typing import Callable, Dict, List, NamedTuple, Optional + +import torch +import torch.nn.functional as F 
+from torch._subclasses import FakeTensor +from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix +from torch.ao.quantization.pt2e.export_utils import _WrapperModule +from torch.ao.quantization.pt2e.graph_utils import find_sequential_partitions +from torch.ao.quantization.pt2e.utils import ( + _conv1d_bn_example_inputs, + _conv2d_bn_example_inputs, + get_aten_graph_module, +) +from torch.ao.quantization.quantizer import ( + QuantizationAnnotation, + QuantizationSpec, + QuantizationSpecBase, + SharedQuantizationSpec, +) + +from torch.ao.quantization.quantizer.utils import ( + _annotate_input_qspec_map, + _annotate_output_qspec, +) +from torch.fx import Node +from torch.fx.passes.utils.matcher_with_name_node_map_utils import ( + SubgraphMatcherWithNameNodeMap, +) +from torch.fx.passes.utils.source_matcher_utils import get_source_partitions + + +__all__ = [ + "OperatorConfig", + "OperatorPatternType", + "QuantizationConfig", + "get_input_act_qspec", + "get_output_act_qspec", + "get_weight_qspec", + "get_bias_qspec", + "OP_TO_ANNOTATOR", + "propagate_annotation", +] + + +# In the absence of better name, just winging it with QuantizationConfig +@dataclass(eq=True, frozen=True) +class QuantizationConfig: + input_activation: Optional[QuantizationSpec] + output_activation: Optional[QuantizationSpec] + weight: Optional[QuantizationSpec] + bias: Optional[QuantizationSpec] + # TODO: remove, since we can use observer_or_fake_quant_ctr to express this + is_qat: bool = False + + +OperatorPatternType = List[Callable] +OperatorPatternType.__module__ = ( + "torch.ao.quantization.quantizer.xnnpack_quantizer_utils" +) + +AnnotatorType = Callable[ + [ + torch.fx.GraphModule, + Optional[QuantizationConfig], + Optional[Callable[[Node], bool]], + ], + Optional[List[List[Node]]], +] +OP_TO_ANNOTATOR: Dict[str, AnnotatorType] = {} + + +def register_annotator(op: str): + def decorator(annotator: AnnotatorType): + OP_TO_ANNOTATOR[op] = annotator + + return decorator + + +class OperatorConfig(NamedTuple): + # fix List[str] with List[List[Union[nn.Module, FunctionType, BuiltinFunctionType]]] + # Basically we are mapping a quantization config to some list of patterns. + # a pattern is defined as a list of nn module, function or builtin function names + # e.g. [nn.Conv2d, torch.relu, torch.add] + # We have not resolved whether fusion can be considered internal details of the + # quantizer hence it does not need communication to user. + # Note this pattern is not really informative since it does not really + # tell us the graph structure resulting from the list of ops. 
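    # Editor's note (illustrative, not part of the upstream file): a concrete
    # pairing, built the same way as in
    # _get_supported_symmetric_config_and_operators, would look roughly like
    #
    #   OperatorConfig(
    #       config=some_quantization_config,  # placeholder name; e.g. a symmetric int8 QuantizationConfig
    #       operators=[[torch.nn.Conv2d, torch.nn.ReLU], [F.conv2d, F.relu]],
    #   )
    #
    # i.e. one QuantizationConfig mapped to a list of operator patterns, each
    # pattern being an ordered list of modules/functions expected to fuse.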
+ config: QuantizationConfig + operators: List[OperatorPatternType] + + +def _is_annotated(nodes: List[Node]): + """ + Given a list of nodes (that represents an operator pattern), + check if any of the node is annotated, return True if any of the node + is annotated, otherwise return False + """ + annotated = False + for node in nodes: + annotated = annotated or ( + "quantization_annotation" in node.meta + and node.meta["quantization_annotation"]._annotated + ) + return annotated + + +def _mark_nodes_as_annotated(nodes: List[Node]): + for node in nodes: + if node is not None: + if "quantization_annotation" not in node.meta: + node.meta["quantization_annotation"] = QuantizationAnnotation() + node.meta["quantization_annotation"]._annotated = True + + +def get_input_act_qspec(quantization_config: Optional[QuantizationConfig]): + if quantization_config is None: + return None + if quantization_config.input_activation is None: + return None + quantization_spec: QuantizationSpec = quantization_config.input_activation + assert quantization_spec.qscheme in [ + torch.per_tensor_affine, + torch.per_tensor_symmetric, + ] + return quantization_spec + + +def get_output_act_qspec(quantization_config: Optional[QuantizationConfig]): + if quantization_config is None: + return None + if quantization_config.output_activation is None: + return None + quantization_spec: QuantizationSpec = quantization_config.output_activation + assert quantization_spec.qscheme in [ + torch.per_tensor_affine, + torch.per_tensor_symmetric, + ] + return quantization_spec + + +def get_weight_qspec(quantization_config: Optional[QuantizationConfig]): + if quantization_config is None: + return None + assert quantization_config is not None + if quantization_config.weight is None: + return None + quantization_spec: QuantizationSpec = quantization_config.weight + if quantization_spec.qscheme not in [ + torch.per_tensor_symmetric, + torch.per_channel_symmetric, + ]: + raise ValueError( + f"Unsupported quantization_spec {quantization_spec} for weight" + ) + return quantization_spec + + +def get_bias_qspec(quantization_config: Optional[QuantizationConfig]): + if quantization_config is None: + return None + assert quantization_config is not None + if quantization_config.bias is None: + return None + quantization_spec: QuantizationSpec = quantization_config.bias + assert ( + quantization_spec.dtype == torch.float + ), "Only float dtype for bias is supported for bias right now" + return quantization_spec + + +@register_annotator("linear") +def _annotate_linear( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + annotated_partitions = [] + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + weight_qspec = get_weight_qspec(quantization_config) + bias_qspec = get_bias_qspec(quantization_config) + for node in gm.graph.nodes: + if node.op != "call_function" or node.target != torch.ops.aten.linear.default: + continue + if filter_fn and not filter_fn(node): + continue + act_node = node.args[0] + weight_node = node.args[1] + bias_node = None + if len(node.args) > 2: + bias_node = node.args[2] + + if _is_annotated([node]) is False: # type: ignore[list-item] + _annotate_input_qspec_map( + node, + act_node, + input_act_qspec, + ) + _annotate_input_qspec_map( + node, + weight_node, + weight_qspec, + ) + nodes_to_mark_annotated = [node, weight_node] + if bias_node: + 
_annotate_input_qspec_map( + node, + bias_node, + bias_qspec, + ) + nodes_to_mark_annotated.append(bias_node) + _annotate_output_qspec(node, output_act_qspec) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + annotated_partitions.append(nodes_to_mark_annotated) + + return annotated_partitions + + +@register_annotator("linear_relu") +def _annotate_linear_relu( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + annotated_partitions = [] + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + weight_qspec = get_weight_qspec(quantization_config) + bias_qspec = get_bias_qspec(quantization_config) + for node in gm.graph.nodes: + if node.op != "call_function" or node.target not in [ + torch.ops.aten.relu.default, + torch.ops.aten.relu_.default, + ]: + continue + relu_node = node + maybe_linear_node = node.args[0] + if ( + not isinstance(maybe_linear_node, Node) + or maybe_linear_node.op != "call_function" + or maybe_linear_node.target != torch.ops.aten.linear.default + ): + continue + + linear_node = maybe_linear_node + input_qspec_map = {} + input_act = linear_node.args[0] + assert isinstance(input_act, Node) + input_qspec_map[input_act] = input_act_qspec + + weight = linear_node.args[1] + assert isinstance(weight, Node) + input_qspec_map[weight] = weight_qspec + + # adding weight node to the partition as well + partition = [relu_node, linear_node, weight] + bias = linear_node.args[2] if len(linear_node.args) > 2 else None + if isinstance(bias, Node): + input_qspec_map[bias] = bias_qspec + partition.append(bias) + + if _is_annotated(partition): + continue + + if filter_fn and any(not filter_fn(n) for n in partition): + continue + + linear_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + ) + relu_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=output_act_qspec, + _annotated=True, + ) + _mark_nodes_as_annotated(partition) + annotated_partitions.append(partition) + return annotated_partitions + + +@register_annotator("conv") +def _annotate_conv( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + annotated_partitions = [] + for n in gm.graph.nodes: + if n.op != "call_function" or n.target not in [ + torch.ops.aten.conv1d.default, + torch.ops.aten.conv2d.default, + ]: + continue + conv_node = n + + input_qspec_map = {} + input_act = conv_node.args[0] + assert isinstance(input_act, Node) + input_qspec_map[input_act] = get_input_act_qspec(quantization_config) + + weight = conv_node.args[1] + assert isinstance(weight, Node) + input_qspec_map[weight] = get_weight_qspec(quantization_config) + + # adding weight node to the partition as well + partition = [conv_node, conv_node.args[1]] + + bias = conv_node.args[2] if len(conv_node.args) > 2 else None + if isinstance(bias, Node): + input_qspec_map[bias] = get_bias_qspec(quantization_config) + partition.append(bias) + + if _is_annotated(partition): + continue + + if filter_fn and any(not filter_fn(n) for n in partition): + continue + + conv_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + output_qspec=get_output_act_qspec(quantization_config), + _annotated=True, + ) + _mark_nodes_as_annotated(partition) 
+ annotated_partitions.append(partition) + return annotated_partitions + + +@register_annotator("conv_relu") +def _annotate_conv_relu( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + annotated_partitions = [] + for n in gm.graph.nodes: + if n.op != "call_function" or n.target not in [ + torch.ops.aten.relu.default, + torch.ops.aten.relu_.default, + ]: + continue + relu_node = n + maybe_conv_node = n.args[0] + if ( + not isinstance(maybe_conv_node, Node) + or maybe_conv_node.op != "call_function" + or maybe_conv_node.target + not in [ + torch.ops.aten.conv1d.default, + torch.ops.aten.conv2d.default, + ] + ): + continue + conv_node = maybe_conv_node + + input_qspec_map = {} + input_act = conv_node.args[0] + assert isinstance(input_act, Node) + input_qspec_map[input_act] = get_input_act_qspec(quantization_config) + + weight = conv_node.args[1] + assert isinstance(weight, Node) + input_qspec_map[weight] = get_weight_qspec(quantization_config) + + # adding weight node to the partition as well + partition = [relu_node, conv_node, conv_node.args[1]] + bias = conv_node.args[2] if len(conv_node.args) > 2 else None + if isinstance(bias, Node): + input_qspec_map[bias] = get_bias_qspec(quantization_config) + partition.append(bias) + + if _is_annotated(partition): + continue + + if filter_fn and any(not filter_fn(n) for n in partition): + continue + + conv_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, _annotated=True + ) + relu_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + ) + _mark_nodes_as_annotated(partition) + annotated_partitions.append(partition) + return annotated_partitions + + +@register_annotator("conv_bn") +def _annotate_conv_bn( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + """ + Find conv + batchnorm parititions + Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv. + """ + return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=False) + + +@register_annotator("conv_bn_relu") +def _annotate_conv_bn_relu( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + """ + Find conv + batchnorm + relu parititions + Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv. + """ + return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=True) + + +def _do_annotate_conv_bn( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]], + has_relu: bool, +) -> List[List[Node]]: + """ + Given a function that takes in a `conv_fn` and returns a conv-bn[-relu] pattern, + return a list of annotated partitions. + + The output of the pattern must include a dictionary from string name to node + for the following names: "input", "conv", "weight", "bias", and "output". 
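    For illustration, the `_conv_bn` helper defined just below satisfies this
    contract by returning:

        output, {"input": x, "conv": conv, "weight": conv_weight,
                 "bias": conv_bias, "output": output}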
+ """ + + def get_pattern(conv_fn: Callable, relu_is_inplace: bool): + def _conv_bn(x, conv_weight, conv_bias, bn_weight, bn_bias, bn_rm, bn_rv): + conv = conv_fn(x, conv_weight, conv_bias) + bn = F.batch_norm(conv, bn_rm, bn_rv, bn_weight, bn_bias, training=True) + if has_relu: + output = F.relu_(bn) if relu_is_inplace else F.relu(bn) + else: + output = bn + return output, { + "input": x, + "conv": conv, + "weight": conv_weight, + "bias": conv_bias, + "output": output, + } + + return _WrapperModule(_conv_bn) + + # Needed for matching, otherwise the matches gets filtered out due to unused + # nodes returned by batch norm + gm.graph.eliminate_dead_code() + gm.recompile() + + matches = [] + combinations = [ + (F.conv1d, _conv1d_bn_example_inputs), + (F.conv2d, _conv2d_bn_example_inputs), + ] + + # Add `is_cuda` and `relu_is_inplace` dimensions + combinations = itertools.product( + combinations, + [True, False] if torch.cuda.is_available() else [False], # is_cuda + [True, False] if has_relu else [False], # relu_is_inplace + ) + + # Match against all conv dimensions and cuda variants + for (conv_fn, example_inputs), is_cuda, relu_is_inplace in combinations: + pattern = get_pattern(conv_fn, relu_is_inplace) + pattern = get_aten_graph_module(pattern, example_inputs, is_cuda) + pattern.graph.eliminate_dead_code() + pattern.recompile() + matcher = SubgraphMatcherWithNameNodeMap(pattern, ignore_literals=True) + matches.extend(matcher.match(gm.graph)) + + # Annotate nodes returned in the matches + annotated_partitions = [] + for match in matches: + name_node_map = match.name_node_map + input_node = name_node_map["input"] + conv_node = name_node_map["conv"] + weight_node = name_node_map["weight"] + bias_node = name_node_map["bias"] + output_node = name_node_map["output"] + + # TODO: annotate the uses of input, weight, and bias separately instead + # of assuming they come from a single conv node. This is not possible today + # because input may have multiple users, and we can't rely on the conv node + # always being the first user. 
This was the case in models with skip + # connections like resnet18 + + # Validate conv args + if conv_node.args[0] is not input_node: + raise ValueError("Conv arg did not contain input node ", input_node) + if conv_node.args[1] is not weight_node: + raise ValueError("Conv arg did not contain weight node ", weight_node) + if len(conv_node.args) > 2 and conv_node.args[2] is not bias_node: + raise ValueError("Conv arg did not contain bias node ", bias_node) + + # Skip if the partition is already annotated or is filtered out by the user + partition = [conv_node, weight_node] + if bias_node is not None: + partition.append(bias_node) + if _is_annotated(partition): + continue + if filter_fn and any(not filter_fn(n) for n in partition): + continue + + # Annotate conv inputs and pattern output + input_qspec_map = {} + input_qspec_map[input_node] = get_input_act_qspec(quantization_config) + input_qspec_map[weight_node] = get_weight_qspec(quantization_config) + if bias_node is not None: + input_qspec_map[bias_node] = get_bias_qspec(quantization_config) + conv_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + ) + output_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=get_output_act_qspec(quantization_config), # type: ignore[arg-type] + _annotated=True, + ) + _mark_nodes_as_annotated(partition) + annotated_partitions.append(partition) + return annotated_partitions + + +@register_annotator("gru_io_only") +def _annotate_gru_io_only( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + gru_partitions = get_source_partitions(gm.graph, [torch.nn.GRU], filter_fn) + gru_partitions = list(itertools.chain.from_iterable(gru_partitions.values())) + annotated_partitions = [] + for gru_partition in gru_partitions: + annotated_partitions.append(gru_partition.nodes) + output_nodes = gru_partition.output_nodes + input_nodes = gru_partition.input_nodes + # skip annotation if it is already annotated + if _is_annotated(input_nodes + output_nodes): + continue + # inside each GRU partition, we should be able to annotate each linear + # subgraph + input_qspec_map: Dict[Node, QuantizationSpecBase] = {} + input_act = input_nodes[0] + input_act_user = next(iter(input_act.users.keys())) + assert isinstance(input_act, Node) + assert isinstance(input_act_user, Node) + input_act_user.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map={ + input_act: get_input_act_qspec(quantization_config), + }, + _annotated=True, + ) + + hidden_state = input_nodes[1] + hidden_state_user = next(iter(hidden_state.users.keys())) + assert isinstance(hidden_state, Node) + assert isinstance(hidden_state_user, Node) + hidden_state_user.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map={ + hidden_state: get_input_act_qspec(quantization_config), + }, + _annotated=True, + ) + + assert len(output_nodes) == 2, "expecting GRU to have two outputs" + for output in output_nodes: + output.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=get_output_act_qspec(quantization_config), + _annotated=True, + ) + nodes_to_mark_annotated = list(gru_partition.nodes) + _mark_nodes_as_annotated(nodes_to_mark_annotated) + return annotated_partitions + + +@register_annotator("max_pool2d") +def _annotate_max_pool2d( + gm: torch.fx.GraphModule, + quantization_config: 
Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + module_partitions = get_source_partitions( + gm.graph, [torch.nn.MaxPool2d, torch.nn.functional.max_pool2d], filter_fn + ) + maxpool_partitions = list(itertools.chain.from_iterable(module_partitions.values())) + annotated_partitions = [] + for maxpool_partition in maxpool_partitions: + annotated_partitions.append(maxpool_partition.nodes) + output_node = maxpool_partition.output_nodes[0] + maxpool_node = None + for n in maxpool_partition.nodes: + if n.target == torch.ops.aten.max_pool2d.default: + maxpool_node = n + assert ( + maxpool_node is not None + ), "XNNPACKQuantizer only works with torch.ops.aten.max_pool2d.default, " + "please make sure you are exporting the model correctly" + if _is_annotated([output_node, maxpool_node]): # type: ignore[list-item] + continue + + input_act = maxpool_node.args[0] # type: ignore[union-attr] + assert isinstance(input_act, Node) + + # only annotate maxpool when the output of the input node is annotated + if ( + "quantization_annotation" not in input_act.meta + or not input_act.meta["quantization_annotation"]._annotated + or input_act.meta["quantization_annotation"].output_qspec is None + ): + continue + # input and output of maxpool will share quantization parameter with input of maxpool + act_qspec = SharedQuantizationSpec(input_act) + # act_qspec = get_act_qspec(quantization_config) + maxpool_node.meta["quantization_annotation"] = QuantizationAnnotation( # type: ignore[union-attr] + input_qspec_map={ + input_act: act_qspec, + }, + _annotated=True, + ) + output_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=act_qspec, + _annotated=True, + ) + return annotated_partitions + + +@register_annotator("adaptive_avg_pool2d") +def _annotate_adaptive_avg_pool2d( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + """Always annotate adaptive_avg_pool2d op""" + module_partitions = get_source_partitions( + gm.graph, [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d], filter_fn + ) + partitions = list(itertools.chain.from_iterable(module_partitions.values())) + annotated_partitions = [] + for partition in partitions: + pool_node = partition.output_nodes[0] + if ( + pool_node.op != "call_function" + or pool_node.target != torch.ops.aten.adaptive_avg_pool2d.default + ): + raise ValueError(f"{pool_node} is not an aten adaptive_avg_pool2d operator") + + if _is_annotated([pool_node]): + continue + + annotated_partitions.append(partition.nodes) + input_act = pool_node.args[0] + assert isinstance(input_act, Node) + + # only annotate input output sharing operator + # when the output of the input node is annotated + if ( + "quantization_annotation" not in input_act.meta + or not input_act.meta["quantization_annotation"]._annotated + or input_act.meta["quantization_annotation"].output_qspec is None + ): + input_act_qspec = get_input_act_qspec(quantization_config) + else: + input_act_qspec = SharedQuantizationSpec(input_act) + + # output sharing with input + output_act_qspec = SharedQuantizationSpec((input_act, pool_node)) + pool_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map={ + input_act: input_act_qspec, + }, + output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +def _is_input_large_scalar(node: Node, gm: torch.fx.GraphModule): + """Check 
if input is a large scalar value. So that we can skip quantization for the node + since histc op (in HistogramObserver) only works for values up to certain upper bound + """ + if node.op == "get_attr": + tensor = getattr(gm, node.target) # type: ignore[arg-type] + # torch.histc works until this upper bound + HISTC_UPPER_BOUND = 3.4028235e15 + return tensor.numel() == 1 and abs(tensor.item()) > HISTC_UPPER_BOUND + return False + + +def _is_input_non_float_tensor(node: Node): + """Check if the input is not a float tensor, so that we can skip quantization for the node + since observers only works with float Tensors + """ + if "val" not in node.meta or not isinstance(node.meta["val"], FakeTensor): + return True + return node.meta["val"].dtype != torch.float32 + + +@register_annotator("add_relu") +def _annotate_add_relu( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + fused_partitions = find_sequential_partitions( + gm, [torch.add, torch.nn.ReLU], filter_fn=filter_fn + ) + annotated_partitions = [] + for fused_partition in fused_partitions: + add_partition, relu_partition = fused_partition + annotated_partitions.append(add_partition.nodes + relu_partition.nodes) + if len(relu_partition.output_nodes) > 1: + raise ValueError("Relu partition has more than one output node") + relu_node = relu_partition.output_nodes[0] + if len(add_partition.output_nodes) > 1: + raise ValueError("add partition has more than one output node") + add_node = add_partition.output_nodes[0] + + if _is_annotated([relu_node, add_node]): + continue + + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + + input_qspec_map = {} + input_act0 = add_node.args[0] + if isinstance(input_act0, Node): + if _is_input_large_scalar(input_act0, gm): + continue + if _is_input_non_float_tensor(input_act0): + continue + input_qspec_map[input_act0] = input_act_qspec + + input_act1 = add_node.args[1] + if isinstance(input_act1, Node): + if _is_input_large_scalar(input_act1, gm): + continue + if _is_input_non_float_tensor(input_act1): + continue + input_qspec_map[input_act1] = input_act_qspec + + add_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + ) + relu_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +@register_annotator("add") +def _annotate_add( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + add_partitions = get_source_partitions( + gm.graph, [operator.add, torch.add, operator.iadd], filter_fn + ) + add_partitions = list(itertools.chain.from_iterable(add_partitions.values())) + annotated_partitions = [] + for add_partition in add_partitions: + annotated_partitions.append(add_partition.nodes) + add_node = add_partition.output_nodes[0] + if _is_annotated([add_node]): + continue + + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + + input_qspec_map = {} + input_act0 = add_node.args[0] + if isinstance(input_act0, Node): + if _is_input_large_scalar(input_act0, gm): + continue + if _is_input_non_float_tensor(input_act0): + continue + input_qspec_map[input_act0] = input_act_qspec + + 
input_act1 = add_node.args[1] + if isinstance(input_act1, Node): + if _is_input_large_scalar(input_act1, gm): + continue + if _is_input_non_float_tensor(input_act1): + continue + input_qspec_map[input_act1] = input_act_qspec + + add_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +@register_annotator("mul_relu") +def _annotate_mul_relu( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + fused_partitions = find_sequential_partitions( + gm, [torch.mul, torch.nn.ReLU], filter_fn=filter_fn + ) + annotated_partitions = [] + for fused_partition in fused_partitions: + mul_partition, relu_partition = fused_partition + annotated_partitions.append(mul_partition.nodes + relu_partition.nodes) + if len(relu_partition.output_nodes) > 1: + raise ValueError("Relu partition has more than one output node") + relu_node = relu_partition.output_nodes[0] + if len(mul_partition.output_nodes) > 1: + raise ValueError("mul partition has more than one output node") + mul_node = mul_partition.output_nodes[0] + + if _is_annotated([relu_node, mul_node]): + continue + + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + + input_qspec_map = {} + input_act0 = mul_node.args[0] + if isinstance(input_act0, Node): + if _is_input_large_scalar(input_act0, gm): + continue + if _is_input_non_float_tensor(input_act0): + continue + input_qspec_map[input_act0] = input_act_qspec + + input_act1 = mul_node.args[1] + if isinstance(input_act1, Node): + if _is_input_large_scalar(input_act1, gm): + continue + if _is_input_non_float_tensor(input_act1): + continue + input_qspec_map[input_act1] = input_act_qspec + + mul_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + ) + relu_node.meta["quantization_annotation"] = QuantizationAnnotation( + output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +@register_annotator("mul") +def _annotate_mul( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + mul_partitions = get_source_partitions( + gm.graph, ["mul", "mul_", operator.mul, torch.mul, operator.imul], filter_fn + ) + mul_partitions = list(itertools.chain.from_iterable(mul_partitions.values())) + annotated_partitions = [] + for mul_partition in mul_partitions: + annotated_partitions.append(mul_partition.nodes) + mul_node = mul_partition.output_nodes[0] + if _is_annotated([mul_node]): + continue + + input_act_qspec = get_input_act_qspec(quantization_config) + output_act_qspec = get_output_act_qspec(quantization_config) + + input_qspec_map = {} + input_act0 = mul_node.args[0] + if isinstance(input_act0, Node): + if _is_input_large_scalar(input_act0, gm): + continue + if _is_input_non_float_tensor(input_act0): + continue + input_qspec_map[input_act0] = input_act_qspec + + input_act1 = mul_node.args[1] + if isinstance(input_act1, Node): + if _is_input_large_scalar(input_act1, gm): + continue + if _is_input_non_float_tensor(input_act1): + continue + input_qspec_map[input_act1] = input_act_qspec + + mul_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + 
output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +# TODO: remove Optional in return type, fix annotated_partitions logic +@register_annotator("cat") +def _annotate_cat( + gm: torch.fx.GraphModule, + quantization_config: Optional[QuantizationConfig], + filter_fn: Optional[Callable[[Node], bool]] = None, +) -> Optional[List[List[Node]]]: + cat_partitions = get_source_partitions(gm.graph, [torch.cat], filter_fn) + cat_partitions = list(itertools.chain.from_iterable(cat_partitions.values())) + annotated_partitions = [] + for cat_partition in cat_partitions: + cat_node = cat_partition.output_nodes[0] + if _is_annotated([cat_node]): + continue + + if cat_node.target != torch.ops.aten.cat.default: + # TODO: change this to AnnotationException + raise Exception( + f"Expected cat node: torch.ops.aten.cat.default, but found {cat_node.target}" + " please check if you are calling the correct capture API" + ) + + annotated_partitions.append(cat_partition.nodes) + + input_act_qspec = get_input_act_qspec(quantization_config) + inputs = cat_node.args[0] + + input_qspec_map = {} + input_act0 = inputs[0] + if isinstance(input_act0, Node): + input_qspec_map[input_act0] = input_act_qspec + + shared_with_input0_qspec = SharedQuantizationSpec((input_act0, cat_node)) + for input_act in inputs[1:]: + input_qspec_map[input_act] = shared_with_input0_qspec + + output_act_qspec = shared_with_input0_qspec + + cat_node.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map=input_qspec_map, + output_qspec=output_act_qspec, + _annotated=True, + ) + return annotated_partitions + + +def _is_share_obs_or_fq_op(op: Callable) -> bool: + return op in [ + torch.ops.aten.hardtanh.default, + torch.ops.aten.hardtanh_.default, + torch.ops.aten.mean.default, + torch.ops.aten.mean.dim, + torch.ops.aten.permute.default, + torch.ops.aten.permute_copy.default, + torch.ops.aten.squeeze.dim, + torch.ops.aten.squeeze_copy.dim, + # TODO: remove? 
+ torch.ops.aten.adaptive_avg_pool2d.default, + torch.ops.aten.view_copy.default, + torch.ops.aten.view.default, + torch.ops.aten.slice_copy.Tensor, + torch.ops.aten.flatten.using_ints, + ] + + +def propagate_annotation(model: torch.fx.GraphModule) -> None: + for n in model.graph.nodes: + if n.op != "call_function" or not _is_share_obs_or_fq_op(n.target): + continue + + prev_node = n.args[0] + if not isinstance(prev_node, Node): + continue + + quantization_annotation = prev_node.meta.get("quantization_annotation", None) + if not quantization_annotation: + continue + + output_qspec = quantization_annotation.output_qspec + if not output_qspec: + continue + + # make sure current node is not annotated + if ( + "quantization_annotation" in n.meta + and n.meta["quantization_annotation"]._annotated + ): + continue + + shared_qspec = SharedQuantizationSpec(prev_node) + # propagate the previous output_qspec to the current node + n.meta["quantization_annotation"] = QuantizationAnnotation( + input_qspec_map={ + prev_node: shared_qspec, + }, + output_qspec=shared_qspec, + _annotated=True, + ) + + +# TODO: make the list of ops customizable +def _convert_scalars_to_attrs(model: torch.fx.GraphModule) -> torch.fx.GraphModule: + for n in model.graph.nodes: + if n.op != "call_function" or n.target not in [ + torch.ops.aten.add.Tensor, + torch.ops.aten.mul.Tensor, + ]: + continue + args = list(n.args) + new_args = [] + for i in range(len(args)): + if isinstance(args[i], torch.fx.Node): + new_args.append(args[i]) + continue + prefix = "_tensor_constant_" + get_new_attr_name = get_new_attr_name_with_prefix(prefix) + tensor_constant_name = get_new_attr_name(model) + float_tensor = torch.tensor(float(args[i])) + model.register_buffer(tensor_constant_name, float_tensor) + fake_mode = n.meta["val"].fake_mode + with model.graph.inserting_before(n): + get_attr_node = model.graph.create_node( + "get_attr", tensor_constant_name, (), {} + ) + get_attr_node.meta["val"] = fake_mode.from_tensor( + float_tensor, static_shapes=True + ) + new_args.append(get_attr_node) + n.args = tuple(new_args) + model.recompile() + return model diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h new file mode 100644 index 0000000000000000000000000000000000000000..6203905732667776ed9646d4ff3b4fa0ea2458de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h @@ -0,0 +1,351 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Def; +struct Property; +struct ClassDef; +struct SugaredValue; +struct Resolver; + +using ResolverPtr = std::shared_ptr; +struct Self { + virtual ~Self() = default; + virtual std::shared_ptr makeSugared(Value* v) const = 0; + virtual ClassTypePtr getClassType() const = 0; +}; + +// A CompilationUnit is a list of named Functions +// with helper methods to iterate the list or invoke the function. 
+// Classes have a CompilationUnit holding the class methods, +// and Modules have a CompilationUnit holding the Functions that +// are used to implement their Methods + +struct TORCH_API CompilationUnit { + enum class FunctionType { Method, Hook, PreHook }; + // constructor that takes a set of functions to compile using the native + // resolver + explicit CompilationUnit(const std::string& source); + CompilationUnit() = default; + + CompilationUnit& operator=(CompilationUnit&&) = default; + CompilationUnit(CompilationUnit&&) = default; + CompilationUnit& operator=(const CompilationUnit&) = delete; + CompilationUnit(const CompilationUnit&) = delete; + + Function* find_function(const c10::QualifiedName& name) const { + auto it = dict_.find(name); + if (it == dict_.end()) { + return nullptr; + } + return functions_[it->second].get(); + } + + Function& get_function(const c10::QualifiedName& name) const { + if (auto r = find_function(name)) { + return *r; + } + TORCH_CHECK(false, "attempted to get undefined function ", name.name()); + } + + void set_optimized(bool o) { + TORCH_WARN( + "CompilationUnit::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "CompilationUnit::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + // for historic reasons, these are defined in ir_emitter.cpp + // Returns the list of Functions just defined. + std::vector define( + const c10::optional& prefix, + const std::vector& properties, + const std::vector& propResolvers, + const std::vector& definitions, + const std::vector& + defResolvers, /* determines how we handle free + variables in each definition*/ + // if non-null, the first argument to each def, is bound to this value + const Self* self, + // see [name mangling] + bool shouldMangle = false, + c10::optional operator_set_version = c10::nullopt); + + void define_hooks( + const c10::optional& prefix, + const std::vector& hookDefs, + const std::vector& hookResolvers, + const std::vector& preHookDefs, + const std::vector& preHookResolvers, + const Self* self, + bool shouldMangle = false); + + // same as above but parse the definitions from source + // Returns the list of Functions just defined. + std::vector define( + // prefix namespace to put all the defined functions into + const c10::optional& prefix, + const std::string& source, + const ResolverPtr& resolver, + const Self* self); + + void define_interface( + const c10::QualifiedName& qualifiedName, + const ClassDef& classDef, + ResolverPtr rcb, + bool is_module = false); + + Function* create_function( + c10::QualifiedName name, + std::shared_ptr graph, + bool shouldMangle = false) { + if (shouldMangle) { + name = mangle(name); + } + auto fn = std::make_unique( + std::move(name), std::move(graph), nullptr); + auto ret = fn.get(); + register_function(std::move(fn)); + return ret; + } + + std::vector get_functions() const { + return fmap(functions_, [](const std::unique_ptr& fn) { + return fn.get(); + }); + } + + /// Run a method from this compilation. 
+ /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const c10::QualifiedName& method_name, Types&&... args) { + return get_function(method_name)({IValue(std::forward(args))...}); + } + + void drop_all_functions() { + dict_.clear(); + functions_.clear(); + } + + /** + * Register a class as being owned by this compilation unit. + */ + void register_type(c10::NamedTypePtr namedType) { + // TODO: class types cannot be redefined because we have no way right now + // of invalidating their methods. NamedTuples are fine though, since they + // don't have methods. + TORCH_CHECK( + 0 == classDict_.count(*namedType->name()), + "class '", + namedType->name()->qualifiedName(), + "' already defined."); + classes_.push_back(std::move(namedType)); + classDict_[*classes_.back()->name()] = classes_.size() - 1; + }; + + c10::ClassTypePtr get_class(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const { + for (const auto& cls : classes_) { + if (cls->name()->qualifiedName() == name.qualifiedName()) { + return cls->expect(); + } + } + return nullptr; + } + + c10::NamedTypePtr get_type(const c10::QualifiedName& name) const { + auto it = classDict_.find(name); + if (it == classDict_.end()) { + return nullptr; + } + return classes_[it->second]; + } + + // For testing: clear all Python-defined classes to ensure that unit tests + // have isolation. + void _clear_python_cu() { + // Delete all the associated class methods + for (const auto& type : classes_) { + if (auto cls = type->cast()) { + for (auto method : cls->methods()) { + // Tombstone the method in the compilation unit. + // Don't erase because the dict_ + auto it = dict_.find(method->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + // Classes can have multiple pointers to the same hook, + // need to make sure to not delete it twice + std::unordered_set hooks_to_delete; + for (const auto& hook : cls->getForwardHooks()) { + hooks_to_delete.insert(hook); + } + for (const auto& pre_hook : cls->getForwardPreHooks()) { + hooks_to_delete.insert(pre_hook); + } + for (const auto& hook : hooks_to_delete) { + // Tombstone the hook in the compilation unit. + auto it = dict_.find(hook->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + } + } + classes_.clear(); + classDict_.clear(); + } + + // [Internal Only] Remove method. + // Note Used for freezing. 
+ void unsafeRemoveMethod(const c10::QualifiedName& method_name) { + auto it = dict_.find(method_name); + TORCH_CHECK( + it != dict_.end(), + "method '", + method_name.qualifiedName(), + "' does not exist."); + functions_[it->second] = nullptr; + dict_.erase(it); + } + + // [name mangling] All code objects must have a unique qualified name in a + // CompilationUnit. In Python, sometimes functions won't have unique qualified + // name (for example, nested functions). So we mangle Python functions to + // ensure that they are uniquely named. + // + // We also use mangling to distinguish different Module instances. Since each + // Module is a singleton class instance, different instances of the same + // Python Module will have different types but the same qualified name. + c10::QualifiedName mangle(const c10::QualifiedName& name) const { + auto mangled = name; + while (get_type(mangled) || find_function(mangled)) { + mangled = mangler_.mangle(mangled); + } + return mangled; + } + + private: + std::unique_ptr define( + const c10::optional& prefix, + const Def& def, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false, + FunctionType type = FunctionType::Method, + c10::optional version = c10::nullopt) const; + + // Define a property on \p self. + struct PropertyPair; + PropertyPair define_property( + const c10::optional& prefix, + const Property& prop, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false) const; + + Function& register_function(std::unique_ptr fn) { + TORCH_CHECK( + 0 == dict_.count(fn->qualname().qualifiedName()), + "method '", + fn->qualname().qualifiedName(), + "' already defined."); + functions_.emplace_back(std::move(fn)); + dict_[functions_.back()->qualname()] = functions_.size() - 1; + return *functions_.back(); + } + std::vector> functions_; + // for fast lookup + std::unordered_map dict_; + std::unordered_map classDict_; + + // [class ownership] Right now there are two relationships between classes + // and compilation units: + // 1. Classes have compilation units internally that hold their methods. + // 2. On load, the TypePtrs of any imported classes are owned by the main + // module's compilation unit. + std::vector classes_; + + mutable NameMangler mangler_; +}; + +// An owning pointer to a Function. Just a pair of a raw Function ptr and it's +// owning CU. We need this because pybind requires a ref-counted way to refer to +// Functions. +struct StrongFunctionPtr { + StrongFunctionPtr(std::shared_ptr cu, Function* function) + : cu_(std::move(cu)), function_(function) { + TORCH_INTERNAL_ASSERT(cu_); + TORCH_INTERNAL_ASSERT(function_); + } + std::shared_ptr cu_; + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. 
+using CompilationUnit = ::torch::jit::CompilationUnit; +} // namespace script +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..74663cfb41ce717d7bd6668a86d22e09471efb82 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h @@ -0,0 +1,181 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API GraphFunction : public Function { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + GraphFunction( + c10::QualifiedName name, + std::shared_ptr graph, + std::function function_creator, + c10::optional executor_execution_mode = + c10::nullopt) + : name_(std::move(name)), + graph_(std::move(graph)), + executor_execution_mode_(executor_execution_mode), + function_creator_(std::move(function_creator)) {} + + bool isGraphFunction() const override { + return true; + } + + void run(Stack& stack) override; + + std::function function_creator() const { + return function_creator_; + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch) override; + + std::shared_ptr graph() const { + return graph_; + } + + std::shared_ptr optimized_graph() const; + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // private/unstable api. sets the initial execution mode + // will not affect executor if there is an existing executor + // created for this function + void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) { + executor_execution_mode_ = mode; + } + // private/unstable api. sets flag of whether or not to ignore amp. + // will not affect executor if there is an existing executor + // created for this function + void _set_ignore_amp(bool ignore_amp) { + force_no_amp_ = ignore_amp; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override; + + size_t num_inputs() const override { + return graph()->inputs().size(); + } + + Function& setSchema(FunctionSchema schema) override { + schema_ = std::make_unique(std::move(schema)); + return *this; + } + + const FunctionSchema& getSchema() const override; + + GraphExecutorState getDebugState() { + return get_executor().getDebugState(); + } + + bool is_optimized() const { + TORCH_WARN( + "GraphFunction::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + void check_single_output() { + TORCH_CHECK( + graph()->outputs().size() == 1, + "Method (but not graphs in general) require a single output. 
Use None/Tuple for 0 or 2+ outputs"); + } + + GraphExecutor& get_executor() { + ensure_defined(); + std::lock_guard lock(compile_mutex); + auto& executor = executors_[currentSpecialization()]; + if (executor) { + return *executor; + } + check_single_output(); + const std::string& name = name_.name(); + std::shared_ptr opt_graph = optimized_graph(); + if (!executor_execution_mode_) { + executor = GraphExecutor(opt_graph, name); + } else { + executor = GraphExecutor(opt_graph, name, *executor_execution_mode_); + } + return *executor; + } + + using Function::call; + bool call( + Stack& stack, + c10::optional bailOut, + c10::function_ref f) override { + f(get_executor().getPlanFor(stack, bailOut).code); + return true; + } + + void clear_optimized_graphs() { + optimized_graphs_.fill(nullptr); + } + + private: + enum SpecializationKey { + AutocastOff, + CpuAutocastOn, + GpuAutocastOn, + CpuGpuAutocastOn, + + // This provides the number of specializations + // (Must be last entry) + TotalCount + }; + + SpecializationKey currentSpecialization() const; + + private: + c10::QualifiedName name_; + // The original, non-optimized graph + std::shared_ptr graph_; // for debugging and for inlining + + // allows users to specify Simple/Profiling Executor for function + // TODO: add more executors + mutable c10::optional executor_execution_mode_; + + // if invoked on a graph that has already traced through amp + // don't invoke amp pass + mutable bool force_no_amp_ = false; + // Optimized graph, computed lazily. Used for inlining. + mutable std::array, SpecializationKey::TotalCount> + optimized_graphs_; + + // GraphFunctions are invokable from multiple threads, so this lock needs to + // be held when we're initializing graph executor for the first time or + // computing the optimized graph. We're using reentrant mutex so that we don't + // need to worry about causing a deadlock by calling one method from another + // (e.g. optimized_graph() from get_executor()). + mutable std::recursive_mutex compile_mutex; + + // executor_[0] - autocast off + // executor_[1] - autocast cpu on + // executor_[2] - autocast gpu on + // executor_[3] - autocast cpu & gpu on + std::array, SpecializationKey::TotalCount> + executors_; + + // an optional function that actually creates the method when + // ensure_defined() is called. This is used by the compiler so + // that it can construct methods out of order + std::function function_creator_; + + // if absent, then we generate a default schema based on the graph + // mutable because getSchema caches the default schema if one is requested + // before a call to setSchema + mutable std::unique_ptr schema_; +}; + +// Short hands for dynamic_cast. +TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept; +TORCH_API GraphFunction& toGraphFunction(Function&); +TORCH_API const GraphFunction& toGraphFunction(const Function&); + +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h new file mode 100644 index 0000000000000000000000000000000000000000..28675e5bd059f5e876e1b55c94b2c0a705aca28c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch::jit { + +using ObjectPtr = c10::intrusive_ptr; + +// A method in a module, e.g. 
f in: +// +// class M(ScriptModule): +// @script_method +// def f(self, x): +// ... +// Note: because Method/Module are exposed to python these +// classes use python method naming conventions +struct TORCH_API Method : public torch::IMethod { + Method(ObjectPtr owner, Function* function); + + // the module that contains this method. + Module owner() const; + // the raw objectptr that owns this method, for when the method is owned by a + // torchbind object. + ObjectPtr raw_owner() const; + void run(Stack& stack); + void run(Stack&& stack) { + run(stack); + } + + c10::IValue operator()( + std::vector stack, + const Kwargs& kwargs = Kwargs()) const override; + + // Run method async. Invocation on this function would invokes a JIT + // interpreter that executes ops inline, one by one, on caller's thread. A + // model can utilize async op, i.e. `fork`, to launch an asynchronous task + // which will be launched on provided `taskLauncher`. + c10::intrusive_ptr run_async( + std::vector stack, + const Kwargs& kwargs = Kwargs(), + TaskLauncher taskLauncher = at::launch); + + std::shared_ptr graph() const { + return toGraphFunction(*function_).graph(); + } + + const std::string& name() const override { + return function_->name(); + } + + size_t num_inputs() const { + return function_->num_inputs(); + } + + GraphExecutor& get_executor() { + return toGraphFunction(*function_).get_executor(); + } + + Function& function() const { + return *function_; + } + + private: + void setArgumentNames(std::vector&) const override; + + // Methods are uniqued onwed by a single module. This raw pointer allows + // looking up the module. + ObjectPtr owner_; + + // Underlying unbound function + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Method = ::torch::jit::Method; +} // namespace script + +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h new file mode 100644 index 0000000000000000000000000000000000000000..6c49b695cb6b5dec57e45f851f5db5b82533e4af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h @@ -0,0 +1,685 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// This file contains classes which assist in desugaring Python style +// modules and their methods into flattened graphs which don't have any +// function calls. + +namespace torch::jit { + +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::QualifiedName; +// Map which stores filename to content. 
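+// For illustration, a minimal sketch of passing such a map to Module::save
+// (declared further below); the file name "metadata.json", its content, and
+// the Module variable `module` are made-up examples:
+//
+//   ExtraFilesMap extra_files;
+//   extra_files["metadata.json"] = "{\"version\": 1}";
+//   module.save("model.pt", extra_files);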
+using ExtraFilesMap = std::unordered_map; + +using ModulePtr = c10::intrusive_ptr; + +struct Module; + +template +struct slot_list_impl; + +template +struct Named { + std::string name; + T value; +}; + +using NameModule = Named; +using NameValue = Named; +using NameTensor = Named; + +namespace detail { +struct TORCH_API ModulePolicy; +struct TORCH_API ParameterPolicy; +struct TORCH_API AttributePolicy; +struct TORCH_API BufferPolicy; +template +struct NamedPolicy; +} // namespace detail + +using module_list = slot_list_impl; +using named_module_list = + slot_list_impl>; + +using parameter_list = slot_list_impl; +using named_parameter_list = + slot_list_impl>; + +using attribute_list = slot_list_impl; +using named_attribute_list = + slot_list_impl>; + +using buffer_list = slot_list_impl; +using named_buffer_list = + slot_list_impl>; + +using ModuleLookup = std::function&)>; + +struct TORCH_API Module : public Object { + explicit Module(c10::QualifiedName class_name); + Module(std::shared_ptr cu, const c10::ClassTypePtr& type); + Module() = default; + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + Module( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + Module(ModulePtr module_value) : Object(std::move(module_value)) {} + ~Module() = default; + + void set_optimized(bool o) { + TORCH_WARN( + "Module::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "Module::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + IValue forward(std::vector inputs, const Kwargs& kwargs = Kwargs()) { + return get_method("forward")(std::move(inputs), kwargs); + } + + // In script modules, buffers are Tensors attribute that are _not_ registered + // as parameters. This is different than in nn.Module where there is a special + // register_buffer method. With this simplification, we only need to track + // whether a slot is a parameter to be able to classify it. 
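+// A rough sketch of that distinction, using the registration methods declared
+// just below (`m` is an assumed Module; the names "weight" and "running_mean"
+// are illustrative):
+//
+//   m.register_parameter("weight", at::randn({3, 3}), /*is_buffer=*/false);
+//   m.register_buffer("running_mean", at::zeros({3}));
+//   // only "weight" is returned by m.parameters(); both are attributes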
+ void register_buffer(const std::string& name, at::Tensor v) { + bool is_param = false; + bool is_buffer = true; + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_parameter( + const std::string& name, + at::Tensor v, + bool is_buffer) { + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_attribute( + const std::string& name, + const TypePtr& t, + IValue v, + bool is_param = false, + bool is_buffer = false) { + type()->addOrCheckAttribute(name, t, is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_module(const std::string& name, const Module& module) { + type()->addOrCheckAttribute(name, module.type()); + _ivalue()->setAttr(name, module._ivalue()); + } + + void apply(const std::function& fn); + + buffer_list buffers(bool recurse = true) const; + named_buffer_list named_buffers(bool recurse = true) const; + + module_list children() const; // direct modules + named_module_list named_children() const; + module_list modules() const; // all modules, including this one, recursively + named_module_list named_modules() const; + + // all tensors involved in gradient optimization + parameter_list parameters(bool recurse = true) const; + named_parameter_list named_parameters(bool recurse = true) const; + + // all members of the object, similar to iterating over dir(obj) in python + attribute_list attributes(bool recurse = true) const; + named_attribute_list named_attributes(bool recurse = true) const; + + void dump( + bool print_method_bodies, + bool print_attr_values, + bool print_param_values) const; + + std::string dump_to_str( + bool print_method_bodies, + bool print_attr_values, + bool print_param_values) const; + + /// Enables "training" mode. + void train(bool on = true); + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. + void eval() { + train(/*on=*/false); + } + /// True if the module is in training mode. + bool is_training() const { + return attr("training", true).toBool(); + } + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::Device device, at::ScalarType dtype, bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::ScalarType dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. 
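+  /// A minimal usage sketch (assuming `m` is a Module): `m.to(at::kCUDA)`
+  /// moves all parameters to the default CUDA device, while `m.to(at::kHalf)`
+  /// uses the dtype-only overload above.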
+ void to(at::Device device, bool non_blocking = false); + + void save( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void save( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void _save_for_mobile( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + void _save_for_mobile( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + Module copy() const; + + Module deepcopy(c10::optional device = c10::nullopt) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well + Module clone(bool inplace = false) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well. Also allows the caller to specify a set of + // method and attribute names to not clone. + Module clone( + bool inplace, + const std::unordered_set& ignored_method, + const std::unordered_set& ignored_attributes) const; + + void clone_method(const Module& orig, const std::string& name); + + IValue operator()(std::vector inputs); + + template + IValue create_class(const c10::QualifiedName& name, Types&&... args) const { + return create_class(name, {IValue(std::forward(args))...}); + } + + IValue create_class(const c10::QualifiedName& name, Stack stack) const; + + inline bool operator==(const Module& y) const noexcept { + return _ivalue() == y._ivalue(); + } + + void set_delete_memory(std::shared_ptr delete_mem) { + mem_to_delete_ = std::move(delete_mem); + } + + // A set of functions to maintain input shapes through torch.jit.save and + // torch.jit.load. It only works on tensors and lists/dicts of tensors + // because tracing is only supported by these types. + void store_traced_inputs(std::string func_name, std::vector inputs) { + if (inputs.size() == 0) { + return; + } + auto c10_inputs = c10::impl::GenericList(AnyType::get()); + for (IValue& value : inputs) { + // Not checking whether this is traceable type as that is already checked + // higher up in the stack and changing that would require a larger + // restructuring. 
+ c10_inputs.emplace_back(std::move(value)); + } + traced_inputs_.insert_or_assign(func_name, c10_inputs); + } + + c10::Dict retrieve_traced_inputs() + const { + return traced_inputs_; + } + + private: + Module clone_impl( + std::unordered_map& type_remap, + bool inplace, + IValue::HashAliasedIValueMap memo, + const std::unordered_set& ignored_methods, + const std::unordered_set& ignored_attributes) const; + + void clone_method( + const Module& orig, + const Function& method, + const std::unordered_map& type_remap); + + c10::QualifiedName getNameForMethod(std::string basename) const { + return QualifiedName(*type()->name(), std::move(basename)); + } + + void to_impl( + const c10::optional& device, + const c10::optional& dtype, + bool non_blocking); + + // Extra handle for the module to delete when itself is deleted + std::shared_ptr mem_to_delete_; + + // Map of function names to the traced inputs that they have been traced with + c10::Dict traced_inputs_; + + // Mutex to keep registring buffer or parameter thread safe. + std::shared_ptr register_mutex_ = std::make_shared(); +}; + +// C++ equivalent api of `torch.jit.freeze`. See documentation there for +// details. +TORCH_API Module freeze( + const Module& module, + const c10::optional>& preserved_attrs = + c10::nullopt, + bool optimize_numerics = true); + +// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation +// there for details. +TORCH_API Module optimize_for_inference( + Module& module, + const std::vector& other_methods = {}); + +enum class FusionBehavior { STATIC, DYNAMIC }; + +using FusionStrategy = std::vector>; +// clang-format off +/* +Sets the type and number of specializations that can occur during fusion. + +Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC +and depth is an integer. + +Behavior - static vs dynamic: + In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined + based on some initial profiling runs. + In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple + shapes are possible. + +In both cases, we also recompile on new striding behavior, device, or dtype. + +Behavior - fallback functions & depth: + When an input doesn't match the format required by the specialized compiled op, it will run + a fallback function. Fallback functions are recursively be compiled and specialized based + on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to + limit the number of specializations that can be compiled, before giving up on recompiling and + falling back to a completely un-fused, un-specialized implementation. + +The list of (type, depth) pairs controls the type of specializations and the number of +specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first +two specializations will use static fusions, the following two specializations will use +dynamic fusion, and any inputs that satisfy none of the 4 options will run an +unfused implementation. + +NB: in the future, if more as more fusion backends are added there may be more granular +apis for specific fusers. 
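+
+Example (a rough sketch; as noted below, setFusionStrategy returns the
+previously active strategy):
+
+  FusionStrategy strategy = {
+      {FusionBehavior::STATIC, 2},
+      {FusionBehavior::DYNAMIC, 2}};
+  FusionStrategy previous = setFusionStrategy(strategy);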
+*/ +// clang-format on +TORCH_API FusionStrategy getFusionStrategy(); +// returns previous strategy +TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy); + +namespace detail { + +struct TORCH_API SlotCursor { + Module module_; + int64_t i_; // slot offset, -1 indicates the module itself +}; + +} // namespace detail + +// This iterator allows the (optionally recursive) enumeration of +// the members of a Module. It performs a depth-first pre-order +// traversal of the module. The Policy template parameter determines +// which slots of the object should be included. For instance, +// when iterating parameters, we return the parameter tensors, +// but skip modules, buffers, and other attributes. +// See ModulePolicy for comments about Policy object's API. +template +struct slot_iterator_impl { + using SlotCursor = detail::SlotCursor; + using value_type = typename Policy::value_type; + slot_iterator_impl( + Module root, + bool recurse, // if true, do a depth-first search, otherwise, just look at + // slots of root + bool return_module) // if true include root itself as the first thing + // visited (used in modules()) + : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}), + recurse_(recurse) { + // advance iterator to first valid element (or the end, if empty) + while_not_valid_next(); + } + // empty cursors_, represents end of iteration + slot_iterator_impl() : recurse_(false) {} + value_type operator*() const { + return Policy::create(cursors_, cur()); + } + value_type operator->() const { + return **this; + } + slot_iterator_impl& operator++() { + next_valid(); + return *this; + } + slot_iterator_impl operator++(int) { + // this is really expensive, should we delete it so people don't use it + // instead of prefix? + slot_iterator_impl old = *this; + ++(*this); + return old; + } + + private: + // return_module() is a corner case where instead of returning a submodule + // of root, we are returning root itself, because we are iterating modules(), + // which contains the root module itself. + // It is represented with a single SlotCursor whose index is -1. + bool return_module() const { + return top().i_ == -1; + } + const SlotCursor& top() const { + return cursors_.back(); + } + SlotCursor& top() { + return cursors_.back(); + } + IValue cur() const { + return return_module() ? top().module_._ivalue() + : top().module_._ivalue()->getSlot(top().i_); + } + + // advance to the next slot in a depth first pre-order traversal of the + // modules slots. This function does not guarantee the next slot is a + // valid element of the iteration. That is done by valid(). + // invariant: !cursors_.empty() + void next() { + // we just returned the module itself, advance i_ to 0 so we are now + // at the first slot of the module. + if (return_module()) { + ++top().i_; + return; + } + // the last traversal action advanced beyond the number of slots in the + // module so continue the iteration in the parent. + if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) { + cursors_.pop_back(); + if (!cursors_.empty()) { + ++top().i_; + } + return; + } + // if the current thing is a module, we have to scan it for recursive + // traversals. We do this by adding a new SlotCursor to track the traversal. + if (recurse_ && + top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) { + cursors_.emplace_back(SlotCursor{cur().toModule(), 0}); + return; + } + // common case: advance to the next slot. 
+ ++top().i_; + } + // is the current position of the iterator a valid one? + // otherwise, we have to continue advancing. + bool valid() const { + return top().i_ < + int64_t(top().module_._ivalue()->type()->numAttributes()) && + Policy::valid( + top().module_._ivalue()->type(), + top().i_, + top().module_._ivalue()->getSlot(top().i_)); + } + void while_not_valid_next() { + // advance iteration until we are either at the end (cursors_.empty()) + // or in a valid state. return_module() is a special case, + // and is always considered valid, regardless of Policy, because it is + // it is only true when we are iterating modules. + while (!cursors_.empty() && !return_module() && !valid()) { + next(); + } + } + void next_valid() { + // avoid crashing if this is empty + if (cursors_.empty()) { + return; + } + // advance to next element, which is maybe not valid + next(); + while_not_valid_next(); + } + + std::vector cursors_; + bool recurse_; + + friend inline bool operator!=( + const slot_iterator_impl& a, + const slot_iterator_impl& b) { + // we are finished iteration when we have no more iteration SlotCursors. + // end is always an empty iterator with no cursors. + return (a.cursors_.empty() != b.cursors_.empty()); + } +}; + +// This type represents lists of parameters, attributes, and +// submodules contained in the module. It is abstract because +// they are not stored directly in std::vectors but inside the +// module's IValue object itself. +template +struct slot_list_impl { + using iterator = slot_iterator_impl; + using const_iterator = slot_iterator_impl; + using value_type = typename iterator::value_type; + slot_iterator_impl begin() const { + return slot_iterator_impl(module_, recurse_, return_module_); + } + slot_iterator_impl end() const { + return slot_iterator_impl(); + } + size_t size() const { + if (!size_) { + size_ = size_t(0); + // NOLINTNEXTLINE(clang-diagnostic-unused-variable) + for (const value_type& s : *(this)) { + (void)s; // Suppress unused variable warning + ++*size_; + } + } + return *size_; + } + + slot_list_impl(Module module, bool recurse, bool return_module) + : module_(std::move(module)), + recurse_(recurse), + return_module_(return_module), + size_(c10::nullopt) { + if (!recurse && !return_module && Policy::all_slots) { + size_ = module_.num_slots(); + } + } + + private: + Module module_; + bool recurse_; + bool return_module_; + // size of this list, cached on first request + // when we need to filter the slot list + mutable c10::optional size_; + friend struct Module; +}; + +namespace detail { + +// slot_iterator_impl always iterate over all the slots in a module, +// the Policy template argument determines slots should be returned and their +// types +struct TORCH_API ModulePolicy { + // the type of the value being returned + using value_type = Module; + + // the logic for creating the type being returned, given the raw IValue + // of that object. + static value_type create( + const std::vector& cursors, + IValue v) { + return Module(std::move(v).toObject()); + } + // is slot i in typ something that this iterator should return, otherwise, + // we skip it. + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->is_module(); + } + // are we going to return everything? If so, we can optimize the calculate + // of the size of the list. 
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API ParameterPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->is_parameter(i) && v.isTensor(); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API BufferPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) && + typ->is_buffer(i); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API AttributePolicy { + using value_type = IValue; + static value_type create( + const std::vector& cursors, + IValue v) { + return v; + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return true; + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true; +}; + +// take a Policy object, and make a version of it that returns the slot. +// along with the fully qualified name of that slot. This is used for the named_ +// variants like named_parameters(). +template +struct NamedPolicy { + using value_type = Named; + static value_type create( + const std::vector& cursors, + IValue v) { + std::string name; + if (cursors.size() == 1) { + name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back()); + } else { + std::ostringstream ss; + for (const auto i : c10::irange(cursors.size())) { + if (i > 0) { + ss << "."; + } + ss << nameFragment(cursors[i]); + } + name = ss.str(); + } + return value_type{std::move(name), Policy::create(cursors, std::move(v))}; + } + static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) { + return Policy::valid(t, i, v); + } + static constexpr bool all_slots = Policy::all_slots; + + private: + static std::string nameFragment(const detail::SlotCursor& f) { + return f.module_.type()->getAttributeName(f.i_); + } +}; + +} // namespace detail + +TORCH_API bool& getInlineEverythingMode(); + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Module = ::torch::jit::Module; +using ExtraFilesMap = ::torch::jit::ExtraFilesMap; +} // namespace script + +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h new file mode 100644 index 0000000000000000000000000000000000000000..7ccacf385be538f8f8e2ad738745e6874ce9ea62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h @@ -0,0 +1,200 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit { + +struct Resolver; +using ResolverPtr = std::shared_ptr; + +using ObjectPtr = c10::intrusive_ptr; + +// Throw this in C++ land if `attr` fails. 
This will be converted to a Python +// AttributeError by the Python binding code +class ObjectAttributeError : public std::runtime_error { + public: + ObjectAttributeError(const std::string& what) : std::runtime_error(what) {} +}; + +struct TORCH_API Object { + Object() = default; + Object(const Object&) = default; + Object& operator=(const Object&) = default; + Object(Object&&) noexcept = default; + Object& operator=(Object&&) noexcept = default; + Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {} + Object(std::shared_ptr cu, const c10::ClassTypePtr& type); + Object( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + + ObjectPtr _ivalue() const { + TORCH_INTERNAL_ASSERT(_ivalue_); + return _ivalue_; + } + + c10::ClassTypePtr type() const { + return _ivalue()->type(); + } + + struct Property { + std::string name; + Method getter_func; + c10::optional setter_func; + }; + + void setattr(const std::string& name, c10::IValue v) { + if (_ivalue()->type()->hasConstant(name)) { + TORCH_CHECK( + false, + "Can't set constant '", + name, + "' which has value:", + _ivalue()->type()->getConstant(name)); + } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) { + const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot); + TORCH_CHECK( + v.type()->isSubtypeOf(*expected), + "Expected a value of type '", + expected->repr_str(), + "' for field '", + name, + "', but found '", + v.type()->repr_str(), + "'"); + _ivalue()->setSlot(*slot, std::move(v)); + } else { + TORCH_CHECK(false, "Module has no attribute '", name, "'"); + } + } + + c10::IValue attr(const std::string& name) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + std::stringstream err; + err << _ivalue()->type()->repr_str() << " does not have a field with name '" + << name.c_str() << "'"; + throw ObjectAttributeError(err.str()); + } + + c10::IValue attr(const std::string& name, c10::IValue or_else) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + return or_else; + } + + bool hasattr(const std::string& name) const { + return _ivalue()->type()->hasAttribute(name) || + _ivalue()->type()->hasConstant(name); + } + + // each object owns its methods. 
The reference returned here + // is guaranteed to stay valid until this module has been destroyed + Method get_method(const std::string& name) const { + if (auto method = find_method(name)) { + return *method; + } + AT_ERROR("Method '", name, "' is not defined."); + } + + const std::vector get_methods() const { + return c10::fmap(type()->methods(), [&](Function* func) { + return Method(_ivalue(), func); + }); + } + + bool has_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + return true; + } + } + return false; + } + + const Property get_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + c10::optional setter = c10::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + prop.name, Method(_ivalue(), prop.getter), std::move(setter)}; + } + } + AT_ERROR("Property '", name, "' is not defined."); + } + + const std::vector get_properties() const { + return c10::fmap(type()->properties(), [&](ClassType::Property prop) { + c10::optional setter = c10::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + std::move(prop.name), + Method(_ivalue(), prop.getter), + std::move(setter)}; + }); + } + + c10::optional find_method(const std::string& basename) const; + + /// Run a method from this module. + /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const std::string& method_name, Types&&... args) { + return get_method(method_name)({IValue(std::forward(args))...}); + } + + // so that C++ users can easily add methods + void define(const std::string& src, const ResolverPtr& resolver = nullptr); + + size_t num_slots() const { + return _ivalue()->slots().size(); + } + + // shallow copy the object + Object copy() const; + + // Copies all the attributes of the object recursively without creating new + // `ClassType`, including deepcopy of Tensors + Object deepcopy() const; + + private: + // mutable be we lazily initialize in module_object. + mutable ObjectPtr _ivalue_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Object = ::torch::jit::Object; +} // namespace script +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h new file mode 100644 index 0000000000000000000000000000000000000000..22349936687ce8a317e7bf7e6d54911487b87646 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h @@ -0,0 +1,241 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT }; +class ConcreteModuleType; + +// You can think of an nn.Module as a template that corresponds to a family of +// JIT types. 
The template "arguments" are things like the constant values. +// e.g. +// class M(nn.Module): +// __constants__ = ["const"] +// ... +// +// Is similar to writing the following in C++: +// +// template +// class M { +// ... +// } +// +// We need to consider each different member of the type family a different JIT +// type because, e.g. different constant values lead to different versions of +// the same method. +// +// ConcreteModuleType corresponds to a single member of the type family, with +// all template arguments fully specified. Two Modules that share a +// ConcreteModuleType can share a JIT type, and vice versa. +// +// Why not just use a JIT type to represent concrete types? Because constants, +// function attributes, etc. are currently not representable in the type system, +// so this acts a non-first-class way of tracking concrete types. +// +// ConcreteModuleType is also the source of truth for servicing all +// ModuleValue::attr calls. This is so we can guarantee that if two Module's +// share a JIT type (and thus a ConcreteModuleType), then they behave the same +// way when you access attributes on them. + +// ConcreteModuleType has two phases. +// 1. Creation: First we build it up, during the ScriptModule conversion +// process. This is represented by ConcreteModuleTypeBuilder. +// ...then the converter calls ConcreteModuleTypeBuilder::build(), producing +// a +// ConcreteModuleType ready for querying. +// 2. Querying: We use ConcreteModuleType as a source of truth for +// ModuleValue::attr calls during method compilation. + +// Represents a concrete type during in the process for construction. We use +// this to decide whether we can share types between modules. +class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder { + public: + explicit ConcreteModuleTypeBuilder(py::object pyClass) { + TORCH_INTERNAL_ASSERT(pyClass); + pyClass_ = std::move(pyClass); + } + + void addConstant(std::string name, py::object value); + void addConstant(std::string name, IValue value); + void addAttribute( + std::string name, + const TypePtr& type, + bool isParameter, + bool isBuffer); + void addFunctionAttribute( + std::string name, + const TypePtr& type, + py::object pyFunction); + + void addModule(std::string name, std::shared_ptr meta); + + void addForwardHook(py::object hook); + void addForwardPreHook(py::object pre_hook); + + void addOverload( + std::string methodName, + std::vector overloadedMethodNames); + void addBuiltinFunction(std::string name, const std::string& symbol_name); + void addFailedAttribute(std::string name, std::string failureReason); + void addIgnoredAttribute(std::string name); + void setIterableModuleKind(IterableModuleKind kind); + + // If a ConcreteModuleType is poisoned, it will never compare equal to any + // other concrete type + void setPoisoned(); + + std::shared_ptr build() const { + return std::make_shared(*this); + } + + // This determines whether two modules can share a type. The container structs + // used by ConcreteModuleType have been defined such that operator== + // implements a meaningful comparison in that context. + bool equals(const ConcreteModuleTypeBuilder& other) const; + + struct FunctionAttribute { + FunctionTypePtr function_; + py::object pyFunction_; + + friend bool operator==( + const FunctionAttribute& lhs, + const FunctionAttribute& rhs) { + // Functions are not first class, so we can't do type comparison like a + // regular attribute. So we do a pointer equality check on the actual + // Python function object. 
+ return lhs.pyFunction_.is(rhs.pyFunction_); + } + }; + + struct Attribute { + Attribute(TypePtr type, bool isParam, bool isBuffer) + : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {} + + friend bool operator==(const Attribute& lhs, const Attribute& rhs) { + return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_; + } + TypePtr type_; + bool isParam_; + bool isBuffer_; + }; + + struct ModuleInfo { + ModuleInfo(std::string name, std::shared_ptr meta) + : name_(std::move(name)), meta_(std::move(meta)) {} + + friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs); + + std::string name_; + std::shared_ptr meta_; + }; + + private: + ConcreteModuleTypeBuilder() = default; + ClassTypePtr createTypeFromThis() const; + + // If true, this type will never compare equally to anything else. This is + // used if we want to ensure that this type is not shared (for example, if it + // came from a traced module) + bool isPoisoned_ = false; + + // The value of any constants defined by the module. + std::unordered_map constants_; + // The types of any attributes + OrderedDict attributes_; + // Overloads, in the same format as `__overloads__` in Python + std::unordered_map> overloads_; + // Any attributes we failed to convert to TorchScript, along with a hint as to + // why + std::unordered_map failedAttributes_; + // Any attributes that were marked as ignored. They cannot be used in + // TorchScript but can still be used in ignored function in Python. + std::unordered_set ignoredAttributes_; + // Any function attributes. These are special right now because functions are + // not first-class in the type system. + std::unordered_map functionAttributes_; + // Function attributes that are calls to builtin functions. These get + // de-sugared directly into the corresponding aten:: call. The map is + // attribute name -> aten symbol name + std::unordered_map builtinFunctions_; + // The concrete types of any submodules + std::vector modules_; + // Hooks to be called before/after forward when the module + // is called directly. Used to ensure modules have different types + // when they have different python hooks + // Actual hooks are added to ClassType directly during compilation + std::vector forwardHooks_; + std::vector forwardPreHooks_; + + // If something is a ModuleDict/ModuleList, it means: + // 1. The order of the submodules matters for comparing the type + // 2. The compiler is allowed to treat it like a dict/tuple + IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE; + + // The original `nn.Module` class that we derived this ScriptModule from. + py::object pyClass_; + + // NOTE: If you ever add any more state to this struct, you need to make sure + // operator== still makes sense! + friend ConcreteModuleType; +}; + +// Represents a finalized concrete type, used to service ModuleValue::attr calls +// during method compilation. 
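+// A rough sketch of the two-phase flow described above; `pyClass` and
+// `other_concrete` stand in for values produced elsewhere during ScriptModule
+// conversion:
+//
+//   ConcreteModuleTypeBuilder builder(pyClass);
+//   builder.addConstant("const", IValue(1));
+//   std::shared_ptr<ConcreteModuleType> concrete = builder.build();
+//   bool can_share_type = concrete->equals(*other_concrete);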
+class VISIBILITY_HIDDEN ConcreteModuleType { + public: + explicit ConcreteModuleType(ConcreteModuleTypeBuilder data); + + static std::shared_ptr fromJitType(TypePtr type); + + TypePtr getJitType() const; + c10::optional getPyClass() const; + IterableModuleKind getIterableModuleKind() const; + c10::optional> findOverloads( + const std::string& name) const; + c10::optional findFunctionAttribute(const std::string& name) const; + c10::optional findBuiltinFunction(const std::string& name) const; + std::shared_ptr findSubmoduleConcreteType( + const std::string& name) const; + c10::optional findFailedAttribute(const std::string& name) const; + bool isIgnoredAttribute(const std::string& name) const; + + // These getters are only here to return things as types that can be + // automatically converted by pybind. + std::unordered_map getConstantsPy() const; + std::unordered_map> getAttributesPy() + const; + std::vector>> + getModulesPy() const; + + bool equals(const ConcreteModuleType& other) const { + if (jitType_ == other.jitType_) { + // If the computed types are the same, these modules can (obviously) share + // a type. + return true; + } + + return data_.equals(other.data_); + } + bool equals(const ConcreteModuleTypeBuilder& other) const { + return data_.equals(other); + } + + void dump() const; + + private: + ConcreteModuleType() = default; + + // The JIT type derived from this ConcreteModuleType. + ConcreteModuleTypeBuilder data_; + TypePtr jitType_; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..787eae80578881a29c05d51774927f54213239f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +// Convert a graph with Loads & Stores into SSA form +TORCH_API void ConvertToSSA(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h new file mode 100644 index 0000000000000000000000000000000000000000..f0d999e83c1a2d7da8514ca2ad738c57d646df40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API size_t ComputeEditDistance( + const char* word1, + const char* word2, + size_t maxEditDistance); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h new file mode 100644 index 0000000000000000000000000000000000000000..84910c6bc1e4d6a9625ad7896b58deddfd3a9e64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void TransformExits(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h new file mode 100644 index 0000000000000000000000000000000000000000..c5efa0b40151ac313b71099a81df26b7a4e12395 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void InlineLoopCondition(std::shared_ptr& graph); +TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..ff59fa98ee7c23da778fb9e7997a3b39303a9d15 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h @@ -0,0 +1,575 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace torch { +namespace jit { + +// single character tokens are just the character itself '+' +// multi-character tokens need an entry here +// if the third entry is not the empty string, it is used +// in the lexer to match this token. + +// These kinds are also used in Tree.h as the kind of the AST node. +// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the +// lexer. 
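+// For example, the row (TK_NE, "ne", "!=") below means the literal text "!="
+// is lexed as a TK_NE token whose kindToString() name is "ne", whereas an
+// AST-only kind such as TK_APPLY has no fixed source text to match.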
+ +#define TC_FORALL_TOKEN_KINDS(_) \ + _(TK_EOF, "eof", "") \ + _(TK_WHITESPACE, "whitespace", "") \ + _(TK_WHITESPACE_EOF, "whitespace_eof", "") \ + _(TK_NUMBER, "number", "") \ + _(TK_NEWLINE, "newline", "") \ + _(TK_INDENT, "indent", "") \ + _(TK_DEDENT, "dedent", "") \ + _(TK_DEF, "def", "def") \ + _(TK_EQUIVALENT, "equivalent", "<=>") \ + _(TK_IDENT, "ident", "") \ + _(TK_STRING, "string", "") \ + _(TK_STRINGLITERAL, "string_literal", "") \ + _(TK_CONST, "const", "") \ + _(TK_LIST, "list", "") \ + _(TK_DICT, "dict", "") \ + _(TK_OPTION, "option", "") \ + _(TK_APPLY, "apply", "") \ + _(TK_COMPREHENSION, "comprehension", "") \ + _(TK_RANGE_CONSTRAINT, "range_constraint", "") \ + _(TK_PARAM, "param", "") \ + _(TK_INFERRED, "inferred", "") \ + _(TK_ACCESS, "access", "") \ + _(TK_ASSIGN, "assign", "") \ + _(TK_AUG_ASSIGN, "aug_assign", "") \ + _(TK_ATTRIBUTE, "attribute", "") \ + _(TK_IF, "if", "if") \ + _(TK_ELSE, "else", "else") \ + _(TK_ELIF, "elif", "elif") \ + _(TK_WHILE, "while", "while") \ + _(TK_EXPR_STMT, "expression statement", "") \ + _(TK_RETURN, "return", "return") \ + _(TK_IS, "is", "is") \ + _(TK_ISNOT, "is not", "is not") \ + _(TK_NE, "ne", "!=") \ + _(TK_EQ, "eq", "==") \ + _(TK_LE, "le", "<=") \ + _(TK_GE, "ge", ">=") \ + _(TK_FLOOR_DIV, "floordiv", "//") \ + _(TK_IF_EXPR, "if", "") \ + _(TK_TRUE, "True", "True") \ + _(TK_FALSE, "False", "False") \ + _(TK_NONE, "None", "None") \ + _(TK_AND, "and", "and") \ + _(TK_OR, "or", "or") \ + _(TK_NOT, "not", "not") \ + _(TK_LSHIFT, "<<", "<<") \ + _(TK_RSHIFT, ">>", ">>") \ + _(TK_CAST, "cast", "") \ + _(TK_PLUS_EQ, "+=", "+=") \ + _(TK_MINUS_EQ, "-=", "-=") \ + _(TK_TIMES_EQ, "*=", "*=") \ + _(TK_DIV_EQ, "/=", "/=") \ + _(TK_MOD_EQ, "%=", "%=") \ + _(TK_BIT_OR_EQ, "|=", "|=") \ + _(TK_BIT_AND_EQ, "&=", "&=") \ + _(TK_BIT_XOR_EQ, "^=", "^=") \ + _(TK_LSHIFT_EQ, "<<=", "<<=") \ + _(TK_RSHIFT_EQ, ">>=", ">>=") \ + _(TK_POW_EQ, "**=", "**=") \ + _(TK_GLOBAL, "global", "global") \ + _(TK_BUILT_IN, "built-in", "") \ + _(TK_SUBSCRIPT, "subscript", "") \ + _(TK_VAR, "variable", "") \ + _(TK_NOTHING, "nothing", "") \ + _(TK_DICT_LITERAL, "dict-literal", "") \ + _(TK_LIST_LITERAL, "list-literal", "") \ + _(TK_TUPLE_LITERAL, "tuple-literal", "") \ + _(TK_FOR, "for", "for") \ + _(TK_IN, "in", "in") \ + _(TK_NOTIN, "not in", "not in") \ + _(TK_STARRED, "starred", "") \ + _(TK_UNARY_MINUS, "unary minus", "") \ + _(TK_POW, "pow operator", "**") \ + _(TK_ARROW, "arrow", "->") \ + _(TK_DECL, "decl", "") \ + _(TK_SLICE_EXPR, "slice expr", "") \ + _(TK_TYPE_COMMENT, "type comment", "# type:") \ + _(TK_RAISE, "raise", "raise") \ + _(TK_ASSERT, "assert", "assert") \ + _(TK_DOTS, "dots", "...") \ + _(TK_LIST_COMP, "list comprehension", "") \ + _(TK_DICT_COMP, "dict comprehension", "") \ + _(TK_BREAK, "break", "break") \ + _(TK_CONTINUE, "continue", "continue") \ + _(TK_DELETE, "del", "del") \ + _(TK_PASS, "pass", "pass") \ + _(TK_CLASS_DEF, "class", "class") \ + _(TK_IMPORT, "import", "import") \ + _(TK_WITH, "with", "with") \ + _(TK_WITH_ITEM, "withitem", "") \ + _(TK_AS, "as", "as") \ + _(TK_PROP, "property", "") \ + _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \ + _(TK_NONE_TYPE, "NoneType", "NoneType") + +enum TokenKind { + // we use characters to represent themselves so skip all valid characters + // before + // assigning enum values to multi-char tokens. 
+ TK_DUMMY_START = 256, +#define DEFINE_TOKEN(tok, _, _2) tok, + TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN) +#undef DEFINE_TOKEN +}; + +TORCH_API std::string kindToString(int kind); +TORCH_API int stringToKind(const std::string& str); + +// nested hash tables that indicate char-by-char what is a valid token. +struct TokenTrie; +using TokenTrieRef = std::unique_ptr; +struct TokenTrie { + TokenTrie() : kind(0) {} + void insert(const char* str, int tok) { + if (*str == '\0') { + AT_ASSERT(kind == 0); + kind = tok; + return; + } + + for (size_t i = 0, e = child_chars.size(); i < e; ++i) { + if (child_chars[i] == *str) { + child_tries[i]->insert(str + 1, tok); + return; + } + } + + child_chars.emplace_back(*str); + child_tries.emplace_back(std::make_unique()); + child_tries.back()->insert(str + 1, tok); + } + int kind; // 0 == invalid token + + std::vector child_chars; + std::vector child_tries; +}; + +// stuff that is shared against all TC lexers/parsers and is initialized only +// once. +struct TORCH_API SharedParserData { + SharedParserData() : head(new TokenTrie()) { + std::stringstream ss; + for (const char* c = valid_single_char_tokens; *c; c++) { + std::string str(1, *c); + head->insert(str.c_str(), *c); + } + +#define ADD_CASE(tok, _, tokstring) \ + if (*(tokstring) != '\0') { \ + head->insert((tokstring), (tok)); \ + } + TC_FORALL_TOKEN_KINDS(ADD_CASE) +#undef ADD_CASE + } + + bool match( + StringCordView::Iterator pos, + bool continuation, // are we inside a scope where newlines don't count + // (e.g. inside parens) + bool whitespace_token, // should we treat whitespace as a token + int* kind, + StringCordView::Iterator* start, + StringCordView::Iterator* end) { + *start = pos; + // skip whitespace + while (pos.has_next() && isblank(*pos)) { + ++pos; + } + + // special handling + if (pos.has_next()) { + if (*pos == '#' && !isTypeComment(pos)) { + // skip comments + while (pos.has_next() && *pos != '\n') + ++pos; + // tail call, handle whitespace and more comments + return match(pos, continuation, whitespace_token, kind, start, end); + } + if (*pos == '\\') { + auto newiter = pos; + ++newiter; + if (newiter.has_next() && *newiter == '\n' && !whitespace_token) { + ++newiter; + return match(newiter, continuation, false, kind, start, end); + } + } + if (*pos == '\n') { + return match(++pos, continuation, !continuation, kind, start, end); + } + } + // we handle white space before EOF because in the case we have something + // like the following where we need to generate the dedent token if foo: + // ... + // else: + // pass + if (whitespace_token) { + *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE; + *end = pos; + return true; + } + if (!pos.has_next()) { + *kind = TK_EOF; + *start = pos; + *end = *start; + return true; + } + // invariant: the next token is not whitespace or newline + *start = pos; + // check for a valid number + size_t len; + if (isNumber(pos.rest_line(), 0, &len)) { + *end = *start; + *end += len; + *kind = TK_NUMBER; + return true; + } + // check for string + if (isString(pos.rest_line(), 0, &len)) { + *kind = TK_STRINGLITERAL; + *end = *start; + *end += len; + return true; + } + + // check for either an ident or a token + // ident tracks whether what we have scanned so far could be an identifier + // matched indicates if we have found any match. 
+ bool matched = false; + bool ident = true; + TokenTrie* cur = head.get(); + // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); + // i++) + for (size_t i = 0; pos.has_next() && (ident || cur != nullptr); + ++pos, ++i) { + ident = ident && validIdent(i, *pos); + if (ident) { + matched = true; + *end = pos.next_iter(); + *kind = TK_IDENT; + } + // check for token second, so that e.g. 'max' matches the token TK_MAX + // rather the + // identifier 'max' + if (cur) { + const auto begin_it = cur->child_chars.begin(); + const auto end_it = cur->child_chars.end(); + const auto ch_it = std::find(begin_it, end_it, *pos); + + cur = (ch_it == end_it) ? nullptr + : cur->child_tries[ch_it - begin_it].get(); + + if (cur && cur->kind != 0) { + matched = true; + *end = pos.next_iter(); + *kind = cur->kind; + } + } + } + return matched; + } + + bool isUnary(int kind, int* prec); + bool isBinary(int kind, int* prec); + bool isRightAssociative(int kind) { + switch (kind) { + case '?': + case TK_POW: + case TK_IF: + return true; + default: + return false; + } + } + + private: + bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); + } + + // 1. skip whitespace + // 2. handle comment or newline + // + bool isNumber(c10::string_view str, size_t start, size_t* len) { + char first = str[start]; + // strtod allows numbers to start with + or - or nan or inf + // http://en.cppreference.com/w/cpp/string/byte/strtof + // but we want only the number part, otherwise 1+3 will turn into two + // adjacent numbers in the lexer + if (first == '-' || first == '+' || isalpha(first)) + return false; + const char* startptr = str.data() + start; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* endptr; + torch::jit::strtod_c(startptr, &endptr); + *len = endptr - startptr; + // check if the number is complex valued + // access is safe because string is assumed to be null terminated + if (endptr != nullptr && *endptr == 'j') { + *len += 1; + } + return *len > 0; + } + + bool isCharCount(char c, c10::string_view str, size_t start, int len) { + // count checks from [start, start + len) + return start + len <= str.size() && + std::count(str.begin() + start, str.begin() + start + len, c) == len; + } + + // python concatenates all adjacent strings "a" "b" == "ab" + // strings can be enclosed with 1 or 3 single or double quotes + // if enclosed with 3 quotes newlines are valid + // as elsewhere, backslash and new line should be ignored + bool isString(c10::string_view str, size_t start, size_t* len) { + char quote = str[start]; + if (quote != '\"' && quote != '\'') + return false; + int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1; + + // end is now set past the opening quotation marks + size_t end = start + quote_len; + while (end < str.size() && !isCharCount(quote, str, end, quote_len)) { + if (str[end] == '\n' && quote_len != 3) { + return false; + } + // handle escaped characters. 
advances past escaped quotation marks, + // escaped newlines and escaped backslashes + // multi-char escapes like \x1A are handled fine here because the + // remainder of the escape are valid string characters anyway + if (str[end] == '\\') { + end++; + } + end++; + } + // set length equal to the complete string including quotations + *len = end - start + quote_len; + // if end finished without going past the last character of the string than + // there is a match + return end < str.size(); + } + + bool isblank(int n) { + return isspace(n) && n != '\n'; + } + + bool isTypeComment(StringCordView::Iterator str_iter) { + c10::string_view rest_line = str_iter.rest_line(); + const std::string type_string = "# type:"; + if (rest_line.size() < type_string.length()) { + return false; + } + auto match_string = rest_line.substr(0, type_string.size()); + return match_string == type_string; + } + + // Make an exception ignoring comments for type annotation comments + bool isTypeComment(StringCordView str, size_t pos) { + const std::string type_string = "# type:"; + if (str.size() < pos + type_string.length()) { + return false; + } + auto match_string = str.substr(pos, type_string.size()); + return match_string == type_string; + } + + TokenTrieRef head; +}; + +TORCH_API SharedParserData& sharedParserData(); + +struct Token { + int kind; + SourceRange range; + Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {} + std::string text() { + return std::string(range.token_text()); + } + std::string kindString() const { + return kindToString(kind); + } +}; + +struct Lexer { + explicit Lexer(std::shared_ptr source) + : source(std::move(source)), + pos(0), + nesting(0), + indent_stack(), + next_tokens(), + shared(sharedParserData()) { + auto first_indent = lexRaw(true); + indent_stack.push_back(first_indent.range.size()); + lex(); + } + // Return the current token, and then move to the next one + Token next() { + if (next_tokens.empty()) + reportError("Lexer invariant violated: empty token queue"); + Token r = std::move(next_tokens.front()); + next_tokens.erase(next_tokens.begin()); + if (next_tokens.empty()) { + lex(); + } + return r; + } + // Skip the current token if it matches the given kind + bool nextIf(int kind) { + if (cur().kind != kind) + return false; + next(); + return true; + } + + [[noreturn]] void reportError(const std::string& what) { + reportError(what, cur()); + } + [[noreturn]] void reportError(const std::string& what, const Token& t) { + std::stringstream ss; + ss << what << ":\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what, const Token& t) { + std::stringstream ss; + ss << "expected " << what << " but found '" << t.kindString() + << "' here:\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what) { + expected(what, cur()); + } + // Check that the current token has a given kind, return the current token, + // and advance to the next one. 
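+  // A minimal usage sketch (assuming `src` is a std::shared_ptr to a Source
+  // built from the text "def foo"):
+  //
+  //   Lexer lexer(src);
+  //   lexer.expect(TK_DEF);                  // consumes "def"
+  //   Token name = lexer.expect(TK_IDENT);   // consumes "foo"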
+ Token expect(int kind) { + if (cur().kind != kind) { + expected(kindToString(kind)); + } + return next(); + } + Token& lookahead() { + if (next_tokens.size() < 2) { + lex(); + } + return next_tokens[1]; + } + Token& cur() { + return next_tokens.front(); + } + + private: + void lex() { + auto r = lexRaw(); + switch (r.kind) { + case '(': + case '[': + case '{': + nesting++; + break; + case ')': + case ']': + case '}': + nesting--; + break; + case TK_WHITESPACE: + case TK_WHITESPACE_EOF: { + const auto depth = static_cast( + r.kind == TK_WHITESPACE_EOF ? indent_stack.front() + : r.range.size()); + // note: TK_WHITESPACE_EOF is whitespace right before the EOF token + // just like we allow the code to be indented to a particular initial + // indent level, we allow the final indent to be anything and set + // it back to the initial indent level. This allows the code to be + // put into string literals inside code without worrying about final + // whitespace + if (depth > indent_stack.back()) { + indent_stack.push_back(depth); + r.kind = TK_INDENT; + } else if (depth == indent_stack.back()) { + r.kind = TK_NEWLINE; + } else { + next_tokens.emplace_back(TK_NEWLINE, r.range); + while (indent_stack.back() != depth) { + indent_stack.pop_back(); + next_tokens.emplace_back(TK_DEDENT, r.range); + if (indent_stack.empty()) { + reportError("invalid indent level " + std::to_string(depth), r); + } + } + return; // We've already queued the tokens + } + } break; + default: + break; + } + next_tokens.push_back(std::move(r)); + } + Token lexRaw(bool whitespace_token = false) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int kind; + AT_ASSERT(source); + if (current == nullptr) { + AT_ASSERT(pos == 0); + current = std::make_unique( + source->text_str().begin()); + } + + StringCordView::Iterator start_iter = *current; + StringCordView::Iterator end_iter = *current; + if (!shared.match( + *current, + nesting > 0, + whitespace_token, + &kind, + &start_iter, + &end_iter)) { + expected( + "a valid token", + Token( + **current, + SourceRange(source, start_iter, start_iter.pos() + 1))); + } + + auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos())); + pos = end_iter.pos(); + *current = end_iter; + return t; + } + + std::shared_ptr source; + std::unique_ptr current; + size_t pos; + size_t nesting; // depth of ( [ { nesting... 
+ std::vector indent_stack; // stack of indentation level of blocks + // Invariant: this should always contain at least a single element + std::vector next_tokens; + SharedParserData& shared; +}; +} // namespace jit +} // namespace torch + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..6d856a090854a48947664964bd71f2d985a36832 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h @@ -0,0 +1,33 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct Decl; +struct ParserImpl; +struct Lexer; + +TORCH_API Decl mergeTypesFromTypeComment( + const Decl& decl, + const Decl& type_annotation_decl, + bool is_method); + +struct TORCH_API Parser { + explicit Parser(const std::shared_ptr& src); + TreeRef parseFunction(bool is_method); + TreeRef parseClass(); + Decl parseTypeComment(); + Expr parseExp(); + Lexer& lexer(); + ~Parser(); + + private: + std::unique_ptr pImpl; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..28300611c8751acbe80c4fb539bdfc1397a7bd5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch { +namespace jit { +static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~"; +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h new file mode 100644 index 0000000000000000000000000000000000000000..dc4ab61f67f7a39ef097cbe37fe8f6d5ec380d5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Resolver; +using ResolverPtr = std::shared_ptr; + +/** + * class Resolver + * + * Represents an "outer environment" in which we an look up names and return + * a corresponding SugaredValue. This is used during compilation to resolve + * references to names which are not defined internal to the graph. + * + * Example: PythonResolver looks at the enclosing Python scope for `name`. + * + * NOTE: When adding methods, keep this an abstract class (i.e. all new methods + * should be purely virtual). Resist the urge to provide a default + * implementation; you should explicitly think about how each resolver would + * handle the method. + */ +struct Resolver { + virtual ~Resolver() = default; + + // Resolve a given name to a SugaredValue. This takes the method `m` that the + // caller is currently constructing, since we may need to insert nodes into + // the graph to create a value. + virtual std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) { + return nullptr; + } + + // Resolve `name` to a TypePtr. 
+ virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) { + return nullptr; + } +}; + +// A resolver that only understands "torch.foo()" lookups. +struct NativeResolver : public Resolver { + std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) override { + if (name == "torch") { + return std::make_shared("aten"); + } + return nullptr; + } + + TypePtr resolveType(const std::string& name, const SourceRange& loc) + override { + return nullptr; + } +}; + +inline std::shared_ptr nativeResolver() { + return std::make_shared(); +} +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..c43e4363da38645ec31cd562a5b8ca8e631c73de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h @@ -0,0 +1,40 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using TypePtr = c10::TypePtr; + +struct TORCH_API SchemaTypeParser { + TypePtr parseBaseType(); + c10::optional parseAliasAnnotation(); + std::pair> parseType(); + std::tuple> + parseFakeAndRealType(); + c10::optional parseTensorDType(const std::string& dtype); + TypePtr parseRefinedTensor(); + + SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types) + : complete_tensor_types(parse_complete_tensor_types), L(L) {} + + private: + c10::optional tryToParseRequiresGrad(); + c10::optional tryToParseDeviceType(); + void parseList( + int begin, + int sep, + int end, + c10::function_ref callback); + + bool complete_tensor_types; + Lexer& L; + size_t next_id = 0; +}; +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..3a05af9c598abdfa7e71d8ab90d668031023a829 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h @@ -0,0 +1,55 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +/** + * class ScriptTypeParser + * + * Parses expressions in our typed AST format (TreeView) into types and + * typenames. 
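+ *
+ * Hedged usage sketch (assumed, not defined in this header): given the
+ * textual form of an annotation, parseType resolves it to a c10::TypePtr:
+ *
+ *   ScriptTypeParser parser(nativeResolver());
+ *   c10::TypePtr t = parser.parseType("List[Optional[Tensor]]");
+ *
+ * The annotation string above is just an example.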
+ */ +class TORCH_API ScriptTypeParser { + public: + explicit ScriptTypeParser() = default; + explicit ScriptTypeParser(ResolverPtr resolver) + : resolver_(std::move(resolver)) {} + + c10::TypePtr parseTypeFromExpr(const Expr& expr) const; + + c10::optional> parseBroadcastList( + const Expr& expr) const; + + c10::TypePtr parseType(const std::string& str); + + FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self); + + c10::IValue parseClassConstant(const Assign& assign); + + private: + c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const; + + c10::optional parseBaseTypeName(const Expr& expr) const; + at::TypePtr subscriptToType( + const std::string& typeName, + const Subscript& subscript) const; + std::vector evaluateDefaults( + const SourceRange& r, + const std::vector& default_types, + const std::vector& default_exprs); + std::vector parseArgsFromDecl(const Decl& decl, bool skip_self); + + std::vector parseReturnFromDecl(const Decl& decl); + + ResolverPtr resolver_ = nullptr; + + // Need to use `evaluateDefaults` in serialization + friend struct ConstantTableValue; + friend struct SourceImporterImpl; +}; +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h new file mode 100644 index 0000000000000000000000000000000000000000..72710a94ed21000580cd5a7282bf0051e5d99928 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h @@ -0,0 +1,457 @@ +#pragma once +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +class SourceRangeUnpickler; +struct SourceRange; + +// A stringlike class backed by a vector of string_view +// the string represented are logically the concatenation of the string_views +// This has advantage of not needing continues memory. 
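+//
+// Hedged sketch of the intended behavior (the literals below are illustrative
+// only): the pieces are addressed as one logical string, e.g.
+//
+//   StringCordView cord({"hello ", "world"}, /*ownerships=*/{});
+//   cord.size();        // 11
+//   cord.at(6);         // 'w' -- indexing crosses piece boundaries
+//   cord.substr(4, 4);  // a new StringCordView viewing "o wo"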
+struct TORCH_API StringCordView { + StringCordView(); + StringCordView(const StringCordView&) = default; + StringCordView(StringCordView&&) noexcept = default; + StringCordView( + std::vector inputs, + std::vector> ownerships); + + StringCordView& operator=(const StringCordView&) = default; + StringCordView& operator=(StringCordView&&) noexcept = default; + + size_t size() const { + return accumulated_sizes_.back(); + } + + size_t find(const std::string& tok, size_t start) const; + size_t find_regex(const std::string& tok, size_t start) const; + StringCordView substr(size_t start, size_t size) const; + + char at(size_t index) const { + return *iter_for_pos(index); + } + char operator[](size_t index) const { + return at(index); + } + + std::string str() const { + std::stringstream ss; + for (auto s : pieces_) { + ss << std::string(s); + } + return ss.str(); + } + + bool operator==(const std::string& rhs) const; + + bool operator==(const StringCordView& rhs) const; + + c10::string_view piece(size_t index) const { + return pieces_[index]; + } + + struct Iterator { + Iterator( + const StringCordView* str, + size_t start_line, + size_t start_pos, + size_t size) + : line_(start_line), pos_(start_pos), str_(str), size_(size) {} + explicit Iterator(const StringCordView* str) + : Iterator(str, 0, 0, str->size()) {} + + Iterator() : Iterator(nullptr, 0, 0, 0) {} + + Iterator(const Iterator&) = default; + Iterator(Iterator&&) = default; + Iterator& operator=(const Iterator&) = default; + Iterator& operator=(Iterator&&) = default; + + Iterator operator++() { + if (size_ == 0) { + return *this; + } + if ((pos_ + 1) < str_->pieces_[line_].size()) { + pos_++; + } else { + line_++; + pos_ = 0; + } + return *this; + } + + Iterator operator++(int) { + Iterator prev(*this); + ++(*this); + return prev; + } + + Iterator next_iter() const { + Iterator next(*this); + ++next; + return next; + } + + Iterator& operator+=(size_t num) { + if (!has_next()) { + return *this; + } + size_t target_pos = pos_ + num; + if (target_pos >= str_->accumulated_sizes_[line_] && + (line_ + 1) < str_->accumulated_sizes_.size() && + target_pos < str_->accumulated_sizes_[line_ + 1]) { + pos_ = target_pos; + return *this; + } + + size_t target_abs_pos = pos() + num; + *this = str_->iter_for_pos(target_abs_pos); + return *this; + } + + bool operator==(const Iterator& rhs) const { + if (!has_next() && !rhs.has_next()) { + return true; + } + return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_); + } + bool operator!=(const Iterator& rhs) { + return !((*this) == rhs); + } + bool has_next() const { + return size_ > 0 && (line_ < str_->pieces_.size()); + } + + char operator*() const { + TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size()); + TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size()); + return str_->pieces_[line_].at(pos_); + } + + // returns rest of the line of the current iterator + c10::string_view rest_line() const { + if (line_ >= str_->pieces_.size()) { + return ""; + } + + c10::string_view cur_line = str_->pieces_[line_]; + return cur_line.substr(pos_, std::string::npos); + } + + size_t pos() const { + if (size_ == 0) { + return 0; + } + return str_->accumulated_sizes_[line_] + pos_; + } + + private: + size_t line_; + size_t pos_; + const StringCordView* str_; + size_t size_; + friend struct StringCordView; + }; + + Iterator begin() const { + return Iterator(this, 0, 0, size()); + } + Iterator end() const { + return Iterator(this, pieces_.size(), 0, 0); + } + Iterator iter_for_pos(size_t pos) const; + 
+ private: + std::vector pieces_; + std::vector accumulated_sizes_; + std::vector> owned_strings_; +}; + +// Source represents a code segment. It keeps track of: +// - text_view : the view into text of the code segment +// - filename (optional) : if present, represents the name of the file from +// which the code segment originated. +// - starting_line_no : represents the line in the original file where the +// code segment started. +struct TORCH_API Source { + // Whether or not Source should copy the string passed in the constructor. + enum CopiesString { COPIES_STRING, DONT_COPY }; + + explicit Source( + c10::string_view text_view, + c10::optional filename = c10::nullopt, + size_t starting_line_no = 0, + std::shared_ptr gen_ranges = nullptr, + CopiesString copies_str = COPIES_STRING) + : filename_(std::move(filename)), + starting_line_no_(starting_line_no), + gen_ranges_(std::move(gen_ranges)) { + if (copies_str == COPIES_STRING) { + std::shared_ptr allocated_str = + std::make_shared(text_view.data(), text_view.size()); + text_view_ = StringCordView({*allocated_str}, {allocated_str}); + } else { + text_view_ = StringCordView({text_view}, {}); + } + + calc_line_start_offsets(); + } + + explicit Source( + StringCordView str, + c10::optional filename = c10::nullopt, + size_t starting_line_no = 0, + std::shared_ptr gen_ranges = nullptr) + : text_view_(std::move(str)), + filename_(std::move(filename)), + starting_line_no_(starting_line_no), + gen_ranges_(std::move(gen_ranges)) { + calc_line_start_offsets(); + } + // Given a line number (within source_), return the byte offset of the + // beginning of that line. + size_t offset_for_line(size_t line) const { + return line_starting_offsets_.at(line); + } + + // Returns number of lines present. + size_t num_lines() const { + return line_starting_offsets_.size(); + } + + // Calculate the line (within the code segment) on which `offset` resides. + size_t lineno_for_offset(size_t offset) const { + auto iter = std::upper_bound( + line_starting_offsets_.begin(), line_starting_offsets_.end(), offset); + return iter - line_starting_offsets_.begin() - 1; + } + + // Calculate the line (within the original source file, if present) on which + // `lineno` resides. + size_t lineno_to_source_lineno(size_t lineno) const { + if (filename_) { + return lineno + starting_line_no_; + } else { + return lineno; + } + } + + StringCordView get_line(size_t lineno) const { + auto start = offset_for_line(lineno); + auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start + : text_view_.size() - start; + return text_view_.substr(start, size); + } + + const StringCordView& text_str() const { + return text_view_; + } + + char char_at(size_t index) const { + return text_view_.at(index); + } + + size_t size() const { + return text_view_.size(); + } + + c10::optional& filename() { + return filename_; + } + + size_t starting_line_no() const { + return starting_line_no_; + } + + c10::optional findSourceRangeThatGenerated( + const SourceRange& range); + + ~Source() = default; + + private: + void calc_line_start_offsets() { + line_starting_offsets_.clear(); + line_starting_offsets_.push_back(0); + size_t pos = 0; + while ((pos = text_view_.find("\n", pos)) != std::string::npos) { + line_starting_offsets_.push_back(++pos); + } + } + + StringCordView text_view_; + + c10::optional filename_; + // If filename_ is not present, starting_line_no_ is don't care + size_t starting_line_no_; + // Starting offsets for lines into the source. e.g. 
line 0 starts at + // line_starting_offsets_[0], etc. + std::vector line_starting_offsets_; + + std::shared_ptr gen_ranges_; +}; + +// A SourceRange is a reference to subset of a Source, specified by `start` and +// `end` byte offsets into the source text. +struct TORCH_API SourceRange { + SourceRange(std::shared_ptr source_view, size_t start_, size_t end_) + : source_view_(std::move(source_view)), start_(start_), end_(end_) { + if (source_view_) { + start_iter_ = source_view_->text_str().iter_for_pos(start_); + } + } + + SourceRange() : source_view_(nullptr), start_(0), end_(0) {} + + SourceRange( + std::shared_ptr source_view_, + StringCordView::Iterator start_iter, + size_t end_) + : source_view_(std::move(source_view_)), + start_(start_iter.pos()), + end_(end_), + start_iter_(start_iter) {} + + const c10::string_view token_text() const { + size_t size = end() - start(); + return start_iter_.rest_line().substr(0, size); + } + + const StringCordView text() const { + return source_view_->text_str().substr(start(), end() - start()); + } + size_t size() const { + return end() - start(); + } + static const size_t CONTEXT = 3; + void highlight(std::ostream& out) const; + + // Customizable version of 'highlight' method. + void print_with_context( + std::ostream& out, + size_t context, + bool highlight, + const std::string& funcname) const; + + const std::shared_ptr& source() const { + return source_view_; + } + size_t start() const { + return start_; + } + size_t end() const { + return end_; + } + std::string str() const { + std::stringstream ss; + highlight(ss); + return ss.str(); + } + + c10::optional> file_line_col() const { + if (!source_view_ || !source()->filename()) { + return c10::nullopt; + } + + auto lineno = source_view_->lineno_for_offset(start_); + auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno); + // TODO: c10::optional<>::value returns an rvalue ref so can't use it here?? + return std::make_tuple( + source_view_->filename().value_or(""), + source_view_->lineno_to_source_lineno(lineno), + (size_t)col_offset); + } + + bool operator==(const SourceRange& rhs) const { + return start() == rhs.start() && end() == rhs.end() && + source() == rhs.source(); + } + + bool operator!=(const SourceRange& rhs) const { + return !(*this == rhs); + } + + c10::optional findSourceRangeThatGenerated() const { + if (!source_view_) { + return c10::nullopt; + } + return source_view_->findSourceRangeThatGenerated(*this); + } + + protected: + std::shared_ptr source_view_; + + private: + size_t start_; + size_t end_; + StringCordView::Iterator start_iter_; +}; + +// OwnedSourceRange is just like a SourceRange except that it owns a `Source` +// instead of `Source`. Thus OwnedSourceRange owns a copy of source text. 
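+//
+// Hedged sketch (assumed usage; `range_from_parse` is a hypothetical
+// SourceRange obtained elsewhere): take a deep copy when a range must outlive
+// the Source that produced it, e.g. when caching ranges for later error
+// reporting:
+//
+//   OwnedSourceRange kept(range_from_parse);  // copies the underlying text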
+struct OwnedSourceRange : public SourceRange { + explicit OwnedSourceRange(const SourceRange& source_range) + : SourceRange(source_range) { + const auto& source = source_range.source(); + if (source) { + source_view_ = std::make_shared( + source->text_str().str(), + source->filename(), + source->starting_line_no()); + } + } +}; + +struct TORCH_API SourceRangeHasher { + public: + size_t operator()(const torch::jit::SourceRange& key) const; +}; + +struct StackEntry { + std::string filename; + SourceRange range; +}; + +TORCH_API void format_stack_trace( + std::ostream& out, + const std::vector& entries); + +inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) { + range.highlight(out); + return out; +} + +// A pair of (byte offset, SourceRange) describing a specific segment +// of the output stream +struct TaggedRange { + TaggedRange(size_t bytes, SourceRange range) + : bytes(bytes), range(std::move(range)) {} + size_t bytes; + SourceRange range; +}; +using SourceRangeRecords = std::vector; +using SourceRangeTagMap = + std::unordered_map; + +} // namespace torch::jit + +namespace std { +template <> +struct iterator_traits { + using value_type = char; + using difference_type = ptrdiff_t; + using pointer = char*; + using reference = char&; + using iterator_category = std::forward_iterator_tag; +}; +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h new file mode 100644 index 0000000000000000000000000000000000000000..185bd3c12684176dbbc2453b47b0e44832196e4c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +/** + * SourceRef does two things: + * 1. Owns a Source object. + * 2. Serves as lookup key to the owned Source in associative containers, for + * runtime data aggregation. + * We don't want to use std::shared_ptr directly because we want to + * support heteogeneous lookup, and also shared_ptr is an implementation detail + * which should be encapsulated. 
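+ *
+ * Hedged sketch (assumed usage): the operator< overloads allow a transparent
+ * comparator to probe a SourceRef-keyed map with a plain Source:
+ *
+ *   std::map<SourceRef, int64_t, std::less<>> counts;
+ *   counts.emplace(SourceRef(source), 0);
+ *   counts.find(*source);  // heterogeneous lookup, no SourceRef constructed
+ *
+ * where `source` stands for an assumed std::shared_ptr<Source>.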
+ */ +class TORCH_API SourceRef : public CustomClassHolder { + public: + explicit SourceRef(std::shared_ptr source_view) + : source_view_(std::move(source_view)) {} + bool operator==(const SourceRef& other) const { + return source_view_ == other.source_view_; + } + bool operator<(const Source& other) const { + return source_view_.get() < &other; + } + friend bool operator<(const Source& other, const SourceRef& self) { + return &other < self.source_view_.get(); + } + bool operator<(const SourceRef& other) const { + return *this < *other.source_view_.get(); + } + const Source* operator->() const { + return source_view_.get(); + } + + private: + std::shared_ptr source_view_; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h new file mode 100644 index 0000000000000000000000000000000000000000..dd03c3cdb02dfb47423054443601fb2bcbbbe042 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API double strtod_c(const char* nptr, char** endptr); +TORCH_API float strtof_c(const char* nptr, char** endptr); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h new file mode 100644 index 0000000000000000000000000000000000000000..9bf09f4a56e176f2153ac4ce8226a0b80313ed5a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h @@ -0,0 +1,857 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using SugaredValuePtr = std::shared_ptr; + +// The AST can contain nodes like `self`, `self.b` or `python_fn` that +// are not first-class values in the graph representation, but instead +// will be desugared based on how they are used in the AST. + +// SugaredValue is used to temporarily represent these values in a way +// that separates their behavior from the AST -> IR converter itself. +// This allows us to keep dependencies on python minimal. + +struct TORCH_API SugaredValue + : public std::enable_shared_from_this { + // what is this node? for error reporting (e.g. Module, python function) + virtual std::string kind() const = 0; + + // what can we do with this thing? + // use it as a value e.g. `this + 4` + virtual Value* asValue(const SourceRange& loc, GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a value"; + } + + // select an attribute on it, e.g. `this.field` + virtual std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw ErrorReport(loc) << "attribute lookup is not defined on " << kind(); + } + + virtual bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw ErrorReport(loc) << "attribute lookup is not defined on " << kind(); + } + + // assign an attribute on it, e.g. 
`this.field = newValue` + virtual void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) { + throw ErrorReport(loc) << "attribute assignment is not defined on " + << kind(); + } + + // use it as a vector of values, e.g. a tuple of values as return value from + // a method invocation + virtual std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const c10::optional& size_hint = {}) { + throw ErrorReport(loc) << kind() << " cannot be used as a tuple"; + } + + // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API + virtual SugaredValuePtr asTupleValue( + const SourceRange& loc, + GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue"; + } + + virtual std::vector> asType( + const SourceRange& loc, + Method& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a type"; + } + + // call it like a function, e.g. `outputs = this(inputs)` + virtual std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + // note: names for args will be 'argument 0', 'argument 1', etc.. + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) { + // n_binders is always set to the number of variables an expression is + // syntactically bound to: + // a = foo() # 1 binder (note in this case the single binder might be a + // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0 + // binders + // + // In subexpressions, like bar() in foo(bar()), n_binders is always set to + // 1. n_binders is used as a hint to subexpressions to determine how many + // values they should return when that number is ambiguous statically. In + // particular it is currently used to decide how many tensors a call to a + // python function will return. It is only a hint, functions do not have to + // check that n_binders match the number of things they are returning, the + // assignment logic will do that anyway. + + throw ErrorReport(loc) << "cannot call a " << kind(); + } + + // This function is called when to convert a SugaredValue to its iterator. + // For example, when iterating through a Dict we iterate over its keys + virtual std::shared_ptr iter( + const SourceRange& loc, + GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as an iterable"; + } + + // If we are iterating over a Sugared Value and it returns a value from this + // function, then we emit an unrolled loop over the variable. This allows us + // to support containers of Heterogenous types, like Module Containers & + // Tuples + virtual c10::optional staticLen() { + return c10::nullopt; + } + + // When iterating over this SugaredValue, should we emit the for loop as an + // unrolled loop. + bool shouldEmitUnrolled() { + return staticLen() != c10::nullopt; + } + + // return length of this thing, if not then it can't be iterated. + // If it does not have a statically-determinable length, then it cannot + // be iterated over with a modulelist. 
If it does it must return a constant + // Value * + virtual Value* len(const SourceRange& loc, GraphFunction& m) { + throw ErrorReport(loc) << "'" << kind() << "'" + << " object is not iterable"; + } + + // expression for ith elemement for iterable value + virtual std::shared_ptr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) { + throw ErrorReport(loc) << "'" << kind() << "'" + << " object is not subscriptable"; + } + + virtual ~SugaredValue() = default; +}; + +// most things in the environment are just simple value types +// and not special python syntax sugar types +struct TORCH_API SimpleValue : public SugaredValue { + SimpleValue(Value* value) : value_(value) {} + std::string kind() const override { + std::stringstream ss; + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + ss << "value of type '" << value_->type()->annotation_str() << "'"; + return ss.str(); + } + Value* asValue(const SourceRange& range, GraphFunction& m) override { + return value_; + } + std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const c10::optional& size_hint = {}) override; + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) override; + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + // note: names for args will be 'argument 0', 'argument 1', etc.. + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override; + + Value* getValue() const { + return value_; + } + + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + + private: + Value* value_; +}; + +struct TORCH_API BuiltinFunction : public SugaredValue { + BuiltinFunction(Symbol symbol, c10::optional self) + : symbol(symbol), self(std::move(self)) {} + + // The symbol of the function (e.g. `aten::relu`). + Symbol symbol; + + // if this is method, then this is the self argument. + c10::optional self; + std::string kind() const override { + return "builtin"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + // try to create this builtin but if it doesn't exist or the self argument + // cannot possibly match, then return nullptr. 
Use in situations where it is + // not clear if it is a valid builtin + static std::shared_ptr tryCreate( + Symbol symbol, + c10::optional self); +}; + +struct TORCH_API SugaredTupleValue : public SugaredValue { + explicit SugaredTupleValue(std::vector> tup) + : tup_(std::move(tup)){}; + + std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const c10::optional& size_hint = {}) override { + return tup_; + }; + + Value* asValue(const SourceRange& loc, GraphFunction& m) override { + std::vector vec; + vec.reserve(tup_.size()); + for (const auto& sv : tup_) { + vec.push_back(sv->asValue(loc, m)); + } + Graph& g = *m.graph(); + return g.insertNode(g.createTuple(vec))->output(); + } + + std::string kind() const override { + return "Tuple"; + } + + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override { + if (!(idx->type()->cast() && toIValue(idx))) { + throw ErrorReport(loc) + << "Expected integer literal for index but got a variable or non-integer. " + << "ModuleList/Sequential indexing is only supported with integer literals. " + << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. " + << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'"; + } + auto index = toIValue(idx)->toInt(); + int64_t adj_index = + (index < 0) ? index + static_cast(tup_.size()) : index; + if (!(adj_index >= 0 && adj_index < static_cast(tup_.size()))) { + throw ErrorReport(loc) + << "Index " << index << " out of range of length " << tup_.size(); + } + return tup_.at(adj_index); + } + + // This function is called when a SugaredValue is used to convert a + // SugaredValue to its iterator. For example, when iterating through a Dict we + // iterate over its keys + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override { + return shared_from_this(); + }; + + // Because this is used to contain SugaredValues of Heterogenous types, + // we define staticLen() so that when this is iterated over it is emitted + // as an unrolled loop. + c10::optional staticLen() override { + return static_cast(tup_.size()); + } + + std::vector> tup_; +}; + +struct TORCH_API BuiltinModule : public SugaredValue { + BuiltinModule(std::string name, c10::optional version = at::nullopt) + : name(std::move(name)), version(version) {} + + std::string kind() const override { + return "builtin module"; + } + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override { + if (field == "autograd") { + // When refering torch.autograd, it is also considered to be a + // BuiltinModule and we will dispatch to the aten operators for the + // methods under its module. + return std::make_shared("aten", version); + } + + auto sym = Symbol::fromQualString(name + "::" + field); + return std::make_shared(sym, c10::nullopt); + } + + private: + std::string name; + // when we add operator versioning, emit this op as it exising at 'version' + // if not set, use the latest version + c10::optional version; +}; + +// Represents a class, analagous to `int` or `dict`. 
Instances of classes, +// like `1` or `{"foo": 5}`, are represented as SimpleValues +struct TORCH_API ClassValue : public SugaredValue { + explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {} + + // Call the type's constructor, as in: + // n = Foo(constructor_arg) + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + std::string kind() const override { + return type_->str(); + } + + ClassTypePtr type_; +}; + +struct TORCH_API NamedTupleConstructor : public SugaredValue { + explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {} + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::string kind() const override { + return type_->str(); + } + + TupleTypePtr type_; +}; + +struct FunctionValue : public SugaredValue { + FunctionValue(Function* callee) : callees_({callee}) {} + FunctionValue(const StrongFunctionPtr& p) + : callees_({p.function_}), cu_(p.cu_) {} + FunctionValue(const std::vector& callees) { + for (const StrongFunctionPtr& callee : callees) { + cu_ = cu_ ? cu_ : callee.cu_; + TORCH_INTERNAL_ASSERT(callee.cu_ == cu_); + callees_.push_back(callee.function_); + } + } + + std::string kind() const override { + return "function"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector schemas; + for (Function* callee : callees_) { + try { + callee->ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw ErrorReport(loc) + << " function '" << callee->name() << "' is called recursively. 
" + << "Recursive calls are not supported"; + } + schemas.push_back(&callee->getSchema()); + } + auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs); + Value* output = + f.graph()->insertFunctionCall(callees_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + const std::vector& callees() { + return callees_; + } + + private: + std::vector callees_; + // TODO holding this thing is creepy + std::shared_ptr cu_; +}; + +struct TORCH_API ClosureValue : public SugaredValue { + ClosureValue(Value* value) : value_(value) { + TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure); + } + std::string kind() const override { + return "closure"; + } + Value* asValue(const SourceRange& range, GraphFunction& m) override { + return value_; + } + Value* value_; +}; + +// defines how a method obtained from a module/class/interface behaves in script +struct MethodValue : public SugaredValue { + MethodValue(Value* self, std::vector method_names) + : self_(self), method_names_(std::move(method_names)) {} + MethodValue(Value* self, std::string method_name) + : MethodValue(self, std::vector({std::move(method_name)})) {} + + std::string kind() const override { + return "method"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector argsWithSelf = {self_}; + argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end()); + std::vector schemas; + for (const std::string& method_name : method_names_) { + if (auto class_type = self_->type()->cast()) { + Function& method = class_type->getMethod(method_name); + try { + method.ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw ErrorReport(loc) + << " method '" << method.name() << "' is called recursively. 
" + << "Recursive calls are not supported"; + } + schemas.push_back(&method.getSchema()); + } else if (auto interface_type = self_->type()->cast()) { + schemas.push_back(interface_type->getMethod(method_name)); + } else { + TORCH_INTERNAL_ASSERT( + false, "method constructed that is not a class or interface"); + } + } + auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs); + Value* output = + f.graph()->insertMethodCall(method_names_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + private: + Value* self_; + std::vector method_names_; +}; + +struct TORCH_API PrintValue : public SugaredValue { + std::string kind() const override { + return "print"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; +}; + +// expressions like int(x) +// these are the same as call prim::Int or equivalent except it +// is a noop when the input is a subtype of 'type' +struct TORCH_API CastValue : public BuiltinFunction { + CastValue(TypePtr type, c10::Symbol method) + : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {} + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + if (args.size() == 1 && kwargs.empty()) { + auto len_op = std::make_shared(aten::len, at::nullopt); + auto gt_op = std::make_shared(aten::gt, at::nullopt); + auto zero = m.graph()->insertConstant(0); + + auto v = args[0].value(*m.graph()); + if (v->type()->isSubtypeOf(*type_)) { + return std::make_shared(v); + } else if ( + *type_ == *BoolType::get() && + (v->type()->isSubtypeOf(*AnyListType::get()) || + v->type()->isSubtypeOf(*StringType::get()) || + v->type()->cast())) { + auto len = len_op->call(loc, m, {v}, {}, 1); + return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1); + } + } + return BuiltinFunction::call(loc, m, args, kwargs, n_binders); + } + + private: + TypePtr type_; +}; + +struct TORCH_API TensorCastValue : public SugaredValue { + TensorCastValue(at::ScalarType type, NamedValue self) + : dtype_(type), self_(std::move(self)) {} + + std::string kind() const override { + return "Cast"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty()); + Value* dtype_const = m.graph()->insertConstant(dtype_, loc); + std::vector kwargs_{ + self_, NamedValue(loc, "dtype", dtype_const)}; + Value* casted_val = m.graph()->insert( + /*opname=*/Symbol::fromQualString("aten::to"), + /*args=*/args, + /*kwargs=*/kwargs_, + /*range=*/loc); + return std::make_shared(casted_val); + } + + at::ScalarType dtype_; + NamedValue self_; +}; + +// builtins operators and functions that call a method if it exists +// on a class type, like 'len(x)' and 'x + y' +struct TORCH_API MagicMethod : public SugaredValue { + MagicMethod(std::string desugared_name, SugaredValuePtr base) + : base_value_(std::move(base)), + desugared_name_(std::move(desugared_name)) {} + + std::string kind() const override { + return desugared_name_; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + private: + SugaredValuePtr base_value_; + std::string desugared_name_; +}; + +// things that look like function applications, but +// perform non-standard 
evaluation are represented +// with SpecialFormValues, e.g. +// isinstance(x, int) +// fork(fn) +// annotate(int, 3) +// The implementation of each value is handled by a case inside emitApplyExpr +struct TORCH_API SpecialFormValue : public SugaredValue { + SpecialFormValue(Symbol form) : form_(form) {} + std::string kind() const override { + return form_.toUnqualString(); + } + Symbol form() const { + return form_; + } + static std::shared_ptr create(Symbol form) { + return std::make_shared(form); + } + + private: + Symbol form_; +}; + +struct TORCH_API LegacyTensorConstructor : public SpecialFormValue { + LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device) + : SpecialFormValue(form), device_(device), dtype_(dtype) {} + + static std::shared_ptr create( + Symbol form, + at::ScalarType dtype, + at::Device device) { + return std::make_shared(form, dtype, device); + } + at::ScalarType dtype() const { + return dtype_; + } + + private: + at::Device device_; + at::ScalarType dtype_; +}; + +// matched against for special handling of range expressions +struct TORCH_API RangeValue : SugaredValue { + RangeValue( + const SourceRange& loc, + GraphFunction& m, + std::vector input, + c10::optional static_len = c10::nullopt); + + std::string kind() const override { + return "range"; + } + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override; + + // When Range is instantiated via enumerate(iterable_with_static_len), + // then it takes the static length of the iterable + c10::optional staticLen() override { + return static_len_; + } + + private: + Value* start_{}; + Value* end_{}; + Value* step_{}; + // a flag to determine if it's a simple range() call with only end_ from + // arguments If true, we will not insert length calculation and index + // derivation nodes to simplify the graph and enable more possible + // optimizations + bool has_only_end_{}; + c10::optional static_len_; +}; + +// Specialized Tree structure to matched against for special handling +// of builtin functions iterables expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. 
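+//
+// Hedged sketch (assumed usage; a_sv and enumerate_sv stand in for already
+// desugared loop operands): the emitter builds the tree from the children and
+// then reads its leaves, roughly
+//
+//   IterableTree tree(range, m, {a_sv, enumerate_sv});
+//   auto leaves = tree.get_base_iterables(); // flat SimpleValue/RangeValue list
+//   Value* trip_count = tree.len(range, m);  // derived from the leaves' len()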
+struct TORCH_API IterableTree : SugaredValue { + IterableTree() = default; + IterableTree( + const SourceRange& range, + GraphFunction& m, + at::ArrayRef children) { + for (const auto& child : children) { + addChild(range, m, child); + } + } + std::string kind() const override { + return "iterabletree"; + } + + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override { + return shared_from_this(); + } + + void addChild( + const SourceRange& range, + GraphFunction& m, + const SugaredValuePtr& iter_value); + + std::vector get_children() { + return children_; + } + + // If this iterable contains a ModuleList or Tuple, then it will have a + // static length, and we will emit it as an unrolled for loop. + c10::optional staticLen() override { + return unroll_length_; + } + + // given a IterableTree node, get all the base iterables/leaves under the + // IterableTree node. This enables + // us to get all the basic SugaredValues that contains valid loop information + // with len() and getitem() + std::vector get_base_iterables(); + + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + + private: + c10::optional unroll_length_ = c10::nullopt; + std::vector children_; +}; + +static inline std::vector toValues( + Graph& g, + at::ArrayRef nvs) { + return fmap(nvs, [&](const NamedValue& v) { return v.value(g); }); +} + +struct SimpleSelf : public Self { + explicit SimpleSelf(ClassTypePtr classType) + : Self(), classType_(std::move(classType)) {} + std::shared_ptr makeSugared(Value* v) const override { + v->setType(classType_); + return std::make_shared(v); + } + ClassTypePtr getClassType() const override { + return classType_; + } + + private: + ClassTypePtr classType_; +}; + +// This is not a SimpleValue so it can not pass through the code paths that +// expect a SimpleValue as a sugared value. 
+struct TORCH_API ExceptionMessageValue : public SugaredValue { + explicit ExceptionMessageValue( + Value* value, + Value* qualified_class_name = nullptr) + : value_(value), qualified_class_name_(qualified_class_name) {} + + std::string kind() const override { + return "exception message"; + } + + Value* getValue() { + return value_; + } + + // qualified python class name + Value* getQualifiedClassName() { + return qualified_class_name_; + } + + private: + Value* value_; + Value* qualified_class_name_; +}; + +struct TORCH_API ExceptionValue : public SugaredValue { + explicit ExceptionValue(std::string message) : message_(std::move(message)) {} + + std::string kind() const override { + return "exception"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef /*attributes*/, + size_t /*n_binders*/) override { + auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc); + for (auto& input : args) { + auto input_str = input.value(*m.graph()); + if (!input_str->type()->isSubtypeOf(*StringType::get())) { + input_str = + emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {}); + } + exception_message = emitBuiltinCall( + loc, *m.graph(), aten::add, {exception_message, input_str}, {}); + } + return std::make_shared(exception_message); + } + + std::string message_; +}; + +struct TORCH_API SugaredEnumClass : public SugaredValue { + explicit SugaredEnumClass(EnumTypePtr enum_type) + : enum_type_(std::move(enum_type)) {} + + std::string kind() const override { + return "EnumClass"; + } + + SugaredValuePtr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override; + + private: + EnumTypePtr enum_type_; +}; + +struct TORCH_API SliceValue : public SugaredValue { + explicit SliceValue(Value* start, Value* stop, Value* step) + : start_(start), stop_(stop), step_(step) {} + + std::string kind() const override { + return "Python slice value"; + } + + Value* start() { + return start_; + }; + Value* stop() { + return stop_; + }; + Value* step() { + return step_; + }; + + private: + Value* start_; + Value* stop_; + Value* step_; +}; + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h new file mode 100644 index 0000000000000000000000000000000000000000..f265d57b649dda335d6c4124d401d229154c8bc4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h @@ -0,0 +1,412 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +namespace torch::jit { +struct Node; +struct Value; +struct Graph; +struct Module; + +namespace tracer { + +using ::c10::ivalue::Shared; + +using ::c10::IValue; +using ::c10::ivalue::Future; + +using ::c10::ArrayRef; +using ::c10::TupleType; +using ::c10::TupleTypePtr; +using ::c10::ivalue::ConstantString; + +using torch::autograd::Variable; +using variable_list = std::vector; + +TORCH_API std::atomic& getTracerStateWarnMode(); + +struct TORCH_API TracingState + : public std::enable_shared_from_this { + TracingState(); + ~TracingState(); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr graph; + // 
NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool warn = getTracerStateWarnMode(); + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool strict = true; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool force_outplace = false; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::function lookup_var_name_fn = + [](const Variable& var) { return ""; }; + + void enterFrame() { + env_stack.emplace_back(); + } + + void leaveFrame() { + env_stack.pop_back(); + } + + void setValue(const IValue& v, Value* value); + void delValue(const IValue& var); + Value* getValue(const IValue& var); + Value* getOutput(const IValue& var, size_t i); + bool hasValue(const IValue& var) const; + + Node* createNode(c10::Symbol op_name, size_t num_outputs); + void insertNode(Node* node); + + private: + using WeakIValue = at::WeakIValue; + + struct WeakIValueHasher { + size_t operator()(const WeakIValue& t) const { + return t.hash(); + } + }; + + struct WeakIValueEq { + bool operator()(const WeakIValue& t1, const WeakIValue& t2) const { + return t1.isSameIdentity(t2); + } + }; + + using Frame = + std::unordered_map; + std::vector env_stack; +}; + +// This is meant to be used as a thread local place, where we can store extra +// info that gets lost when we call into ATen from Python bindings. One example +// for when this happens is when we get an IntArrayRef argument with e.g. sizes +// for view. When tracing, those might be tensors, which let us encode extra +// data dependencies, but once they get to the ATen call where we actually have +// the tracing logic, they get converted into a raw IntArrayRef, and we loose +// all information. To prevent this, we temporarily stash it in here. +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct ArgumentStash { + struct IntArrayRefTrace : std::vector { + IntArrayRefTrace(int size) : std::vector(size, nullptr) {} + }; + + static bool empty() { + return stash.intlists.empty(); + } + + TORCH_API static void stashIntArrayRefElem( + const std::string& arg_name, + size_t size, + size_t idx, + const Variable& var); + + static bool hasIntArrayRef(const std::string& arg_name) { + return stash.intlists.count(arg_name) > 0; + } + + static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) { + auto info = std::move(stash.intlists.at(arg_name)); + stash.intlists.erase(arg_name); + return info; + } + + // Value stashing: Use these methods to stash arguments which correspond + // to regular Value*'s in the graph. i.e. they don't require special + // handling like in the case of IntArrayRefs + TORCH_API static void stashValue( + const std::string& arg_name, + size_t idx, + const Variable& var, + const c10::TypePtr& type = nullptr); + + static bool hasValue(const std::string& arg_name) { + return stash.values.count(arg_name) > 0; + } + + static Value* popValue(const std::string& arg_name) { + auto info = stash.values.at(arg_name); + stash.values.erase(arg_name); + return info; + } + + private: + static thread_local ArgumentStash stash; + std::unordered_map intlists; + std::unordered_map values; +}; + +// Retrieve or set the current tracing state. Returns a nullptr if tracing is +// disabled. 
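+// Hedged guard-pattern sketch (illustrative, not mandated by this header;
+// "aten::relu" is just an example symbol):
+//
+//   if (torch::jit::tracer::isTracing()) {
+//     const auto& state = torch::jit::tracer::getTracingState();
+//     Node* n = state->createNode(c10::Symbol::fromQualString("aten::relu"), 1);
+//     state->insertNode(n);
+//   }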
+TORCH_API const std::shared_ptr& getTracingState(); +TORCH_API void setTracingState(std::shared_ptr state); + +inline bool isTracing() { + return static_cast(getTracingState()); +} + +using warn_fn_type = void (*)(const std::string& msg); +TORCH_API extern const char* WARN_PYTHON_DATAFLOW; +TORCH_API extern const char* WARN_CONSTRUCTOR; +TORCH_API extern const char* WARN_RESIZE; +TORCH_API extern const char* STRICT_TRACER_MSG; +TORCH_API void _do_warn(const char* _reason, const char* _kind); +inline void warn(const char* _reason, const char* _kind = nullptr) { + if (const auto& state = getTracingState()) { + if (!state->warn) + return; + _do_warn(_reason, _kind); + } +} +TORCH_API void setWarn(warn_fn_type fn); + +struct TORCH_API NoWarn { + NoWarn() : state(getTracingState()) { + if (state) { + prev = state->warn; + state->warn = false; + } + } + ~NoWarn() { + if (state) { + state->warn = prev; + } + } + std::shared_ptr state; + bool prev{false}; +}; + +struct WithNestedTracingFrame { + WithNestedTracingFrame() { + getTracingState()->enterFrame(); + } + + ~WithNestedTracingFrame() { + getTracingState()->leaveFrame(); + } +}; +TORCH_API void recordSourceLocation(Node* n); +TORCH_API void setRecordSourceLocation(void (*v)(Node*)); + +TORCH_API std::vector pythonCallstack(); +TORCH_API void setPythonCallstack(std::vector (*v)()); + +// Having finished adding a new 'node' to the graph IR 'setValueTrace' +// associates this node with an output variable, so that further operations +// involving this variable know which node in the IR to reference. +TORCH_API void setValueTrace(const IValue& v, Value* value); + +TORCH_API void delValueTrace(const IValue& var); + +TORCH_API std::function pauseTracing(); + +TORCH_API Value* getValueTrace(const IValue& var); + +TORCH_API std::pair, Stack> trace( + Stack inputs, + const std::function& traced_fn, + std::function var_name_lookup_fn, + bool strict = true, + bool force_outplace = false, + Module* self = nullptr, + const std::vector& argument_names = {}); + +TORCH_API void abandon(); + +// NB: those serve both as an intermediate steps in addInputs below, +// as well as the overloads that terminate template recursion +TORCH_API void addInputs(Node* n, const char* name, int64_t value); +TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs(Node* n, const char* name, bool value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, double value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); +TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const at::OptionalIntArrayRef& opt_value); +TORCH_API void addInputs( + Node* n, + const char* name, + const 
at::OptionalSymIntArrayRef& opt_value); +TORCH_API void addInputs( + Node* n, + const char* name, + ArrayRef value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + std::vector value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + at::ITensorListRef value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + const List>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + ArrayRef> value, + const c10::ClassTypePtr& class_type); +TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::string_view value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, at::Device value); +TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream); +TORCH_API void addInputs(Node* n, const char* name, at::Layout value); +TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); + +inline void addInputs( + Node* n, + const char* name, + const std::vector& value) { + AT_ERROR("Tracing a list of bool type is currently not supported!"); +} + +template +void addInputs(Node* n, const char* name, ArrayRef value) { + AT_ERROR("Tracing a list of arbitrary type is currently not supported!"); +} +template +void addInputs( + Node* n, + const char* name, + const std::unordered_map& value) { + AT_ERROR("Tracing a dict of arbitrary types is currently not supported!"); +} + +template +void addInputs(Node* n, const char* name, std::array value) { + throw std::runtime_error( + "Found an unsupported argument type in the JIT tracer. File a bug report."); +} + +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::intrusive_ptr& obj); + +TORCH_API void ensureUniqueIfOutOfPlaced( + const char* name, + const at::Tensor& tensor); +TORCH_API void ensureUniqueIfOutOfPlaced( + const char* name, + const c10::optional& tensor); + +template < + typename T, + typename = torch::enable_if_t< + (!std::is_convertible_v, at::TensorList> && + !std::is_convertible_v, c10::List> && + !std::is_convertible_v, at::Tensor> && + !std::is_convertible_v< + torch::decay_t, + c10::intrusive_ptr>)>> +void addOutput(Node* node, T&&) { + AT_ERROR( + "Found an unsupported argument type ", + c10::demangle_type(), + " in the JIT tracer. 
File a bug report."); +} +TORCH_API void addOutput(Node* node, const at::Tensor& tensor); +TORCH_API void setOutput(Value* value, const at::Tensor& output); +TORCH_API void addOutput(Node* node, const std::vector& list); +TORCH_API void addOutput(Node* node, const c10::List& list); +TORCH_API void addOutput( + Node* node, + const c10::intrusive_ptr& output); + +TORCH_API autograd::Variable getSizeOf( + const autograd::Variable& var, + int64_t dim); + +TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var); + +} // namespace tracer +} // namespace torch::jit diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h new file mode 100644 index 0000000000000000000000000000000000000000..33a1223581866dcb10df6fcddd677b975217e1d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h @@ -0,0 +1,220 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. + +struct Tree; +using TreeRef = c10::intrusive_ptr; +using TreeList = at::SmallVector; + +struct Tree : c10::intrusive_ptr_target { + Tree(int kind_) : kind_(kind_) {} + int kind() const { + return kind_; + } + virtual bool isAtom() const { + return true; + } + virtual const SourceRange& range() const { + throw std::runtime_error("is an Atom"); + } + virtual const std::string& stringValue() const { + throw std::runtime_error("stringValue can only be called on TK_STRING"); + } + virtual const TreeList& trees() const { + static const TreeList empty_trees = {}; + return empty_trees; + } + const TreeRef& tree(size_t i) const { + return trees().at(i); + } + virtual TreeRef map(const std::function& fn) { + (void)fn; + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return TreeRef::reclaim(this); + } + template + void match(int k, Args&... args) const { + matchD(k, "unknown", 0, args...); + } + template + void matchD(int k, const char* filename, int lineno, Args&... 
args) const { + std::initializer_list vars = {args...}; + matchNumSubtreesD(k, filename, lineno, vars.size(), true); + size_t i = 0; + for (TreeRef* v : vars) { + *v = trees()[i++]; + } + } + void matchNumSubtrees(int k, size_t expected_subtrees) { + return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false); + } + void matchNumSubtreesD( + int k, + const char* filename, + int lineno, + size_t expected_subtrees, + bool allow_more) const { + if (kind() != k) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k) + << "' but found '" << kindToString(kind()) << "'\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + if (trees().size() < expected_subtrees || + (!allow_more && trees().size() != expected_subtrees)) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expected at least " + << expected_subtrees << " subtrees, but found only " << trees().size() + << "\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + } + ~Tree() override = default; + + private: + int kind_; +}; + +struct String : public Tree { + String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {} + const std::string& stringValue() const override { + return value_; + } + template + static TreeRef create(Args&&... args) { + return c10::make_intrusive(std::forward(args)...); + } + + private: + std::string value_; +}; + +static SourceRange mergeRanges(SourceRange c, const TreeList& others) { + for (const auto& t : others) { + if (t->isAtom()) + continue; + size_t s = std::min(c.start(), t->range().start()); + size_t e = std::max(c.end(), t->range().end()); + c = SourceRange(c.source(), s, e); + } + return c; +} + +struct Compound : public Tree { + Compound(int kind, SourceRange range) + : Tree(kind), range_(std::move(range)) {} + Compound(int kind, const SourceRange& range_, TreeList&& trees_) + : Tree(kind), + range_(mergeRanges(range_, trees_)), + trees_(std::move(trees_)) {} + const TreeList& trees() const override { + return trees_; + } + static TreeRef create( + int kind, + const SourceRange& range_, + TreeList&& trees_) { + return c10::make_intrusive(kind, range_, std::move(trees_)); + } + bool isAtom() const override { + return false; + } + TreeRef map(const std::function& fn) override { + TreeList ret; + for (auto& t : trees()) { + ret.push_back(fn(t)); + } + return Compound::create(kind(), range(), std::move(ret)); + } + + const SourceRange& range() const override { + return range_; + } + + private: + SourceRange range_; + TreeList trees_; +}; + +// tree pretty printer +struct pretty_tree { + pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {} + const TreeRef& tree; + size_t col; + std::unordered_map flat_strings; + const std::string& get_flat(const TreeRef& t) { + auto it = flat_strings.find(t); + if (it != flat_strings.end()) + return it->second; + + std::stringstream out; + switch (t->kind()) { + case TK_STRING: + out << t->stringValue(); + break; + default: + out << "(" << kindToString(t->kind()); + for (const auto& e : t->trees()) { + out << " " << get_flat(e); + } + out << ")"; + break; + } + auto it_ = flat_strings.emplace(t, out.str()); + return it_.first->second; + } + void print(std::ostream& out, const TreeRef& t, int indent) { + const std::string& s = get_flat(t); + if (indent + s.size() < col || t->isAtom()) { + out << s; + return; + } + std::string k = kindToString(t->kind()); + out << "(" << k; + for (const auto& e : t->trees()) { + out << "\n" << 
std::string(indent + 2, ' '); + print(out, e, indent + 2); + } + out << ")"; + } +}; + +static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) { + t_.print(out, t_.tree, 0); + return out << std::endl; +} + +static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) { + return out << pretty_tree(t); +} + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h new file mode 100644 index 0000000000000000000000000000000000000000..a6488c92f40694332c010defcc0f46bea37e0cf5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h @@ -0,0 +1,1275 @@ +#pragma once +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// clang-format off +// TreeView provides a statically-typed way to traverse the tree, which should +// be formed according to the grammar below. +// +// A few notes on types and their aliases: +// - List is really a Tree with kind TK_LIST and elements as subtrees +// - Maybe is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T +// - Builtin types are: Ident (TK_IDENT), String (TK_STRING) +// +// Param = Param(Maybe type, Ident name) TK_PARAM +// +// Decl = Decl(List params, Maybe return_type) TK_DECL +// Def = Def(Ident name, Decl decl, List body) TK_DEF +// ClassDef = ClassDef(Ident name, TK_CLASS_DEF +// Maybe superclass, +// List body) +// +// Stmt = If(Expr cond, List true_body, List false_body) TK_IF +// | For(List targets, List iters, List body) TK_FOR +// | While(Expr cond, List body) TK_WHILE +// | Global(List idents) TK_GLOBAL +// -- NB: the only type of Expr's allowed on lhs are Var +// Or a tuple containing Var with an optional terminating Starred +// | Assign(Expr lhs, Maybe rhs, Maybe type) TK_ASSIGN +// | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN +// | Return(List values) TK_RETURN +// | ExprStmt(List expr) TK_EXPR_STMT +// | Raise(Expr expr) TK_RAISE +// | Def TK_DEF +// | With(List targets, List body) TK_WITH +// +// Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR +// | BinOp(Expr lhs, Expr rhs) +// | And TK_AND +// | Or TK_OR +// | Lt '<' +// | Gt '>' +// | Eq TK_EQ +// | Le TK_LE +// | Ge TK_GE +// | Ne TK_NE +// | Is TK_IS +// | IsNot TK_ISNOT +// | Add '+' +// | Sub '-' +// | Mul '*' +// | Div '/' +// | Mod '%' +// | MatMult '@' +// | Pow TK_POW +// | UnaryOp(Expr expr) +// | Not TK_NOT +// | USub '-' +// | Const(String value) TK_CONST +// -- NB: x.name(y) is desugared into name(x, y) +// | Apply(Ident name, List args, List kwargs) TK_APPLY +// | Select(Expr value, Ident selector) '.' 
+// | Subscript(Expr value, List subscript_exprs) TK_SUBSCRIPT +// | SliceExpr(Maybe start, Maybe end) TK_SLICE_EXPR +// | Var(Ident name) TK_VAR +// | ListLiteral(List inputs) TK_LIST_LITERAL +// | TupleLiteral(List inputs) TK_TUPLE_LITERAL +// | Starred(Expr expr) TK_STARRED +// | WithItem(Expr target, Maybe var) TK_WITH_ITEM +// -- NB: only allowed expressions are Const or List(Const) +// (List as a value, not type constructor) +// Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE +// +// AugAssignKind = +// | Add() TK_PLUS_EQ +// | Sub() TK_MINUS_EQ +// | Mul() TK_TIMES_EQ +// | Div() TK_DIV_EQ +// | Mod() TK_MOD_EQ +// + +// Each subclass of TreeView should provide: +// 1. Constructor that takes a TreeRef, and checks that it's of the right type. +// 2. Accessors that get underlying information out of the object. If they +// return subtrees, they should wrap them in appropriate views too. +// 3. Static method 'create' that creates the underlying TreeRef object +// for every TreeRef kind that has a TreeView, the parser always uses +// (e.g.) Ident::create rather than Compound::Create, this means that +// changes to the structure of Ident are always made right here rather +// than both in the parser and in this code. +// XXX: these structs should have no fields to prevent slicing when passing by value +// clang-format on +struct TreeView { + explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {} + TreeRef tree() const { + return tree_; + } + const SourceRange& range() const { + return tree_->range(); + } + operator TreeRef() const { + return tree_; + } + const TreeRef& get() const { + return tree_; + } + int kind() const { + return tree_->kind(); + } + void dump() const { + std::cout << tree_; + } + + protected: + const TreeRef& subtree(size_t i) const { + return tree_->trees().at(i); + } + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + TreeRef tree_; +}; + +template +struct ListIterator { + ListIterator(TreeList::const_iterator it) : it(it) {} + bool operator!=(const ListIterator& rhs) const { + return it != rhs.it; + } + bool operator==(const ListIterator& rhs) const { + return it == rhs.it; + } + T operator*() const { + return T(*it); + } + ListIterator& operator+=(std::ptrdiff_t n) { + it += n; + return *this; + } + ListIterator& operator++() { + ++it; + return *this; + } + ListIterator& operator--() { + --it; + return *this; + } + + private: + TreeList::const_iterator it; +}; + +template +struct List : public TreeView { + using iterator = ListIterator; + using const_iterator = ListIterator; + + List(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_LIST); + // Iterate over list to temporarily instantiate Ts that will check the type + for (const T& elem : *this) { + (void)elem; // silence unused warning + } + } + iterator begin() const { + return iterator(tree_->trees().begin()); + } + iterator end() const { + return iterator(tree_->trees().end()); + } + bool empty() const { + return tree_->trees().begin() == tree_->trees().end(); + } + T operator[](size_t i) const { + return T(subtree(i)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static List create(const SourceRange& range, const std::vector& subtrees) { + TreeList type_erased_sub{subtrees.begin(), subtrees.end()}; + return List(Compound::create(TK_LIST, range, std::move(type_erased_sub))); + } + static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) { + return List(Compound::create(TK_LIST, 
range, std::move(subtrees))); + } + size_t size() const { + return tree_->trees().size(); + } +}; + +template +struct Maybe : public TreeView { + explicit Maybe(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_OPTION); + if (tree_->trees().size() > 1) + throw ErrorReport(tree) << "Maybe trees can have at most one subtree"; + } + /* implicit */ Maybe(const T& tree) : TreeView(tree) {} + bool present() const { + return tree_->trees().size() > 0; + } + T get() const { + return T(tree_->trees().at(0)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static Maybe create(const SourceRange& range) { + return Maybe(Compound::create(TK_OPTION, range, {})); + } + static Maybe create(const SourceRange& range, const T& value) { + return Maybe(Compound::create(TK_OPTION, range, {value})); + } +}; + +struct Ident : public TreeView { + explicit Ident(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_IDENT); + } + const std::string& name() const { + return subtree(0)->stringValue(); + } + static Ident create(const SourceRange& range, std::string name) { + return Ident( + Compound::create(TK_IDENT, range, {String::create(std::move(name))})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Base types (production LHS) +//////////////////////////////////////////////////////////////////////////////// + +struct Stmt : public TreeView { + explicit Stmt(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF: + case TK_FOR: + case TK_WHILE: + case TK_GLOBAL: + case TK_ASSIGN: + case TK_AUG_ASSIGN: + case TK_RETURN: + case TK_EXPR_STMT: + case TK_RAISE: + case TK_ASSERT: + case TK_PASS: + case TK_BREAK: + case TK_DELETE: + case TK_CONTINUE: + case TK_DEF: + case TK_WITH: + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Stmt"; + } + } +}; + +struct Expr : public TreeView { + explicit Expr(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF_EXPR: + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '-': + case TK_UNARY_MINUS: + case '~': + case '*': + case TK_STARRED: + case '/': + case '%': + case TK_NOT: + case TK_CONST: + case TK_STRINGLITERAL: + case TK_TRUE: + case TK_FALSE: + case TK_NONE: + case TK_NONE_TYPE: + case TK_CAST: + case TK_APPLY: + case '.': + case TK_SUBSCRIPT: + case TK_SLICE_EXPR: + case TK_VAR: + case TK_LIST_LITERAL: + case TK_TUPLE_LITERAL: + case TK_DICT_LITERAL: + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case TK_FLOOR_DIV: + case '&': + case '^': + case '|': + case TK_LIST_COMP: + case TK_DICT_COMP: + case TK_DOTS: + case TK_IN: + case TK_WITH_ITEM: + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Expr"; + } + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Helper nodes (mostly for function arguments) +//////////////////////////////////////////////////////////////////////////////// + +struct Attribute : public TreeView { + explicit Attribute(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_ATTRIBUTE); + } + Ident name() const { + return Ident(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + static Attribute create( + const SourceRange& range, + const Ident& name, + const TreeRef& value) { + return 
Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value})); + } +}; + +struct Param : public TreeView { + explicit Param(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_PARAM); + } + static Param create( + const SourceRange& range, + const Ident& ident, + const Maybe& type, + const Maybe& def, + bool kwarg_only) { + TreeRef kwarg_only_tree = + Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {}); + return Param(Compound::create( + TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)})); + } + Ident ident() const { + return Ident(subtree(0)); + } + Maybe type() const { + return Maybe(subtree(1)); + } + Maybe defaultValue() const { + return Maybe(subtree(2)); + } + bool kwarg_only() const { + return TK_TRUE == subtree(3)->kind(); + } + Param withType(const Maybe& typ) const { + return Param::create(range(), ident(), typ, defaultValue(), kwarg_only()); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Top level definitions +//////////////////////////////////////////////////////////////////////////////// + +struct Decl : public TreeView { + explicit Decl(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DECL); + } + List params() const { + return List(subtree(0)); + } + Maybe return_type() const { + return Maybe(subtree(1)); + } + static Decl create( + const SourceRange& range, + const List& params, + const Maybe& return_type) { + return Decl(Compound::create(TK_DECL, range, {params, return_type})); + } +}; + +struct Def : public TreeView { + explicit Def(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DEF); + } + Def withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, decl(), statements()); + } + Def withDecl(const Decl& decl) const { + return create(range(), name(), decl, statements()); + } + Ident name() const { + return Ident(subtree(0)); + } + Decl decl() const { + return Decl(subtree(1)); + } + List statements() const { + return List(subtree(2)); + } + static Def create( + const SourceRange& range, + const Ident& name, + const Decl& decl, + const List& stmts) { + return Def(Compound::create(TK_DEF, range, {name, decl, stmts})); + } +}; + +// Property represents a named attribute combined with a getter and setter +// method to access and mutate that attribute. 
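The typed tree views above (Ident, Param, Decl, Def) are thin wrappers over TreeRef s-expressions: each `create()` builds the underlying `Compound` node and each accessor re-wraps one subtree. As a rough, non-authoritative sketch of how these factories compose, the snippet below assembles a tree for an empty `def f(): pass`; the `SourceRange` parameter and the helper name are placeholders, not code from this header.

```cpp
#include <torch/csrc/jit/frontend/tree_views.h>

using namespace torch::jit;

// Sketch only: builds the frontend tree for `def f(): pass`,
// assuming a SourceRange `r` obtained from some parsed Source.
Def makeEmptyDef(const SourceRange& r) {
  Ident name = Ident::create(r, "f");
  // No parameters and no declared return type.
  Decl decl = Decl::create(
      r, List<Param>::create(r, {}), Maybe<Expr>::create(r));
  // Body holds a single `pass` statement (Pass is a Stmt view defined later in this header).
  List<Stmt> body = List<Stmt>::create(r, {Pass::create(r)});
  return Def::create(r, name, decl, body);
}
```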
+struct Property : public TreeView { + explicit Property(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_PROP); + } + Ident name() const { + return Ident(subtree(0)); + } + Def getter() const { + return Def(subtree(1)); + } + Maybe setter() const { + return Maybe(subtree(2)); + } + static Property create( + const SourceRange& range, + const Ident& name, + const Def& getter, + const Maybe& setter) { + return Property(Compound::create(TK_PROP, range, {name, getter, setter})); + } +}; + +struct Assign; + +struct ClassDef : public TreeView { + explicit ClassDef(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_CLASS_DEF); + } + explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) { + tree_->match(TK_CLASS_DEF); + } + ClassDef withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, superclass(), body()); + } + Ident name() const { + return Ident(subtree(0)); + } + Maybe superclass() const { + return Maybe(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + Maybe> properties() const { + return Maybe>(subtree(3)); + } + Maybe> assigns() const { + return Maybe>(subtree(4)); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body) { + return ClassDef(Compound::create( + TK_CLASS_DEF, + range, + {name, + superclass, + body, + Maybe>::create(range), + Maybe>::create(range)})); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body, + const List& properties, + const List& assigns); +}; + +TORCH_API std::vector getUnresolvedClassAttributes( + const ClassDef& def); + +//////////////////////////////////////////////////////////////////////////////// +// Statements +//////////////////////////////////////////////////////////////////////////////// + +struct If : public Stmt { + explicit If(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_IF); + } + Expr cond() const { + return Expr(subtree(0)); + } + List trueBranch() const { + return List(subtree(1)); + } + List falseBranch() const { + return List(subtree(2)); + } + If withNewBranches( + const List& true_branch, + const List& false_branch) const { + return create(range(), cond(), true_branch, false_branch); + } + static If create( + const SourceRange& range, + const Expr& cond, + const List& true_branch, + const List& false_branch) { + return If( + Compound::create(TK_IF, range, {cond, true_branch, false_branch})); + } +}; + +struct While : public Stmt { + explicit While(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WHILE); + } + Expr cond() const { + return Expr(subtree(0)); + } + List body() const { + return List(subtree(1)); + } + static While create( + const SourceRange& range, + const Expr& cond, + const List& body) { + return While(Compound::create(TK_WHILE, range, {cond, body})); + } +}; + +struct For : public Stmt { + explicit For(const TreeRef& tree) : Stmt(tree) { + tree->match(TK_FOR); + } + List targets() const { + return List(subtree(0)); + } + List itrs() const { + return List(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + static For create( + const SourceRange& range, + const List& targets, + const List& itrs, + const List& body) { + return For(Compound::create(TK_FOR, range, {targets, itrs, body})); + } +}; + +// TODO: supports only single comprehension for now +struct ListComp : public Expr { + explicit ListComp(const TreeRef& tree) : 
Expr(tree) { + tree->match(TK_LIST_COMP); + } + Expr elt() const { + return Expr(subtree(0)); + } + Expr target() const { + return Expr(subtree(1)); + } + Expr iter() const { + return Expr(subtree(2)); + } + // TODO: no ifs for now + static ListComp create( + const SourceRange& range, + const Expr& elt, + const Expr& target, + const Expr& iter) { + return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter})); + } +}; + +// TODO: supports only single comprehension for now +struct DictComp : public Expr { + explicit DictComp(const TreeRef& tree) : Expr(tree) { + tree->match(TK_DICT_COMP); + } + Expr key() const { + return Expr(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + Expr target() const { + return Expr(subtree(2)); + } + Expr iter() const { + return Expr(subtree(3)); + } + // TODO: no ifs for now + static DictComp create( + const SourceRange& range, + const Expr& key, + const Expr& value, + const Expr& target, + const Expr& iter) { + return DictComp( + Compound::create(TK_DICT_COMP, range, {key, value, target, iter})); + } +}; + +struct Global : public Stmt { + explicit Global(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_GLOBAL); + } + List names() { + return List(subtree(0)); + } + static Global create(const SourceRange& range, const List& names) { + return Global(Compound::create(TK_GLOBAL, range, {names})); + } +}; + +struct AugAssignKind : public TreeView { + explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case '+': + case '-': + case '*': + case '/': + case '%': + case '|': + case '&': + case '^': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + return; + default: + throw ErrorReport(tree) << "is not a valid AugAssignKind"; + } + } +}; + +// Augmented assignment, like "foo += bar" +struct AugAssign : public Stmt { + explicit AugAssign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_AUG_ASSIGN); + } + static AugAssign create( + const SourceRange& range, + const Expr& lhs, + const AugAssignKind& aug_op, + const Expr& rhs) { + return AugAssign( + Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs})); + } + Expr lhs() const { + return Expr(subtree(0)); + } + int aug_op() const { + return subtree(1)->kind(); + } + Expr rhs() const { + return Expr(subtree(2)); + } +}; + +struct Assign : public Stmt { + explicit Assign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSIGN); + } + static Assign create( + const SourceRange& range, + const List& lhs, + const Maybe& rhs, + const Maybe& type) { + return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type})); + } + + List lhs_list() const { + return List(subtree(0)); + } + + Expr lhs() const { + const auto& li = lhs_list(); + TORCH_INTERNAL_ASSERT(li.size() == 1); + return *li.begin(); + } + + Maybe rhs() const { + return Maybe(subtree(1)); + } + + Maybe type() const { + return Maybe(subtree(2)); + } +}; + +struct Return : public Stmt { + explicit Return(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RETURN); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Return create(const SourceRange& range, const Expr& value) { + return Return(Compound::create(TK_RETURN, range, {value})); + } +}; + +struct Raise : public Stmt { + explicit Raise(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RAISE); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Raise create(const SourceRange& range, const Expr& expr) { + return Raise(Compound::create(TK_RAISE, range, {expr})); + } +}; + +struct Assert 
: public Stmt { + explicit Assert(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSERT); + } + Expr test() const { + return Expr(subtree(0)); + } + Maybe msg() const { + return Maybe(subtree(1)); + } + static Assert create( + const SourceRange& range, + const Expr& test, + const Maybe& msg) { + return Assert(Compound::create(TK_ASSERT, range, {test, msg})); + } +}; + +struct Pass : public Stmt { + explicit Pass(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_PASS); + } + static Pass create(const SourceRange& range) { + return Pass(Compound::create(TK_PASS, range, {})); + } +}; + +struct Dots : public Expr { + explicit Dots(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DOTS); + } + static Dots create(const SourceRange& range) { + return Dots(Compound::create(TK_DOTS, range, {})); + } +}; + +struct Break : public Stmt { + explicit Break(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_BREAK); + } + static Break create(const SourceRange& range) { + return Break(Compound::create(TK_BREAK, range, {})); + } +}; + +struct Continue : public Stmt { + explicit Continue(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_CONTINUE); + } + static Continue create(const SourceRange& range) { + return Continue(Compound::create(TK_CONTINUE, range, {})); + } +}; + +struct ExprStmt : public Stmt { + explicit ExprStmt(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_EXPR_STMT); + } + Expr expr() { + return Expr(subtree(0)); + } + static ExprStmt create(const SourceRange& range, const Expr& list) { + return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Expressions +//////////////////////////////////////////////////////////////////////////////// + +struct BinOp : public Expr { + explicit BinOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '*': + case '/': + case '-': + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case '%': + case '&': + case '^': + case '|': + case TK_FLOOR_DIV: + case TK_IN: + if (tree->trees().size() != 2) + throw ErrorReport(tree) + << "BinOp expected 2 subtrees, found " << tree->trees().size(); + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid BinOp"; + } + } + Expr lhs() const { + return Expr(subtree(0)); + } + Expr rhs() const { + return Expr(subtree(1)); + } + static BinOp create( + const SourceRange& range, + int kind, + const Expr& lhs, + const Expr& rhs) { + return BinOp(Compound::create(kind, range, {lhs, rhs})); + } +}; + +struct UnaryOp : public Expr { + explicit UnaryOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_UNARY_MINUS: + case '~': + case TK_NOT: + if (tree->trees().size() != 1) + throw ErrorReport(tree) + << "UnaryOp expected 1 subtree, found " << tree->trees().size(); + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid UnaryOp"; + } + } + static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) { + return UnaryOp(Compound::create(kind, range, {expr})); + } +}; + +struct Const : public Expr { + explicit Const(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_CONST, 1); + } + bool isFloatingPoint() const { + if (isComplex()) + return false; + + bool is_inf = subtree(0)->stringValue() == 
"inf"; + return is_inf || + subtree(0)->stringValue().find_first_of(".eE") != std::string::npos; + } + bool isIntegral() const { + return !isFloatingPoint() && !isComplex(); + } + bool isComplex() const { + return subtree(0)->stringValue().find_first_of('j') != std::string::npos; + } + int64_t asIntegral() const { + try { + // NOLINTNEXTLINE(modernize-use-nullptr) + return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0); + } catch (const std::out_of_range&) { + throw ErrorReport(range()) << "Integral constant out of range " + "(must fit in a signed 64 bit integer)"; + } + } + double asFloatingPoint() const { + // We can't pass in nullptr as the dummy pointer gets dereferenced for + // Android version of strtod_c(). + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* dummy; + return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy); + } + c10::complex asComplex() const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* dummy; + auto str = subtree(0)->stringValue(); + // Complex numbers (a+bj, where a is non-zero) are parsed as an addition + // between float/int a and a complex number "bj". When a is 0, a complex + // number bj is created as above. So, while parsing the string, we don't + // have to worry about the real component of the complex number. + auto imag = + torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy); + return c10::complex(0, imag); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static Const create(const SourceRange& range, const std::string& value) { + return Const(Compound::create(TK_CONST, range, {String::create(value)})); + } +}; + +struct StringLiteral : public Expr { + explicit StringLiteral(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_STRINGLITERAL, 1); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static StringLiteral create( + const SourceRange& range, + const std::string& value) { + return StringLiteral( + Compound::create(TK_STRINGLITERAL, range, {String::create(value)})); + } +}; + +struct Apply : public Expr { + explicit Apply(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_APPLY); + } + Expr callee() const { + return Expr(subtree(0)); + } + List inputs() const { + return List(subtree(1)); + } + List attributes() const { + return List(subtree(2)); + } + static Apply create( + const SourceRange& range, + const Expr& callee, + const List& inputs, + const List& attributes) { + return Apply( + Compound::create(TK_APPLY, range, {callee, inputs, attributes})); + } +}; + +struct Select : public Expr { + explicit Select(const TreeRef& tree) : Expr(tree) { + tree_->match('.'); + } + Expr value() const { + return Expr(subtree(0)); + } + Ident selector() const { + return Ident(subtree(1)); + } + static Select create( + const SourceRange& range, + const Expr& value, + const Ident& selector) { + return Select(Compound::create('.', range, {value, selector})); + } +}; + +struct SliceExpr : public Expr { + explicit SliceExpr(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SLICE_EXPR); + } + Maybe start() const { + return Maybe(subtree(0)); + } + Maybe end() const { + return Maybe(subtree(1)); + } + Maybe step() const { + return Maybe(subtree(2)); + } + Expr startOr(int64_t alternative) const { + const auto startOption = start(); + return startOption.present() ? 
startOption.get() : createInt(alternative); + } + Expr endOr(int64_t alternative) const { + const auto endOption = end(); + return endOption.present() ? endOption.get() : createInt(alternative); + } + Expr stepOr(int64_t alternative) const { + const auto stepOption = step(); + return stepOption.present() ? stepOption.get() : createInt(alternative); + } + static SliceExpr create( + const SourceRange& range, + const Maybe& start, + const Maybe& end, + const Maybe& step) { + return SliceExpr( + Compound::create(TK_SLICE_EXPR, range, {start, end, step})); + } + + private: + Expr createInt(int64_t value) const { + return Expr(Const::create(range(), c10::to_string(value))); + } +}; + +struct Subscript : public Expr { + explicit Subscript(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SUBSCRIPT); + } + Expr value() const { + return Expr(subtree(0)); + } + List subscript_exprs() const { + return List(subtree(1)); + } + static Subscript create( + const SourceRange& range, + const Expr& value, + const List& subscript_exprs) { + auto whole_range = SourceRange( + range.source(), range.start(), subscript_exprs.range().end() + 1); + return Subscript( + Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs})); + } +}; + +struct Var : public Expr { + explicit Var(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_VAR); + }; + Ident name() const { + return Ident(subtree(0)); + } + static Var create(const SourceRange& range, const Ident& name) { + return Var(Compound::create(TK_VAR, range, {name})); + } +}; + +// WithItem represents an item using with a WithStmt. +struct WithItem : public Expr { + explicit WithItem(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_WITH_ITEM); + } + + Expr target() const { + return Expr(subtree(0)); + } + + Maybe var() const { + return Maybe(subtree(1)); + } + + static WithItem create( + const SourceRange& range, + const Expr& target, + const Maybe& var) { + return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var})); + } +}; + +// With represents a with statement consisting of a list of with items and a +// body of statements. 
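The expression views follow the same pattern, so a small illustrative sketch (not part of this header) may help: building a tree for the expression `x[0:2]` out of `Var`, `Const`, `SliceExpr`, and `Subscript`. The `SourceRange` argument is again assumed to come from a parser, and the helper name is hypothetical.

```cpp
#include <torch/csrc/jit/frontend/tree_views.h>

using namespace torch::jit;

// Sketch only: assembles the tree for the expression `x[0:2]`.
Expr makeSliceOfX(const SourceRange& r) {
  Var x = Var::create(r, Ident::create(r, "x"));
  SliceExpr slice = SliceExpr::create(
      r,
      Maybe<Expr>::create(r, Const::create(r, "0")),  // start
      Maybe<Expr>::create(r, Const::create(r, "2")),  // end
      Maybe<Expr>::create(r));                        // no step
  return Subscript::create(r, x, List<Expr>::create(r, {slice}));
}
```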
+struct With : public Stmt { + explicit With(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WITH); + } + + List targets() const { + return List(subtree(0)); + } + + List body() const { + return List(subtree(1)); + } + + static With create( + const SourceRange& range, + const List& targets, + const List& body) { + return With(Compound::create(TK_WITH, range, {targets, body})); + } +}; + +struct TernaryIf : public Expr { + explicit TernaryIf(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_IF_EXPR, 3); + }; + Expr cond() const { + return Expr(subtree(0)); + } + Expr true_expr() const { + return Expr(subtree(1)); + } + Expr false_expr() const { + return Expr(subtree(2)); + } + static TernaryIf create( + const SourceRange& range, + const Expr& cond, + const Expr& true_expr, + const Expr& false_expr) { + return TernaryIf( + Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr})); + }; +}; + +struct ListLiteral : public Expr { + explicit ListLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_LIST_LITERAL); + } + List inputs() const { + return subtree(0); + } + static ListLiteral create( + const SourceRange& range, + const List& inputs) { + return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs})); + } +}; + +struct TupleLiteral : public Expr { + explicit TupleLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_TUPLE_LITERAL); + } + List inputs() const { + return subtree(0); + } + static TupleLiteral create( + const SourceRange& range, + const List& inputs) { + return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs})); + } +}; + +struct DictLiteral : public Expr { + explicit DictLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DICT_LITERAL); + } + List key_inputs() const { + return subtree(0); + } + List value_inputs() const { + return subtree(1); + } + static DictLiteral create( + const SourceRange& range, + const List& keys, + const List& values) { + return DictLiteral( + Compound::create(TK_DICT_LITERAL, range, {keys, values})); + } +}; + +struct Starred : public Expr { + explicit Starred(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_STARRED); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Starred create(const SourceRange& range, const Expr& expr) { + return Starred(Compound::create(TK_STARRED, range, {expr})); + } +}; + +struct Delete : public Stmt { + explicit Delete(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_DELETE); + } + List targets() const { + return subtree(0); + } + static Delete create(const SourceRange& range, const List& targets) { + return Delete(Compound::create(TK_DELETE, range, {targets})); + } +}; + +/* + * NOTE: transforming PEP 604 union into equivalent union type + * + * NOTE: Union[int, float] parses into: + * expr:(subscript + * (variable (ident Union)) + * (list + * (variable (ident int)) + * (variable (ident float)))) + * subscript + * + * NOTE: (int | float) parses into: + * expr:(| + * (variable (ident int)) + * (variable (ident float))) + * | + */ + +inline void _flatten_pep604_union( + const torch::jit::Expr& node, + std::vector* result) { + // flatten possibly nested union expressions like (int | (float | str)) + // into a flat list of expressions like [int, float, str] + if (node.kind() == '|') { + auto as_binop = torch::jit::BinOp(node); + _flatten_pep604_union(as_binop.lhs(), result); + _flatten_pep604_union(as_binop.rhs(), result); + } else { + result->push_back(node); + } +} + +inline std::vector get_pep604_union_members(const 
Expr& node) { + std::vector result; + _flatten_pep604_union(node, &result); + return result; +} + +// Flattens a PEP 604 union into a classical union. +// For example, ((x | y) | z) is transformed into Union[x, y, z]. +inline Expr pep604union_to_union(const Expr& expr) { + // noop if not a pep604 union + if (expr.kind() != '|') + return expr; + + // In order to support unions with more than 2 operands ((x|y)|z), we need to + // recursively flatten the tree of | expressions. + auto members = get_pep604_union_members(expr); + auto synthesised_union = Subscript::create( + expr.range(), + Var::create(expr.range(), Ident::create(expr.range(), "Union")), + List::create(expr.range(), members)); + return std::move(synthesised_union); +} + +} // namespace jit +} // namespace torch + +namespace std { + +template +struct iterator_traits> + : std::iterator_traits {}; + +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..380943635ea352693a4b2e19e81f6a25143e3c36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h @@ -0,0 +1,322 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +/** + * Alias analysis pass. + * + * This pass produces an AliasDb that contains aliasing and mutation + * information about the graph. Users can use this information to determine + * whether mutations to the graph are safe, i.e. they don't reorder/change + * nodes in a way that affects output. + * + * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be + * associated with one or more "alias sets". If two values share an alias set, + * that means they may alias, implying that a mutation to one value cannot be + * reordered past a use of the other. Only reordering two reads of an alias set + * is considered safe. + * + * There is a special alias set called the "wildcard set", which indicates that + * we're not sure what this value may alias. To be conservative, we consider the + * wildcard alias set as potentially aliasing any other wildcard value within + * the same type class. Whenever a value becomes contained by another value, + * such as when a Tensor is appended to a List[Tensor], the contained element + * becomes part of the wildcard set. + * + * Values that contain other mutable types, such as List[Tensor], are + * initialized as containing the Wildcard set for all contained mutable types. + * + * The AliasDb API references the idea of "mutable" vs "immutable" + * types. "Mutable" means that the object's value can change, while + * "immutable" means that the value is fixed. (For example, `List` is + * mutable, so you can add and delete elements from it. On the other + * hand, you can't modify a Tuple once you create it, making `Tuple` an + * immutable container.) + * + * `isFrozen` - if the Module is frozen then consider attributes as freshly + * created objects. Freezing API invokes alias analysis to check if they are + * mutated internally. + * + * `descendFunctionCalls` - recursively analyze function and method calls + * instead of conservative analysis. Generally analysis should be done after + * inlining so the implmentation for recursive analysis is unoptimized. 
+ */ +class AliasDb { + public: + TORCH_API explicit AliasDb( + std::shared_ptr graphi, + bool isFrozen = false, + bool descendFunctionCalls = false); + TORCH_API ~AliasDb(); + + // There are limitations to what effects the alias analysis can track. Two + // kinds of nodes may have untracked effects: + // 1. Nodes that write to a value that may alias the graph inputs (since + // the inputs can be used outside the graph). + // 2. Nodes that write to something in the wildcard set. + // + // These nodes are considered not safe to eliminate or mutate under any + // circumstances. + bool writesToWildcard(Node* n) const; + + // Does `n` write to an alias of one of the values in `vs`? + // if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks + TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const; + + // Does `a` and `b` potentially share a memory location or do either + // hold in memory any element that exists in the other + TORCH_API bool mayContainAlias(Value* a, Value* b) const; + + TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef b) const; + + // Do any values in group `a` share a memory location or hold in memory + // any element that exists in group `b` + TORCH_API bool mayContainAlias( + const at::ArrayRef a, + const at::ArrayRef b) const; + + // Do `a` and `b` potentially share a memory location? + TORCH_API bool mayAlias(const Value* a, const Value* b) const; + // Do any values in group `a` potentially share a memory location with any + // value in group `b`? i.e. may they overlap? + TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const; + + // Do any nodes write to an alias set input to `n`? + TORCH_API bool hasInputWriters(const Node* n) const; + + // Do any nodes write to an alias set output by `n`? + TORCH_API bool hasOutputWriters(const Node* n) const; + + // Do any nodes write to an alias set inputed/outputed by `n`? + TORCH_API bool hasWriters(const Node* n) const; + + // Do any nodes write to `v`s memory location? + TORCH_API bool hasWriters(const Value* v) const; + + // Is the operation in-place? i.e. doesn't write anywhere but locations it + // reads from. + TORCH_API bool isMutable(Node* n) const; + + TORCH_API bool escapesScope(const at::ArrayRef& vs) const; + + // Is it safe to change whether `a` and `b` alias each other ? + TORCH_API bool safeToChangeAliasingRelationship( + const at::ArrayRef& a, + const at::ArrayRef& b) const; + + // Move `n` (already in the graph) after `movePoint` in the topological order. + // + // Tries to preserve value dependencies, so other nodes might be moved. We + // make two guarantees about the postcondition of the node list: + // - `n` is directly after `movePoint`. + // - only nodes between `n` and `movePoint` have been moved. 
+ // + // Returns `false` if it's impossible to move `n` after `MovePoint` without + // violating dependencies, otherwise executes the move and returns `true` + TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint); + TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint); + + bool couldMoveAfterTopologically(Node* n, Node* movePoint); + bool couldMoveBeforeTopologically(Node* n, Node* movePoint); + + // For debugging: print alias db state to stdout + TORCH_API void dump() const; + TORCH_API std::string toString() const; + + // Generates a DOT (www.graphviz.org) graph representation + // + // Returns `true` if the output file was successfully generated + // + // WARNING: The output dot file path can't include shell specific notations, + // for example you can't use "~/temp/aliasdb.dot" + // (instead, use "/home/user/temp/aliasdb.dot") + // + TORCH_API bool dumpToGraphvizFile(const char* filename) const; + TORCH_API std::string toGraphviz() const; + + // Returns `true` if the given element is mutable or if it is a + // container type with an internal mutable element (e.g. + // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so + // it would be considered a "mutable type" in AliasDb) + static bool isMutableType(const Value* v); + static bool isMutableType(const TypePtr& type); + + /** + * Mutation API + * + * These methods allow you to update AliasDb in-place if you are performing + * graph mutation. + * + * WARNING: These methods should be considered INTERNAL. They do not perform + * very many correctness checks, the user is responsible for making sure they + * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with + * this. + */ + // Copy `existing`s aliasing info to `new_value`, and remove `existing`. + TORCH_API void replaceWithNewValue(Value* existing, Value* new_value); + // Copy `from`s aliasing info to `to`. + TORCH_API void copyValue(Value* from, Value* to); + // Create a new `value` that does not alias anything else. + TORCH_API void createValue(const Value* value); + + // Enable more precise treatment of prim::TupleConstruct. + void enablePreciseTupleContainerAnalysis(); + + friend struct MutationRemover; + + private: + // Helper for topologically-safe node moves. + class WorkingSet; + enum class MoveSide { BEFORE, AFTER }; + bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun); + void move(Node* toMove, Node* movePoint, MoveSide moveSide); + bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const; + + bool isMutableTypeInternal(const Value* v) const; + bool isMutableTypeInternal(const TypePtr& type) const; + + /** + * Write and read internal API + */ + // Get all the values that `n` writes to. + // NOTE: this only returns values directly written to, not aliases thereof + // + // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks + MemoryLocations getWrites(Node* n) const; + void getWritesImpl(Node* n, MemoryLocations& ret) const; + // Register the fact that `n` writes to `v`. + void registerWrite(const Value* v, Node* n, bool writeToContained = false); + // Get all the values that `n` reads from. + // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks + MemoryLocations getReads(Node* n) const; + void getReadsImpl(Node* n, MemoryLocations& ret) const; + + /** + * Wildcard methods + */ + // Register `v` as a wildcard value. + c10::optional setWildcard(const Value* v); + + // Is this a value which will not alias? 
+ bool nonAliasingValue(const Value* elem) const; + + /** + * Special analysis methods + */ + void analyze(const std::shared_ptr& graph); + void analyze(Block* block); + void analyze(Node* node); + void analyzeImpl(Node* node); + void analyzeIf(Node* node); + void analyzeLoop(Node* node); + void analyzeSubgraph(Node* node, std::shared_ptr subgraph); + void analyzeSubgraph(Node* node); + void analyzeCreator(Node* node); + void analyzeExtractor(Node* node); + void analyzeChunk(Node* node); + void analyzeBroadcastingChunk(Node* node); + void analyzeFork(Node* node); + void analyzeWait(Node* node); + void analyzeAwaitable(Node* node); + void analyzeAwaitableWait(Node* node); + void analyzeRpcAsync(Node* node); + void analyzeBatchNorm(Node* node); + void analyzeInstanceNorm(Node* node); + void analyzeGradOf(Node* node); + void analyzeSetAttr(Node* node); + void analyzeConservative(Node* node); + void analyzeContainerConstruct(Node* node); + bool tryRegisteredAnalysis(Node* node); + + /** + * Alias manipulation methods + */ + void makeAllAlias(const std::vector& values); + void makePointerTo(const Value* value, const Value* to); + TORCH_API void addToContainedElements( + const Value* element, + const Value* container); + void mapAliases(at::ArrayRef to, at::ArrayRef from); + void giveFreshAlias( + const Value* value, + bool add_wildcard_to_contained_elems = true); + Element* getOrCreateElement(const Value* value); + + const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const; + bool functionalNonEscapingListUse(const Use& use) const; + bool functionalNonEscapingTupleUse(const Use& use) const; + + std::shared_ptr graph_; + + // If the Module is frozen then consider attributes as freshly created + // objects. Freezing API invokes alias analysis to check if they are mutated + // internally. + bool isFrozen_; + + bool descend_function_calls_; + std::unordered_map>> + function_call_copies_; + + // The points-to graph that stores aliasing relationships + std::unique_ptr memoryDAGBuilder_; + std::unique_ptr memoryDAG_; + + // Mapping of values to MemoryDAG elements + ska::flat_hash_map elementMap_; + // All wildcard Elements (one for each unique mutable type) + ska::flat_hash_map wildcardIndex_; + Element* getWildcard(const TypePtr& type) const; + c10::optional tryGetOrCreateWildcard(const TypePtr& type); + void addContainedTypesToFreshElement( + Element* container_elem, + const AliasTypeSet& mut_types); + void pointUnionTypeElementToAllContainedTypes( + Element* container_elem, + const AliasTypeSet& mut_types); + + std::vector getElements(at::ArrayRef vs) const; + bool mayAliasWildcard(const Value* v) const; + bool mayAliasWildcard(const at::ArrayRef vs) const; + bool hasWriters(const at::ArrayRef& values) const; + + // Cached mapping of type ptrs to their mutable types + mutable ska::flat_hash_map mapped_mutable_types_; + + /** + * State for tracking write info. + */ + // Write registry where the analysis can record the writes as it sees them. + // This information is later denormalized into various caches to improve query + // efficiency. + struct WriteRegistry; + std::unique_ptr writeRegistry_; + + // Map of nodes to the memory locations that they write to + using TWriteIndex = ska::flat_hash_map; + c10::optional writeIndex_; + // Collection of all memory locations that are written to. 
+ c10::optional writtenToLocationsIndex_; + void buildWrittenToLocationsIndex(); + + std::unordered_set wildcards_; + + std::string getElementName(const Element* e) const; + + friend void Lint(const AliasDb* db); +}; + +// Helper check that invariants over AliasDb are maintained. +// Useful if you are using the AliasDb mutation API and want to check you did +// the right thing. +TORCH_API void Lint(const AliasDb* db); + +} // namespace jit +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h new file mode 100644 index 0000000000000000000000000000000000000000..ee24ccae35bea518bc6f912a96e22c4bd961e9d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h @@ -0,0 +1,201 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Intrusive doubly linked lists with sane reverse iterators. +// The header file is named generic_graph_node_list.h because it is ONLY +// used for Graph's Node lists, and if you want to use it for other +// things, you will have to do some refactoring. +// +// At the moment, the templated type T must support a few operations: +// +// - It must have a field: T* next_in_graph[2] = { nullptr, nullptr }; +// which are used for the intrusive linked list pointers. +// +// - It must have a method 'destroy()', which removes T from the +// list and frees a T. +// +// In practice, we are only using it with Node and const Node. 'destroy()' +// needs to be renegotiated if you want to use this somewhere else. +// +// Regardless of the iteration direction, iterators always physically point +// to the element they logically point to, rather than +// the off-by-one behavior for all standard library reverse iterators like +// std::list. + +// The list is includes two sentinel nodes, one at the beginning and one at the +// end with a circular link between them. It is an error to insert nodes after +// the end sentinel node but before the beginning node: + +// Visualization showing only the next() links: +// HEAD -> first -> second -> ... -> last -> TAIL +// ^------------------------------------------ + +// Visualization showing only the prev() links: +// HEAD <- first <- second <- ... 
<- last <- TAIL +// ------------------------------------------^ + +static constexpr int kNextDirection = 0; +static constexpr int kPrevDirection = 1; + +template +struct generic_graph_node_list; + +template +struct generic_graph_node_list_iterator; + +struct Node; +using graph_node_list = generic_graph_node_list; +using const_graph_node_list = generic_graph_node_list; +using graph_node_list_iterator = generic_graph_node_list_iterator; +using const_graph_node_list_iterator = + generic_graph_node_list_iterator; + +template +struct generic_graph_node_list_iterator { + generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {} + generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {} + generic_graph_node_list_iterator( + const generic_graph_node_list_iterator& rhs) = default; + generic_graph_node_list_iterator( + generic_graph_node_list_iterator&& rhs) noexcept = default; + generic_graph_node_list_iterator& operator=( + const generic_graph_node_list_iterator& rhs) = default; + generic_graph_node_list_iterator& operator=( + generic_graph_node_list_iterator&& rhs) noexcept = default; + T* operator*() const { + return cur; + } + T* operator->() const { + return cur; + } + generic_graph_node_list_iterator& operator++() { + AT_ASSERT(cur); + cur = cur->next_in_graph[d]; + return *this; + } + generic_graph_node_list_iterator operator++(int) { + generic_graph_node_list_iterator old = *this; + ++(*this); + return old; + } + generic_graph_node_list_iterator& operator--() { + AT_ASSERT(cur); + cur = cur->next_in_graph[reverseDir()]; + return *this; + } + generic_graph_node_list_iterator operator--(int) { + generic_graph_node_list_iterator old = *this; + --(*this); + return old; + } + + // erase cur without invalidating this iterator + // named differently from destroy so that ->/. bugs do not + // silently cause the wrong one to be called. + // iterator will point to the previous entry after call + void destroyCurrent() { + T* n = cur; + cur = cur->next_in_graph[reverseDir()]; + n->destroy(); + } + generic_graph_node_list_iterator reverse() { + return generic_graph_node_list_iterator(cur, reverseDir()); + } + + private: + int reverseDir() { + return d == kNextDirection ? 
kPrevDirection : kNextDirection; + } + T* cur; + int d; // direction 0 is forward 1 is reverse, see next_in_graph +}; + +template +struct generic_graph_node_list { + using iterator = generic_graph_node_list_iterator; + using const_iterator = generic_graph_node_list_iterator; + generic_graph_node_list_iterator begin() { + return generic_graph_node_list_iterator(head->next_in_graph[d], d); + } + generic_graph_node_list_iterator begin() const { + return generic_graph_node_list_iterator(head->next_in_graph[d], d); + } + generic_graph_node_list_iterator end() { + return generic_graph_node_list_iterator(head->next_in_graph[!d], d); + } + generic_graph_node_list_iterator end() const { + return generic_graph_node_list_iterator( + head->next_in_graph[!d], d); + } + generic_graph_node_list_iterator rbegin() { + return reverse().begin(); + } + generic_graph_node_list_iterator rbegin() const { + return reverse().begin(); + } + generic_graph_node_list_iterator rend() { + return reverse().end(); + } + generic_graph_node_list_iterator rend() const { + return reverse().end(); + } + generic_graph_node_list reverse() { + return generic_graph_node_list(head->next_in_graph[!d], !d); + } + const generic_graph_node_list reverse() const { + return generic_graph_node_list(head->next_in_graph[!d], !d); + } + T* front() { + return head->next_in_graph[d]; + } + const T* front() const { + return head->next_in_graph[d]; + } + T* back() { + return head->next_in_graph[!d]; + } + const T* back() const { + return head->next_in_graph[!d]; + } + generic_graph_node_list(T* head, int d) : head(head), d(d) {} + + private: + T* head; // both head and tail are sentinel nodes + // the first real node is head->next_in_graph[d] + // the tail sentinel is head->next_in_graph[!d] + int d; +}; + +template +static inline bool operator==( + generic_graph_node_list_iterator a, + generic_graph_node_list_iterator b) { + return *a == *b; +} + +template +static inline bool operator!=( + generic_graph_node_list_iterator a, + generic_graph_node_list_iterator b) { + return *a != *b; +} + +} // namespace jit +} // namespace torch + +namespace std { + +template +struct iterator_traits> { + using difference_type = int64_t; + using value_type = T*; + using pointer = T**; + using reference = T*&; + using iterator_category = bidirectional_iterator_tag; +}; + +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h new file mode 100644 index 0000000000000000000000000000000000000000..4781b15229cbb6eb2c0181051ccfcb12f4fed33b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h @@ -0,0 +1,1841 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// Forward declare, the real meat is in python_ir.cpp +template +class THPPointer; +using THPObjectPtr = THPPointer; +using pyobj_list = std::vector; + +namespace torch { +namespace jit { +namespace utils { +TORCH_API std::string getNodesModuleHierarchy(const Node& n); +} // namespace utils +class AliasDb; + +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::Symbol; + +using ::c10::ivalue::Shared; + +using ::c10::IValue; +using ::c10::ivalue::Future; + +using ::c10::ivalue::ConstantString; + +#define C10_USING(T) using 
::c10::T;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+#define C10_USING(T) using ::c10::T##Ptr;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+using ::c10::Type;
+using ::c10::TypeEnv;
+using ::c10::TypePtr;
+
+using ::c10::getTypePtr;
+using ::c10::MatchTypeReturn;
+using ::c10::TypeKind;
+
+using ::c10::fmap;
+
+namespace prim {
+using namespace ::c10::prim;
+}
+namespace attr {
+using namespace ::c10::attr;
+}
+namespace aten {
+using namespace ::c10::aten;
+}
+namespace cuda {
+#if !defined(USE_ROCM)
+using namespace ::c10::cuda;
+#endif
+} // namespace cuda
+
+struct Function;
+struct GraphFunction;
+struct MatchedSchema;
+
+// A Graph represents one "function" of computation.
+// It uses a simple ownership model where the graph owns all the nodes inside
+// it. All references inside the graph are raw pointers. Destroying the Graph
+// will invalidate any pointers to nodes in the graph.
+struct Graph;
+
+// Node is the base class of the IR graph. It represents one computation
+// and dependencies on a list of Values. The "prim-ops", so to speak.
+struct Node;
+
+// A Value represents an input or output to a node that is either a
+// Tensor or an opaque Handle object, as determined by type().
+struct Value;
+
+TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
+TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
+
+// A list of nodes, with inputs and outputs
+struct Block;
+
+// Each use is represented by this type, see 'Node::uses()'
+// 'user' is the consumer of the value, 'offset' is the index into
+// 'user's inputs where the producer will be found.
+struct Use {
+  Use(Node* user, size_t offset) : user(user), offset(offset) {}
+  Node* user;
+  size_t offset;
+
+  bool operator==(const Use& b) {
+    return user == b.user && offset == b.offset;
+  }
+};
+
+// Note [User node does not uniquely identify use]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// A while back, we wrote some code manipulating uses that looked like this:
+//
+//    for (auto& use : used_val->uses_) {
+//      if (use.user == this_node) {
+//        use.offset += 1;
+//        break;
+//      }
+//    }
+//
+// This code is trying to find a particular use (our node's use) to update it.
+// However, it's wrong: there may be *multiple* uses of a value %x in a node,
+// as might be the case in this IR:
+//
+//    %y = Add %x %x
+//
+// In this case, there are two uses of %x whose user is the node 'Add %x %x'.
+// So, "use induced by this node" is not a well-formed concept.
+//
+// If you are looking for "use induced by an input", it's best to use
+// findUseForInput() to get it.
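+//
+// [Illustrative sketch, editorial addition - not part of the upstream header.]
+// The ambiguity above can be made concrete using only the public Use/Value
+// API (Value::uses(), Use::user, Use::offset). Assuming `add` is the node
+// produced for `%y = Add %x %x` and `x` is the Value for %x:
+//
+//   // Counts how many input slots of `add` read from `x`; matching on
+//   // use.user alone would conflate the two distinct uses.
+//   size_t countUsesBy(Value* x, Node* add) {
+//     size_t count = 0;
+//     for (const Use& use : x->uses()) {
+//       if (use.user == add) {
+//         ++count; // offsets 0 and 1 both reach here for Add %x %x
+//       }
+//     }
+//     return count;
+//   }
+//
+// A pass that needs "the use at input i" should therefore key on the pair
+// (use.user, use.offset), or go through the node's input index directly.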
+ +// the list types are intentionally simple, but we type-def +// them here so if we need to change them, refactoring will be easier +using node_list = std::vector; +using value_list = std::vector; +using use_list = std::vector; +template +using ArrayRef = at::ArrayRef; +using NodeKind = Symbol; +using topo_position_t = int64_t; +using ValueSet = std::unordered_set; + +struct OperatorSet; +template +struct OperatorMap; + +// This is a wrapper to allow invalidating the Python object +// safely when the C++ object for a Node/Value/Block is deleted +// like much of graph, it isn't safe for different threads to +// access the same graph +template +struct Wrap { + explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {} + void clear() { + if (clear_cb) { + clear_cb(elem); + } + elem = nullptr; + } + T* elem; + void (*clear_cb)(void*); +}; + +struct Value { + AT_DISALLOW_COPY_AND_ASSIGN(Value); + Value(Node* node_, size_t offset_); + + private: + friend struct Node; + friend struct Graph; + Node* node_; + size_t offset_; + size_t unique_ = 0; // unique id + use_list uses_; + std::string unique_name_; + TypePtr type_; + // a managing wrapper for Python to allow invalidation + std::shared_ptr> wrap_; + + public: + Value* setType(TypePtr type); + TORCH_API void inferTypeFrom(const at::Tensor& output); + TORCH_API void inferTypeFrom( + const c10::intrusive_ptr& output); + const TypePtr& type() const { + AT_ASSERT(type_ != nullptr); + return type_; + } + bool requires_grad() const { + return type()->requires_grad(); + } + bool isCompleteTensor() const { + if (auto pt = type()->cast()) { + return pt->isComplete(); + } + return false; + } + TORCH_API bool mustBeNone() const; + TORCH_API bool mustNotBeNone() const; + size_t unique() const { + return unique_; + } + bool hasDebugName() const { + return !unique_name_.empty(); + } + static bool isValidName(const std::string& name); + TORCH_API Value* setDebugName(const std::string& name); + std::string debugName() const { + if (hasDebugName()) { + return unique_name_; + } + return c10::to_string(unique()); + } + TORCH_API std::string debugNameBase() const; + Node* node() { + return node_; + } + size_t offset() const { + return offset_; + } + void setOffset(size_t offset) { + offset_ = offset; + } + const Node* node() const { + return node_; + } + + /** + * @warning NEVER pass raw pointer of smart pointer managed Graph to Python. + * Check #87343 for details. + */ + Graph* owningGraph(); + const Graph* owningGraph() const; + // TODO: make this more const correct + const use_list& uses() const { + return uses_; + } + + bool hasUses() const { + return !uses().empty(); + } + + TORCH_API void replaceFirstUseWith(Value* newValue); + + // Replaces all uses of this value with 'newValue'. + // + // Given: %3 = f(%1, %2) + // %4 = g(%3) + // %5 = h(%3, %3) + // Execute: %3.replaceAllUsesWith(%6) + // Result: %3 = f(%1, %2) + // %4 = g(%6) + // %5 = h(%6, %6) + TORCH_API void replaceAllUsesWith(Value* newValue); + + // Replaces all uses of this value with 'newValue' after 'node'. + // Given: %3 = f(%1, %2) + // %4 = g(%3) + // %5 = inplace_(%3) + // %6 = h(%3, %3) + // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5) + // Result: %3 = f(%1, %2) + // %4 = g(%3) + // %5 = inplace_(%3) + // %6 = h(%5, %5) + // XXX: does not check scoping legality, consider using + // replaceAllUsesDominatedByNodeWith + TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue); + + // Replaces all uses of this value with 'newValue' that are dominated by + // 'node'. 
Given:
+  //   x = op(...).
+  //   if cond:
+  //      z = foo(..)
+  //      bar(x)
+  //   else:
+  //      print(x)
+  // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
+  // but not print(x) because print is not dominated by foo.
+  // replaceAllUsesAfterNode does not check domination, so in this example
+  // it would produce invalid IR.
+  TORCH_API void replaceAllUsesDominatedByNodeWith(
+      const Node* node,
+      Value* newValue);
+
+  TORCH_API Value* copyMetadata(Value* from);
+
+  TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
+    if (!wrap_) {
+      wrap_ = std::make_shared<Wrap<Value>>(this);
+    }
+    return wrap_;
+  }
+
+  virtual ~Value() {
+    if (wrap_) {
+      wrap_->clear();
+    }
+  }
+};
+
+struct TORCH_API Node {
+  AT_DISALLOW_COPY_AND_ASSIGN(Node);
+  friend struct Graph;
+  friend struct Block;
+  friend struct Value;
+  friend graph_node_list;
+  friend const_graph_node_list;
+  friend graph_node_list_iterator;
+  friend const_graph_node_list_iterator;
+
+ private:
+  const NodeKind kind_;
+  std::vector<Value*> inputs_;
+  std::vector<Value*> outputs_;
+  // subblocks
+  std::vector<Block*> blocks_;
+  Graph* graph_;
+  Block* owning_block_;
+  c10::optional<SourceRange> source_range_;
+  ScopePtr scope_;
+  c10::optional<InlinedCallStackPtr> callstack_;
+  // Assumes FunctionSchemas are persistent, so we don't manage their lifetime.
+  // This field is effectively a cache that's populated on attribute lookups and
+  // invalidated every time we perform an operation that could potentially
+  // change the schema. note: mutable because schema_ is effectively a cache
+  mutable const Operator* op_;
+  topo_position_t topo_position_ = 0;
+  // a managing wrapper for Python to allow invalidation
+  std::shared_ptr<Wrap<Node>> wrap_;
+  // Stores the full schema name, if the operator is historic
+  // When the operator is deprecated or the name of the operator
+  // is changed, we need to rely on this name
+  // to retrieve old schemas to successfully apply upgraders
+  // for this operator.
+  c10::optional<std::string> historic_schema_name_ = c10::nullopt;
+
+ protected:
+  Node(Graph* graph_, NodeKind kind_); // defined after graph
+ public:
+  // Each Node but Return/Param Nodes are associated with exactly one
+  // place in the Node list of the Graph. The Graph itself is a circular
+  // doubly-linked list. The Return Node is used as the sentinel for the
+  // "beginning"/"end" of the list. This means that you can tell when
+  // you've traversed the entire list without needing to worry about null
+  // pointers. `next_in_graph[0]` is the pointer to the next Node, while
+  // `next_in_graph[1]` is the pointer to the previous Node. The
+  // linked list is implemented as an array to allow the same iterator
+  // class for forward and reversed Node lists. Taken together, this
+  // list also represents a topological sort of the Nodes in the Graph.
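+  //
+  // [Illustrative sketch, editorial addition - not part of the upstream
+  // header.] Given a built Graph `g` (e.g. obtained from torch::jit::parseIR),
+  // the two directions of next_in_graph are what Graph::nodes() and its
+  // .reverse() view walk over, and what next()/prev() expose per node:
+  //
+  //   for (Node* n : g.nodes()) {           // follows next_in_graph[kNextDirection]
+  //     // visit nodes in topological order
+  //   }
+  //   for (Node* n : g.nodes().reverse()) { // follows next_in_graph[kPrevDirection]
+  //     // visit nodes in reverse topological order
+  //   }
+  //   Node* first = g.nodes().front();
+  //   Node* second = first->next();         // the same pointer the iterator chases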
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays) + Node* next_in_graph[2] = {nullptr, nullptr}; + + std::shared_ptr> wrap() { + if (!wrap_) { + wrap_ = std::make_shared>(this); + } + return wrap_; + } + + const c10::optional getHistoricSchemaName() { + return historic_schema_name_; + } + + void setHistoricSchemaName(const std::string& name) { + historic_schema_name_ = name; + } + + Node*& next() { + return next_in_graph[kNextDirection]; + } + Node*& prev() { + return next_in_graph[kPrevDirection]; + } + Node* const& next() const { + return next_in_graph[kNextDirection]; + } + Node* const& prev() const { + return next_in_graph[kPrevDirection]; + } + + NodeKind kind() const { + return kind_; + } + Node* setSourceRange(SourceRange r) { + source_range_ = std::move(r); + return this; + } + SourceRange sourceRange() const; + + /** + * @warning NEVER pass raw pointer of smart pointer managed Graph to Python. + * Check #87343 for details. + */ + Graph* owningGraph() { + return graph_; + } + const Graph* owningGraph() const { + return graph_; + } + Block* owningBlock() { + return owning_block_; + } + const Block* owningBlock() const { + return owning_block_; + } + ScopePtr scope() { + return scope_; + } + void setScope(ScopePtr scope) { + scope_ = std::move(scope); + } + std::string scopeName() const { + if (!scope_) { + return ""; + } + return scope_->namesFromRoot(); + } + + // Copies the source range, scope and callstack from another node. + Node* copyMetadata(Node* from) { + this->setSourceRange(from->sourceRange()); + this->setScope(from->scope()); + if (auto cs = from->callstack()) { + this->setCallStack(*cs); + } + return this; + } + + c10::optional callstack() const { + return callstack_; + } + void setCallStack(InlinedCallStackPtr cs) { + callstack_ = std::move(cs); + } + + // NB: This returns an ArrayRef; that means that it will + // get invalidated if you resize inputs (e.g., using addInput) + // We can't return a std::vector& because there's no + // way to soundly cast to std::vector (an insane + // implementation of std::vector could make this representationally + // different.) + at::ArrayRef inputs() { + return inputs_; + } + at::ArrayRef inputs() const { + // Vectors are not convertible in const-ness of elements, but + // raw pointers are. + return {inputs_.data(), inputs_.size()}; + } + // NB: This returns an ArrayRef; that means that it will + // get invalidated if you resize inputs (e.g., using addInput) + // We can't return a std::vector& because there's no + // way to soundly cast to std::vector (an insane + // implementation of std::vector could make this representationally + // different.) + at::ArrayRef outputs() { + return outputs_; + } + at::ArrayRef outputs() const { + // Vectors are not convertible in const-ness of elements, but + // raw pointers are. + return {outputs_.data(), outputs_.size()}; + } + Value* output(size_t i) const { + return outputs_.at(i); + } + bool hasUses() const { + for (auto o : outputs()) { + if (!o->uses().empty()) { + return true; + } + } + return false; + } + + void replaceAllUsesWith(Node* n); + + // replaces `this` with a new node with the same inputs and outputs + // but a new node symbol. does not destroy `this` + Node* replaceWithNewSymbol(Symbol new_symbol); + + // Checks if this node is dominated by `dominator` which means that + // `dominator` will always be executed before `this` and `dominator` + // is in scope of `this. 
+ bool isDominatedBy(const Node* dominator) const; + + // lots of things like chunk have a single input or single output, so we have + // a helper to make accessing it easier + Value* input() { + AT_ASSERT(inputs_.size() == 1); + return inputs_.at(0); + } + Value* output() { + AT_ASSERT(outputs_.size() == 1); + return outputs_.at(0); + } + const Value* output() const { + AT_ASSERT(outputs_.size() == 1); + return outputs_.at(0); + } + const Value* input() const { + AT_ASSERT(inputs_.size() == 1); + return inputs_.at(0); + } + // Access a particular input. This is a checked index. + Value* input(size_t i) const { + return inputs_.at(i); + } + + bool hasNamedInput(const std::string& unqualName) const; + Value* namedInput(const std::string& unqualName) const; + Value* namedInput(Symbol name) const; + + c10::optional get(Symbol name) const; + + template + c10::optional get(Symbol name) const { + if (auto v = get(name)) { + return v->template to(); + } + return c10::nullopt; + } + + // Returns true if the value of input name is statically known + bool is_constant(Symbol name) const { + return static_cast(get(name)); + } + bool mustBeNone() const; + + bool isNondeterministic() const; + bool hasSideEffects() const; + + // instructions lowered by the interpreter and not run in the optimized graph + bool notExecutedOp() const { + return kind_ == prim::Constant || kind_ == prim::profile || + kind_ == prim::profile_ivalue; + } + + // Graphs + + // Note [Topological invariant] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // We always maintain an up-to-date topological ordering of all nodes via + // the next()/prev() links. All transformations to graphs must preserve + // this topological ordering: for example, it is only valid to 'addInput' + // with an input which is topologically before the current node. + // + // Usually, it is obvious whether or not topological order is maintained; + // for example, if you are adding nodes to the end of the topsort, it's + // impossible for them to refer to inputs that are not in the topsort. + // If it is not obvious, please comment accordingly. + + // Add 'node' as an input to 'this' at the end of existing + // arguments. Returns the added node for ease of chaining. + // + // Given: %3 = f(%1, %2) + // Execute: %3.addInput(%4) + // Result: %3 = f(%1, %2, %4) + Value* addInput(Value* value); + + // Add 'value' as an input to 'this' at the specified position in the + // arguments. Returns the added value for ease of chaining. + Value* insertInput(size_t i, Value* value); + + // Replace the input of 'this' at position 'i' with + // 'newValue', returning the old node. + // + // Given: %3 = f(%1, %2) + // Execute: %3.replaceInput(1, %4) + // Result: %3 = f(%1, %4) + Value* replaceInput(size_t i, Value* newValue); + + // Replace all occurrences of 'from' in the inputs of this + // node with 'to'. Corresponds to llvm's replaceUsesOfWith. + // + // Given: %3 = f(%1, %2, %1) + // Execute: %3.replaceInputWith(%1, %4) + // Result: %3 = f(%4, %2, %4) + void replaceInputWith(Value* from, Value* to); + + Value* addOutput(); + + Value* insertOutput(size_t i); + + void eraseOutput(size_t i); + + Block* addBlock(); + void eraseBlock(size_t i); + + // Each Node can have a list of subblocks. These are used to define structured + // nested control flow operators such as If and Loop. 
+  // The meaning of a block is specific to the kind of node it is in, but
+  // all blocks share these semantics:
+  // * Nested lexical scoping: If a node 'Parent' has a subblock which contains
+  //   a node 'Child', Child can use any value that was in scope for the Parent
+  //   node in addition to any values defined before 'Child' in the subblock.
+  // * The inputs to the block are in scope for the duration of the block
+  // * the outputs of the Parent node are not in scope for the subblocks
+  // Typically the inputs to a block that represents control flow act as
+  // the equivalent of phi-nodes in standard SSA form,
+  // defining a new Value to represent any term that has multiple
+  // definitions depending on how control flowed. Outputs of the node containing
+  // control flow serve a similar purpose, defining new values for variables
+  // that would have different definitions depending on which way control
+  // flowed.
+
+  at::ArrayRef<Block*> blocks() {
+    return blocks_;
+  }
+  at::ArrayRef<const Block*> blocks() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {blocks_.data(), blocks_.size()};
+  }
+
+  // Is 'this' before 'n' in the topological order?
+  bool isBefore(const Node* n) const;
+
+  // Is 'this' after 'n' in the topological order?
+  bool isAfter(const Node* n) const;
+
+  // Insert unattached 'this' node before 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertBefore(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %5 = h(%1)
+  //          %4 = g(%3)
+  Node* insertBefore(Node* n);
+
+  // Insert unattached 'this' node after 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertAfter(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = h(%1)
+  Node* insertAfter(Node* n);
+
+  // Move 'this' (already in the graph) after 'n' in the topological order.
+  //
+  // NOTE: Does not check that value dependencies are preserved, see
+  //   AliasDb::moveAfterTopologicallyValid
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.moveAfter(%3)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  //
+  void moveAfter(Node* n);
+
+  // Move a node 'n' (already in the graph) before 'this' in the topological
+  // order.
+  //
+  // NOTE: Does not check that value dependencies are preserved, see
+  //   AliasDb::moveBeforeTopologicallyValid
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %3.moveBefore(%2)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  void moveBefore(Node* n);
+
+  // Remove the input at 'i' from this node.
+  //
+  // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
+  // removeInput.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeInput(1)
+  // Result:  %3 = f(%1)
+  void removeInput(size_t i);
+
+  // Remove all inputs from a node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeAllInputs()
+  // Result:  %3 = f()
+  void removeAllInputs();
+
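+  // [Illustrative sketch, editorial addition - not part of the upstream
+  // header.] destroy() (declared further below) pairs with the node-list
+  // iterator's destroyCurrent() when erasing nodes while walking a graph `g`;
+  // the iterator stays valid because it steps back to the previous entry:
+  //
+  //   for (auto it = g.nodes().begin(); it != g.nodes().end(); ++it) {
+  //     if (it->kind() == prim::Constant && !it->hasUses()) {
+  //       it.destroyCurrent(); // erases *it without invalidating `it`
+  //     }
+  //   }
+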
+  // Remove all outputs from a node.
+  //
+  // Given:   %1, %2 = f()
+  // Execute: removeAllOutputs()
+  // Result:  = f()
+  void removeAllOutputs();
+
+  // Rearrange the ordering of inputs or outputs of a node
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.permuteInputs({1, 0})
+  // Result:  %3 = f(%2, %1)
+  // Each index must appear exactly once
+  void permuteInputs(const std::vector<size_t>& new_inputs);
+  void permuteOutputs(const std::vector<size_t>& new_inputs);
+
+  // iterators of the node list starting at this node
+  // useful for resuming a search starting at this node
+  inline graph_node_list_iterator iterator() {
+    return {this, 0};
+  }
+  inline graph_node_list_iterator reverseIterator() {
+    return iterator().reverse();
+  }
+  inline const_graph_node_list_iterator iterator() const {
+    return {this, 0};
+  }
+  inline const_graph_node_list_iterator reverseIterator() const {
+    return iterator().reverse();
+  }
+
+  // Remove 'this' from the instruction list and deallocate it.
+  //
+  // Invariant: no outputs of 'this' may have any uses.
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.destroy()
+  // Result:  %3 = g(%1)
+  void destroy();
+
+  // Dynamically cast this node to the subclass indicated by the
+  // template variable, returning nullptr if the cast is invalid.
+  //
+  // Example usage: if(auto s = n.cast